// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2018 Intel Corporation. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/pm_runtime.h>
#include <linux/etherdevice.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include <linux/i2c.h>
#include "igb.h"

#define MAJ 5
#define MIN 4
#define BUILD 0
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
        __stringify(BUILD) "-k"
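/* With the values above, DRV_VERSION expands to the string "5.4.0-k":
 * __stringify() turns each numeric macro into a string literal and the
 * preprocessor concatenates the adjacent literals.
 */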
enum queue_mode {
        QUEUE_MODE_STRICT_PRIORITY,
        QUEUE_MODE_STREAM_RESERVATION,
};

enum tx_queue_prio {
        TX_QUEUE_PRIO_HIGH,
        TX_QUEUE_PRIO_LOW,
};

char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
                "Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] =
                "Copyright (c) 2007-2014 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
        [board_82575] = &e1000_82575_info,
};

static const struct pci_device_id igb_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
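/* Exporting the ID table via MODULE_DEVICE_TABLE() generates modalias
 * entries, so userspace (udev/modprobe) can autoload igb when a matching
 * PCI device appears.
 */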
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
int igb_open(struct net_device *);
int igb_close(struct net_device *);
static void igb_configure(struct igb_adapter *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(struct timer_list *);
static void igb_watchdog(struct timer_list *);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static void igb_get_stats64(struct net_device *dev,
                            struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter, bool set);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *, int);
static int igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev,
                          netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_index(struct igb_adapter *, u32);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_flush_mac_table(struct igb_adapter *);
static int igb_available_rars(struct igb_adapter *, u8);
static void igb_set_default_mac_filter(struct igb_adapter *);
static int igb_uc_sync(struct net_device *, const unsigned char *);
static int igb_uc_unsync(struct net_device *, const unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
                               int vf, u16 vlan, u8 qos, __be16 vlan_proto);
static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
                                   bool setting);
static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf,
                                bool setting);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
                                 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);
static void igb_nfc_filter_exit(struct igb_adapter *adapter);
static void igb_nfc_filter_restore(struct igb_adapter *adapter);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
static int igb_disable_sriov(struct pci_dev *dev);
static int igb_pci_disable_sriov(struct pci_dev *dev);
#endif

static int igb_suspend(struct device *);
static int igb_resume(struct device *);
static int igb_runtime_suspend(struct device *dev);
static int igb_runtime_resume(struct device *dev);
static int igb_runtime_idle(struct device *dev);
static const struct dev_pm_ops igb_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
        SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
                           igb_runtime_idle)
};
static void igb_shutdown(struct pci_dev *);
static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);

#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
        .notifier_call = igb_notify_dca,
        .next = NULL,
        .priority = 0
};
#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */
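/* Example usage (assuming SR-IOV capable hardware and an igb-bound port):
 * loading the module with "modprobe igb max_vfs=2" asks the driver to
 * create two virtual functions per physical function at probe time.
 */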
static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
                                              pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static const struct pci_error_handlers igb_err_handler = {
        .error_detected = igb_io_error_detected,
        .slot_reset = igb_io_slot_reset,
        .resume = igb_io_resume,
};

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);

static struct pci_driver igb_driver = {
        .name = igb_driver_name,
        .id_table = igb_pci_tbl,
        .probe = igb_probe,
        .remove = igb_remove,
#ifdef CONFIG_PM
        .driver.pm = &igb_pm_ops,
#endif
        .shutdown = igb_shutdown,
        .sriov_configure = igb_pci_sriov_configure,
        .err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
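/* The default of -1 is out of range for a message-level bitmap, so
 * netif_msg_init() falls back to DEFAULT_MSG_ENABLE when the adapter's
 * msg_enable mask is initialized during probe.
 */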
struct igb_reg_info {
        u32 ofs;
        char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {
        /* General Registers */
        {E1000_CTRL, "CTRL"},
        {E1000_STATUS, "STATUS"},
        {E1000_CTRL_EXT, "CTRL_EXT"},

        /* Interrupt Registers */
        {E1000_ICR, "ICR"},

        /* RX Registers */
        {E1000_RCTL, "RCTL"},
        {E1000_RDLEN(0), "RDLEN"},
        {E1000_RDH(0), "RDH"},
        {E1000_RDT(0), "RDT"},
        {E1000_RXDCTL(0), "RXDCTL"},
        {E1000_RDBAL(0), "RDBAL"},
        {E1000_RDBAH(0), "RDBAH"},

        /* TX Registers */
        {E1000_TCTL, "TCTL"},
        {E1000_TDBAL(0), "TDBAL"},
        {E1000_TDBAH(0), "TDBAH"},
        {E1000_TDLEN(0), "TDLEN"},
        {E1000_TDH(0), "TDH"},
        {E1000_TDT(0), "TDT"},
        {E1000_TXDCTL(0), "TXDCTL"},
        {E1000_TDFH, "TDFH"},
        {E1000_TDFT, "TDFT"},
        {E1000_TDFHS, "TDFHS"},
        {E1000_TDFPC, "TDFPC"},

        /* List Terminator */
        {}
};

/* igb_regdump - register printout routine */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
        int n = 0;
        char rname[16];
        u32 regs[8];

        switch (reginfo->ofs) {
        case E1000_RDLEN(0):
                for (n = 0; n < 4; n++)
                        regs[n] = rd32(E1000_RDLEN(n));
                break;
        case E1000_RDH(0):
                for (n = 0; n < 4; n++)
                        regs[n] = rd32(E1000_RDH(n));
                break;
        case E1000_RDT(0):
                for (n = 0; n < 4; n++)
                        regs[n] = rd32(E1000_RDT(n));
                break;
        case E1000_RXDCTL(0):
                for (n = 0; n < 4; n++)
                        regs[n] = rd32(E1000_RXDCTL(n));
                break;
        case E1000_RDBAL(0):
                for (n = 0; n < 4; n++)
                        regs[n] = rd32(E1000_RDBAL(n));
                break;
        case E1000_RDBAH(0):
                for (n = 0; n < 4; n++)
                        regs[n] = rd32(E1000_RDBAH(n));
                break;
        case E1000_TDBAL(0):
                for (n = 0; n < 4; n++)
                        regs[n] = rd32(E1000_TDBAL(n));
                break;
        case E1000_TDBAH(0):
                for (n = 0; n < 4; n++)
                        regs[n] = rd32(E1000_TDBAH(n));
                break;
        case E1000_TDLEN(0):
                for (n = 0; n < 4; n++)
                        regs[n] = rd32(E1000_TDLEN(n));
                break;
        case E1000_TDH(0):
                for (n = 0; n < 4; n++)
                        regs[n] = rd32(E1000_TDH(n));
                break;
        case E1000_TDT(0):
                for (n = 0; n < 4; n++)
                        regs[n] = rd32(E1000_TDT(n));
                break;
        case E1000_TXDCTL(0):
                for (n = 0; n < 4; n++)
                        regs[n] = rd32(E1000_TXDCTL(n));
                break;
        default:
                pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
                return;
        }

        snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
        pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
                regs[2], regs[3]);
}
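/* Illustrative output (values are made up): a queue-indexed register such
 * as RDLEN prints all four instances on one line, e.g.
 *   RDLEN[0-3]      00001000 00001000 00001000 00001000
 * while a single register falls through to the one-value default case.
 */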
  332. /* igb_dump - Print registers, Tx-rings and Rx-rings */
  333. static void igb_dump(struct igb_adapter *adapter)
  334. {
  335. struct net_device *netdev = adapter->netdev;
  336. struct e1000_hw *hw = &adapter->hw;
  337. struct igb_reg_info *reginfo;
  338. struct igb_ring *tx_ring;
  339. union e1000_adv_tx_desc *tx_desc;
  340. struct my_u0 { u64 a; u64 b; } *u0;
  341. struct igb_ring *rx_ring;
  342. union e1000_adv_rx_desc *rx_desc;
  343. u32 staterr;
  344. u16 i, n;
  345. if (!netif_msg_hw(adapter))
  346. return;
  347. /* Print netdevice Info */
  348. if (netdev) {
  349. dev_info(&adapter->pdev->dev, "Net device Info\n");
  350. pr_info("Device Name state trans_start\n");
  351. pr_info("%-15s %016lX %016lX\n", netdev->name,
  352. netdev->state, dev_trans_start(netdev));
  353. }
  354. /* Print Registers */
  355. dev_info(&adapter->pdev->dev, "Register Dump\n");
  356. pr_info(" Register Name Value\n");
  357. for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
  358. reginfo->name; reginfo++) {
  359. igb_regdump(hw, reginfo);
  360. }
  361. /* Print TX Ring Summary */
  362. if (!netdev || !netif_running(netdev))
  363. goto exit;
	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)dma_unmap_addr(buffer_info, dma),
			dma_unmap_len(buffer_info, len),
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN | PORTS |CC|IDX | STA | DCMD |DTYP|MAC|RSV| DTALEN    |
	 *   +--------------------------------------------------------------+
	 *   63      46 45   40 39 38 36 35 32 31 24             15       0
	 */
	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			const char *next_desc;
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				next_desc = " NTC/U";
			else if (i == tx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == tx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
				i, le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)dma_unmap_addr(buffer_info, dma),
				dma_unmap_len(buffer_info, len),
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb, next_desc);

			if (netif_msg_pktdata(adapter) && buffer_info->skb)
				print_hex_dump(KERN_INFO, "",
					       DUMP_PREFIX_ADDRESS,
					       16, 1, buffer_info->skb->data,
					       dma_unmap_len(buffer_info, len),
					       true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info(" %5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet   IP      |SPH| HDR_LEN   | RSV|Packet|  RSS  |
	 *   | Checksum Ident   |   |           |    | Type | Type  |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
		pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X] %016llX %016llX ---------------- %s\n",
					"RWB", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					next_desc);
			} else {
				pr_info("%s[0x%03X] %016llX %016llX %016llX %s\n",
					"R ", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					next_desc);

				if (netif_msg_pktdata(adapter) &&
				    buffer_info->dma && buffer_info->page) {
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS,
						       16, 1,
						       page_address(buffer_info->page) +
							       buffer_info->page_offset,
						       igb_rx_bufsz(rx_ring), true);
				}
			}
		}
	}

exit:
	return;
}
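
/* Usage note (not part of the dump routine itself): the ring dumps above
 * are gated by the adapter's message level, so the full descriptor and
 * packet-data output only appears when the tx_done/rx_status/pktdata bits
 * are enabled, e.g. with something like "ethtool -s <iface> msglvl 0xffff"
 * before triggering the dump.
 */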
/**
 * igb_get_i2c_data - Reads the I2C SDA data bit
 * @data: pointer to adapter wrapping the hardware structure
 *
 * Returns the I2C data bit value
 **/
static int igb_get_i2c_data(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	return !!(i2cctl & E1000_I2C_DATA_IN);
}
/**
 * igb_set_i2c_data - Sets the I2C data bit
 * @data: pointer to hardware structure
 * @state: I2C data value (0 or 1) to set
 *
 * Sets the I2C data bit
 **/
static void igb_set_i2c_data(void *data, int state)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	if (state)
		i2cctl |= E1000_I2C_DATA_OUT;
	else
		i2cctl &= ~E1000_I2C_DATA_OUT;

	i2cctl &= ~E1000_I2C_DATA_OE_N;
	i2cctl |= E1000_I2C_CLK_OE_N;
	wr32(E1000_I2CPARAMS, i2cctl);
	wrfl();
}

/**
 * igb_set_i2c_clk - Sets the I2C SCL clock
 * @data: pointer to hardware structure
 * @state: state to set clock
 *
 * Sets the I2C clock line to state
 **/
static void igb_set_i2c_clk(void *data, int state)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	if (state) {
		i2cctl |= E1000_I2C_CLK_OUT;
		i2cctl &= ~E1000_I2C_CLK_OE_N;
	} else {
		i2cctl &= ~E1000_I2C_CLK_OUT;
		i2cctl &= ~E1000_I2C_CLK_OE_N;
	}
	wr32(E1000_I2CPARAMS, i2cctl);
	wrfl();
}

/**
 * igb_get_i2c_clk - Gets the I2C SCL clock state
 * @data: pointer to hardware structure
 *
 * Gets the I2C clock state
 **/
static int igb_get_i2c_clk(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	return !!(i2cctl & E1000_I2C_CLK_IN);
}

static const struct i2c_algo_bit_data igb_i2c_algo = {
	.setsda		= igb_set_i2c_data,
	.setscl		= igb_set_i2c_clk,
	.getsda		= igb_get_i2c_data,
	.getscl		= igb_get_i2c_clk,
	.udelay		= 5,
	.timeout	= 20,
};
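
/* Note: the callbacks above implement a bit-banged I2C bus through the
 * I2CPARAMS register, driven by the generic i2c-algo-bit layer. With
 * .udelay = 5 (the half-period in microseconds) the resulting SCL clock
 * is roughly 100 kHz, and .timeout (in jiffies) bounds how long a
 * transfer waits for SCL to be released by a clock-stretching slave.
 */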
/**
 * igb_get_hw_dev - return device
 * @hw: pointer to hardware structure
 *
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n",
		igb_driver_string, igb_driver_version);
	pr_info("%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);
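
/* Illustrative note (derived from the macro below, not additional
 * documentation): Q_IDX_82576() interleaves the queue register indices so
 * that consecutive logical queues land in alternating banks, e.g.
 * i = 0, 1, 2, 3 map to register indices 0, 8, 1, 9. This matches the VF
 * layout described in igb_cache_ring_register(), where VF n owns queues
 * n and n + 8.
 */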
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))

/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
							       Q_IDX_82576(i);
		}
		/* Fall through */
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* Fall through */
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}
u32 igb_rd32(struct e1000_hw *hw, u32 reg)
{
	struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
	u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
	u32 value = 0;

	if (E1000_REMOVED(hw_addr))
		return ~value;

	value = readl(&hw_addr[reg]);

	/* reads should not return all F's */
	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
		struct net_device *netdev = igb->netdev;
		hw->hw_addr = NULL;
		netdev_err(netdev, "PCIe link lost\n");
	}

	return value;
}
/**
 * igb_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset in IVAR, should be multiple of 8
 *
 * This function is intended to handle the writing of the IVAR register
 * for adapters 82576 and newer. The IVAR table consists of 2 columns,
 * each containing a cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}
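
/* Illustrative example (values chosen here, not from the code above):
 * writing msix_vector = 3 at index = 1, offset = 8 read-modify-writes
 * IVAR0[1], clearing bits 15:8 and storing (3 | E1000_IVAR_VALID) there,
 * so only that one 8-bit cause-allocation field is changed.
 */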
#define IGB_N0_QUEUE -1

static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		 * bitmask for the EICR/EIMS/EIMC registers. To assign one
		 * or more queues to a vector, we write the appropriate bits
		 * into the MSIXBM register for that vector.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows. The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = BIT(msix_vector);
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major. So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = BIT(msix_vector);
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}
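
/* Illustrative mapping (worked from the switch above, not additional
 * hardware documentation): on 82576, Rx queue 10 programs row 10 & 0x7 = 2
 * at column offset (10 & 0x8) << 1 = 16; on 82580-class parts, Rx queue 5
 * programs row 5 >> 1 = 2 at offset (5 & 0x1) << 4 = 16. In both schemes
 * the Tx cause field sits 8 bits above its Rx counterpart.
 */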
/**
 * igb_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure to initialize
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support*/
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;

	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick. And it will take days to debug.
		 */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = BIT(vector);
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}
/**
 * igb_request_msix - Initialize MSI-X interrupts
 * @adapter: board private structure to initialize
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, err = 0, vector = 0, free_vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto err_out;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		vector++;

		q_vector->itr_register = adapter->io_addr + E1000_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
				q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
				  igb_msix_ring, 0, q_vector->name,
				  q_vector);
		if (err)
			goto err_free;
	}

	igb_configure_msix(adapter);
	return 0;

err_free:
	/* free already assigned IRQs */
	free_irq(adapter->msix_entries[free_vector++].vector, adapter);

	vector--;
	for (i = 0; i < vector; i++) {
		free_irq(adapter->msix_entries[free_vector++].vector,
			 adapter->q_vector[i]);
	}
err_out:
	return err;
}
/**
 * igb_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.
 **/
static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	adapter->q_vector[v_idx] = NULL;

	/* igb_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	if (q_vector)
		kfree_rcu(q_vector, rcu);
}

/**
 * igb_reset_q_vector - Reset config for interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be reset
 *
 * If NAPI is enabled it will delete any references to the
 * NAPI struct. This is preparation for igb_free_q_vector.
 **/
static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	/* Coming from igb_set_interrupt_capability, the vectors are not yet
	 * allocated. In that case q_vector is NULL and we should stop here.
	 */
	if (!q_vector)
		return;

	if (q_vector->tx.ring)
		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;

	if (q_vector->rx.ring)
		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;

	netif_napi_del(&q_vector->napi);
}

static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	if (adapter->flags & IGB_FLAG_HAS_MSIX)
		pci_disable_msix(adapter->pdev);
	else if (adapter->flags & IGB_FLAG_HAS_MSI)
		pci_disable_msi(adapter->pdev);

	while (v_idx--)
		igb_reset_q_vector(adapter, v_idx);
}
/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--) {
		igb_reset_q_vector(adapter, v_idx);
		igb_free_q_vector(adapter, v_idx);
	}
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 * @adapter: board private structure to initialize
 *
 * This function resets the device so that it has 0 Rx queues, Tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}
/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: board private structure to initialize
 * @msix: boolean value of MSIX capability
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
{
	int err;
	int numvecs, i;

	if (!msix)
		goto msi_only;
	adapter->flags |= IGB_FLAG_HAS_MSIX;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every Rx queue */
	numvecs = adapter->num_rx_queues;

	/* if Tx handler is separate add 1 for every Tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix_range(adapter->pdev,
				    adapter->msix_entries,
				    numvecs,
				    numvecs);
	if (err > 0)
		return;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
	adapter->flags &= ~IGB_FLAG_HAS_MSIX;
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;
		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_mac_list);
		adapter->vf_mac_list = NULL;
		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
}
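
/* Illustrative vector accounting (follows directly from the code above):
 * with 4 RSS queues and IGB_FLAG_QUEUE_PAIRS set, each of the 4 q_vectors
 * services one Rx/Tx pair, so pci_enable_msix_range() is asked for
 * 4 + 1 = 5 vectors, the extra one handling link and other causes. With
 * queue pairing off, the same setup would request 4 + 4 + 1 = 9 vectors.
 */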
static void igb_add_ring(struct igb_ring *ring,
			 struct igb_ring_container *head)
{
	head->ring = ring;
	head->count++;
}
/**
 * igb_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector. If allocation fails we return -ENOMEM.
 **/
static int igb_alloc_q_vector(struct igb_adapter *adapter,
			      int v_count, int v_idx,
			      int txr_count, int txr_idx,
			      int rxr_count, int rxr_idx)
{
	struct igb_q_vector *q_vector;
	struct igb_ring *ring;
	int ring_count, size;

	/* igb only supports 1 Tx and/or 1 Rx queue per vector */
	if (txr_count > 1 || rxr_count > 1)
		return -ENOMEM;

	ring_count = txr_count + rxr_count;
	size = sizeof(struct igb_q_vector) +
	       (sizeof(struct igb_ring) * ring_count);

	/* allocate q_vector and rings */
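	/* Reuse note: an existing q_vector is recycled when its current
	 * allocation is already large enough; ksize() reports the size the
	 * allocator actually handed out, which may exceed what was
	 * originally requested. Only when the required size has grown is
	 * the old vector released (after an RCU grace period) and a fresh
	 * one allocated.
	 */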
	q_vector = adapter->q_vector[v_idx];
	if (!q_vector) {
		q_vector = kzalloc(size, GFP_KERNEL);
	} else if (size > ksize(q_vector)) {
		kfree_rcu(q_vector, rcu);
		q_vector = kzalloc(size, GFP_KERNEL);
	} else {
		memset(q_vector, 0, size);
	}
	if (!q_vector)
		return -ENOMEM;

	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       igb_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize ITR configuration */
	q_vector->itr_register = adapter->io_addr + E1000_EITR(0);
	q_vector->itr_val = IGB_START_ITR;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* initialize ITR */
	if (rxr_count) {
		/* rx or rx/tx vector */
		if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
			q_vector->itr_val = adapter->rx_itr_setting;
	} else {
		/* tx only vector */
		if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
			q_vector->itr_val = adapter->tx_itr_setting;
	}

	if (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		igb_add_ring(ring, &q_vector->tx);

		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		ring->cbs_enable = false;
		ring->idleslope = 0;
		ring->sendslope = 0;
		ring->hicredit = 0;
		ring->locredit = 0;

		u64_stats_init(&ring->tx_syncp);
		u64_stats_init(&ring->tx_syncp2);

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* push pointer to next ring */
		ring++;
	}

	if (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		igb_add_ring(ring, &q_vector->rx);

		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);

		/* On i350, i354, i210, and i211, loopback VLAN packets
		 * have the tag byte-swapped.
		 */
		if (adapter->hw.mac.type >= e1000_i350)
			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);

		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		u64_stats_init(&ring->rx_syncp);

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;
	}

	return 0;
}
/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
						 0, 0, 1, rxr_idx);

			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
					 tqpv, txr_idx, rqpv, rxr_idx);

		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		igb_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}
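
/* Illustrative distribution (a walk-through of the loops above): with
 * 4 Rx queues, 4 Tx queues and 4 q_vectors, the first branch is skipped
 * (4 < 8) and each pass of the second loop computes rqpv = tqpv = 1, so
 * every vector ends up paired with exactly one Rx and one Tx ring. With
 * 2 Rx queues, 2 Tx queues and 4 q_vectors, the first branch instead
 * gives the Rx rings dedicated vectors 0-1 and the Tx rings land on
 * vectors 2-3.
 */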
/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 * @adapter: board private structure to initialize
 * @msix: boolean value of MSIX capability
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	igb_set_interrupt_capability(adapter, msix);

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	igb_cache_ring_register(adapter);

	return 0;

err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}
/**
 * igb_request_irq - initialize interrupts
 * @adapter: board private structure to initialize
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);

		igb_clear_interrupt_scheme(adapter);
		err = igb_init_interrupt_scheme(adapter, false);
		if (err)
			goto request_done;

		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
		igb_configure(adapter);
	}

	igb_assign_vector(adapter->q_vector[0], 0);

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(pdev->irq, igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}

static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 adapter->q_vector[i]);
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}
/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* we need to be careful when disabling interrupts. The VFs are also
	 * mapped into these registers and so clearing the bits can cause
	 * issues on the VF drivers so we only need to clear what we set
	 */
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		u32 regval = rd32(E1000_EIAM);

		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		int i;

		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}
/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
		u32 regval = rd32(E1000_EIAC);

		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}
static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, pf_id, true, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !test_bit(old_vid, adapter->active_vlans)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, vid, pf_id, false, true);
	}
}
/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}
static void enable_fqtss(struct igb_adapter *adapter, bool enable)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;

	WARN_ON(hw->mac.type != e1000_i210);

	if (enable)
		adapter->flags |= IGB_FLAG_FQTSS;
	else
		adapter->flags &= ~IGB_FLAG_FQTSS;

	if (netif_running(netdev))
		schedule_work(&adapter->reset_task);
}

static bool is_fqtss_enabled(struct igb_adapter *adapter)
{
	return (adapter->flags & IGB_FLAG_FQTSS) ? true : false;
}

static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue,
				   enum tx_queue_prio prio)
{
	u32 val;

	WARN_ON(hw->mac.type != e1000_i210);
	WARN_ON(queue < 0 || queue > 4);

	val = rd32(E1000_I210_TXDCTL(queue));

	if (prio == TX_QUEUE_PRIO_HIGH)
		val |= E1000_TXDCTL_PRIORITY;
	else
		val &= ~E1000_TXDCTL_PRIORITY;

	wr32(E1000_I210_TXDCTL(queue), val);
}

static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode)
{
	u32 val;

	WARN_ON(hw->mac.type != e1000_i210);
	WARN_ON(queue < 0 || queue > 1);

	val = rd32(E1000_I210_TQAVCC(queue));

	if (mode == QUEUE_MODE_STREAM_RESERVATION)
		val |= E1000_TQAVCC_QUEUEMODE;
	else
		val &= ~E1000_TQAVCC_QUEUEMODE;

	wr32(E1000_I210_TQAVCC(queue), val);
}

static bool is_any_cbs_enabled(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (adapter->tx_ring[i]->cbs_enable)
			return true;
	}

	return false;
}

static bool is_any_txtime_enabled(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		if (adapter->tx_ring[i]->launchtime_enable)
			return true;
	}

	return false;
}
/**
 * igb_config_tx_modes - Configure "Qav Tx mode" features on igb
 * @adapter: pointer to adapter struct
 * @queue: queue number
 *
 * Configure CBS and Launchtime for a given hardware queue.
 * Parameters are retrieved from the correct Tx ring, so
 * igb_save_cbs_params() and igb_save_txtime_params() should be used
 * for setting those correctly prior to this function being called.
 **/
static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
{
	struct igb_ring *ring = adapter->tx_ring[queue];
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tqavcc, tqavctrl;
	u16 value;

	WARN_ON(hw->mac.type != e1000_i210);
	WARN_ON(queue < 0 || queue > 1);

	/* If any of the Qav features is enabled, configure queues as SR and
	 * with HIGH PRIO. If none is, then configure them with LOW PRIO and
	 * as SP.
	 */
	if (ring->cbs_enable || ring->launchtime_enable) {
		set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
		set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);
	} else {
		set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
		set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);
	}

	/* If CBS is enabled, set DataTranARB and config its parameters. */
	if (ring->cbs_enable || queue == 0) {
		/* i210 does not allow the queue 0 to be in the Strict
		 * Priority mode while the Qav mode is enabled, so,
		 * instead of disabling strict priority mode, we give
		 * queue 0 the maximum of credits possible.
		 *
		 * See section 8.12.19 of the i210 datasheet, "Note:
		 * Queue0 QueueMode must be set to 1b when
		 * TransmitMode is set to Qav."
		 */
		if (queue == 0 && !ring->cbs_enable) {
			/* max "linkspeed" idleslope in kbps */
			ring->idleslope = 1000000;
			ring->hicredit = ETH_FRAME_LEN;
		}

		/* Always set data transfer arbitration to credit-based
		 * shaper algorithm on TQAVCTRL if CBS is enabled for any of
		 * the queues.
		 */
		tqavctrl = rd32(E1000_I210_TQAVCTRL);
		tqavctrl |= E1000_TQAVCTRL_DATATRANARB;
		wr32(E1000_I210_TQAVCTRL, tqavctrl);

		/* According to i210 datasheet section 7.2.7.7, we should set
		 * the 'idleSlope' field from TQAVCC register following the
		 * equation:
		 *
		 * For 100 Mbps link speed:
		 *
		 *     value = BW * 0x7735 * 0.2                          (E1)
		 *
		 * For 1000Mbps link speed:
		 *
		 *     value = BW * 0x7735 * 2                            (E2)
		 *
		 * E1 and E2 can be merged into one equation as shown below.
		 * Note that 'link-speed' is in Mbps.
		 *
		 *     value = BW * 0x7735 * 2 * link-speed
		 *                           --------------               (E3)
		 *                                1000
		 *
		 * 'BW' is the percentage bandwidth out of full link speed
		 * which can be found with the following equation. Note that
		 * idleSlope here is the parameter from this function which
		 * is in kbps.
		 *
		 *     BW =     idleSlope
		 *          -----------------                             (E4)
		 *          link-speed * 1000
		 *
		 * That said, we can come up with a generic equation to
		 * calculate the value we should write to the TQAVCC register
		 * by replacing 'BW' in E3 by E4. The resulting equation is:
		 *
		 *     value =     idleSlope     * 0x7735 * 2 * link-speed
		 *             -----------------            --------------  (E5)
		 *             link-speed * 1000                  1000
		 *
		 * 'link-speed' is present in both sides of the fraction so
		 * it is canceled out. The final equation is the following:
		 *
		 *     value = idleSlope * 61034
		 *             -----------------                          (E6)
		 *                  1000000
		 *
		 * NOTE: For i210, given the above, we can see that idleslope
		 *       is represented in 16.38431 kbps units by the value at
		 *       the TQAVCC register (1Gbps / 61034), which reduces
		 *       the granularity for idleslope increments.
		 *       For instance, if you want to configure a 2576kbps
		 *       idleslope, the value to be written on the register
		 *       would have to be 157.23. If rounded down, you end
		 *       up with less bandwidth available than originally
		 *       required (~2572 kbps). If rounded up, you end up
		 *       with a higher bandwidth (~2589 kbps). The approach
		 *       we take is to always round up the calculated value,
		 *       so the resulting bandwidth might be slightly higher
		 *       for some configurations.
		 */
		value = DIV_ROUND_UP_ULL(ring->idleslope * 61034ULL, 1000000);

		tqavcc = rd32(E1000_I210_TQAVCC(queue));
		tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
		tqavcc |= value;
		wr32(E1000_I210_TQAVCC(queue), tqavcc);

		wr32(E1000_I210_TQAVHC(queue),
		     0x80000000 + ring->hicredit * 0x7735);
	} else {
		/* Set idleSlope to zero. */
		tqavcc = rd32(E1000_I210_TQAVCC(queue));
		tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
		wr32(E1000_I210_TQAVCC(queue), tqavcc);

		/* Set hiCredit to zero. */
		wr32(E1000_I210_TQAVHC(queue), 0);

		/* If CBS is not enabled for any queues anymore, then return to
		 * the default state of Data Transmission Arbitration on
		 * TQAVCTRL.
		 */
		if (!is_any_cbs_enabled(adapter)) {
			tqavctrl = rd32(E1000_I210_TQAVCTRL);
			tqavctrl &= ~E1000_TQAVCTRL_DATATRANARB;
			wr32(E1000_I210_TQAVCTRL, tqavctrl);
		}
	}

	/* If LaunchTime is enabled, set DataTranTIM. */
	if (ring->launchtime_enable) {
		/* Always set DataTranTIM on TQAVCTRL if LaunchTime is enabled
		 * for any of the SR queues, and configure fetchtime delta.
		 * XXX NOTE:
		 * - LaunchTime will be enabled for all SR queues.
		 * - A fixed offset can be added relative to the launch
		 *   time of all packets if configured at reg LAUNCH_OS0.
		 *   We are keeping it as 0 for now (default value).
		 */
		tqavctrl = rd32(E1000_I210_TQAVCTRL);
		tqavctrl |= E1000_TQAVCTRL_DATATRANTIM |
			    E1000_TQAVCTRL_FETCHTIME_DELTA;
		wr32(E1000_I210_TQAVCTRL, tqavctrl);
	} else {
		/* If Launchtime is not enabled for any SR queues anymore,
		 * then clear DataTranTIM on TQAVCTRL and clear fetchtime delta,
		 * effectively disabling Launchtime.
		 */
		if (!is_any_txtime_enabled(adapter)) {
			tqavctrl = rd32(E1000_I210_TQAVCTRL);
			tqavctrl &= ~E1000_TQAVCTRL_DATATRANTIM;
			tqavctrl &= ~E1000_TQAVCTRL_FETCHTIME_DELTA;
			wr32(E1000_I210_TQAVCTRL, tqavctrl);
		}
	}

	/* XXX: In i210 controller the sendSlope and loCredit parameters from
	 * CBS are not configurable by software so we don't do any 'controller
	 * configuration' in respect to these parameters.
	 */

	netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
		   (ring->cbs_enable) ? "enabled" : "disabled",
		   (ring->launchtime_enable) ? "enabled" : "disabled", queue,
		   ring->idleslope, ring->sendslope, ring->hicredit,
		   ring->locredit);
}
static int igb_save_txtime_params(struct igb_adapter *adapter, int queue,
				  bool enable)
{
	struct igb_ring *ring;

	if (queue < 0 || queue > adapter->num_tx_queues)
		return -EINVAL;

	ring = adapter->tx_ring[queue];
	ring->launchtime_enable = enable;

	return 0;
}

static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
			       bool enable, int idleslope, int sendslope,
			       int hicredit, int locredit)
{
	struct igb_ring *ring;

	if (queue < 0 || queue > adapter->num_tx_queues)
		return -EINVAL;

	ring = adapter->tx_ring[queue];

	ring->cbs_enable = enable;
	ring->idleslope = idleslope;
	ring->sendslope = sendslope;
	ring->hicredit = hicredit;
	ring->locredit = locredit;

	return 0;
}
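
/* Contextual note: the two savers above only stash the parameters on the
 * Tx ring; nothing reaches hardware until igb_config_tx_modes() (or a
 * full igb_setup_tx_mode() pass) picks them up, so callers are expected
 * to save first and apply the Tx mode second.
 */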
/**
 * igb_setup_tx_mode - Switch to/from Qav Tx mode when applicable
 * @adapter: pointer to adapter struct
 *
 * Configure TQAVCTRL register switching the controller's Tx mode
 * if FQTSS mode is enabled or disabled. Additionally, will issue
 * a call to igb_config_tx_modes() per queue so any previously saved
 * Tx parameters are applied.
 **/
static void igb_setup_tx_mode(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 val;

	/* Only i210 controller supports changing the transmission mode. */
	if (hw->mac.type != e1000_i210)
		return;

	if (is_fqtss_enabled(adapter)) {
		int i, max_queue;

		/* Configure TQAVCTRL register: set transmit mode to 'Qav',
		 * set data fetch arbitration to 'round robin', set SP_WAIT_SR
		 * so SP queues wait for SR ones.
		 */
		val = rd32(E1000_I210_TQAVCTRL);
		val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_SP_WAIT_SR;
		val &= ~E1000_TQAVCTRL_DATAFETCHARB;
		wr32(E1000_I210_TQAVCTRL, val);

		/* Configure Tx and Rx packet buffers sizes as described in
		 * i210 datasheet section 7.2.7.7.
		 */
		val = rd32(E1000_TXPBS);
		val &= ~I210_TXPBSIZE_MASK;
		val |= I210_TXPBSIZE_PB0_8KB | I210_TXPBSIZE_PB1_8KB |
			I210_TXPBSIZE_PB2_4KB | I210_TXPBSIZE_PB3_4KB;
		wr32(E1000_TXPBS, val);

		val = rd32(E1000_RXPBS);
		val &= ~I210_RXPBSIZE_MASK;
		val |= I210_RXPBSIZE_PB_32KB;
		wr32(E1000_RXPBS, val);

		/* Section 8.12.9 states that MAX_TPKT_SIZE from DTXMXPKTSZ
		 * register should not exceed the buffer size programmed in
		 * TXPBS. The smallest buffer size programmed in TXPBS is 4kB
		 * so according to the datasheet we should set MAX_TPKT_SIZE to
		 * 4kB / 64.
		 *
		 * However, when we do so, no frame from queue 2 and 3 are
		 * transmitted. It seems the MAX_TPKT_SIZE should not be
		 * greater than or _equal_ to the buffer size programmed in
		 * TXPBS. For this reason, we set MAX_TPKT_SIZE to
		 * (4kB - 1) / 64.
		 */
		val = (4096 - 1) / 64;
		wr32(E1000_I210_DTXMXPKTSZ, val);

		/* Since FQTSS mode is enabled, apply any CBS configuration
		 * previously set. If no previous CBS configuration has been
		 * done, then the initial configuration is applied, which means
		 * CBS is disabled.
		 */
		max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ?
			    adapter->num_tx_queues : I210_SR_QUEUES_NUM;

		for (i = 0; i < max_queue; i++) {
			igb_config_tx_modes(adapter, i);
		}
	} else {
		wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
		wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
		wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT);

		val = rd32(E1000_I210_TQAVCTRL);
		/* According to Section 8.12.21, the other flags we've set when
		 * enabling FQTSS are not relevant when disabling FQTSS, so we
		 * don't touch them here.
		 */
		val &= ~E1000_TQAVCTRL_XMIT_MODE;
		wr32(E1000_I210_TQAVCTRL, val);
	}

	netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ?
		   "enabled" : "disabled");
}
/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);
	igb_setup_tx_mode(adapter);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_nfc_filter_restore(adapter);
	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
	}
}
/**
 * igb_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
	igb_reset_phy(&adapter->hw);

	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_up_phy_copper(&adapter->hw);
	else
		igb_power_up_serdes_link_82575(&adapter->hw);

	igb_setup_link(&adapter->hw);
}

/**
 * igb_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_down_phy_copper_82575(&adapter->hw);
	else
		igb_shutdown_serdes_link_82575(&adapter->hw);
}
/**
 * igb_check_swap_media - Detect and switch function for Media Auto Sense
 * @adapter: address of the board private structure
 **/
static void igb_check_swap_media(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext, connsw;
	bool swap_now = false;

	ctrl_ext = rd32(E1000_CTRL_EXT);
	connsw = rd32(E1000_CONNSW);

	/* need to live swap if current media is copper and we have fiber/serdes
	 * to go to.
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) &&
	    (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
		swap_now = true;
	} else if (!(connsw & E1000_CONNSW_SERDESD)) {
		/* copper signal takes time to appear */
		if (adapter->copper_tries < 4) {
			adapter->copper_tries++;
			connsw |= E1000_CONNSW_AUTOSENSE_CONF;
			wr32(E1000_CONNSW, connsw);
			return;
		} else {
			adapter->copper_tries = 0;
			if ((connsw & E1000_CONNSW_PHYSD) &&
			    (!(connsw & E1000_CONNSW_PHY_PDN))) {
				swap_now = true;
				connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
				wr32(E1000_CONNSW, connsw);
			}
		}
	}

	if (!swap_now)
		return;

	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		netdev_info(adapter->netdev,
			    "MAS: changing media to fiber/serdes\n");
		ctrl_ext |=
			E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
		adapter->flags |= IGB_FLAG_MEDIA_RESET;
		adapter->copper_tries = 0;
		break;
	case e1000_media_type_internal_serdes:
	case e1000_media_type_fiber:
		netdev_info(adapter->netdev,
			    "MAS: changing media to copper\n");
		ctrl_ext &=
			~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
		adapter->flags |= IGB_FLAG_MEDIA_RESET;
		break;
	default:
		/* shouldn't get here during regular operation */
		netdev_err(adapter->netdev,
			   "AMS: Invalid media type found, returning\n");
		break;
	}
	wr32(E1000_CTRL_EXT, ctrl_ext);
}
/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&(adapter->q_vector[i]->napi));

	if (adapter->flags & IGB_FLAG_HAS_MSIX)
		igb_configure_msix(adapter);
	else
		igb_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(E1000_TSICR);
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);

		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	if ((adapter->flags & IGB_FLAG_EEE) &&
	    (!hw->dev_spec._82575.eee_disable))
		adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;

	return 0;
}
void igb_down(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer
	 */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	igb_nfc_filter_exit(adapter);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);
	/* flush both disables and wait for them to finish */
	wrfl();
	usleep_range(10000, 11000);

	igb_irq_disable(adapter);

	adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		if (adapter->q_vector[i]) {
			napi_synchronize(&adapter->q_vector[i]->napi);
			napi_disable(&adapter->q_vector[i]->napi);
		}
	}

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	/* record the stats before reset */
	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter);
	spin_unlock(&adapter->stats64_lock);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);

	/* clear VLAN promisc flag so VFTA will be updated if necessary */
	adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;

	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA
	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}
void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		usleep_range(1000, 2000);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

/**
 * igb_enable_mas - Media Autosense re-enable after swap
 * @adapter: adapter struct
 **/
static void igb_enable_mas(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 connsw = rd32(E1000_CONNSW);

	/* configure for SerDes media detect */
	if ((hw->phy.media_type == e1000_media_type_copper) &&
	    (!(connsw & E1000_CONNSW_SERDESD))) {
		connsw |= E1000_CONNSW_ENRGSRC;
		connsw |= E1000_CONNSW_AUTOSENSE_EN;
		wr32(E1000_CONNSW, connsw);
		wrfl();
	}
}
void igb_reset(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba, hwm;

	/* Repartition PBA for greater than 9k MTU.
	 * To take effect, CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_i350:
	case e1000_i354:
	case e1000_82580:
		pba = rd32(E1000_RXPBS);
		pba = igb_rxpbs_adjust_82580(pba);
		break;
	case e1000_82576:
		pba = rd32(E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82575:
	case e1000_i210:
	case e1000_i211:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if (mac->type == e1000_82575) {
		u32 min_rx_space, min_tx_space, needed_tx_space;

		/* write Rx PBA so that hardware can report correct Tx PBA */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024);

		/* The Tx FIFO also stores 16 bytes of information about the Tx
		 * but don't include Ethernet FCS because hardware appends it.
		 * We only need to round down to the nearest 512 byte block
		 * count since the value we care about is 2 frames, not 1.
		 */
		min_tx_space = adapter->max_frame_size;
		min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN;
		min_tx_space = DIV_ROUND_UP(min_tx_space, 512);

		/* upper 16 bits has Tx packet buffer allocation size in KB */
		needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16);

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation.
		 */
		if (needed_tx_space < pba) {
			pba -= needed_tx_space;

			/* if short on Rx space, Rx wins and must trump Tx
			 * adjustment
			 */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}

		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);
	}
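	/* Worked example for the sizing above (illustrative numbers only,
	 * assuming a 1522-byte max frame and a 16-byte advanced Tx
	 * descriptor): 1522 + 16 - 4 (FCS) = 1534 bytes, and
	 * DIV_ROUND_UP(1534, 512) = 3. Counting one frame in 512-byte
	 * blocks yields the same number as counting two frames in 1KB
	 * blocks (DIV_ROUND_UP(2 * 1534, 1024) = 3), which is why a single
	 * frame rounded to 512-byte blocks can be compared directly
	 * against the KB-granular PBA fields.
	 */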
	/* flow control settings
	 * The high water mark must be low enough to fit one full frame
	 * after transmitting the pause frame. As such we must have enough
	 * space to allow for us to complete our current transmit and then
	 * receive the frame that is in progress from the link partner.
	 * Set it to:
	 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
	 */
	hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);

	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;
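	/* Illustrative arithmetic for the watermark above (example values
	 * only): with pba = 34 KB, (pba << 10) = 34816 bytes of Rx FIFO.
	 * Subtracting one in-flight Tx frame plus one maximum-size Rx frame
	 * from the link partner leaves the XOFF threshold; masking with
	 * 0xFFFFFFF0 rounds it down to the 16-byte granularity the hardware
	 * expects, and low_water sits one 16-byte unit below high_water.
	 */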
	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;

		for (i = 0 ; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	hw->mac.ops.reset_hw(hw);
	wr32(E1000_WUC, 0);

	if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
		/* need to resetup here after media swap */
		adapter->ei.get_invariants(hw);
		adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
	}
	if ((mac->type == e1000_82575) &&
	    (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
		igb_enable_mas(adapter);
	}
	if (hw->mac.ops.init_hw(hw))
		dev_err(&pdev->dev, "Hardware Error\n");

	/* RAR registers were cleared during init_hw, clear mac table */
	igb_flush_mac_table(adapter);
	__dev_uc_unsync(adapter->netdev, NULL);

	/* Recover default RAR entry */
	igb_set_default_mac_filter(adapter);

	/* Flow control settings reset on hardware reset, so guarantee flow
	 * control is off when forcing speed.
	 */
	if (!hw->mac.autoneg)
		igb_force_mac_fc(hw);

	igb_init_dmac(adapter, pba);
#ifdef CONFIG_IGB_HWMON
	/* Re-initialize the thermal sensor on i350 devices. */
	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (mac->type == e1000_i350 && hw->bus.func == 0) {
			/* If present, re-initialize the external thermal sensor
			 * interface.
			 */
			if (adapter->ets)
				mac->ops.init_thermal_sensor_thresh(hw);
		}
	}
#endif
	/* Re-establish EEE setting */
	if (hw->phy.media_type == e1000_media_type_copper) {
		switch (mac->type) {
		case e1000_i350:
		case e1000_i210:
		case e1000_i211:
			igb_set_eee_i350(hw, true, true);
			break;
		case e1000_i354:
			igb_set_eee_i354(hw, true, true);
			break;
		default:
			break;
		}
	}
	if (!netif_running(adapter->netdev))
		igb_power_down_link(adapter);

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	/* Re-enable PTP, where applicable. */
	if (adapter->ptp_flags & IGB_PTP_ENABLED)
		igb_ptp_reset(adapter);

	igb_get_phy_info(hw);
}
static netdev_features_t igb_fix_features(struct net_device *netdev,
					  netdev_features_t features)
{
	/* Since there is no support for separate Rx/Tx vlan accel
	 * enable/disable make sure Tx flag is always in same state as Rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}
static int igb_set_features(struct net_device *netdev,
			    netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		igb_vlan_mode(netdev, features);

	if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
		return 0;

	if (!(features & NETIF_F_NTUPLE)) {
		struct hlist_node *node2;
		struct igb_nfc_filter *rule;

		spin_lock(&adapter->nfc_lock);
		hlist_for_each_entry_safe(rule, node2,
					  &adapter->nfc_filter_list, nfc_node) {
			igb_erase_filter(adapter, rule);
			hlist_del(&rule->nfc_node);
			kfree(rule);
		}
		spin_unlock(&adapter->nfc_lock);
		adapter->nfc_filter_count = 0;
	}

	netdev->features = features;

	if (netif_running(netdev))
		igb_reinit_locked(adapter);
	else
		igb_reset(adapter);

	return 0;
}
static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev,
			   const unsigned char *addr, u16 vid,
			   u16 flags)
{
	/* guarantee we can provide a unique filter for the unicast address */
	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
		struct igb_adapter *adapter = netdev_priv(dev);
		int vfn = adapter->vfs_allocated_count;

		if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn))
			return -ENOMEM;
	}

	return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
}
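/* Usage sketch (illustrative, not part of the driver): this callback backs
 * the iproute2 fdb interface, e.g. something like
 *
 *   bridge fdb add 00:11:22:33:44:55 dev eth0 self
 *
 * which succeeds only while spare RAR filter entries remain for the PF,
 * per the igb_available_rars() check above.
 */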
#define IGB_MAX_MAC_HDR_LEN	127
#define IGB_MAX_NETWORK_HDR_LEN	511

static netdev_features_t
igb_features_check(struct sk_buff *skb, struct net_device *dev,
		   netdev_features_t features)
{
	unsigned int network_hdr_len, mac_hdr_len;

	/* Make certain the headers can be described by a context descriptor */
	mac_hdr_len = skb_network_header(skb) - skb->data;
	if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
	if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	/* We can only support IPV4 TSO in tunnels if we can mangle the
	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
	 */
	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
		features &= ~NETIF_F_TSO;

	return features;
}
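/* Side note on the two caps above (an assumption based on the usual
 * advanced Tx context descriptor layout, not stated in this file): if the
 * MAC and network header lengths are carried in 7- and 9-bit MACLEN/IPLEN
 * style fields, then 127 and 511 bytes are the largest describable values,
 * and any longer header stack must fall back to software checksum/GSO for
 * that one skb.
 */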
static void igb_offload_apply(struct igb_adapter *adapter, s32 queue)
{
	if (!is_fqtss_enabled(adapter)) {
		enable_fqtss(adapter, true);
		return;
	}

	igb_config_tx_modes(adapter, queue);

	if (!is_any_cbs_enabled(adapter) && !is_any_txtime_enabled(adapter))
		enable_fqtss(adapter, false);
}

static int igb_offload_cbs(struct igb_adapter *adapter,
			   struct tc_cbs_qopt_offload *qopt)
{
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* CBS offloading is only supported by the i210 controller. */
	if (hw->mac.type != e1000_i210)
		return -EOPNOTSUPP;

	/* CBS offloading is only supported by queue 0 and queue 1. */
	if (qopt->queue < 0 || qopt->queue > 1)
		return -EINVAL;

	err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable,
				  qopt->idleslope, qopt->sendslope,
				  qopt->hicredit, qopt->locredit);
	if (err)
		return err;

	igb_offload_apply(adapter, qopt->queue);

	return 0;
}
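/* Usage sketch (illustrative, not part of the driver): CBS offload is
 * typically reached through a cbs qdisc attached under mqprio, e.g.
 * something like
 *
 *   tc qdisc replace dev eth0 parent 6666:1 cbs idleslope 98688 \
 *      sendslope -901312 hicredit 153 locredit -1389 offload 1
 *
 * where the handle, slopes and credits depend entirely on the local mqprio
 * setup and stream reservation; per the checks above, only queues 0 and 1
 * of the i210 can accept the offload.
 */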
#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
#define VLAN_PRIO_FULL_MASK (0x07)

static int igb_parse_cls_flower(struct igb_adapter *adapter,
				struct tc_cls_flower_offload *f,
				int traffic_class,
				struct igb_nfc_filter *input)
{
	struct netlink_ext_ack *extack = f->common.extack;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported");
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key, *mask;

		key = skb_flow_dissector_target(f->dissector,
						FLOW_DISSECTOR_KEY_ETH_ADDRS,
						f->key);
		mask = skb_flow_dissector_target(f->dissector,
						 FLOW_DISSECTOR_KEY_ETH_ADDRS,
						 f->mask);

		if (!is_zero_ether_addr(mask->dst)) {
			if (!is_broadcast_ether_addr(mask->dst)) {
				NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address");
				return -EINVAL;
			}

			input->filter.match_flags |=
				IGB_FILTER_FLAG_DST_MAC_ADDR;
			ether_addr_copy(input->filter.dst_addr, key->dst);
		}

		if (!is_zero_ether_addr(mask->src)) {
			if (!is_broadcast_ether_addr(mask->src)) {
				NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address");
				return -EINVAL;
			}

			input->filter.match_flags |=
				IGB_FILTER_FLAG_SRC_MAC_ADDR;
			ether_addr_copy(input->filter.src_addr, key->src);
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key, *mask;

		key = skb_flow_dissector_target(f->dissector,
						FLOW_DISSECTOR_KEY_BASIC,
						f->key);
		mask = skb_flow_dissector_target(f->dissector,
						 FLOW_DISSECTOR_KEY_BASIC,
						 f->mask);

		if (mask->n_proto) {
			if (mask->n_proto != ETHER_TYPE_FULL_MASK) {
				NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter");
				return -EINVAL;
			}

			input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE;
			input->filter.etype = key->n_proto;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key, *mask;

		key = skb_flow_dissector_target(f->dissector,
						FLOW_DISSECTOR_KEY_VLAN,
						f->key);
		mask = skb_flow_dissector_target(f->dissector,
						 FLOW_DISSECTOR_KEY_VLAN,
						 f->mask);

		if (mask->vlan_priority) {
			if (mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
				NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
				return -EINVAL;
			}

			input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
			input->filter.vlan_tci = key->vlan_priority;
		}
	}

	input->action = traffic_class;
	input->cookie = f->cookie;

	return 0;
}
static int igb_configure_clsflower(struct igb_adapter *adapter,
				   struct tc_cls_flower_offload *cls_flower)
{
	struct netlink_ext_ack *extack = cls_flower->common.extack;
	struct igb_nfc_filter *filter, *f;
	int err, tc;

	tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
	if (tc < 0) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid traffic class");
		return -EINVAL;
	}

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return -ENOMEM;

	err = igb_parse_cls_flower(adapter, cls_flower, tc, filter);
	if (err < 0)
		goto err_parse;

	spin_lock(&adapter->nfc_lock);

	hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) {
		if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
			err = -EEXIST;
			NL_SET_ERR_MSG_MOD(extack,
					   "This filter is already set in ethtool");
			goto err_locked;
		}
	}

	hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) {
		if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
			err = -EEXIST;
			NL_SET_ERR_MSG_MOD(extack,
					   "This filter is already set in cls_flower");
			goto err_locked;
		}
	}

	err = igb_add_filter(adapter, filter);
	if (err < 0) {
		NL_SET_ERR_MSG_MOD(extack, "Could not add filter to the adapter");
		goto err_locked;
	}

	hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list);

	spin_unlock(&adapter->nfc_lock);

	return 0;

err_locked:
	spin_unlock(&adapter->nfc_lock);

err_parse:
	kfree(filter);

	return err;
}

static int igb_delete_clsflower(struct igb_adapter *adapter,
				struct tc_cls_flower_offload *cls_flower)
{
	struct igb_nfc_filter *filter;
	int err;

	spin_lock(&adapter->nfc_lock);

	hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node)
		if (filter->cookie == cls_flower->cookie)
			break;

	if (!filter) {
		err = -ENOENT;
		goto out;
	}

	err = igb_erase_filter(adapter, filter);
	if (err < 0)
		goto out;

	hlist_del(&filter->nfc_node);
	kfree(filter);

out:
	spin_unlock(&adapter->nfc_lock);

	return err;
}
static int igb_setup_tc_cls_flower(struct igb_adapter *adapter,
				   struct tc_cls_flower_offload *cls_flower)
{
	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return igb_configure_clsflower(adapter, cls_flower);
	case TC_CLSFLOWER_DESTROY:
		return igb_delete_clsflower(adapter, cls_flower);
	case TC_CLSFLOWER_STATS:
		return -EOPNOTSUPP;
	default:
		return -EOPNOTSUPP;
	}
}

static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	struct igb_adapter *adapter = cb_priv;

	if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return igb_setup_tc_cls_flower(adapter, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int igb_setup_tc_block(struct igb_adapter *adapter,
			      struct tc_block_offload *f)
{
	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, igb_setup_tc_block_cb,
					     adapter, adapter, f->extack);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, igb_setup_tc_block_cb,
					adapter);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
static int igb_offload_txtime(struct igb_adapter *adapter,
			      struct tc_etf_qopt_offload *qopt)
{
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* Launchtime offloading is only supported by the i210 controller. */
	if (hw->mac.type != e1000_i210)
		return -EOPNOTSUPP;

	/* Launchtime offloading is only supported by queues 0 and 1. */
	if (qopt->queue < 0 || qopt->queue > 1)
		return -EINVAL;

	err = igb_save_txtime_params(adapter, qopt->queue, qopt->enable);
	if (err)
		return err;

	igb_offload_apply(adapter, qopt->queue);

	return 0;
}
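/* Usage sketch (illustrative, not part of the driver): launchtime offload
 * is reached through the etf qdisc, e.g. something like
 *
 *   tc qdisc replace dev eth0 parent 6666:1 etf clockid CLOCK_TAI \
 *      delta 300000 offload
 *
 * with the handle and delta depending on the local qdisc tree and the
 * application's latency budget; as with CBS, only i210 queues 0 and 1
 * qualify.
 */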
static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
			void *type_data)
{
	struct igb_adapter *adapter = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_QDISC_CBS:
		return igb_offload_cbs(adapter, type_data);
	case TC_SETUP_BLOCK:
		return igb_setup_tc_block(adapter, type_data);
	case TC_SETUP_QDISC_ETF:
		return igb_offload_txtime(adapter, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame,
	.ndo_get_stats64	= igb_get_stats64,
	.ndo_set_rx_mode	= igb_set_rx_mode,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= igb_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= igb_ndo_set_vf_vlan,
	.ndo_set_vf_rate	= igb_ndo_set_vf_bw,
	.ndo_set_vf_spoofchk	= igb_ndo_set_vf_spoofchk,
	.ndo_set_vf_trust	= igb_ndo_set_vf_trust,
	.ndo_get_vf_config	= igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
	.ndo_fix_features	= igb_fix_features,
	.ndo_set_features	= igb_set_features,
	.ndo_fdb_add		= igb_ndo_fdb_add,
	.ndo_features_check	= igb_features_check,
	.ndo_setup_tc		= igb_setup_tc,
};
/**
 * igb_set_fw_version - Configure version string for ethtool
 * @adapter: adapter struct
 **/
void igb_set_fw_version(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_fw_version fw;

	igb_get_fw_version(hw, &fw);

	switch (hw->mac.type) {
	case e1000_i210:
	case e1000_i211:
		if (!(igb_get_flash_presence_i210(hw))) {
			snprintf(adapter->fw_version,
				 sizeof(adapter->fw_version),
				 "%2d.%2d-%d",
				 fw.invm_major, fw.invm_minor,
				 fw.invm_img_type);
			break;
		}
		/* fall through */
	default:
		/* if the option rom is valid, display its version too */
		if (fw.or_valid) {
			snprintf(adapter->fw_version,
				 sizeof(adapter->fw_version),
				 "%d.%d, 0x%08x, %d.%d.%d",
				 fw.eep_major, fw.eep_minor, fw.etrack_id,
				 fw.or_major, fw.or_build, fw.or_patch);
		/* no option rom */
		} else if (fw.etrack_id != 0x0000) {
			snprintf(adapter->fw_version,
				 sizeof(adapter->fw_version),
				 "%d.%d, 0x%08x",
				 fw.eep_major, fw.eep_minor, fw.etrack_id);
		} else {
			snprintf(adapter->fw_version,
				 sizeof(adapter->fw_version),
				 "%d.%d.%d",
				 fw.eep_major, fw.eep_minor, fw.eep_build);
		}
		break;
	}
}
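/* The string built above is what "ethtool -i <iface>" reports as
 * firmware-version, e.g. (illustrative output only) "1.63, 0x80000dda"
 * for an EEPROM-only part, or "1.63, 0x80000dda, 1.949.0" when an option
 * ROM version is also present.
 */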
/**
 * igb_init_mas - init Media Autosense feature if enabled in the NVM
 *
 * @adapter: adapter struct
 **/
static void igb_init_mas(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 eeprom_data;

	hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
	switch (hw->bus.func) {
	case E1000_FUNC_0:
		if (eeprom_data & IGB_MAS_ENABLE_0) {
			adapter->flags |= IGB_FLAG_MAS_ENABLE;
			netdev_info(adapter->netdev,
				    "MAS: Enabling Media Autosense for port %d\n",
				    hw->bus.func);
		}
		break;
	case E1000_FUNC_1:
		if (eeprom_data & IGB_MAS_ENABLE_1) {
			adapter->flags |= IGB_FLAG_MAS_ENABLE;
			netdev_info(adapter->netdev,
				    "MAS: Enabling Media Autosense for port %d\n",
				    hw->bus.func);
		}
		break;
	case E1000_FUNC_2:
		if (eeprom_data & IGB_MAS_ENABLE_2) {
			adapter->flags |= IGB_FLAG_MAS_ENABLE;
			netdev_info(adapter->netdev,
				    "MAS: Enabling Media Autosense for port %d\n",
				    hw->bus.func);
		}
		break;
	case E1000_FUNC_3:
		if (eeprom_data & IGB_MAS_ENABLE_3) {
			adapter->flags |= IGB_FLAG_MAS_ENABLE;
			netdev_info(adapter->netdev,
				    "MAS: Enabling Media Autosense for port %d\n",
				    hw->bus.func);
		}
		break;
	default:
		/* Shouldn't get here */
		netdev_err(adapter->netdev,
			   "MAS: Invalid port configuration, returning\n");
		break;
	}
}
/**
 * igb_init_i2c - Init I2C interface
 * @adapter: pointer to adapter structure
 **/
static s32 igb_init_i2c(struct igb_adapter *adapter)
{
	s32 status = 0;

	/* I2C interface supported on i350 devices */
	if (adapter->hw.mac.type != e1000_i350)
		return 0;

	/* Initialize the i2c bus which is controlled by the registers.
	 * This bus will use the i2c_algo_bit structure that implements
	 * the protocol through toggling of the 4 bits in the register.
	 */
	adapter->i2c_adap.owner = THIS_MODULE;
	adapter->i2c_algo = igb_i2c_algo;
	adapter->i2c_algo.data = adapter;
	adapter->i2c_adap.algo_data = &adapter->i2c_algo;
	adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
	strlcpy(adapter->i2c_adap.name, "igb BB",
		sizeof(adapter->i2c_adap.name));
	status = i2c_bit_add_bus(&adapter->i2c_adap);
	return status;
}
/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	u16 eeprom_data = 0;
	s32 ret_val;
	static int global_quad_port_a; /* global quad port a indication */
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	int err, pci_using_dac;
	u8 part_str[E1000_PBANUM_LENGTH];

	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
	if (pdev->is_virtfn) {
		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
		     pci_name(pdev), pdev->vendor, pdev->device);
		return -EINVAL;
	}

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_dma;
		}
	}

	err = pci_request_mem_regions(pdev, igb_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
				   IGB_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	err = -EIO;
	adapter->io_addr = pci_iomap(pdev, 0, 0);
	if (!adapter->io_addr)
		goto err_ioremap;
	/* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */
	hw->hw_addr = adapter->io_addr;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = pci_resource_start(pdev, 0);
	netdev->mem_end = pci_resource_end(pdev, 0);

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	hw->phy.autoneg_wait_to_complete = false;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			 "PHY reset is blocked due to SOL/IDER session.\n");

	/* features is initialized to 0 in allocation, it might have bits
	 * set by igb_sw_init so we should use an or instead of an
	 * assignment.
	 */
	netdev->features |= NETIF_F_SG |
			    NETIF_F_TSO |
			    NETIF_F_TSO6 |
			    NETIF_F_RXHASH |
			    NETIF_F_RXCSUM |
			    NETIF_F_HW_CSUM;

	if (hw->mac.type >= e1000_82576)
		netdev->features |= NETIF_F_SCTP_CRC;

	if (hw->mac.type >= e1000_i350)
		netdev->features |= NETIF_F_HW_TC;

#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
				  NETIF_F_GSO_GRE_CSUM | \
				  NETIF_F_GSO_IPXIP4 | \
				  NETIF_F_GSO_IPXIP6 | \
				  NETIF_F_GSO_UDP_TUNNEL | \
				  NETIF_F_GSO_UDP_TUNNEL_CSUM)

	netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES;
	netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features |
			       NETIF_F_HW_VLAN_CTAG_RX |
			       NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_RXALL;

	if (hw->mac.type >= e1000_i350)
		netdev->hw_features |= NETIF_F_NTUPLE;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
	netdev->mpls_features |= NETIF_F_HW_CSUM;
	netdev->hw_enc_features |= netdev->vlan_features;

	/* set this bit last since it cannot be part of vlan_features */
	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_TX;

	netdev->priv_flags |= IFF_SUPP_NOFCS;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 68 - 9216 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;

	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state
	 */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good, i211/i210 parts can have special NVM
	 * that doesn't contain a checksum
	 */
	switch (hw->mac.type) {
	case e1000_i210:
	case e1000_i211:
		if (igb_get_flash_presence_i210(hw)) {
			if (hw->nvm.ops.validate(hw) < 0) {
				dev_err(&pdev->dev,
					"The NVM Checksum Is Not Valid\n");
				err = -EIO;
				goto err_eeprom;
			}
		}
		break;
	default:
		if (hw->nvm.ops.validate(hw) < 0) {
			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_eeprom;
		}
		break;
	}

	if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
		/* copy the MAC address out of the NVM */
		if (hw->mac.ops.read_mac_addr(hw))
			dev_err(&pdev->dev, "NVM Read Error\n");
	}

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	igb_set_default_mac_filter(adapter);

	/* get firmware version for ethtool -i */
	igb_set_fw_version(adapter);

	/* configure RXPBSIZE and TXPBSIZE */
	if (hw->mac.type == e1000_i210) {
		wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
		wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
	}

	timer_setup(&adapter->watchdog_timer, igb_watchdog, 0);
	timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0);

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.requested_mode = e1000_fc_default;
	hw->fc.current_mode = e1000_fc_default;

	igb_validate_mdi_setting(hw);

	/* By default, support wake on port A */
	if (hw->bus.func == 0)
		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;

	/* Check the NVM for wake support on non-port A ports */
	if (hw->mac.type >= e1000_82580)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
				 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
				 &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	if (eeprom_data & IGB_EEPROM_APME)
		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port
	 */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	default:
		/* If the device can't wake, don't set software support */
		if (!device_can_wakeup(&adapter->pdev->dev))
			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
	}

	/* initialize the wol settings based on the eeprom settings */
	if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
		adapter->wol |= E1000_WUFC_MAG;

	/* Some vendors want WoL disabled by default, but still supported */
	if ((hw->mac.type == e1000_i350) &&
	    (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
		adapter->wol = 0;
	}

	/* Some vendors want the ability to Use the EEPROM setting as
	 * enable/disable only, and not for capability
	 */
	if (((hw->mac.type == e1000_i350) ||
	     (hw->mac.type == e1000_i354)) &&
	    (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) {
		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
		adapter->wol = 0;
	}
	if (hw->mac.type == e1000_i350) {
		if (((pdev->subsystem_device == 0x5001) ||
		     (pdev->subsystem_device == 0x5002)) &&
		    (hw->bus.func == 0)) {
			adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
			adapter->wol = 0;
		}
		if (pdev->subsystem_device == 0x1F52)
			adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
	}

	device_set_wakeup_enable(&adapter->pdev->dev,
				 adapter->flags & IGB_FLAG_WOL_SUPPORTED);

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* Init the I2C interface */
	err = igb_init_i2c(adapter);
	if (err) {
		dev_err(&pdev->dev, "failed to init i2c interface\n");
		goto err_eeprom;
	}

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igb_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		igb_setup_dca(adapter);
	}
#endif
#ifdef CONFIG_IGB_HWMON
	/* Initialize the thermal sensor on i350 devices. */
	if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
		u16 ets_word;

		/* Read the NVM to determine if this i350 device supports an
		 * external thermal sensor.
		 */
		hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
		if (ets_word != 0x0000 && ets_word != 0xFFFF)
			adapter->ets = true;
		else
			adapter->ets = false;
		if (igb_sysfs_init(adapter))
			dev_err(&pdev->dev,
				"failed to allocate sysfs resources\n");
	} else {
		adapter->ets = false;
	}
#endif
	/* Check if Media Autosense is enabled */
	adapter->ei = *ei;
	if (hw->dev_spec._82575.mas_capable)
		igb_init_mas(adapter);

	/* do hw tstamp init after resetting */
	igb_ptp_init(adapter);

	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info, not applicable to i354 */
	if (hw->mac.type != e1000_i354) {
		dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
			 netdev->name,
			 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
			  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
			  "unknown"),
			 ((hw->bus.width == e1000_bus_width_pcie_x4) ?
			  "Width x4" :
			  (hw->bus.width == e1000_bus_width_pcie_x2) ?
			  "Width x2" :
			  (hw->bus.width == e1000_bus_width_pcie_x1) ?
			  "Width x1" : "unknown"), netdev->dev_addr);
	}

	if ((hw->mac.type >= e1000_i210 ||
	     igb_get_flash_presence_i210(hw))) {
		ret_val = igb_read_part_string(hw, part_str,
					       E1000_PBANUM_LENGTH);
	} else {
		ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	}

	if (ret_val)
		strcpy(part_str, "Unknown");
	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
	dev_info(&pdev->dev,
		 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
		 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		 adapter->num_rx_queues, adapter->num_tx_queues);
	if (hw->phy.media_type == e1000_media_type_copper) {
		switch (hw->mac.type) {
		case e1000_i350:
		case e1000_i210:
		case e1000_i211:
			/* Enable EEE for internal copper PHY devices */
			err = igb_set_eee_i350(hw, true, true);
			if ((!err) &&
			    (!hw->dev_spec._82575.eee_disable)) {
				adapter->eee_advert =
					MDIO_EEE_100TX | MDIO_EEE_1000T;
				adapter->flags |= IGB_FLAG_EEE;
			}
			break;
		case e1000_i354:
			if ((rd32(E1000_CTRL_EXT) &
			    E1000_CTRL_EXT_LINK_MODE_SGMII)) {
				err = igb_set_eee_i354(hw, true, true);
				if ((!err) &&
				    (!hw->dev_spec._82575.eee_disable)) {
					adapter->eee_advert =
						MDIO_EEE_100TX | MDIO_EEE_1000T;
					adapter->flags |= IGB_FLAG_EEE;
				}
			}
			break;
		default:
			break;
		}
	}

	pm_runtime_put_noidle(&pdev->dev);
	return 0;

err_register:
	igb_release_hw_control(adapter);
	memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
err_sw_init:
	kfree(adapter->mac_table);
	kfree(adapter->shadow_vfta);
	igb_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PCI_IOV
	igb_disable_sriov(pdev);
#endif
	pci_iounmap(pdev, adapter->io_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
#ifdef CONFIG_PCI_IOV
static int igb_disable_sriov(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* reclaim resources allocated to VFs */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		if (pci_vfs_assigned(pdev)) {
			dev_warn(&pdev->dev,
				 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
			return -EPERM;
		} else {
			pci_disable_sriov(pdev);
			msleep(500);
		}

		kfree(adapter->vf_mac_list);
		adapter->vf_mac_list = NULL;
		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		adapter->vfs_allocated_count = 0;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");

		/* Re-enable DMA Coalescing flag since IOV is turned off */
		adapter->flags |= IGB_FLAG_DMAC;
	}

	return 0;
}
static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	int old_vfs = pci_num_vf(pdev);
	struct vf_mac_filter *mac_list;
	int err = 0;
	int num_vf_mac_filters, i;

	if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
		err = -EPERM;
		goto out;
	}
	if (!num_vfs)
		goto out;

	if (old_vfs) {
		dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
			 old_vfs, max_vfs);
		adapter->vfs_allocated_count = old_vfs;
	} else
		adapter->vfs_allocated_count = num_vfs;

	adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
				   sizeof(struct vf_data_storage), GFP_KERNEL);

	/* if allocation failed then we do not support SR-IOV */
	if (!adapter->vf_data) {
		adapter->vfs_allocated_count = 0;
		err = -ENOMEM;
		goto out;
	}

	/* Due to the limited number of RAR entries calculate potential
	 * number of MAC filters available for the VFs. Reserve entries
	 * for PF default MAC, PF MAC filters and at least one RAR entry
	 * for each VF for VF MAC.
	 */
	num_vf_mac_filters = adapter->hw.mac.rar_entry_count -
			     (1 + IGB_PF_MAC_FILTERS_RESERVED +
			      adapter->vfs_allocated_count);

	adapter->vf_mac_list = kcalloc(num_vf_mac_filters,
				       sizeof(struct vf_mac_filter),
				       GFP_KERNEL);

	mac_list = adapter->vf_mac_list;
	INIT_LIST_HEAD(&adapter->vf_macs.l);

	if (adapter->vf_mac_list) {
		/* Initialize list of VF MAC filters */
		for (i = 0; i < num_vf_mac_filters; i++) {
			mac_list->vf = -1;
			mac_list->free = true;
			list_add(&mac_list->l, &adapter->vf_macs.l);
			mac_list++;
		}
	} else {
		/* If we could not allocate memory for the VF MAC filters
		 * we can continue without this feature but warn user.
		 */
		dev_err(&pdev->dev,
			"Unable to allocate memory for VF MAC filter list\n");
	}

	/* only call pci_enable_sriov() if no VFs are allocated already */
	if (!old_vfs) {
		err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
		if (err)
			goto err_out;
	}
	dev_info(&pdev->dev, "%d VFs allocated\n",
		 adapter->vfs_allocated_count);
	for (i = 0; i < adapter->vfs_allocated_count; i++)
		igb_vf_configure(adapter, i);

	/* DMA Coalescing is not supported in IOV mode. */
	adapter->flags &= ~IGB_FLAG_DMAC;
	goto out;

err_out:
	kfree(adapter->vf_mac_list);
	adapter->vf_mac_list = NULL;
	kfree(adapter->vf_data);
	adapter->vf_data = NULL;
	adapter->vfs_allocated_count = 0;
out:
	return err;
}
#endif
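/* Usage sketch (illustrative): with CONFIG_PCI_IOV, VFs are normally
 * created through the standard PCI sysfs interface rather than the
 * deprecated max_vfs module parameter, e.g.
 *
 *   echo 4 > /sys/class/net/eth0/device/sriov_numvfs
 *
 * which eventually reaches igb_enable_sriov() above; writing 0 tears the
 * VFs down again via igb_disable_sriov(), provided none of them are
 * currently assigned to a guest.
 */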
/**
 * igb_remove_i2c - Cleanup I2C interface
 * @adapter: pointer to adapter structure
 **/
static void igb_remove_i2c(struct igb_adapter *adapter)
{
	/* free the adapter bus structure */
	i2c_del_adapter(&adapter->i2c_adap);
}
/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	pm_runtime_get_noresume(&pdev->dev);
#ifdef CONFIG_IGB_HWMON
	igb_sysfs_exit(adapter);
#endif
	igb_remove_i2c(adapter);
	igb_ptp_stop(adapter);
	/* The watchdog timer may be rescheduled, so explicitly
	 * disable watchdog from being rescheduled.
	 */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	igb_release_hw_control(adapter);

#ifdef CONFIG_PCI_IOV
	igb_disable_sriov(pdev);
#endif

	unregister_netdev(netdev);

	igb_clear_interrupt_scheme(adapter);

	pci_iounmap(pdev, adapter->io_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_mem_regions(pdev);

	kfree(adapter->mac_table);
	kfree(adapter->shadow_vfta);
	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
/**
 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
 * @adapter: board private structure to initialize
 *
 * This function initializes the vf specific data storage and then attempts to
 * allocate the VFs. The reason for ordering it this way is because it is much
 * more expensive time-wise to disable SR-IOV than it is to allocate and free
 * the memory for the VFs.
 **/
static void igb_probe_vfs(struct igb_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;

	/* Virtualization features not supported on i210 family. */
	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
		return;

	/* Of the below we really only want the effect of getting
	 * IGB_FLAG_HAS_MSIX set (if available), without which
	 * igb_enable_sriov() has no effect.
	 */
	igb_set_interrupt_capability(adapter, true);
	igb_reset_interrupt_capability(adapter);

	pci_sriov_set_totalvfs(pdev, 7);
	igb_enable_sriov(pdev, max_vfs);

#endif /* CONFIG_PCI_IOV */
}
unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned int max_rss_queues;

	/* Determine the maximum number of RSS queues supported. */
	switch (hw->mac.type) {
	case e1000_i211:
		max_rss_queues = IGB_MAX_RX_QUEUES_I211;
		break;
	case e1000_82575:
	case e1000_i210:
		max_rss_queues = IGB_MAX_RX_QUEUES_82575;
		break;
	case e1000_i350:
		/* I350 cannot do RSS and SR-IOV at the same time */
		if (!!adapter->vfs_allocated_count) {
			max_rss_queues = 1;
			break;
		}
		/* fall through */
	case e1000_82576:
		if (!!adapter->vfs_allocated_count) {
			max_rss_queues = 2;
			break;
		}
		/* fall through */
	case e1000_82580:
	case e1000_i354:
	default:
		max_rss_queues = IGB_MAX_RX_QUEUES;
		break;
	}

	return max_rss_queues;
}

static void igb_init_queue_configuration(struct igb_adapter *adapter)
{
	u32 max_rss_queues;

	max_rss_queues = igb_get_max_rss_queues(adapter);
	adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());

	igb_set_flag_queue_pairs(adapter, max_rss_queues);
}
void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
			      const u32 max_rss_queues)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Determine if we need to pair queues. */
	switch (hw->mac.type) {
	case e1000_82575:
	case e1000_i211:
		/* Device supports enough interrupts without queue pairing. */
		break;
	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	default:
		/* If rss_queues > half of max_rss_queues, pair the queues in
		 * order to conserve interrupts due to limited supply.
		 */
		if (adapter->rss_queues > (max_rss_queues / 2))
			adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
		else
			adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS;
		break;
	}
}
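/* Worked example (illustrative, assuming IGB_MAX_RX_QUEUES is 8): on an
 * 82576 with 6 online CPUs, rss_queues = min(8, 6) = 6, and since
 * 6 > 8 / 2, IGB_FLAG_QUEUE_PAIRS is set, so each Tx/Rx queue pair shares
 * one interrupt vector instead of consuming two.
 */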
/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	/* set default ring sizes */
	adapter->tx_ring_count = IGB_DEFAULT_TXD;
	adapter->rx_ring_count = IGB_DEFAULT_RXD;

	/* set default ITR values */
	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
	adapter->tx_itr_setting = IGB_DEFAULT_ITR;

	/* set default work limits */
	adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;

	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
				  VLAN_HLEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;

	spin_lock_init(&adapter->nfc_lock);
	spin_lock_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV
	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		if (max_vfs > 7) {
			dev_warn(&pdev->dev,
				 "Maximum of 7 VFs per PF, using max\n");
			max_vfs = adapter->vfs_allocated_count = 7;
		} else
			adapter->vfs_allocated_count = max_vfs;
		if (adapter->vfs_allocated_count)
			dev_warn(&pdev->dev,
				 "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
		break;
	default:
		break;
	}
#endif /* CONFIG_PCI_IOV */

	/* Assume MSI-X interrupts, will be checked during IRQ allocation */
	adapter->flags |= IGB_FLAG_HAS_MSIX;

	adapter->mac_table = kcalloc(hw->mac.rar_entry_count,
				     sizeof(struct igb_mac_addr),
				     GFP_ATOMIC);
	if (!adapter->mac_table)
		return -ENOMEM;

	igb_probe_vfs(adapter);

	igb_init_queue_configuration(adapter);

	/* Setup and initialize a copy of the hw vlan table array */
	adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
				       GFP_ATOMIC);
	if (!adapter->shadow_vfta)
		return -ENOMEM;

	/* This call may decrease the number of queues */
	if (igb_init_interrupt_scheme(adapter, true)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igb_irq_disable(adapter);

	if (hw->mac.type >= e1000_i350)
		adapter->flags &= ~IGB_FLAG_DMAC;

	set_bit(__IGB_DOWN, &adapter->state);
	return 0;
}
/**
 * igb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int __igb_open(struct net_device *netdev, bool resuming)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	int err;
	int i;

	/* disallow open during test */
	if (test_bit(__IGB_TESTING, &adapter->state)) {
		WARN_ON(resuming);
		return -EBUSY;
	}

	if (!resuming)
		pm_runtime_get_sync(&pdev->dev);

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = igb_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igb_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	igb_power_up_link(adapter);

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	igb_configure(adapter);

	err = igb_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* Notify the stack of the actual queue counts. */
	err = netif_set_real_num_tx_queues(adapter->netdev,
					   adapter->num_tx_queues);
	if (err)
		goto err_set_queues;

	err = netif_set_real_num_rx_queues(adapter->netdev,
					   adapter->num_rx_queues);
	if (err)
		goto err_set_queues;

	/* From here on the code is the same as igb_up() */
	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&(adapter->q_vector[i]->napi));

	/* Clear any pending interrupts. */
	rd32(E1000_TSICR);
	rd32(E1000_ICR);

	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);

		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(netdev);

	if (!resuming)
		pm_runtime_put(&pdev->dev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;

err_set_queues:
	igb_free_irq(adapter);
err_req_irq:
	igb_release_hw_control(adapter);
	igb_power_down_link(adapter);
	igb_free_all_rx_resources(adapter);
err_setup_rx:
	igb_free_all_tx_resources(adapter);
err_setup_tx:
	igb_reset(adapter);
	if (!resuming)
		pm_runtime_put(&pdev->dev);

	return err;
}

int igb_open(struct net_device *netdev)
{
	return __igb_open(netdev, false);
}

/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int __igb_close(struct net_device *netdev, bool suspending)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;

	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));

	if (!suspending)
		pm_runtime_get_sync(&pdev->dev);

	igb_down(adapter);
	igb_free_irq(adapter);

	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);

	if (!suspending)
		pm_runtime_put_sync(&pdev->dev);
	return 0;
}

int igb_close(struct net_device *netdev)
{
	if (netif_device_present(netdev) || netdev->dismantle)
		return __igb_close(netdev, false);
	return 0;
}
  3467. /**
  3468. * igb_setup_tx_resources - allocate Tx resources (Descriptors)
  3469. * @tx_ring: tx descriptor ring (for a specific queue) to setup
  3470. *
  3471. * Return 0 on success, negative on failure
  3472. **/
  3473. int igb_setup_tx_resources(struct igb_ring *tx_ring)
  3474. {
  3475. struct device *dev = tx_ring->dev;
  3476. int size;
  3477. size = sizeof(struct igb_tx_buffer) * tx_ring->count;
  3478. tx_ring->tx_buffer_info = vmalloc(size);
  3479. if (!tx_ring->tx_buffer_info)
  3480. goto err;
  3481. /* round up to nearest 4K */
  3482. tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
  3483. tx_ring->size = ALIGN(tx_ring->size, 4096);
  3484. tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
  3485. &tx_ring->dma, GFP_KERNEL);
  3486. if (!tx_ring->desc)
  3487. goto err;
  3488. tx_ring->next_to_use = 0;
  3489. tx_ring->next_to_clean = 0;
  3490. return 0;
  3491. err:
  3492. vfree(tx_ring->tx_buffer_info);
  3493. tx_ring->tx_buffer_info = NULL;
  3494. dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
  3495. return -ENOMEM;
  3496. }
  3497. /**
  3498. * igb_setup_all_tx_resources - wrapper to allocate Tx resources
  3499. * (Descriptors) for all queues
  3500. * @adapter: board private structure
  3501. *
  3502. * Return 0 on success, negative on failure
  3503. **/
  3504. static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
  3505. {
  3506. struct pci_dev *pdev = adapter->pdev;
  3507. int i, err = 0;
  3508. for (i = 0; i < adapter->num_tx_queues; i++) {
  3509. err = igb_setup_tx_resources(adapter->tx_ring[i]);
  3510. if (err) {
  3511. dev_err(&pdev->dev,
  3512. "Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igb_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 **/
void igb_setup_tctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which is enabled by default on 82575 and 82576 */
	wr32(E1000_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	igb_config_collision_dist(hw);

	/* Enable transmits */
	tctl |= E1000_TCTL_EN;

	wr32(E1000_TCTL, tctl);
}

/**
 * igb_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 **/
void igb_configure_tx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 txdctl = 0;
	u64 tdba = ring->dma;
	int reg_idx = ring->reg_idx;

	wr32(E1000_TDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_tx_desc));
	wr32(E1000_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(E1000_TDBAH(reg_idx), tdba >> 32);

	ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
	wr32(E1000_TDH(reg_idx), 0);
	writel(0, ring->tail);

	txdctl |= IGB_TX_PTHRESH;
	txdctl |= IGB_TX_HTHRESH << 8;
	txdctl |= IGB_TX_WTHRESH << 16;

	/* reinitialize tx_buffer_info */
	memset(ring->tx_buffer_info, 0,
	       sizeof(struct igb_tx_buffer) * ring->count);

	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
	wr32(E1000_TXDCTL(reg_idx), txdctl);
}

/**
 * igb_configure_tx - Configure transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void igb_configure_tx(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* disable the queues */
	for (i = 0; i < adapter->num_tx_queues; i++)
		wr32(E1000_TXDCTL(adapter->tx_ring[i]->reg_idx), 0);

	wrfl();
	usleep_range(10000, 20000);

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

/**
 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int size;

	size = sizeof(struct igb_rx_buffer) * rx_ring->count;

	rx_ring->rx_buffer_info = vmalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 * igb_setup_all_rx_resources - wrapper to allocate Rx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igb_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 * igb_setup_mrqc - configure the multiple receive queue control registers
 * @adapter: Board private structure
 **/
static void igb_setup_mrqc(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mrqc, rxcsum;
	u32 j, num_rx_queues;
	u32 rss_key[10];

	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	for (j = 0; j < 10; j++)
		wr32(E1000_RSSRK(j), rss_key[j]);

	num_rx_queues = adapter->rss_queues;

	switch (hw->mac.type) {
	case e1000_82576:
		/* 82576 supports 2 RSS queues for SR-IOV */
		if (adapter->vfs_allocated_count)
			num_rx_queues = 2;
		break;
	default:
		break;
	}
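
	/* (Re)build the RSS indirection table only when the active queue
	 * count has changed; entries are spread evenly across the queues.
	 */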
	if (adapter->rss_indir_tbl_init != num_rx_queues) {
		for (j = 0; j < IGB_RETA_SIZE; j++)
			adapter->rss_indir_tbl[j] =
				(j * num_rx_queues) / IGB_RETA_SIZE;
		adapter->rss_indir_tbl_init = num_rx_queues;
	}
	igb_write_rss_indir_tbl(adapter);

	/* Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(E1000_RXCSUM);
	rxcsum |= E1000_RXCSUM_PCSD;

	if (adapter->hw.mac.type >= e1000_82576)
		/* Enable Receive Checksum Offload for SCTP */
		rxcsum |= E1000_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(E1000_RXCSUM, rxcsum);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
	       E1000_MRQC_RSS_FIELD_IPV4_TCP |
	       E1000_MRQC_RSS_FIELD_IPV6 |
	       E1000_MRQC_RSS_FIELD_IPV6_TCP |
	       E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;

	if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
	if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;

	/* If VMDq is enabled then we set the appropriate mode for that, else
	 * we default to RSS so that an RSS hash is calculated per packet even
	 * if we are only using one queue
	 */
	if (adapter->vfs_allocated_count) {
		if (hw->mac.type > e1000_82575) {
			/* Set the default pool for the PF's first queue */
			u32 vtctl = rd32(E1000_VT_CTL);

			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
				   E1000_VT_CTL_DISABLE_DEF_POOL);
			vtctl |= adapter->vfs_allocated_count <<
				 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
			wr32(E1000_VT_CTL, vtctl);
		}
		if (adapter->rss_queues > 1)
			mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ;
		else
			mrqc |= E1000_MRQC_ENABLE_VMDQ;
	} else {
		if (hw->mac.type != e1000_i211)
			mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
	}
	igb_vmm_control(adapter);

	wr32(E1000_MRQC, mrqc);
}

/**
 * igb_setup_rctl - configure the receive control registers
 * @adapter: Board private structure
 **/
void igb_setup_rctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(E1000_RCTL);
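	/* clear the multicast-offset field and the loopback-mode bits
	 * before (re)programming them below
	 */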
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* enable stripping of CRC. It's unlikely this will break BMC
	 * redirection as it did with e1000. Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= E1000_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

	/* enable LPE to allow for reception of jumbo frames */
	rctl |= E1000_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(E1000_RXDCTL(0), 0);

	/* Attention!!! For SR-IOV PF driver operations you must enable
	 * queue drop for all VF and PF queues to prevent head of line blocking
	 * if an un-trusted VF does not provide descriptors to hardware.
	 */
	if (adapter->vfs_allocated_count) {
		/* set all queue drop enable bits */
		wr32(E1000_QDE, ALL_QUEUES);
	}

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in e1000e_set_rx_mode
		 */
		rctl |= (E1000_RCTL_SBP |	/* Receive bad packets */
			 E1000_RCTL_BAM |	/* RX All Bcast Pkts */
			 E1000_RCTL_PMCF);	/* RX All MAC Ctrl Pkts */

		rctl &= ~(E1000_RCTL_DPF |	/* Allow filtered pause */
			  E1000_RCTL_CFIEN);	/* Dis VLAN CFIEN Filter */
		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
		 * and that breaks VLANs.
		 */
	}

	wr32(E1000_RCTL, rctl);
}

static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
				   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	if (size > MAX_JUMBO_FRAME_SIZE)
		size = MAX_JUMBO_FRAME_SIZE;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;
	wr32(E1000_VMOLR(vfn), vmolr);

	return 0;
}

static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter,
					 int vfn, bool enable)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 val, reg;

	if (hw->mac.type < e1000_82576)
		return;

	if (hw->mac.type == e1000_i350)
		reg = E1000_DVMOLR(vfn);
	else
		reg = E1000_VMOLR(vfn);

	val = rd32(reg);
	if (enable)
		val |= E1000_VMOLR_STRVLAN;
	else
		val &= ~(E1000_VMOLR_STRVLAN);
	wr32(reg, val);
}

static inline void igb_set_vmolr(struct igb_adapter *adapter,
				 int vfn, bool aupe)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/* This register exists only on 82576 and newer so if we are older then
	 * we should exit and do nothing
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr = rd32(E1000_VMOLR(vfn));
	if (aupe)
		vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
	else
		vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */

	/* clear all bits that might not be set */
	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
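
	/* the pool index right after the last VF is the PF's default pool;
	 * it is the only pool allowed to use RSS
	 */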
	if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */

	/* for VMDq only allow the VFs and pool 0 to accept broadcast and
	 * multicast packets
	 */
	if (vfn <= adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */

	wr32(E1000_VMOLR(vfn), vmolr);
}

/**
 * igb_configure_rx_ring - Configure a receive ring after Reset
 * @adapter: board private structure
 * @ring: receive ring to be configured
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
void igb_configure_rx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	union e1000_adv_rx_desc *rx_desc;
	u64 rdba = ring->dma;
	int reg_idx = ring->reg_idx;
	u32 srrctl = 0, rxdctl = 0;

	/* disable the queue */
	wr32(E1000_RXDCTL(reg_idx), 0);

	/* Set DMA base address registers */
	wr32(E1000_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
	wr32(E1000_RDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_rx_desc));

	/* initialize head and tail */
	ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
	wr32(E1000_RDH(reg_idx), 0);
	writel(0, ring->tail);

	/* set descriptor configuration */
	srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
	if (ring_uses_large_buffer(ring))
		srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
	else
		srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
	srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	if (hw->mac.type >= e1000_82580)
		srrctl |= E1000_SRRCTL_TIMESTAMP;
	/* Only set Drop Enable if we are supporting multiple queues */
	if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
		srrctl |= E1000_SRRCTL_DROP_EN;

	wr32(E1000_SRRCTL(reg_idx), srrctl);

	/* set filtering for VMDQ pools */
	igb_set_vmolr(adapter, reg_idx & 0x7, true);

	rxdctl |= IGB_RX_PTHRESH;
	rxdctl |= IGB_RX_HTHRESH << 8;
	rxdctl |= IGB_RX_WTHRESH << 16;

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct igb_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = IGB_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* enable receive descriptor fetching */
	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	wr32(E1000_RXDCTL(reg_idx), rxdctl);
}

static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
				  struct igb_ring *rx_ring)
{
	/* set build_skb and buffer size flags */
	clear_ring_build_skb_enabled(rx_ring);
	clear_ring_uses_large_buffer(rx_ring);

	if (adapter->flags & IGB_FLAG_RX_LEGACY)
		return;

	set_ring_build_skb_enabled(rx_ring);
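
	/* with 4K pages, frames larger than IGB_MAX_FRAME_BUILD_SKB need
	 * the 3K (large) buffer layout
	 */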
#if (PAGE_SIZE < 8192)
	if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
		return;

	set_ring_uses_large_buffer(rx_ring);
#endif
}

/**
 * igb_configure_rx - Configure receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void igb_configure_rx(struct igb_adapter *adapter)
{
	int i;

	/* set the correct pool for the PF default MAC address in entry 0 */
	igb_set_default_mac_filter(adapter);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *rx_ring = adapter->rx_ring[i];

		igb_set_rx_buffer_len(adapter, rx_ring);
		igb_configure_rx_ring(adapter, rx_ring);
	}
}

/**
 * igb_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void igb_free_tx_resources(struct igb_ring *tx_ring)
{
	igb_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 * igb_free_all_tx_resources - Free Tx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all transmit software resources
 **/
static void igb_free_all_tx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i])
			igb_free_tx_resources(adapter->tx_ring[i]);
}

/**
 * igb_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
	u16 i = tx_ring->next_to_clean;
	struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];
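
	/* walk the ring from next_to_clean to next_to_use: the first buffer
	 * of each packet owns the skb and the header mapping, and any
	 * buffers up to eop_desc carry page fragments to be unmapped
	 */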
	while (i != tx_ring->next_to_use) {
		union e1000_adv_tx_desc *eop_desc, *tx_desc;

		/* Free all the Tx ring sk_buffs */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* check for eop_desc to determine the end of the packet */
		eop_desc = tx_buffer->next_to_watch;
		tx_desc = IGB_TX_DESC(tx_ring, i);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(i == tx_ring->count)) {
				i = 0;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGB_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len))
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		i++;
		if (unlikely(i == tx_ring->count)) {
			i = 0;
			tx_buffer = tx_ring->tx_buffer_info;
		}
	}

	/* reset BQL for queue */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* reset next_to_use and next_to_clean */
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}

/**
 * igb_clean_all_tx_rings - Free Tx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i])
			igb_clean_tx_ring(adapter->tx_ring[i]);
}

/**
 * igb_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void igb_free_rx_resources(struct igb_ring *rx_ring)
{
	igb_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * igb_free_all_rx_resources - Free Rx Resources for All Queues
 * @adapter: board private structure
 *
 * Free all receive software resources
 **/
static void igb_free_all_rx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i])
			igb_free_rx_resources(adapter->rx_ring[i]);
}

/**
 * igb_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
	u16 i = rx_ring->next_to_clean;

	if (rx_ring->skb)
		dev_kfree_skb(rx_ring->skb);
	rx_ring->skb = NULL;

	/* Free all the Rx ring sk_buffs */
	while (i != rx_ring->next_to_alloc) {
		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      buffer_info->dma,
					      buffer_info->page_offset,
					      igb_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev,
				     buffer_info->dma,
				     igb_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IGB_RX_DMA_ATTR);
		__page_frag_cache_drain(buffer_info->page,
					buffer_info->pagecnt_bias);

		i++;
		if (i == rx_ring->count)
			i = 0;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * igb_clean_all_rx_rings - Free Rx Buffers for all queues
 * @adapter: board private structure
 **/
static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i])
			igb_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 * igb_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_set_mac(struct net_device *netdev, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igb_set_default_mac_filter(adapter);

	return 0;
}

/**
 * igb_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: -ENOMEM on failure
 *          0 on no addresses written
 *          X on writing X addresses to MTA
 **/
static int igb_write_mc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	int i;

	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		igb_update_mc_addr_list(hw, NULL, 0);
		igb_restore_vf_multicasts(adapter);
		return 0;
	}
	mta_list = kcalloc(netdev_mc_count(netdev), ETH_ALEN, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	igb_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}

static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 i, pf_id;

	switch (hw->mac.type) {
	case e1000_i210:
	case e1000_i211:
	case e1000_i350:
		/* VLAN filtering needed for VLAN prio filter */
		if (adapter->netdev->features & NETIF_F_NTUPLE)
			break;
		/* fall through */
	case e1000_82576:
	case e1000_82580:
	case e1000_i354:
		/* VLAN filtering needed for pool filtering */
		if (adapter->vfs_allocated_count)
			break;
		/* fall through */
	default:
		return 1;
	}

	/* We are already in VLAN promisc, nothing to do */
	if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
		return 0;

	if (!adapter->vfs_allocated_count)
		goto set_vfta;

	/* Add PF to all active pools */
	pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;

	for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
		u32 vlvf = rd32(E1000_VLVF(i));

		vlvf |= BIT(pf_id);
		wr32(E1000_VLVF(i), vlvf);
	}

set_vfta:
	/* Set all bits in the VLAN filter table array */
	for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;)
		hw->mac.ops.write_vfta(hw, i, ~0U);

	/* Set flag so we don't redo unnecessary work */
	adapter->flags |= IGB_FLAG_VLAN_PROMISC;

	return 0;
}

#define VFTA_BLOCK_SIZE 8
static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
	u32 vid_start = vfta_offset * 32;
	u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
	u32 i, vid, word, bits, pf_id;

	/* guarantee that we don't scrub out management VLAN */
	vid = adapter->mng_vlan_id;
	if (vid >= vid_start && vid < vid_end)
		vfta[(vid - vid_start) / 32] |= BIT(vid % 32);

	if (!adapter->vfs_allocated_count)
		goto set_vfta;

	pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;

	for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
		u32 vlvf = rd32(E1000_VLVF(i));

		/* pull VLAN ID from VLVF */
		vid = vlvf & VLAN_VID_MASK;

		/* only concern ourselves with a certain range */
		if (vid < vid_start || vid >= vid_end)
			continue;

		if (vlvf & E1000_VLVF_VLANID_ENABLE) {
			/* record VLAN ID in VFTA */
			vfta[(vid - vid_start) / 32] |= BIT(vid % 32);

			/* if PF is part of this then continue */
			if (test_bit(vid, adapter->active_vlans))
				continue;
		}

		/* remove PF from the pool */
		bits = ~BIT(pf_id);
		bits &= rd32(E1000_VLVF(i));
		wr32(E1000_VLVF(i), bits);
	}

set_vfta:
	/* extract values from active_vlans and write back to VFTA */
	for (i = VFTA_BLOCK_SIZE; i--;) {
		vid = (vfta_offset + i) * 32;
		word = vid / BITS_PER_LONG;
		bits = vid % BITS_PER_LONG;

		vfta[i] |= adapter->active_vlans[word] >> bits;

		hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]);
	}
}

static void igb_vlan_promisc_disable(struct igb_adapter *adapter)
{
	u32 i;

	/* We are not in VLAN promisc, nothing to do */
	if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC))
		return;

	/* Set flag so we don't redo unnecessary work */
	adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;

	for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE)
		igb_scrub_vfta(adapter, i);
}

/**
 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 * @netdev: network interface device structure
 *
 * The set_rx_mode entry point is called whenever the unicast or multicast
 * address lists or the network interface flags are updated. This routine is
 * responsible for configuring the hardware for proper unicast, multicast,
 * promiscuous mode, and all-multi behavior.
 **/
static void igb_set_rx_mode(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	if (netdev->flags & IFF_PROMISC) {
		rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE;
		vmolr |= E1000_VMOLR_MPME;

		/* enable use of UTA filter to force packets to default pool */
		if (hw->mac.type == e1000_82576)
			vmolr |= E1000_VMOLR_ROPE;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= E1000_RCTL_MPE;
			vmolr |= E1000_VMOLR_MPME;
		} else {
			/* Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			count = igb_write_mc_addr_list(netdev);
			if (count < 0) {
				rctl |= E1000_RCTL_MPE;
				vmolr |= E1000_VMOLR_MPME;
			} else if (count) {
				vmolr |= E1000_VMOLR_ROMPE;
			}
		}
	}

	/* Write addresses to available RAR registers, if there is not
	 * sufficient space to store all the addresses then enable
	 * unicast promiscuous mode
	 */
	if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) {
		rctl |= E1000_RCTL_UPE;
		vmolr |= E1000_VMOLR_ROPE;
	}

	/* enable VLAN filtering by default */
	rctl |= E1000_RCTL_VFE;

	/* disable VLAN filtering for modes that require it */
	if ((netdev->flags & IFF_PROMISC) ||
	    (netdev->features & NETIF_F_RXALL)) {
		/* if we fail to set all rules then just clear VFE */
		if (igb_vlan_promisc_enable(adapter))
			rctl &= ~E1000_RCTL_VFE;
	} else {
		igb_vlan_promisc_disable(adapter);
	}

	/* update state of unicast, multicast, and VLAN filtering modes */
	rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE |
				     E1000_RCTL_VFE);
	wr32(E1000_RCTL, rctl);

#if (PAGE_SIZE < 8192)
	if (!adapter->vfs_allocated_count) {
		if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
			rlpml = IGB_MAX_FRAME_BUILD_SKB;
	}
#endif
	wr32(E1000_RLPML, rlpml);

	/* In order to support SR-IOV and eventually VMDq it is necessary to set
	 * the VMOLR to enable the appropriate modes. Without this workaround
	 * we will have issues with VLAN tag stripping not being done for frames
	 * that are only arriving because we are the default pool
	 */
	if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
		return;

	/* set UTA to appropriate mode */
	igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE));

	vmolr |= rd32(E1000_VMOLR(vfn)) &
		 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);

	/* enable Rx jumbo frames, restrict as needed to support build_skb */
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
#if (PAGE_SIZE < 8192)
	if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
		vmolr |= IGB_MAX_FRAME_BUILD_SKB;
	else
#endif
		vmolr |= MAX_JUMBO_FRAME_SIZE;
	vmolr |= E1000_VMOLR_LPE;

	wr32(E1000_VMOLR(vfn), vmolr);

	igb_restore_vf_multicasts(adapter);
}

static void igb_check_wvbr(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 wvbr = 0;

	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		wvbr = rd32(E1000_WVBR);
		if (!wvbr)
			return;
		break;
	default:
		break;
	}

	adapter->wvbr |= wvbr;
}
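
/* WVBR reports spoof events in two banks: bit n flags VF n's first queue
 * and bit (n + IGB_STAGGERED_QUEUE_OFFSET) its second
 */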
#define IGB_STAGGERED_QUEUE_OFFSET 8

static void igb_spoof_check(struct igb_adapter *adapter)
{
	int j;

	if (!adapter->wvbr)
		return;

	for (j = 0; j < adapter->vfs_allocated_count; j++) {
		if (adapter->wvbr & BIT(j) ||
		    adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) {
			dev_warn(&adapter->pdev->dev,
				 "Spoof event(s) detected on VF %d\n", j);
			adapter->wvbr &=
				~(BIT(j) |
				  BIT(j + IGB_STAGGERED_QUEUE_OFFSET));
		}
	}
}

/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy
 */
static void igb_update_phy_info(struct timer_list *t)
{
	struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);

	igb_get_phy_info(&adapter->hw);
}

/**
 * igb_has_link - check shared code for link and determine up/down
 * @adapter: pointer to driver private info
 **/
bool igb_has_link(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;

	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt. get_link_status will stay
	 * false until the e1000_check_for_link establishes link
	 * for copper adapters ONLY
	 */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (!hw->mac.get_link_status)
			return true;
		/* fall through */
	case e1000_media_type_internal_serdes:
		hw->mac.ops.check_for_link(hw);
		link_active = !hw->mac.get_link_status;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}
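
	/* on i210/i211 internal PHYs, debounce link-up reports: a fresh
	 * transition sets IGB_FLAG_NEED_LINK_UPDATE so that the watchdog
	 * keeps reporting the link down until it has held for one second
	 */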
	if (((hw->mac.type == e1000_i210) ||
	     (hw->mac.type == e1000_i211)) &&
	    (hw->phy.id == I210_I_PHY_ID)) {
		if (!netif_carrier_ok(adapter->netdev)) {
			adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
		} else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
			adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
			adapter->link_check_timeout = jiffies;
		}
	}

	return link_active;
}

static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
{
	bool ret = false;
	u32 ctrl_ext, thstat;

	/* check for thermal sensor event on i350 copper only */
	if (hw->mac.type == e1000_i350) {
		thstat = rd32(E1000_THSTAT);
		ctrl_ext = rd32(E1000_CTRL_EXT);

		if ((hw->phy.media_type == e1000_media_type_copper) &&
		    !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
			ret = !!(thstat & event);
	}

	return ret;
}

/**
 * igb_check_lvmmc - check for malformed packets received
 *		     and indicated in LVMMC register
 * @adapter: pointer to adapter
 **/
static void igb_check_lvmmc(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 lvmmc;

	lvmmc = rd32(E1000_LVMMC);
	if (lvmmc) {
		if (unlikely(net_ratelimit())) {
			netdev_warn(adapter->netdev,
				    "malformed Tx packet detected and dropped, LVMMC:0x%08x\n",
				    lvmmc);
		}
	}
}
/**
 * igb_watchdog - Timer Call-back
 * @t: pointer to the timer_list embedded in the adapter structure
 **/
static void igb_watchdog(struct timer_list *t)
{
	struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);

	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igb_watchdog_task(struct work_struct *work)
{
	struct igb_adapter *adapter = container_of(work,
						   struct igb_adapter,
						   watchdog_task);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_phy_info *phy = &hw->phy;
	struct net_device *netdev = adapter->netdev;
	u32 link;
	int i;
	u32 connsw;
	u16 phy_data, retry_count = 20;

	link = igb_has_link(adapter);

	if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
		if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
			adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
		else
			link = false;
	}

	/* Force link down if we have fiber to swap to */
	if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
		if (hw->phy.media_type == e1000_media_type_copper) {
			connsw = rd32(E1000_CONNSW);
			if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
				link = 0;
		}
	}

	if (link) {
		/* Perform a reset if the media type changed. */
		if (hw->dev_spec._82575.media_changed) {
			hw->dev_spec._82575.media_changed = false;
			adapter->flags |= IGB_FLAG_MEDIA_RESET;
			igb_reset(adapter);
		}
		/* Cancel scheduled suspend requests. */
		pm_runtime_resume(netdev->dev.parent);

		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;

			hw->mac.ops.get_speed_and_duplex(hw,
							 &adapter->link_speed,
							 &adapter->link_duplex);

			ctrl = rd32(E1000_CTRL);
			/* Links status message must follow this format */
			netdev_info(netdev,
				    "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
				    netdev->name,
				    adapter->link_speed,
				    adapter->link_duplex == FULL_DUPLEX ?
				    "Full" : "Half",
				    (ctrl & E1000_CTRL_TFCE) &&
				    (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
				    (ctrl & E1000_CTRL_RFCE) ? "RX" :
				    (ctrl & E1000_CTRL_TFCE) ? "TX" : "None");

			/* disable EEE if enabled */
			if ((adapter->flags & IGB_FLAG_EEE) &&
			    (adapter->link_duplex == HALF_DUPLEX)) {
				dev_info(&adapter->pdev->dev,
					 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
				adapter->hw.dev_spec._82575.eee_disable = true;
				adapter->flags &= ~IGB_FLAG_EEE;
			}

			/* check if SmartSpeed worked */
			igb_check_downshift(hw);
			if (phy->speed_downgraded)
				netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw,
			    E1000_THSTAT_LINK_THROTTLE))
				netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				/* maybe add some timeout factor ? */
				break;
			}

			if (adapter->link_speed != SPEED_1000)
				goto no_wait;

			/* wait for Remote receiver status OK */
retry_read_status:
			if (!igb_read_phy_reg(hw, PHY_1000T_STATUS,
					      &phy_data)) {
				if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
				    retry_count) {
					msleep(100);
					retry_count--;
					goto retry_read_status;
				} else if (!retry_count) {
					dev_err(&adapter->pdev->dev,
						"exceeded max 2 second wait for remote receiver status\n");
				}
			} else {
				dev_err(&adapter->pdev->dev,
					"failed to read 1000Base-T Status register\n");
			}
no_wait:
			netif_carrier_on(netdev);

			igb_ping_all_vfs(adapter);
			igb_check_vf_rate_limit(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw,
			    E1000_THSTAT_PWR_DOWN)) {
				netdev_err(netdev, "The network adapter was stopped because it overheated\n");
			}

			/* Links status message must follow this format */
			netdev_info(netdev, "igb: %s NIC Link is Down\n",
				    netdev->name);
			netif_carrier_off(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));

			/* link is down, time to check for alternate media */
			if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
				igb_check_swap_media(adapter);
				if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
					schedule_work(&adapter->reset_task);
					/* return immediately */
					return;
				}
			}
			pm_schedule_suspend(netdev->dev.parent,
					    MSEC_PER_SEC * 5);

		/* also check for alternate media here */
		} else if (!netif_carrier_ok(netdev) &&
			   (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
			igb_check_swap_media(adapter);
			if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
				schedule_work(&adapter->reset_task);
				/* return immediately */
				return;
			}
		}
	}

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter);
	spin_unlock(&adapter->stats64_lock);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = adapter->tx_ring[i];

		if (!netif_carrier_ok(netdev)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
				adapter->tx_timeout_count++;
				schedule_work(&adapter->reset_task);
				/* return immediately since reset is imminent */
				return;
			}
		}

		/* Force detection of hung controller every watchdog period */
		set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
	}

	/* Cause software interrupt to ensure Rx ring is cleaned */
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		u32 eics = 0;

		for (i = 0; i < adapter->num_q_vectors; i++)
			eics |= adapter->q_vector[i]->eims_value;
		wr32(E1000_EICS, eics);
	} else {
		wr32(E1000_ICS, E1000_ICS_RXDMT0);
	}

	igb_spoof_check(adapter);
	igb_ptp_rx_hang(adapter);
	igb_ptp_tx_hang(adapter);

	/* Check LVMMC register on i350/i354 only */
	if ((adapter->hw.mac.type == e1000_i350) ||
	    (adapter->hw.mac.type == e1000_i354))
		igb_check_lvmmc(adapter);

	/* Reset the timer */
	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies + HZ));
		else
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies + 2 * HZ));
	}
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};
/**
 * igb_update_ring_itr - update the dynamic ITR value based on packet size
 * @q_vector: pointer to q_vector
 *
 * Stores a new ITR value based strictly on packet size. This
 * algorithm is less sophisticated than that used in igb_update_itr,
 * due to the difficulty of synchronizing statistics across multiple
 * receive rings. The divisors and thresholds used by this function
 * were determined based on theoretical maximum wire speed and testing
 * data, in order to minimize response time while increasing bulk
 * throughput.
 * This functionality is controlled by ethtool's coalescing settings.
 * NOTE: This function is called only when operating in a multiqueue
 * receive environment.
 **/
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
{
	int new_val = q_vector->itr_val;
	int avg_wire_size = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	unsigned int packets;

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 120 ticks.
	 */
	if (adapter->link_speed != SPEED_1000) {
		new_val = IGB_4K_ITR;
		goto set_itr_val;
	}

	packets = q_vector->rx.total_packets;
	if (packets)
		avg_wire_size = q_vector->rx.total_bytes / packets;

	packets = q_vector->tx.total_packets;
	if (packets)
		avg_wire_size = max_t(u32, avg_wire_size,
				      q_vector->tx.total_bytes / packets);

	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (new_val < IGB_20K_ITR &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		new_val = IGB_20K_ITR;

set_itr_val:
	if (new_val != q_vector->itr_val) {
		q_vector->itr_val = new_val;
		q_vector->set_itr = 1;
	}
clear_counts:
	q_vector->rx.total_bytes = 0;
	q_vector->rx.total_packets = 0;
	q_vector->tx.total_bytes = 0;
	q_vector->tx.total_packets = 0;
}

/**
 * igb_update_itr - update the dynamic ITR value based on statistics
 * @q_vector: pointer to q_vector
 * @ring_container: ring info to update the itr for
 *
 * Stores a new ITR value based on packets and byte
 * counts during the last interrupt. The advantage of per interrupt
 * computation is faster updates and more accurate ITR for the current
 * traffic pattern. Constants in this function were computed
 * based on theoretical maximum wire speed and thresholds were set based
 * on testing data as well as attempting to minimize response time
 * while increasing bulk throughput.
 * This functionality is controlled by ethtool's coalescing settings.
 * NOTE: These calculations are only valid when operating in a single-
 * queue environment.
 **/
static void igb_update_itr(struct igb_q_vector *q_vector,
			   struct igb_ring_container *ring_container)
{
	unsigned int packets = ring_container->total_packets;
	unsigned int bytes = ring_container->total_bytes;
	u8 itrval = ring_container->itr;

	/* no packets, exit with status unchanged */
	if (packets == 0)
		return;

	switch (itrval) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			itrval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			itrval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000)
				itrval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				itrval = bulk_latency;
			else if ((packets > 35))
				itrval = lowest_latency;
		} else if (bytes/packets > 2000) {
			itrval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			itrval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				itrval = low_latency;
		} else if (bytes < 1500) {
			itrval = low_latency;
		}
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itrval;
}

static void igb_set_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 new_itr = q_vector->itr_val;
	u8 current_itr = 0;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = IGB_4K_ITR;
		goto set_itr_now;
	}

	igb_update_itr(q_vector, &q_vector->tx);
	igb_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (current_itr == lowest_latency &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > q_vector->itr_val ?
			  max((new_itr * q_vector->itr_val) /
			      (new_itr + (q_vector->itr_val >> 2)),
			      new_itr) : new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts. Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}
}

static void igb_tx_ctxtdesc(struct igb_ring *tx_ring,
			    struct igb_tx_buffer *first,
			    u32 vlan_macip_lens, u32 type_tucmd,
			    u32 mss_l4len_idx)
{
	struct e1000_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;
	struct timespec64 ts;

	context_desc = IGB_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	/* For 82575, context index must be unique per ring. */
	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);

	/* We assume there is always a valid tx time available. Invalid times
	 * should have been handled by the upper layers.
	 */
	if (tx_ring->launchtime_enable) {
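		/* seqnum_seed carries the launch time: the nanosecond part
		 * of the tstamp divided by 32, matching the hardware's
		 * apparent 32 ns launch-time granularity
		 */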
		ts = ns_to_timespec64(first->skb->tstamp);
		context_desc->seqnum_seed = cpu_to_le32(ts.tv_nsec / 32);
	} else {
		context_desc->seqnum_seed = 0;
	}
}

static int igb_tso(struct igb_ring *tx_ring,
		   struct igb_tx_buffer *first,
		   u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		unsigned char *csum_start = skb_checksum_start(skb);
		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);

		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header
		 */
		ip.v4->check = csum_fold(csum_partial(trans_start,
						      csum_start - trans_start,
						      0));
		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;

		ip.v4->tot_len = 0;
		first->tx_flags |= IGB_TX_FLAGS_TSO |
				   IGB_TX_FLAGS_CSUM |
				   IGB_TX_FLAGS_IPV4;
	} else {
		ip.v6->payload_len = 0;
		first->tx_flags |= IGB_TX_FLAGS_TSO |
				   IGB_TX_FLAGS_CSUM;
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* compute length of segmentation header */
	*hdr_len = (l4.tcp->doff * 4) + l4_offset;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;
	csum_replace_by_diff(&l4.tcp->check, htonl(paylen));

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* MSS L4LEN IDX */
	mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;

	/* VLAN MACLEN IPLEN */
	vlan_macip_lens = l4.hdr - ip.hdr;
	vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;

	igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens,
			type_tucmd, mss_l4len_idx);

	return 1;
}

static inline bool igb_ipv6_csum_is_sctp(struct sk_buff *skb)
{
	unsigned int offset = 0;

	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);

	return offset == skb_checksum_start_offset(skb);
}

static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed:
		if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
			return;
		goto no_csum;
	}

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
		/* fall through */
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
		if (((first->protocol == htons(ETH_P_IP)) &&
		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
		    ((first->protocol == htons(ETH_P_IPV6)) &&
		     igb_ipv6_csum_is_sctp(skb))) {
			type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		/* fall through */
	default:
		skb_checksum_help(skb);
		goto csum_failed;
	}

	/* update TX checksum flag */
	first->tx_flags |= IGB_TX_FLAGS_CSUM;
	vlan_macip_lens = skb_checksum_start_offset(skb) -
			  skb_network_offset(skb);
no_csum:
	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;

	igb_tx_ctxtdesc(tx_ring, first, vlan_macip_lens, type_tucmd, 0);
}

#define IGB_SET_FLAG(_input, _flag, _result) \
	((_flag <= _result) ? \
	 ((u32)(_input & _flag) * (_result / _flag)) : \
	 ((u32)(_input & _flag) / (_flag / _result)))
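
/* IGB_SET_FLAG() translates a single-bit software flag in _input into the
 * corresponding single-bit hardware field _result without a conditional
 * branch: because both masks are powers of two, multiplying (or dividing)
 * by their ratio is a compile-time shift. Illustrative expansion with
 * made-up masks: for _flag = BIT(1) and _result = BIT(30), the macro
 * reduces to (_input & BIT(1)) * (1 << 29), i.e. bit 1 moved to bit 30.
 */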

static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
		       E1000_ADVTXD_DCMD_DEXT |
		       E1000_ADVTXD_DCMD_IFCS;

	/* set HW vlan bit if vlan is present */
	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
				 (E1000_ADVTXD_DCMD_VLE));

	/* set segmentation bits for TSO */
	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
				 (E1000_ADVTXD_DCMD_TSE));

	/* set timestamp bit if present */
	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
				 (E1000_ADVTXD_MAC_TSTAMP));

	/* insert frame checksum */
	cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);

	return cmd_type;
}

static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
				 union e1000_adv_tx_desc *tx_desc,
				 u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;

	/* 82575 requires a unique index per ring */
	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		olinfo_status |= tx_ring->reg_idx << 4;

	/* insert L4 checksum */
	olinfo_status |= IGB_SET_FLAG(tx_flags,
				      IGB_TX_FLAGS_CSUM,
				      (E1000_TXD_POPTS_TXSM << 8));

	/* insert IPv4 checksum */
	olinfo_status |= IGB_SET_FLAG(tx_flags,
				      IGB_TX_FLAGS_IPV4,
				      (E1000_TXD_POPTS_IXSM << 8));

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}
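
/* Note on the IFCS XOR in igb_tx_cmd_type(): IFCS is set unconditionally
 * in the initial cmd_type, so XOR-ing it back out when skb->no_fcs is set
 * cleanly drops frame-checksum insertion for that one frame while leaving
 * it enabled for everything else.
 */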

static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again in case another CPU has just
	 * made room available.
	 */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}
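
/* The stop-then-recheck sequence above is the standard lockless pattern
 * for pausing a Tx queue: stop the subqueue, issue a full barrier so the
 * stopped state is visible before re-reading the free-descriptor count,
 * then wake the queue again if the clean-up path freed space in the
 * meantime. Without the barrier, a concurrently running completion
 * handler could observe the queue as still running, skip the wake, and
 * leave the queue stopped indefinitely.
 */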

static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
	if (igb_desc_unused(tx_ring) >= size)
		return 0;

	return __igb_maybe_stop_tx(tx_ring, size);
}

static int igb_tx_map(struct igb_ring *tx_ring,
		      struct igb_tx_buffer *first,
		      const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct igb_tx_buffer *tx_buffer;
	union e1000_adv_tx_desc *tx_desc;
	struct skb_frag_struct *frag;
	dma_addr_t dma;
	unsigned int data_len, size;
	u32 tx_flags = first->tx_flags;
	u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
	u16 i = tx_ring->next_to_use;

	tx_desc = IGB_TX_DESC(tx_ring, i);

	igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);

	size = skb_headlen(skb);
	data_len = skb->data_len;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IGB_TX_DESC(tx_ring, 0);
				i = 0;
			}
			tx_desc->read.olinfo_status = 0;

			dma += IGB_MAX_DATA_PER_TXD;
			size -= IGB_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IGB_TX_DESC(tx_ring, 0);
			i = 0;
		}
		tx_desc->read.olinfo_status = 0;

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
				       size, DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= size | IGB_TXD_DCMD;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch. (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	dma_wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* Make sure there is space in the ring for the next send. */
	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);

	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
		writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our tail
		 * at a time, it synchronizes IO on IA64/Altix systems
		 */
		mmiowb();
	}
	return 0;

dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");
	tx_buffer = &tx_ring->tx_buffer_info[i];

	/* clear dma mappings for failed tx_buffer_info map */
	while (tx_buffer != first) {
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_page(tx_ring->dev,
				       dma_unmap_addr(tx_buffer, dma),
				       dma_unmap_len(tx_buffer, len),
				       DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buffer, len, 0);

		if (i-- == 0)
			i += tx_ring->count;
		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	if (dma_unmap_len(tx_buffer, len))
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);
	dma_unmap_len_set(tx_buffer, len, 0);

	dev_kfree_skb_any(tx_buffer->skb);
	tx_buffer->skb = NULL;

	tx_ring->next_to_use = i;

	return -1;
}
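
/* Two details in igb_tx_map() are easy to misread. First, the
 * "cmd_type ^ size" writes are effectively ORs: the length field lives in
 * the low bits of cmd_type_len and none of the command bits accumulated
 * in cmd_type occupy those bits, so XOR and OR produce the same value.
 * Second, oversized buffers are split into IGB_MAX_DATA_PER_TXD chunks
 * because a single advanced data descriptor can only express a bounded
 * length; each chunk advances both the DMA address and the ring index,
 * wrapping back to descriptor 0 when the end of the ring is reached.
 */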

netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
				struct igb_ring *tx_ring)
{
	struct igb_tx_buffer *first;
	int tso;
	u32 tx_flags = 0;
	unsigned short f;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	__be16 protocol = vlan_get_protocol(skb);
	u8 hdr_len = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (igb_maybe_stop_tx(tx_ring, count + 3)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);

		if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
		    !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
					   &adapter->state)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			tx_flags |= IGB_TX_FLAGS_TSTAMP;

			adapter->ptp_tx_skb = skb_get(skb);
			adapter->ptp_tx_start = jiffies;
			if (adapter->hw.mac.type == e1000_82576)
				schedule_work(&adapter->ptp_tx_work);
		} else {
			adapter->tx_hwtstamp_skipped++;
		}
	}

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

	tso = igb_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		igb_tx_csum(tx_ring, first);

	skb_tx_timestamp(skb);

	if (igb_tx_map(tx_ring, first, hdr_len))
		goto cleanup_tx_tstamp;

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;
cleanup_tx_tstamp:
	if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) {
		struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);

		dev_kfree_skb_any(adapter->ptp_tx_skb);
		adapter->ptp_tx_skb = NULL;
		if (adapter->hw.mac.type == e1000_82576)
			cancel_work_sync(&adapter->ptp_tx_work);
		clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
	}

	return NETDEV_TX_OK;
}
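
/* The "count + 3" passed to igb_maybe_stop_tx() matches the comment at
 * the top of the function: count already covers the data descriptors for
 * the linear region and every fragment, and the extra three cover one
 * context descriptor plus the two-descriptor gap that keeps next_to_use
 * from catching up with the head of the ring.
 */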

static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}

static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb_put_padto(skb, 17))
		return NETDEV_TX_OK;

	return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
}

/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;

	if (hw->mac.type >= e1000_82580)
		hw->dev_spec._82575.global_device_reset = true;

	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}

static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;

	adapter = container_of(work, struct igb_adapter, reset_task);

	igb_dump(adapter);
	netdev_err(adapter->netdev, "Reset adapter\n");
	igb_reinit_locked(adapter);
}

/**
 * igb_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: rtnl_link_stats64 pointer
 **/
static void igb_get_stats64(struct net_device *netdev,
			    struct rtnl_link_stats64 *stats)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter);
	memcpy(stats, &adapter->stats64, sizeof(*stats));
	spin_unlock(&adapter->stats64_lock);
}

/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	/* adjust max frame to be at least the size of a standard frame */
	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
		max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	if (netif_running(netdev))
		igb_down(adapter);

	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}
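
/* Example of the max_frame arithmetic: for the default MTU of 1500,
 * max_frame = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4)
 * = 1522 bytes, i.e. a maximum-size VLAN-tagged Ethernet frame including
 * the CRC. The floor of ETH_FRAME_LEN + ETH_FCS_LEN (1518) keeps
 * max_frame_size sane for sub-standard MTU requests.
 */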

/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void igb_update_stats(struct igb_adapter *adapter)
{
	struct rtnl_link_stats64 *net_stats = &adapter->stats64;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u32 reg, mpc;
	int i;
	u64 bytes, packets;
	unsigned int start;
	u64 _bytes, _packets;

	/* Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	bytes = 0;
	packets = 0;

	rcu_read_lock();
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		u32 rqdpc = rd32(E1000_RQDPC(i));

		if (hw->mac.type >= e1000_i210)
			wr32(E1000_RQDPC(i), 0);

		if (rqdpc) {
			ring->rx_stats.drops += rqdpc;
			net_stats->rx_fifo_errors += rqdpc;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
			_bytes = ring->rx_stats.bytes;
			_packets = ring->rx_stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = packets;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = adapter->tx_ring[i];

		do {
			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
			_bytes = ring->tx_stats.bytes;
			_packets = ring->tx_stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = packets;
	rcu_read_unlock();

	/* read stats registers */
	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	mpc = rd32(E1000_MPC);
	adapter->stats.mpc += mpc;
	net_stats->rx_fifo_errors += mpc;
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(E1000_RNBC);
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	adapter->stats.tpt += rd32(E1000_TPT);
	adapter->stats.colc += rd32(E1000_COLC);

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	/* read internal phy specific stats */
	reg = rd32(E1000_CTRL_EXT);
	if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
		adapter->stats.rxerrc += rd32(E1000_RXERRC);

		/* this stat has invalid values on i210/i211 */
		if ((hw->mac.type != e1000_i210) &&
		    (hw->mac.type != e1000_i211))
			adapter->stats.tncrs += rd32(E1000_TNCRS);
	}

	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	net_stats->multicast = adapter->stats.mprc;
	net_stats->collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC
	 */
	net_stats->rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	net_stats->rx_length_errors = adapter->stats.ruc +
				      adapter->stats.roc;
	net_stats->rx_crc_errors = adapter->stats.crcerrs;
	net_stats->rx_frame_errors = adapter->stats.algnerrc;
	net_stats->rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	net_stats->tx_errors = adapter->stats.ecol +
			       adapter->stats.latecol;
	net_stats->tx_aborted_errors = adapter->stats.ecol;
	net_stats->tx_window_errors = adapter->stats.latecol;
	net_stats->tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);

	/* OS2BMC Stats */
	reg = rd32(E1000_MANC);
	if (reg & E1000_MANC_EN_BMC2OS) {
		adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
		adapter->stats.o2bspc += rd32(E1000_O2BSPC);
		adapter->stats.b2ospc += rd32(E1000_B2OSPC);
		adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
	}
}

static void igb_tsync_interrupt(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct ptp_clock_event event;
	struct timespec64 ts;
	u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);

	if (tsicr & TSINTR_SYS_WRAP) {
		event.type = PTP_CLOCK_PPS;
		if (adapter->ptp_caps.pps)
			ptp_clock_event(adapter->ptp_clock, &event);
		ack |= TSINTR_SYS_WRAP;
	}

	if (tsicr & E1000_TSICR_TXTS) {
		/* retrieve hardware timestamp */
		schedule_work(&adapter->ptp_tx_work);
		ack |= E1000_TSICR_TXTS;
	}

	if (tsicr & TSINTR_TT0) {
		spin_lock(&adapter->tmreg_lock);
		ts = timespec64_add(adapter->perout[0].start,
				    adapter->perout[0].period);
		/* u32 conversion of tv_sec is safe until y2106 */
		wr32(E1000_TRGTTIML0, ts.tv_nsec);
		wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec);
		tsauxc = rd32(E1000_TSAUXC);
		tsauxc |= TSAUXC_EN_TT0;
		wr32(E1000_TSAUXC, tsauxc);
		adapter->perout[0].start = ts;
		spin_unlock(&adapter->tmreg_lock);
		ack |= TSINTR_TT0;
	}

	if (tsicr & TSINTR_TT1) {
		spin_lock(&adapter->tmreg_lock);
		ts = timespec64_add(adapter->perout[1].start,
				    adapter->perout[1].period);
		wr32(E1000_TRGTTIML1, ts.tv_nsec);
		wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec);
		tsauxc = rd32(E1000_TSAUXC);
		tsauxc |= TSAUXC_EN_TT1;
		wr32(E1000_TSAUXC, tsauxc);
		adapter->perout[1].start = ts;
		spin_unlock(&adapter->tmreg_lock);
		ack |= TSINTR_TT1;
	}

	if (tsicr & TSINTR_AUTT0) {
		nsec = rd32(E1000_AUXSTMPL0);
		sec  = rd32(E1000_AUXSTMPH0);
		event.type = PTP_CLOCK_EXTTS;
		event.index = 0;
		event.timestamp = sec * 1000000000ULL + nsec;
		ptp_clock_event(adapter->ptp_clock, &event);
		ack |= TSINTR_AUTT0;
	}

	if (tsicr & TSINTR_AUTT1) {
		nsec = rd32(E1000_AUXSTMPL1);
		sec  = rd32(E1000_AUXSTMPH1);
		event.type = PTP_CLOCK_EXTTS;
		event.index = 1;
		event.timestamp = sec * 1000000000ULL + nsec;
		ptp_clock_event(adapter->ptp_clock, &event);
		ack |= TSINTR_AUTT1;
	}

	/* acknowledge the interrupts */
	wr32(E1000_TSICR, ack);
}
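
/* The TT0/TT1 branches above implement a software-assisted periodic
 * output: each time a target-time interrupt fires, the next edge is
 * scheduled by adding the configured period to the previous start time
 * and re-arming the comparator via TSAUXC, so the output keeps toggling
 * without further host intervention as long as interrupts are serviced
 * in time.
 */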

static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
		/* The DMA Out of Sync is also indication of a spoof event
		 * in IOV mode. Check the Wrong VM Behavior register to
		 * see if it is really a spoof event.
		 */
		igb_check_wvbr(adapter);
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (icr & E1000_ICR_TS)
		igb_tsync_interrupt(adapter);

	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}

static void igb_write_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 itr_val = q_vector->itr_val & 0x7FFC;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = 0x4;

	if (adapter->hw.mac.type == e1000_82575)
		itr_val |= itr_val << 16;
	else
		itr_val |= E1000_EITR_CNT_IGNR;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}
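
/* A note on the masking in igb_write_itr(): the interval portion of the
 * EITR register does not occupy the two lowest bits, hence the
 * "& 0x7FFC" that keeps itr_val aligned to the field (the 82575 variant
 * additionally mirrors the value into the upper half of the register).
 * The exact field layout is hardware-specific; the E1000_EITR register
 * definitions are the authoritative reference for the encoding.
 */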

static irqreturn_t igb_msix_ring(int irq, void *data)
{
	struct igb_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igb_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_IGB_DCA
static void igb_update_tx_dca(struct igb_adapter *adapter,
			      struct igb_ring *tx_ring,
			      int cpu)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);

	if (hw->mac.type != e1000_82575)
		txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;

	/* We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled. This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
		  E1000_DCA_TXCTRL_DATA_RRO_EN |
		  E1000_DCA_TXCTRL_DESC_DCA_EN;

	wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
}

static void igb_update_rx_dca(struct igb_adapter *adapter,
			      struct igb_ring *rx_ring,
			      int cpu)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);

	if (hw->mac.type != e1000_82575)
		rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;

	/* We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled. This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
		  E1000_DCA_RXCTRL_DESC_DCA_EN;

	wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
}

static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx.ring)
		igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);

	if (q_vector->rx.ring)
		igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);

	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		igb_update_dca(adapter->q_vector[i]);
	}
}

static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model
			 */
			dca_remove_requester(dev);
			dev_info(&pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
					 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf)
{
	unsigned char mac_addr[ETH_ALEN];

	eth_zero_addr(mac_addr);
	igb_set_vf_mac(adapter, vf, mac_addr);

	/* By default spoof check is enabled for all VFs */
	adapter->vf_data[vf].spoofchk_enabled = true;

	/* By default VFs are not trusted */
	adapter->vf_data[vf].trusted = false;

	return 0;
}

#endif
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}

static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr = rd32(E1000_VMOLR(vf));
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
			    IGB_VF_FLAG_MULTI_PROMISC);
	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
		vmolr |= E1000_VMOLR_MPME;
		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
	} else {
		/* if we have hashes and we are clearing a multicast promisc
		 * flag we need to write the hashes to the MTA as this step
		 * was previously skipped
		 */
		if (vf_data->num_vf_mc_hashes > 30) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			int j;

			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
	}

	wr32(E1000_VMOLR(vf), vmolr);

	/* there are flags left unprocessed, likely not supported */
	if (*msgbuf & E1000_VT_MSGINFO_MASK)
		return -EINVAL;

	return 0;
}

static int igb_set_vf_multicasts(struct igb_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multicast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* store the hashes for later use */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}

static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		u32 vmolr = rd32(E1000_VMOLR(i));

		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

		vf_data = &adapter->vf_data[i];

		if ((vf_data->num_vf_mc_hashes > 30) ||
		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
		wr32(E1000_VMOLR(i), vmolr);
	}
}

static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, vlvf_mask, i;

	/* create mask for VF and other pools */
	pool_mask = E1000_VLVF_POOLSEL_MASK;
	vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf);

	/* drop PF from pool bits */
	pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT +
			  adapter->vfs_allocated_count);

	/* Find the vlan filter for this id */
	for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
		u32 vlvf = rd32(E1000_VLVF(i));
		u32 vfta_mask, vid, vfta;

		/* remove the vf from the pool */
		if (!(vlvf & vlvf_mask))
			continue;

		/* clear out bit from VLVF */
		vlvf ^= vlvf_mask;

		/* if other pools are present, just remove ourselves */
		if (vlvf & pool_mask)
			goto update_vlvfb;

		/* if PF is present, leave VFTA */
		if (vlvf & E1000_VLVF_POOLSEL_MASK)
			goto update_vlvf;

		vid = vlvf & E1000_VLVF_VLANID_MASK;
		vfta_mask = BIT(vid % 32);

		/* clear bit from VFTA */
		vfta = adapter->shadow_vfta[vid / 32];
		if (vfta & vfta_mask)
			hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask);
update_vlvf:
		/* clear pool selection enable */
		if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
			vlvf &= E1000_VLVF_POOLSEL_MASK;
		else
			vlvf = 0;
update_vlvfb:
		/* clear pool bits */
		wr32(E1000_VLVF(i), vlvf);
	}
}

static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
{
	u32 vlvf;
	int idx;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/* Search for the VLAN id in the VLVF entries */
	for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) {
		vlvf = rd32(E1000_VLVF(idx));
		if ((vlvf & VLAN_VID_MASK) == vlan)
			break;
	}

	return idx;
}
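
/* igb_find_vlvf_entry() deliberately never tests index 0: the loop
 * condition "--idx" terminates once idx reaches 0, so a return value of
 * 0 doubles as "not found" (and as the VLAN 0 shortcut). Callers such as
 * igb_update_pf_vlvf() rely on this by treating idx == 0 as a miss.
 */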

static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 bits, pf_id;
	int idx;

	idx = igb_find_vlvf_entry(hw, vid);
	if (!idx)
		return;

	/* See if any other pools are set for this VLAN filter
	 * entry other than the PF.
	 */
	pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
	bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK;
	bits &= rd32(E1000_VLVF(idx));

	/* Disable the filter so this falls into the default pool. */
	if (!bits) {
		if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
			wr32(E1000_VLVF(idx), BIT(pf_id));
		else
			wr32(E1000_VLVF(idx), 0);
	}
}

static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid,
			   bool add, u32 vf)
{
	int pf_id = adapter->vfs_allocated_count;
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* If VLAN overlaps with one the PF is currently monitoring make
	 * sure that we are able to allocate a VLVF entry. This may be
	 * redundant but it guarantees PF will maintain visibility to
	 * the VLAN.
	 */
	if (add && test_bit(vid, adapter->active_vlans)) {
		err = igb_vfta_set(hw, vid, pf_id, true, false);
		if (err)
			return err;
	}

	err = igb_vfta_set(hw, vid, vf, add, false);

	if (add && !err)
		return err;

	/* If we failed to add the VF VLAN or we are removing the VF VLAN
	 * we may need to drop the PF pool bit in order to allow us to free
	 * up the VLVF resources.
	 */
	if (test_bit(vid, adapter->active_vlans) ||
	    (adapter->flags & IGB_FLAG_VLAN_PROMISC))
		igb_update_pf_vlvf(adapter, vid);

	return err;
}

static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	if (vid)
		wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
	else
		wr32(E1000_VMVIR(vf), 0);
}

static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf,
				u16 vlan, u8 qos)
{
	int err;

	err = igb_set_vf_vlan(adapter, vlan, true, vf);
	if (err)
		return err;

	igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
	igb_set_vmolr(adapter, vf, !vlan);

	/* revoke access to previous VLAN */
	if (vlan != adapter->vf_data[vf].pf_vlan)
		igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
				false, vf);

	adapter->vf_data[vf].pf_vlan = vlan;
	adapter->vf_data[vf].pf_qos = qos;
	igb_set_vf_vlan_strip(adapter, vf, true);
	dev_info(&adapter->pdev->dev,
		 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev,
			 "The VF VLAN has been set, but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev,
			 "Bring the PF device up before attempting to use the VF device.\n");
	}

	return err;
}

static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf)
{
	/* Restore tagless access via VLAN 0 */
	igb_set_vf_vlan(adapter, 0, true, vf);

	igb_set_vmvir(adapter, 0, vf);
	igb_set_vmolr(adapter, vf, true);

	/* Remove any PF assigned VLAN */
	if (adapter->vf_data[vf].pf_vlan)
		igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
				false, vf);

	adapter->vf_data[vf].pf_vlan = 0;
	adapter->vf_data[vf].pf_qos = 0;
	igb_set_vf_vlan_strip(adapter, vf, false);

	return 0;
}

static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
			       u16 vlan, u8 qos, __be16 vlan_proto)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
		return -EINVAL;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) :
			       igb_disable_port_vlan(adapter, vf);
}

static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
	int ret;

	if (adapter->vf_data[vf].pf_vlan)
		return -1;

	/* VLAN 0 is a special case, don't allow it to be removed */
	if (!vid && !add)
		return 0;

	ret = igb_set_vf_vlan(adapter, vid, !!add, vf);
	if (!ret)
		igb_set_vf_vlan_strip(adapter, vf, !!vid);
	return ret;
}

static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	/* clear flags - except flag that indicates PF has set the MAC */
	vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC;
	vf_data->last_nack = jiffies;

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);
	igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf);
	igb_set_vmvir(adapter, vf_data->pf_vlan |
			       (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf);
	igb_set_vmolr(adapter, vf, !vf_data->pf_vlan);
	igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan));

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}

static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;

	/* clear mac address as we were hotplug removed/added */
	if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
		eth_zero_addr(vf_mac);

	/* process remaining reset events */
	igb_vf_reset(adapter, vf);
}

static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset(adapter, vf);

	/* set vf mac address */
	igb_set_vf_mac(adapter, vf, vf_mac);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | BIT(vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | BIT(vf));

	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;

	/* reply to reset with ack and vf mac address */
	if (!is_zero_ether_addr(vf_mac)) {
		msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
		memcpy(addr, vf_mac, ETH_ALEN);
	} else {
		msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
	}
	igb_write_mbx(hw, msgbuf, 3, vf);
}

static void igb_flush_mac_table(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < hw->mac.rar_entry_count; i++) {
		adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
		memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
		adapter->mac_table[i].queue = 0;
		igb_rar_set_index(adapter, i);
	}
}

static int igb_available_rars(struct igb_adapter *adapter, u8 queue)
{
	struct e1000_hw *hw = &adapter->hw;
	/* do not count rar entries reserved for VFs MAC addresses */
	int rar_entries = hw->mac.rar_entry_count -
			  adapter->vfs_allocated_count;
	int i, count = 0;

	for (i = 0; i < rar_entries; i++) {
		/* do not count default entries */
		if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT)
			continue;

		/* do not count "in use" entries for different queues */
		if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) &&
		    (adapter->mac_table[i].queue != queue))
			continue;

		count++;
	}

	return count;
}

/* Set default MAC address for the PF in the first RAR entry */
static void igb_set_default_mac_filter(struct igb_adapter *adapter)
{
	struct igb_mac_addr *mac_table = &adapter->mac_table[0];

	ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
	mac_table->queue = adapter->vfs_allocated_count;
	mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;

	igb_rar_set_index(adapter, 0);
}

/* If the filter to be added and an already existing filter express
 * the same address and address type, it should be possible to only
 * override the other configurations, for example the queue to steer
 * traffic.
 */
static bool igb_mac_entry_can_be_used(const struct igb_mac_addr *entry,
				      const u8 *addr, const u8 flags)
{
	if (!(entry->state & IGB_MAC_STATE_IN_USE))
		return true;

	if ((entry->state & IGB_MAC_STATE_SRC_ADDR) !=
	    (flags & IGB_MAC_STATE_SRC_ADDR))
		return false;

	if (!ether_addr_equal(addr, entry->addr))
		return false;

	return true;
}

/* Add a MAC filter for 'addr' directing matching traffic to 'queue',
 * 'flags' is used to indicate what kind of match is made, match is by
 * default for the destination address, if matching by source address
 * is desired the flag IGB_MAC_STATE_SRC_ADDR can be used.
 */
static int igb_add_mac_filter_flags(struct igb_adapter *adapter,
				    const u8 *addr, const u8 queue,
				    const u8 flags)
{
	struct e1000_hw *hw = &adapter->hw;
	int rar_entries = hw->mac.rar_entry_count -
			  adapter->vfs_allocated_count;
	int i;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	/* Search for the first empty entry in the MAC table.
	 * Do not touch entries at the end of the table reserved for the VF MAC
	 * addresses.
	 */
	for (i = 0; i < rar_entries; i++) {
		if (!igb_mac_entry_can_be_used(&adapter->mac_table[i],
					       addr, flags))
			continue;

		ether_addr_copy(adapter->mac_table[i].addr, addr);
		adapter->mac_table[i].queue = queue;
		adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags;

		igb_rar_set_index(adapter, i);
		return i;
	}

	return -ENOSPC;
}

static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
			      const u8 queue)
{
	return igb_add_mac_filter_flags(adapter, addr, queue, 0);
}

/* Remove a MAC filter for 'addr' directing matching traffic to
 * 'queue', 'flags' is used to indicate what kind of match needs to be
 * removed, match is by default for the destination address, if
 * matching by source address is to be removed the flag
 * IGB_MAC_STATE_SRC_ADDR can be used.
 */
static int igb_del_mac_filter_flags(struct igb_adapter *adapter,
				    const u8 *addr, const u8 queue,
				    const u8 flags)
{
	struct e1000_hw *hw = &adapter->hw;
	int rar_entries = hw->mac.rar_entry_count -
			  adapter->vfs_allocated_count;
	int i;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	/* Search for matching entry in the MAC table based on given address
	 * and queue. Do not touch entries at the end of the table reserved
	 * for the VF MAC addresses.
	 */
	for (i = 0; i < rar_entries; i++) {
		if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE))
			continue;
		if ((adapter->mac_table[i].state & flags) != flags)
			continue;
		if (adapter->mac_table[i].queue != queue)
			continue;
		if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
			continue;

		/* When a filter for the default address is "deleted",
		 * we return it to its initial configuration
		 */
		if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) {
			adapter->mac_table[i].state =
				IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
			adapter->mac_table[i].queue =
				adapter->vfs_allocated_count;
		} else {
			adapter->mac_table[i].state = 0;
			adapter->mac_table[i].queue = 0;
			memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
		}

		igb_rar_set_index(adapter, i);
		return 0;
	}

	return -ENOENT;
}

static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
			      const u8 queue)
{
	return igb_del_mac_filter_flags(adapter, addr, queue, 0);
}

int igb_add_mac_steering_filter(struct igb_adapter *adapter,
				const u8 *addr, u8 queue, u8 flags)
{
	struct e1000_hw *hw = &adapter->hw;

	/* In theory, this should be supported on 82575 as well, but
	 * that part wasn't easily accessible during development.
	 */
	if (hw->mac.type != e1000_i210)
		return -EOPNOTSUPP;

	return igb_add_mac_filter_flags(adapter, addr, queue,
					IGB_MAC_STATE_QUEUE_STEERING | flags);
}

int igb_del_mac_steering_filter(struct igb_adapter *adapter,
				const u8 *addr, u8 queue, u8 flags)
{
	return igb_del_mac_filter_flags(adapter, addr, queue,
					IGB_MAC_STATE_QUEUE_STEERING | flags);
}

static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	int ret;

	ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count);

	return min_t(int, ret, 0);
}
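
/* igb_add_mac_filter() returns the RAR index on success, but the
 * dev_uc_sync() callback contract only distinguishes zero (success)
 * from a negative errno, so igb_uc_sync() clamps any non-negative
 * index to 0 with min_t() while letting errors pass through unchanged.
 */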

static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count);

	return 0;
}

static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
				 const u32 info, const u8 *addr)
{
	struct pci_dev *pdev = adapter->pdev;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	struct list_head *pos;
	struct vf_mac_filter *entry = NULL;
	int ret = 0;

	switch (info) {
	case E1000_VF_MAC_FILTER_CLR:
		/* remove all unicast MAC filters related to the current VF */
		list_for_each(pos, &adapter->vf_macs.l) {
			entry = list_entry(pos, struct vf_mac_filter, l);
			if (entry->vf == vf) {
				entry->vf = -1;
				entry->free = true;
				igb_del_mac_filter(adapter, entry->vf_mac, vf);
			}
		}
		break;
	case E1000_VF_MAC_FILTER_ADD:
		if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
		    !vf_data->trusted) {
			dev_warn(&pdev->dev,
				 "VF %d requested MAC filter but is administratively denied\n",
				 vf);
			return -EINVAL;
		}
		if (!is_valid_ether_addr(addr)) {
			dev_warn(&pdev->dev,
				 "VF %d attempted to set invalid MAC filter\n",
				 vf);
			return -EINVAL;
		}

		/* try to find empty slot in the list */
		list_for_each(pos, &adapter->vf_macs.l) {
			entry = list_entry(pos, struct vf_mac_filter, l);
			if (entry->free)
				break;
		}

		if (entry && entry->free) {
			entry->free = false;
			entry->vf = vf;
			ether_addr_copy(entry->vf_mac, addr);

			ret = igb_add_mac_filter(adapter, addr, vf);
			ret = min_t(int, ret, 0);
		} else {
			ret = -ENOSPC;
		}

		if (ret == -ENOSPC)
			dev_warn(&pdev->dev,
				 "VF %d has requested MAC filter but there is no space for it\n",
				 vf);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	struct pci_dev *pdev = adapter->pdev;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 info = msg[0] & E1000_VT_MSGINFO_MASK;

	/* The VF MAC Address is stored in a packed array of bytes
	 * starting at the second 32 bit word of the msg array
	 */
	unsigned char *addr = (unsigned char *)&msg[1];
	int ret = 0;

	if (!info) {
		if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
		    !vf_data->trusted) {
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
				 vf);
			return -EINVAL;
		}

		if (!is_valid_ether_addr(addr)) {
			dev_warn(&pdev->dev,
				 "VF %d attempted to set invalid MAC\n",
				 vf);
			return -EINVAL;
		}

		ret = igb_set_vf_mac(adapter, vf, addr);
	} else {
		ret = igb_set_vf_mac_filter(adapter, vf, info, addr);
	}

	return ret;
}
  6299. static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
  6300. {
  6301. struct e1000_hw *hw = &adapter->hw;
  6302. struct vf_data_storage *vf_data = &adapter->vf_data[vf];
  6303. u32 msg = E1000_VT_MSGTYPE_NACK;
  6304. /* if device isn't clear to send it shouldn't be reading either */
  6305. if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
  6306. time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
  6307. igb_write_mbx(hw, &msg, 1, vf);
  6308. vf_data->last_nack = jiffies;
  6309. }
  6310. }
  6311. static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
  6312. {
  6313. struct pci_dev *pdev = adapter->pdev;
  6314. u32 msgbuf[E1000_VFMAILBOX_SIZE];
  6315. struct e1000_hw *hw = &adapter->hw;
  6316. struct vf_data_storage *vf_data = &adapter->vf_data[vf];
  6317. s32 retval;
  6318. retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false);
  6319. if (retval) {
  6320. /* if receive failed revoke VF CTS stats and restart init */
  6321. dev_err(&pdev->dev, "Error receiving message from VF\n");
  6322. vf_data->flags &= ~IGB_VF_FLAG_CTS;
  6323. if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
  6324. goto unlock;
  6325. goto out;
  6326. }
  6327. /* this is a message we already processed, do nothing */
  6328. if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
  6329. goto unlock;
  6330. /* until the vf completes a reset it should not be
  6331. * allowed to start any configuration.
  6332. */
  6333. if (msgbuf[0] == E1000_VF_RESET) {
  6334. /* unlocks mailbox */
  6335. igb_vf_reset_msg(adapter, vf);
  6336. return;
  6337. }
  6338. if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
  6339. if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
  6340. goto unlock;
  6341. retval = -1;
  6342. goto out;
  6343. }
  6344. switch ((msgbuf[0] & 0xFFFF)) {
  6345. case E1000_VF_SET_MAC_ADDR:
  6346. retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
  6347. break;
  6348. case E1000_VF_SET_PROMISC:
  6349. retval = igb_set_vf_promisc(adapter, msgbuf, vf);
  6350. break;
  6351. case E1000_VF_SET_MULTICAST:
  6352. retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
  6353. break;
  6354. case E1000_VF_SET_LPE:
  6355. retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
  6356. break;
  6357. case E1000_VF_SET_VLAN:
  6358. retval = -1;
  6359. if (vf_data->pf_vlan)
  6360. dev_warn(&pdev->dev,
  6361. "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
  6362. vf);
  6363. else
  6364. retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf);
  6365. break;
  6366. default:
  6367. dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
  6368. retval = -1;
  6369. break;
  6370. }
  6371. msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
  6372. out:
  6373. /* notify the VF of the results of what it sent us */
  6374. if (retval)
  6375. msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
  6376. else
  6377. msgbuf[0] |= E1000_VT_MSGTYPE_ACK;
  6378. /* unlocks mailbox */
  6379. igb_write_mbx(hw, msgbuf, 1, vf);
  6380. return;
  6381. unlock:
  6382. igb_unlock_mbx(hw, vf);
  6383. }
  6384. static void igb_msg_task(struct igb_adapter *adapter)
  6385. {
  6386. struct e1000_hw *hw = &adapter->hw;
  6387. u32 vf;
  6388. for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
  6389. /* process any reset requests */
  6390. if (!igb_check_for_rst(hw, vf))
  6391. igb_vf_reset_event(adapter, vf);
  6392. /* process any messages pending */
  6393. if (!igb_check_for_msg(hw, vf))
  6394. igb_rcv_msg_from_vf(adapter, vf);
  6395. /* process any acks */
  6396. if (!igb_check_for_ack(hw, vf))
  6397. igb_rcv_ack_from_vf(adapter, vf);
  6398. }
  6399. }
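
/* Reviewer note (illustrative, not driver code): each mailbox message is
 * an array of u32 words where word 0 carries the request type in its low
 * 16 bits (see the 0xFFFF mask above) with E1000_VT_MSGINFO bits above
 * them; the PF ORs CTS/ACK/NACK status bits into word 0 before writing
 * the reply back.  A VF "set MAC" request, for example, would look
 * roughly like:
 *
 *	msgbuf[0] = E1000_VF_SET_MAC_ADDR;
 *	memcpy(&msgbuf[1], mac_addr, ETH_ALEN);
 *
 * with the PF answering msgbuf[0] | E1000_VT_MSGTYPE_ACK on success.
 */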
/**
 * igb_set_uta - Set unicast filter table address
 * @adapter: board private structure
 * @set: boolean indicating if we are setting or clearing bits
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used;
 * however, due to certain limitations in the hardware, it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled.
 **/
static void igb_set_uta(struct igb_adapter *adapter, bool set)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 uta = set ? ~0 : 0;
	int i;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = hw->mac.uta_reg_count; i--;)
		array_wr32(E1000_UTA, i, uta);
}

/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (icr & E1000_ICR_TS)
		igb_tsync_interrupt(adapter);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write
	 */
	u32 icr = rd32(E1000_ICR);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (icr & E1000_ICR_TS)
		igb_tsync_interrupt(adapter);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;

	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
		if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
			igb_set_itr(q_vector);
		else
			igb_update_ring_itr(q_vector);
	}

	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->flags & IGB_FLAG_HAS_MSIX)
			wr32(E1000_EIMS, q_vector->eims_value);
		else
			igb_irq_enable(adapter);
	}
}

/**
 * igb_poll - NAPI Rx polling callback
 * @napi: napi polling structure
 * @budget: count of how many packets we should handle
 **/
static int igb_poll(struct napi_struct *napi, int budget)
{
	struct igb_q_vector *q_vector = container_of(napi,
						     struct igb_q_vector,
						     napi);
	bool clean_complete = true;
	int work_done = 0;

#ifdef CONFIG_IGB_DCA
	if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
		igb_update_dca(q_vector);
#endif
	if (q_vector->tx.ring)
		clean_complete = igb_clean_tx_irq(q_vector, budget);

	if (q_vector->rx.ring) {
		int cleaned = igb_clean_rx_irq(q_vector, budget);

		work_done += cleaned;
		if (cleaned >= budget)
			clean_complete = false;
	}

	/* If all work not completed, return budget and keep polling */
	if (!clean_complete)
		return budget;

	/* If not enough Rx work done, exit the polling mode */
	napi_complete_done(napi, work_done);
	igb_ring_irq_enable(q_vector);

	return 0;
}
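
/* Reviewer note on the NAPI contract above: returning the full budget
 * tells the core to keep polling, while napi_complete_done() plus
 * re-enabling interrupts ends the polling cycle.  Returning budget after
 * napi_complete_done() would be a bug, which is why the two outcomes sit
 * on mutually exclusive paths here.
 */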
/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * @napi_budget: Used to determine if we are in netpoll
 *
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct igb_ring *tx_ring = q_vector->tx.ring;
	struct igb_tx_buffer *tx_buffer;
	union e1000_adv_tx_desc *tx_desc;
	unsigned int total_bytes = 0, total_packets = 0;
	unsigned int budget = q_vector->tx.work_limit;
	unsigned int i = tx_ring->next_to_clean;

	if (test_bit(__IGB_DOWN, &adapter->state))
		return true;

	tx_buffer = &tx_ring->tx_buffer_info[i];
	tx_desc = IGB_TX_DESC(tx_ring, i);
	i -= tx_ring->count;

	do {
		union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if DD is not set pending work has not been completed */
		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buffer->next_to_watch = NULL;

		/* update the statistics for this packet */
		total_bytes += tx_buffer->bytecount;
		total_packets += tx_buffer->gso_segs;

		/* free the skb */
		napi_consume_skb(tx_buffer->skb, napi_budget);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* clear tx_buffer data */
		dma_unmap_len_set(tx_buffer, len, 0);

		/* clear last DMA location and unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(!i)) {
				i -= tx_ring->count;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGB_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len)) {
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
				dma_unmap_len_set(tx_buffer, len, 0);
			}
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buffer = tx_ring->tx_buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	netdev_tx_completed_queue(txring_txq(tx_ring),
				  total_packets, total_bytes);
	i += tx_ring->count;
	tx_ring->next_to_clean = i;
	u64_stats_update_begin(&tx_ring->tx_syncp);
	tx_ring->tx_stats.bytes += total_bytes;
	tx_ring->tx_stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->tx_syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
		struct e1000_hw *hw = &adapter->hw;
		/* Detect a transmit hang in hardware; this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
		if (tx_buffer->next_to_watch &&
		    time_after(jiffies, tx_buffer->time_stamp +
			       (adapter->tx_timeout_factor * HZ)) &&
		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

			/* detected Tx unit hang */
			dev_err(tx_ring->dev,
				"Detected Tx Unit Hang\n"
				"  Tx Queue             <%d>\n"
				"  TDH                  <%x>\n"
				"  TDT                  <%x>\n"
				"  next_to_use          <%x>\n"
				"  next_to_clean        <%x>\n"
				"buffer_info[next_to_clean]\n"
				"  time_stamp           <%lx>\n"
				"  next_to_watch        <%p>\n"
				"  jiffies              <%lx>\n"
				"  desc.status          <%x>\n",
				tx_ring->queue_index,
				rd32(E1000_TDH(tx_ring->reg_idx)),
				readl(tx_ring->tail),
				tx_ring->next_to_use,
				tx_ring->next_to_clean,
				tx_buffer->time_stamp,
				tx_buffer->next_to_watch,
				jiffies,
				tx_buffer->next_to_watch->wb.status);
			netif_stop_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			/* we are about to reset, no point in enabling stuff */
			return true;
		}
	}

#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
	if (unlikely(total_packets &&
		     netif_carrier_ok(tx_ring->netdev) &&
		     igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (__netif_subqueue_stopped(tx_ring->netdev,
					     tx_ring->queue_index) &&
		    !(test_bit(__IGB_DOWN, &adapter->state))) {
			netif_wake_subqueue(tx_ring->netdev,
					    tx_ring->queue_index);

			u64_stats_update_begin(&tx_ring->tx_syncp);
			tx_ring->tx_stats.restart_queue++;
			u64_stats_update_end(&tx_ring->tx_syncp);
		}
	}

	return !!budget;
}
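
/* Reviewer note: the cleanup loop above biases `i` by -tx_ring->count so
 * the ring-wrap test collapses to `if (unlikely(!i))`.  Sketch of the
 * idea, assuming a 4-descriptor ring and next_to_clean == 2:
 *
 *	i = 2 - 4;		// -2
 *	i++; i++;		// hits 0 exactly at the wrap point
 *	i -= tx_ring->count;	// rewind and continue from slot 0
 *
 * The real index is recovered at the end with `i += tx_ring->count`.
 */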
/**
 * igb_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void igb_reuse_rx_page(struct igb_ring *rx_ring,
			      struct igb_rx_buffer *old_buff)
{
	struct igb_rx_buffer *new_buff;
	u16 nta = rx_ring->next_to_alloc;

	new_buff = &rx_ring->rx_buffer_info[nta];

	/* update, and store next to alloc */
	nta++;
	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

	/* Transfer page from old buffer to new buffer.
	 * Move each member individually to avoid possible store
	 * forwarding stalls.
	 */
	new_buff->dma		= old_buff->dma;
	new_buff->page		= old_buff->page;
	new_buff->page_offset	= old_buff->page_offset;
	new_buff->pagecnt_bias	= old_buff->pagecnt_bias;
}

static inline bool igb_page_is_reserved(struct page *page)
{
	return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
{
	unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
	struct page *page = rx_buffer->page;

	/* avoid re-using remote pages */
	if (unlikely(igb_page_is_reserved(page)))
		return false;

#if (PAGE_SIZE < 8192)
	/* if we are only owner of page we can reuse it */
	if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
		return false;
#else
#define IGB_LAST_OFFSET \
	(SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)

	if (rx_buffer->page_offset > IGB_LAST_OFFSET)
		return false;
#endif

	/* If we have drained the page fragment pool we need to update
	 * the pagecnt_bias and page count so that we fully restock the
	 * number of references the driver holds.
	 */
	if (unlikely(!pagecnt_bias)) {
		page_ref_add(page, USHRT_MAX);
		rx_buffer->pagecnt_bias = USHRT_MAX;
	}

	return true;
}
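
/* Reviewer note: pagecnt_bias is a local stand-in for page references so
 * the hot path can avoid atomic page_ref_*() calls.  Worked example: a
 * freshly mapped page has page_ref_count() == 1 and pagecnt_bias == 1;
 * each time the driver hands the buffer up it decrements the bias, and
 * reuse is allowed only while (page_ref_count(page) - pagecnt_bias) <= 1,
 * i.e. nobody outside the driver still holds the page.  Once the bias is
 * drained, both counters are restocked by USHRT_MAX in one atomic add.
 */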
/**
 * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of buffer to be added
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 **/
static void igb_add_rx_frag(struct igb_ring *rx_ring,
			    struct igb_rx_buffer *rx_buffer,
			    struct sk_buff *skb,
			    unsigned int size)
{
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = ring_uses_build_skb(rx_ring) ?
				SKB_DATA_ALIGN(IGB_SKB_PAD + size) :
				SKB_DATA_ALIGN(size);
#endif
	skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
			rx_buffer->page_offset, size, truesize);
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif
}

static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
					 struct igb_rx_buffer *rx_buffer,
					 union e1000_adv_rx_desc *rx_desc,
					 unsigned int size)
{
	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
	unsigned int headlen;
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	/* allocate a skb to store the frags */
	skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
	if (unlikely(!skb))
		return NULL;

	if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
		igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
		va += IGB_TS_HDR_LEN;
		size -= IGB_TS_HDR_LEN;
	}

	/* Determine available headroom for copy */
	headlen = size;
	if (headlen > IGB_RX_HDR_LEN)
		headlen = eth_get_headlen(va, IGB_RX_HDR_LEN);

	/* align pull length to size of long to optimize memcpy performance */
	memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));

	/* update all of the pointers */
	size -= headlen;
	if (size) {
		skb_add_rx_frag(skb, 0, rx_buffer->page,
				(va + headlen) - page_address(rx_buffer->page),
				size, truesize);
#if (PAGE_SIZE < 8192)
		rx_buffer->page_offset ^= truesize;
#else
		rx_buffer->page_offset += truesize;
#endif
	} else {
		rx_buffer->pagecnt_bias++;
	}

	return skb;
}

static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
				     struct igb_rx_buffer *rx_buffer,
				     union e1000_adv_rx_desc *rx_desc,
				     unsigned int size)
{
	void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
	unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else
	unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
				SKB_DATA_ALIGN(IGB_SKB_PAD + size);
#endif
	struct sk_buff *skb;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	/* build an skb around the page buffer */
	skb = build_skb(va - IGB_SKB_PAD, truesize);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, IGB_SKB_PAD);
	__skb_put(skb, size);

	/* pull timestamp out of packet data */
	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
		igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
		__skb_pull(skb, IGB_TS_HDR_LEN);
	}

	/* update buffer offset */
#if (PAGE_SIZE < 8192)
	rx_buffer->page_offset ^= truesize;
#else
	rx_buffer->page_offset += truesize;
#endif

	return skb;
}
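
/* Reviewer note: igb_construct_skb() and igb_build_skb() are the two ways
 * an skb is formed from the same page buffer.  The former allocates a
 * separate header skb and copies up to IGB_RX_HDR_LEN bytes into it
 * (good for small frames), while the latter wraps the page itself via
 * build_skb() and only adjusts skb metadata, trading headroom
 * (IGB_SKB_PAD) for a copy-free fast path.
 */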
static inline void igb_rx_checksum(struct igb_ring *ring,
				   union e1000_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	skb_checksum_none_assert(skb);

	/* Ignore Checksum bit is set */
	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
		return;

	/* Rx checksum disabled via ethtool */
	if (!(ring->netdev->features & NETIF_F_RXCSUM))
		return;

	/* TCP/UDP checksum error bit is set */
	if (igb_test_staterr(rx_desc,
			     E1000_RXDEXT_STATERR_TCPE |
			     E1000_RXDEXT_STATERR_IPE)) {
		/* work around errata with sctp packets where the TCPE aka
		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
		 * packets (aka let the stack check the crc32c)
		 */
		if (!((skb->len == 60) &&
		      test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
			u64_stats_update_begin(&ring->rx_syncp);
			ring->rx_stats.csum_err++;
			u64_stats_update_end(&ring->rx_syncp);
		}
		/* let the stack verify checksum errors */
		return;
	}
	/* It must be a TCP or UDP packet with a valid checksum */
	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
				      E1000_RXD_STAT_UDPCS))
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	dev_dbg(ring->dev, "cksum success: bits %08X\n",
		le32_to_cpu(rx_desc->wb.upper.status_error));
}

static inline void igb_rx_hash(struct igb_ring *ring,
			       union e1000_adv_rx_desc *rx_desc,
			       struct sk_buff *skb)
{
	if (ring->netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb,
			     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
			     PKT_HASH_TYPE_L3);
}
/**
 * igb_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it will place the
 * sk_buff in the next buffer to be chained and return true indicating
 * that this is in fact a non-EOP buffer.
 **/
static bool igb_is_non_eop(struct igb_ring *rx_ring,
			   union e1000_adv_rx_desc *rx_desc)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	/* fetch, update, and store next to clean */
	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;

	prefetch(IGB_RX_DESC(rx_ring, ntc));

	if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
		return false;

	return true;
}

/**
 * igb_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool igb_cleanup_headers(struct igb_ring *rx_ring,
				union e1000_adv_rx_desc *rx_desc,
				struct sk_buff *skb)
{
	if (unlikely((igb_test_staterr(rx_desc,
				       E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
		struct net_device *netdev = rx_ring->netdev;

		if (!(netdev->features & NETIF_F_RXALL)) {
			dev_kfree_skb_any(skb);
			return true;
		}
	}

	/* if eth_skb_pad returns an error the skb was freed */
	if (eth_skb_pad(skb))
		return true;

	return false;
}

/**
 * igb_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
static void igb_process_skb_fields(struct igb_ring *rx_ring,
				   union e1000_adv_rx_desc *rx_desc,
				   struct sk_buff *skb)
{
	struct net_device *dev = rx_ring->netdev;

	igb_rx_hash(rx_ring, rx_desc, skb);

	igb_rx_checksum(rx_ring, rx_desc, skb);

	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
	    !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
		igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);

	if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
		u16 vid;

		if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
		    test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
			vid = be16_to_cpu(rx_desc->wb.upper.vlan);
		else
			vid = le16_to_cpu(rx_desc->wb.upper.vlan);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
	}

	skb_record_rx_queue(skb, rx_ring->queue_index);

	skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
					       const unsigned int size)
{
	struct igb_rx_buffer *rx_buffer;

	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
	prefetchw(rx_buffer->page);

	/* we are reusing so sync this buffer for CPU use */
	dma_sync_single_range_for_cpu(rx_ring->dev,
				      rx_buffer->dma,
				      rx_buffer->page_offset,
				      size,
				      DMA_FROM_DEVICE);

	rx_buffer->pagecnt_bias--;

	return rx_buffer;
}

static void igb_put_rx_buffer(struct igb_ring *rx_ring,
			      struct igb_rx_buffer *rx_buffer)
{
	if (igb_can_reuse_rx_page(rx_buffer)) {
		/* hand second half of page back to the ring */
		igb_reuse_rx_page(rx_ring, rx_buffer);
	} else {
		/* We are not reusing the buffer so unmap it and free
		 * any references we are holding to it
		 */
		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
				     igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
				     IGB_RX_DMA_ATTR);
		__page_frag_cache_drain(rx_buffer->page,
					rx_buffer->pagecnt_bias);
	}

	/* clear contents of rx_buffer */
	rx_buffer->page = NULL;
}

static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{
	struct igb_ring *rx_ring = q_vector->rx.ring;
	struct sk_buff *skb = rx_ring->skb;
	unsigned int total_bytes = 0, total_packets = 0;
	u16 cleaned_count = igb_desc_unused(rx_ring);

	while (likely(total_packets < budget)) {
		union e1000_adv_rx_desc *rx_desc;
		struct igb_rx_buffer *rx_buffer;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
			igb_alloc_rx_buffers(rx_ring, cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		rx_buffer = igb_get_rx_buffer(rx_ring, size);

		/* retrieve a buffer from the ring */
		if (skb)
			igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
		else if (ring_uses_build_skb(rx_ring))
			skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size);
		else
			skb = igb_construct_skb(rx_ring, rx_buffer,
						rx_desc, size);

		/* exit if we failed to retrieve a buffer */
		if (!skb) {
			rx_ring->rx_stats.alloc_failed++;
			rx_buffer->pagecnt_bias++;
			break;
		}

		igb_put_rx_buffer(rx_ring, rx_buffer);
		cleaned_count++;

		/* fetch next buffer in frame if non-eop */
		if (igb_is_non_eop(rx_ring, rx_desc))
			continue;

		/* verify the packet layout is correct */
		if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
			skb = NULL;
			continue;
		}

		/* probably a little skewed due to removing CRC */
		total_bytes += skb->len;

		/* populate checksum, timestamp, VLAN, and protocol */
		igb_process_skb_fields(rx_ring, rx_desc, skb);

		napi_gro_receive(&q_vector->napi, skb);

		/* reset skb pointer */
		skb = NULL;

		/* update budget accounting */
		total_packets++;
	}

	/* place incomplete frames back on ring for completion */
	rx_ring->skb = skb;

	u64_stats_update_begin(&rx_ring->rx_syncp);
	rx_ring->rx_stats.packets += total_packets;
	rx_ring->rx_stats.bytes += total_bytes;
	u64_stats_update_end(&rx_ring->rx_syncp);
	q_vector->rx.total_packets += total_packets;
	q_vector->rx.total_bytes += total_bytes;

	if (cleaned_count)
		igb_alloc_rx_buffers(rx_ring, cleaned_count);

	return total_packets;
}
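
/* Reviewer note: buffers are returned to hardware in batches of
 * IGB_RX_BUFFER_WRITE rather than one at a time, so a single tail write
 * (and its MMIO cost) is amortized over many descriptors; any remainder
 * is flushed by the trailing igb_alloc_rx_buffers() call above.
 */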
static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
{
	return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
}

static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
				  struct igb_rx_buffer *bi)
{
	struct page *page = bi->page;
	dma_addr_t dma;

	/* since we are recycling buffers we should seldom need to alloc */
	if (likely(page))
		return true;

	/* alloc new page for storage */
	page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
	if (unlikely(!page)) {
		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	/* map page for use */
	dma = dma_map_page_attrs(rx_ring->dev, page, 0,
				 igb_rx_pg_size(rx_ring),
				 DMA_FROM_DEVICE,
				 IGB_RX_DMA_ATTR);

	/* if mapping failed free memory back to system since
	 * there isn't much point in holding memory we can't use
	 */
	if (dma_mapping_error(rx_ring->dev, dma)) {
		__free_pages(page, igb_rx_pg_order(rx_ring));

		rx_ring->rx_stats.alloc_failed++;
		return false;
	}

	bi->dma = dma;
	bi->page = page;
	bi->page_offset = igb_rx_offset(rx_ring);
	bi->pagecnt_bias = 1;

	return true;
}
/**
 * igb_alloc_rx_buffers - Replace used receive buffers; packet split
 * @rx_ring: rx descriptor ring to allocate new buffers for
 * @cleaned_count: number of buffers to replace
 **/
void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
{
	union e1000_adv_rx_desc *rx_desc;
	struct igb_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;
	u16 bufsz;

	/* nothing to do */
	if (!cleaned_count)
		return;

	rx_desc = IGB_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	bufsz = igb_rx_bufsz(rx_ring);

	do {
		if (!igb_alloc_mapped_page(rx_ring, bi))
			break;

		/* sync the buffer for use by the device */
		dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
						 bi->page_offset, bufsz,
						 DMA_FROM_DEVICE);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IGB_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		cleaned_count--;
	} while (cleaned_count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		/* record the next descriptor to use */
		rx_ring->next_to_use = i;

		/* update next to alloc since we have filled the ring */
		rx_ring->next_to_alloc = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		dma_wmb();
		writel(i, rx_ring->tail);
	}
}
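
/* Reviewer note: the dma_wmb() above is what makes the tail bump safe.
 * A minimal sketch of the required ordering when publishing descriptors:
 *
 *	rx_desc->read.pkt_addr = cpu_to_le64(dma);	// 1) fill descriptor
 *	dma_wmb();					// 2) order vs. device
 *	writel(i, rx_ring->tail);			// 3) hand to hardware
 *
 * Letting step 3 become visible before step 1 would let the NIC fetch a
 * stale descriptor.
 */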
/**
 * igb_mii_ioctl - MII ioctl handler
 * @netdev: pointer to the netdev being configured
 * @ifr: interface request structure holding the MII data
 * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
 **/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct mii_ioctl_data *data = if_mii(ifr);

	if (adapter->hw.phy.media_type != e1000_media_type_copper)
		return -EOPNOTSUPP;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = adapter->hw.phy.addr;
		break;
	case SIOCGMIIREG:
		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
				     &data->val_out))
			return -EIO;
		break;
	case SIOCSMIIREG:
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}
/**
 * igb_ioctl - generic ioctl entry point
 * @netdev: pointer to the netdev receiving the ioctl
 * @ifr: interface request structure
 * @cmd: ioctl command
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	switch (cmd) {
	case SIOCGMIIPHY:
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		return igb_mii_ioctl(netdev, ifr, cmd);
	case SIOCGHWTSTAMP:
		return igb_ptp_get_ts_config(netdev, ifr);
	case SIOCSHWTSTAMP:
		return igb_ptp_set_ts_config(netdev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;

	pci_read_config_word(adapter->pdev, reg, value);
}

void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;

	pci_write_config_word(adapter->pdev, reg, *value);
}

s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;

	if (pcie_capability_read_word(adapter->pdev, reg, value))
		return -E1000_ERR_CONFIG;

	return 0;
}

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
	struct igb_adapter *adapter = hw->back;

	if (pcie_capability_write_word(adapter->pdev, reg, *value))
		return -E1000_ERR_CONFIG;

	return 0;
}

static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl;
	bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);

	if (enable) {
		/* enable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl |= E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);

		/* Disable CFI check */
		rctl = rd32(E1000_RCTL);
		rctl &= ~E1000_RCTL_CFIEN;
		wr32(E1000_RCTL, rctl);
	} else {
		/* disable VLAN tag insert/strip */
		ctrl = rd32(E1000_CTRL);
		ctrl &= ~E1000_CTRL_VME;
		wr32(E1000_CTRL, ctrl);
	}

	igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable);
}

static int igb_vlan_rx_add_vid(struct net_device *netdev,
			       __be16 proto, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int pf_id = adapter->vfs_allocated_count;

	/* add the filter since PF can receive vlans w/o entry in vlvf */
	if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
		igb_vfta_set(hw, vid, pf_id, true, !!vid);

	set_bit(vid, adapter->active_vlans);

	return 0;
}

static int igb_vlan_rx_kill_vid(struct net_device *netdev,
				__be16 proto, u16 vid)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	int pf_id = adapter->vfs_allocated_count;
	struct e1000_hw *hw = &adapter->hw;

	/* remove VID from filter table */
	if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
		igb_vfta_set(hw, vid, pf_id, false, true);

	clear_bit(vid, adapter->active_vlans);

	return 0;
}

static void igb_restore_vlan(struct igb_adapter *adapter)
{
	u16 vid = 1;

	igb_vlan_mode(adapter->netdev, adapter->netdev->features);
	igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);

	for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
		igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}

int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_mac_info *mac = &adapter->hw.mac;

	mac->autoneg = 0;
	/* Make sure dplx is at most 1 bit and lsb of speed is not set
	 * for the switch() below to work
	 */
	if ((spd & 1) || (dplx & ~1))
		goto err_inval;

	/* Fiber NICs only allow 1000 Mbps full duplex, plus 100 Mbps
	 * full duplex for 100BaseFX SFP modules
	 */
	if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
		switch (spd + dplx) {
		case SPEED_10 + DUPLEX_HALF:
		case SPEED_10 + DUPLEX_FULL:
		case SPEED_100 + DUPLEX_HALF:
			goto err_inval;
		default:
			break;
		}
	}

	switch (spd + dplx) {
	case SPEED_10 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_10_HALF;
		break;
	case SPEED_10 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_10_FULL;
		break;
	case SPEED_100 + DUPLEX_HALF:
		mac->forced_speed_duplex = ADVERTISE_100_HALF;
		break;
	case SPEED_100 + DUPLEX_FULL:
		mac->forced_speed_duplex = ADVERTISE_100_FULL;
		break;
	case SPEED_1000 + DUPLEX_FULL:
		mac->autoneg = 1;
		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
		break;
	case SPEED_1000 + DUPLEX_HALF: /* not supported */
	default:
		goto err_inval;
	}

	/* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
	adapter->hw.phy.mdix = AUTO_ALL_MODES;

	return 0;

err_inval:
	dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
	return -EINVAL;
}
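
/* Reviewer note: the spd + dplx arithmetic above works because duplex is
 * constrained to one bit and the speed constants (10/100/1000) all have a
 * clear low bit, so each valid combination maps to a unique sum, e.g.
 * SPEED_100 + DUPLEX_FULL == 101 collides with no other pair.
 */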
static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
			  bool runtime)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl, rctl, status;
	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
#ifdef CONFIG_PM
	int retval = 0;
#endif

	rtnl_lock();
	netif_device_detach(netdev);

	if (netif_running(netdev))
		__igb_close(netdev, true);

	igb_ptp_suspend(adapter);

	igb_clear_interrupt_scheme(adapter);
	rtnl_unlock();

#ifdef CONFIG_PM
	retval = pci_save_state(pdev);
	if (retval)
		return retval;
#endif

	status = rd32(E1000_STATUS);
	if (status & E1000_STATUS_LU)
		wufc &= ~E1000_WUFC_LNKC;

	if (wufc) {
		igb_setup_rctl(adapter);
		igb_set_rx_mode(netdev);

		/* turn on all-multi mode if wake on multicast is enabled */
		if (wufc & E1000_WUFC_MC) {
			rctl = rd32(E1000_RCTL);
			rctl |= E1000_RCTL_MPE;
			wr32(E1000_RCTL, rctl);
		}

		ctrl = rd32(E1000_CTRL);
		/* advertise wake from D3Cold */
#define E1000_CTRL_ADVD3WUC 0x00100000
		/* phy power management enable */
#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
		ctrl |= E1000_CTRL_ADVD3WUC;
		wr32(E1000_CTRL, ctrl);

		/* Allow time for pending master requests to run */
		igb_disable_pcie_master(hw);

		wr32(E1000_WUC, E1000_WUC_PME_EN);
		wr32(E1000_WUFC, wufc);
	} else {
		wr32(E1000_WUC, 0);
		wr32(E1000_WUFC, 0);
	}

	*enable_wake = wufc || adapter->en_mng_pt;
	if (!*enable_wake)
		igb_power_down_link(adapter);
	else
		igb_power_up_link(adapter);

	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	igb_release_hw_control(adapter);

	pci_disable_device(pdev);

	return 0;
}

static void igb_deliver_wake_packet(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sk_buff *skb;
	u32 wupl;

	wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK;

	/* WUPM stores only the first 128 bytes of the wake packet.
	 * Read the packet only if we have the whole thing.
	 */
	if ((wupl == 0) || (wupl > E1000_WUPM_BYTES))
		return;

	skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES);
	if (!skb)
		return;

	skb_put(skb, wupl);

	/* Ensure reads are 32-bit aligned */
	wupl = roundup(wupl, 4);

	memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl);

	skb->protocol = eth_type_trans(skb, netdev);
	netif_rx(skb);
}

static int __maybe_unused igb_suspend(struct device *dev)
{
	int retval;
	bool wake;
	struct pci_dev *pdev = to_pci_dev(dev);

	retval = __igb_shutdown(pdev, &wake, 0);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int __maybe_unused igb_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 err, val;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (!pci_device_is_present(pdev))
		return -ENODEV;
	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"igb: Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (igb_init_interrupt_scheme(adapter, true)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	igb_reset(adapter);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igb_get_hw_control(adapter);

	val = rd32(E1000_WUS);
	if (val & WAKE_PKT_WUS)
		igb_deliver_wake_packet(netdev);

	wr32(E1000_WUS, ~0);

	rtnl_lock();
	if (!err && netif_running(netdev))
		err = __igb_open(netdev, true);

	if (!err)
		netif_device_attach(netdev);
	rtnl_unlock();

	return err;
}

static int __maybe_unused igb_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (!igb_has_link(adapter))
		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);

	return -EBUSY;
}

static int __maybe_unused igb_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	int retval;
	bool wake;

	retval = __igb_shutdown(pdev, &wake, 1);
	if (retval)
		return retval;

	if (wake) {
		pci_prepare_to_sleep(pdev);
	} else {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}

	return 0;
}

static int __maybe_unused igb_runtime_resume(struct device *dev)
{
	return igb_resume(dev);
}

static void igb_shutdown(struct pci_dev *pdev)
{
	bool wake;

	__igb_shutdown(pdev, &wake, 0);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, wake);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

#ifdef CONFIG_PCI_IOV
static int igb_sriov_reinit(struct pci_dev *dev)
{
	struct net_device *netdev = pci_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;

	rtnl_lock();

	if (netif_running(netdev))
		igb_close(netdev);
	else
		igb_reset(adapter);

	igb_clear_interrupt_scheme(adapter);

	igb_init_queue_configuration(adapter);

	if (igb_init_interrupt_scheme(adapter, true)) {
		rtnl_unlock();
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	if (netif_running(netdev))
		igb_open(netdev);

	rtnl_unlock();

	return 0;
}

static int igb_pci_disable_sriov(struct pci_dev *dev)
{
	int err = igb_disable_sriov(dev);

	if (!err)
		err = igb_sriov_reinit(dev);

	return err;
}

static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
{
	int err = igb_enable_sriov(dev, num_vfs);

	if (err)
		goto out;

	err = igb_sriov_reinit(dev);
	if (!err)
		return num_vfs;

out:
	return err;
}

#endif
static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
	if (num_vfs == 0)
		return igb_pci_disable_sriov(dev);
	else
		return igb_pci_enable_sriov(dev, num_vfs);
#endif
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct igb_q_vector *q_vector;
	int i;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (adapter->flags & IGB_FLAG_HAS_MSIX)
			wr32(E1000_EIMC, q_vector->eims_value);
		else
			igb_irq_disable(adapter);
		napi_schedule(&q_vector->napi);
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	netif_device_detach(netdev);

	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;

	if (netif_running(netdev))
		igb_down(adapter);
	pci_disable_device(pdev);
	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.  Implementation
 * resembles the first-half of the igb_resume routine.
 **/
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	pci_ers_result_t result;
	int err;

	if (pci_enable_device_mem(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pci_set_master(pdev);
		pci_restore_state(pdev);
		pci_save_state(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);
		/* In case of PCI error, the adapter loses its HW address
		 * so we should re-assign it here.
		 */
		hw->hw_addr = adapter->io_addr;

		igb_reset(adapter);
		wr32(E1000_WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			err);
		/* non-fatal, continue */
	}

	return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.  Implementation resembles the
 * second-half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (igb_up(adapter)) {
			dev_err(&pdev->dev, "igb_up failed after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igb_get_hw_control(adapter);
}

/**
 * igb_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
 * @adapter: Pointer to adapter structure
 * @index: Index of the RAR entry which needs to be synced with MAC table
 **/
static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rar_low, rar_high;
	u8 *addr = adapter->mac_table[index].addr;
	/* HW expects these to be in network order when they are plugged
	 * into the registers, which are little endian.  To guarantee that
	 * ordering we do a leXX_to_cpup here so the value is ready for
	 * the byteswap that occurs with writel().
	 */
	rar_low = le32_to_cpup((__le32 *)(addr));
	rar_high = le16_to_cpup((__le16 *)(addr + 4));

	/* Indicate to hardware the Address is Valid. */
	if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) {
		if (is_valid_ether_addr(addr))
			rar_high |= E1000_RAH_AV;

		if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR)
			rar_high |= E1000_RAH_ASEL_SRC_ADDR;

		switch (hw->mac.type) {
		case e1000_82575:
		case e1000_i210:
			if (adapter->mac_table[index].state &
			    IGB_MAC_STATE_QUEUE_STEERING)
				rar_high |= E1000_RAH_QSEL_ENABLE;

			rar_high |= E1000_RAH_POOL_1 *
				    adapter->mac_table[index].queue;
			break;
		default:
			rar_high |= E1000_RAH_POOL_1 <<
				    adapter->mac_table[index].queue;
			break;
		}
	}

	wr32(E1000_RAL(index), rar_low);
	wrfl();
	wr32(E1000_RAH(index), rar_high);
	wrfl();
}

static int igb_set_vf_mac(struct igb_adapter *adapter,
			  int vf, unsigned char *mac_addr)
{
	struct e1000_hw *hw = &adapter->hw;
	/* VF MAC addresses start at the end of the receive addresses and
	 * move towards the first; as a result a collision should not be
	 * possible
	 */
	int rar_entry = hw->mac.rar_entry_count - (vf + 1);
	unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses;

	ether_addr_copy(vf_mac_addr, mac_addr);
	ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr);
	adapter->mac_table[rar_entry].queue = vf;
	adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE;
	igb_rar_set_index(adapter, rar_entry);

	return 0;
}

static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;

	/* Setting the VF MAC to 0 reverts the IGB_VF_FLAG_PF_SET_MAC
	 * flag and allows to overwrite the MAC via VF netdev.  This
	 * is necessary to allow libvirt a way to restore the original
	 * MAC after unbinding vfio-pci and reloading igbvf after shutting
	 * down a VM.
	 */
	if (is_zero_ether_addr(mac)) {
		adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC;
		dev_info(&adapter->pdev->dev,
			 "remove administratively set MAC on VF %d\n",
			 vf);
	} else if (is_valid_ether_addr(mac)) {
		adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
		dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
			 mac, vf);
		dev_info(&adapter->pdev->dev,
			 "Reload the VF driver to make this change effective.");

		/* Generate additional warning if PF is down */
		if (test_bit(__IGB_DOWN, &adapter->state)) {
			dev_warn(&adapter->pdev->dev,
				 "The VF MAC address has been set, but the PF device is not up.\n");
			dev_warn(&adapter->pdev->dev,
				 "Bring the PF device up before attempting to use the VF device.\n");
		}
	} else {
		return -EINVAL;
	}
	return igb_set_vf_mac(adapter, vf, mac);
}

static int igb_link_mbps(int internal_link_speed)
{
	switch (internal_link_speed) {
	case SPEED_100:
		return 100;
	case SPEED_1000:
		return 1000;
	default:
		return 0;
	}
}

static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
				  int link_speed)
{
	int rf_dec, rf_int;
	u32 bcnrc_val;

	if (tx_rate != 0) {
		/* Calculate the rate factor values to set */
		rf_int = link_speed / tx_rate;
		rf_dec = (link_speed - (rf_int * tx_rate));
		rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) /
			 tx_rate;

		bcnrc_val = E1000_RTTBCNRC_RS_ENA;
		bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
			      E1000_RTTBCNRC_RF_INT_MASK);
		bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
	} else {
		bcnrc_val = 0;
	}

	wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
	/* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
	 * register.  MMW_SIZE=0x014 if 9728-byte jumbo is supported.
	 */
	wr32(E1000_RTTBCNRM, 0x14);
	wr32(E1000_RTTBCNRC, bcnrc_val);
}
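
/* Reviewer note: RTTBCNRC encodes the rate factor as link_speed/tx_rate
 * in fixed point.  Worked example (values are illustrative): at 1000 Mbps
 * link speed with a 300 Mbps cap, rf_int = 1000 / 300 = 3 and the
 * fractional part is (1000 - 3 * 300) scaled by the RF_INT shift and
 * divided by 300, so the hardware throttles the VF queue to roughly
 * 1000 / 3.33, i.e. about 300 Mbps.
 */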
  7831. static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
  7832. {
  7833. int actual_link_speed, i;
  7834. bool reset_rate = false;
  7835. /* VF TX rate limit was not set or not supported */
  7836. if ((adapter->vf_rate_link_speed == 0) ||
  7837. (adapter->hw.mac.type != e1000_82576))
  7838. return;
  7839. actual_link_speed = igb_link_mbps(adapter->link_speed);
  7840. if (actual_link_speed != adapter->vf_rate_link_speed) {
  7841. reset_rate = true;
  7842. adapter->vf_rate_link_speed = 0;
  7843. dev_info(&adapter->pdev->dev,
  7844. "Link speed has been changed. VF Transmit rate is disabled\n");
  7845. }
  7846. for (i = 0; i < adapter->vfs_allocated_count; i++) {
  7847. if (reset_rate)
  7848. adapter->vf_data[i].tx_rate = 0;
  7849. igb_set_vf_rate_limit(&adapter->hw, i,
  7850. adapter->vf_data[i].tx_rate,
  7851. actual_link_speed);
  7852. }
  7853. }
  7854. static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
  7855. int min_tx_rate, int max_tx_rate)
  7856. {
  7857. struct igb_adapter *adapter = netdev_priv(netdev);
  7858. struct e1000_hw *hw = &adapter->hw;
  7859. int actual_link_speed;
  7860. if (hw->mac.type != e1000_82576)
  7861. return -EOPNOTSUPP;
  7862. if (min_tx_rate)
  7863. return -EINVAL;
  7864. actual_link_speed = igb_link_mbps(adapter->link_speed);
  7865. if ((vf >= adapter->vfs_allocated_count) ||
  7866. (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
  7867. (max_tx_rate < 0) ||
  7868. (max_tx_rate > actual_link_speed))
  7869. return -EINVAL;
  7870. adapter->vf_rate_link_speed = actual_link_speed;
  7871. adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
  7872. igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
  7873. return 0;
  7874. }
  7875. static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
  7876. bool setting)
  7877. {
  7878. struct igb_adapter *adapter = netdev_priv(netdev);
  7879. struct e1000_hw *hw = &adapter->hw;
  7880. u32 reg_val, reg_offset;
  7881. if (!adapter->vfs_allocated_count)
  7882. return -EOPNOTSUPP;
  7883. if (vf >= adapter->vfs_allocated_count)
  7884. return -EINVAL;
  7885. reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
  7886. reg_val = rd32(reg_offset);
  7887. if (setting)
  7888. reg_val |= (BIT(vf) |
  7889. BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
  7890. else
  7891. reg_val &= ~(BIT(vf) |
  7892. BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
  7893. wr32(reg_offset, reg_val);
  7894. adapter->vf_data[vf].spoofchk_enabled = setting;
  7895. return 0;
  7896. }
  7897. static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
  7898. {
  7899. struct igb_adapter *adapter = netdev_priv(netdev);
  7900. if (vf >= adapter->vfs_allocated_count)
  7901. return -EINVAL;
  7902. if (adapter->vf_data[vf].trusted == setting)
  7903. return 0;
  7904. adapter->vf_data[vf].trusted = setting;
  7905. dev_info(&adapter->pdev->dev, "VF %u is %strusted\n",
  7906. vf, setting ? "" : "not ");
  7907. return 0;
  7908. }
static int igb_ndo_get_vf_config(struct net_device *netdev,
				 int vf, struct ifla_vf_info *ivi)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (vf >= adapter->vfs_allocated_count)
		return -EINVAL;
	ivi->vf = vf;
	memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
	ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
	ivi->min_tx_rate = 0;
	ivi->vlan = adapter->vf_data[vf].pf_vlan;
	ivi->qos = adapter->vf_data[vf].pf_qos;
	ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
	ivi->trusted = adapter->vf_data[vf].trusted;
	return 0;
}
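
/**
 * igb_vmm_control - Configure virtual machine offload features
 * @adapter: pointer to the adapter private structure
 *
 * Enables the per-MAC-type pieces of VMDq support: VLAN tag insertion
 * notification (82576), replication VLAN tag stripping (82576/82580)
 * and, when VFs are allocated, PF loopback, replication and
 * anti-spoofing. 82575, i210, i211 and i354 do not support replication,
 * so the function returns early for those parts.
 **/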
static void igb_vmm_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 reg;

	switch (hw->mac.type) {
	case e1000_82575:
	case e1000_i210:
	case e1000_i211:
	case e1000_i354:
	default:
		/* replication is not supported for 82575 */
		return;
	case e1000_82576:
		/* notify HW that the MAC is adding vlan tags */
		reg = rd32(E1000_DTXCTL);
		reg |= E1000_DTXCTL_VLAN_ADDED;
		wr32(E1000_DTXCTL, reg);
		/* Fall through */
	case e1000_82580:
		/* enable replication vlan tag stripping */
		reg = rd32(E1000_RPLOLR);
		reg |= E1000_RPLOLR_STRVLAN;
		wr32(E1000_RPLOLR, reg);
		/* Fall through */
	case e1000_i350:
		/* none of the above registers are supported by i350 */
		break;
	}

	if (adapter->vfs_allocated_count) {
		igb_vmdq_set_loopback_pf(hw, true);
		igb_vmdq_set_replication_pf(hw, true);
		igb_vmdq_set_anti_spoofing_pf(hw, true,
					      adapter->vfs_allocated_count);
	} else {
		igb_vmdq_set_loopback_pf(hw, false);
		igb_vmdq_set_replication_pf(hw, false);
	}
}
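
/**
 * igb_init_dmac - Configure DMA coalescing
 * @adapter: pointer to the adapter private structure
 * @pba: packet buffer allocation, in KB
 *
 * On i350 and newer parts, programs the DMA coalescing watermarks,
 * thresholds and watchdog derived from @pba and the maximum frame size
 * when IGB_FLAG_DMAC is set; on 82580 it explicitly disables DMA
 * coalescing, which is unsupported there.
 **/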
static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 dmac_thr;
	u16 hwm;

	if (hw->mac.type > e1000_82580) {
		if (adapter->flags & IGB_FLAG_DMAC) {
			u32 reg;

			/* force threshold to 0. */
			wr32(E1000_DMCTXTH, 0);

			/* DMA Coalescing high water mark needs to be greater
			 * than the Rx threshold. Set hwm to PBA - max frame
			 * size in 16B units, capping it at PBA - 6KB.
			 */
			hwm = 64 * (pba - 6);
			reg = rd32(E1000_FCRTC);
			reg &= ~E1000_FCRTC_RTH_COAL_MASK;
			reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
				& E1000_FCRTC_RTH_COAL_MASK);
			wr32(E1000_FCRTC, reg);

			/* Set the DMA Coalescing Rx threshold to PBA - 2 * max
			 * frame size, capping it at PBA - 10KB.
			 */
			dmac_thr = pba - 10;
			reg = rd32(E1000_DMACR);
			reg &= ~E1000_DMACR_DMACTHR_MASK;
			reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
				& E1000_DMACR_DMACTHR_MASK);

			/* transition to L0s or L1 if available */
			reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

			/* watchdog timer = ~1000 usec, in 32 usec intervals */
			reg |= (1000 >> 5);

			/* Disable BMC-to-OS Watchdog Enable */
			if (hw->mac.type != e1000_i354)
				reg &= ~E1000_DMACR_DC_BMC2OSW_EN;

			wr32(E1000_DMACR, reg);

			/* no lower threshold to disable
			 * coalescing (smart FIFO); UTRESH=0
			 */
			wr32(E1000_DMCRTRH, 0);

			reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);

			wr32(E1000_DMCTLX, reg);

			/* free space in tx packet buffer to wake from
			 * DMA coal
			 */
			wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
			     (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);

			/* make low power state decision controlled
			 * by DMA coal
			 */
			reg = rd32(E1000_PCIEMISC);
			reg &= ~E1000_PCIEMISC_LX_DECISION;
			wr32(E1000_PCIEMISC, reg);
		} /* endif adapter->dmac is not disabled */
	} else if (hw->mac.type == e1000_82580) {
		u32 reg = rd32(E1000_PCIEMISC);

		wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
		wr32(E1000_DMACR, 0);
	}
}
/**
 * igb_read_i2c_byte - Reads an 8-bit byte over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to read
 * @dev_addr: device address (unused; the bound i2c_client's address is used)
 * @data: value read
 *
 * Performs a byte read operation over the I2C interface at
 * a specified device address.
 **/
s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
		      u8 dev_addr, u8 *data)
{
	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
	struct i2c_client *this_client = adapter->i2c_client;
	s32 status;
	u16 swfw_mask = E1000_SWFW_PHY0_SM;

	if (!this_client)
		return E1000_ERR_I2C;

	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
		return E1000_ERR_SWFW_SYNC;

	status = i2c_smbus_read_byte_data(this_client, byte_offset);
	hw->mac.ops.release_swfw_sync(hw, swfw_mask);

	if (status < 0)
		return E1000_ERR_I2C;

	*data = status;
	return 0;
}
/**
 * igb_write_i2c_byte - Writes an 8-bit byte over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to write
 * @dev_addr: device address (unused; the bound i2c_client's address is used)
 * @data: value to write
 *
 * Performs a byte write operation over the I2C interface at
 * a specified device address.
 **/
s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
		       u8 dev_addr, u8 data)
{
	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
	struct i2c_client *this_client = adapter->i2c_client;
	s32 status;
	u16 swfw_mask = E1000_SWFW_PHY0_SM;

	if (!this_client)
		return E1000_ERR_I2C;

	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
		return E1000_ERR_SWFW_SYNC;

	status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
	hw->mac.ops.release_swfw_sync(hw, swfw_mask);

	if (status)
		return E1000_ERR_I2C;

	return 0;
}
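
/**
 * igb_reinit_queues - Rebuild the queue and interrupt configuration
 * @adapter: pointer to the adapter private structure
 *
 * Closes the interface if it is running, releases the current interrupt
 * scheme, reallocates vectors and queues, then reopens the interface.
 * Returns 0 on success or a negative errno on failure.
 **/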
int igb_reinit_queues(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (netif_running(netdev))
		igb_close(netdev);

	igb_reset_interrupt_capability(adapter);

	if (igb_init_interrupt_scheme(adapter, true)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	if (netif_running(netdev))
		err = igb_open(netdev);

	return err;
}
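
/**
 * igb_nfc_filter_exit - Remove all NFC filters from hardware
 * @adapter: pointer to the adapter private structure
 *
 * Walks both the ethtool NFC filter list and the cls_flower filter list
 * under nfc_lock and erases each rule from the hardware.
 **/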
static void igb_nfc_filter_exit(struct igb_adapter *adapter)
{
	struct igb_nfc_filter *rule;

	spin_lock(&adapter->nfc_lock);

	hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
		igb_erase_filter(adapter, rule);

	hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
		igb_erase_filter(adapter, rule);

	spin_unlock(&adapter->nfc_lock);
}
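
/**
 * igb_nfc_filter_restore - Re-program NFC filters into hardware
 * @adapter: pointer to the adapter private structure
 *
 * Walks the ethtool NFC filter list under nfc_lock and re-adds each
 * rule to the hardware, e.g. after a device reset.
 **/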
static void igb_nfc_filter_restore(struct igb_adapter *adapter)
{
	struct igb_nfc_filter *rule;

	spin_lock(&adapter->nfc_lock);

	hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
		igb_add_filter(adapter, rule);

	spin_unlock(&adapter->nfc_lock);
}

/* igb_main.c */