// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2007 - 2018 Intel Corporation. */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/netdevice.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/net_tstamp.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sctp.h>
#include <linux/if_ether.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include <linux/pm_runtime.h>
#include <linux/etherdevice.h>
#ifdef CONFIG_IGB_DCA
#include <linux/dca.h>
#endif
#include <linux/i2c.h>
#include "igb.h"

#define MAJ 5
#define MIN 4
#define BUILD 0
#define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
__stringify(BUILD) "-k"
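
/* Note: with the values above, DRV_VERSION stringifies to "5.4.0-k";
 * this is the version string exposed through MODULE_VERSION() below and
 * reported by "ethtool -i <iface>".
 */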

enum queue_mode {
	QUEUE_MODE_STRICT_PRIORITY,
	QUEUE_MODE_STREAM_RESERVATION,
};

enum tx_queue_prio {
	TX_QUEUE_PRIO_HIGH,
	TX_QUEUE_PRIO_LOW,
};

char igb_driver_name[] = "igb";
char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
				"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] =
				"Copyright (c) 2007-2014 Intel Corporation.";

static const struct e1000_info *igb_info_tbl[] = {
	[board_82575] = &e1000_82575_info,
};

static const struct pci_device_id igb_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
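
/* Note: MODULE_DEVICE_TABLE() exports the PCI ID table above into the
 * module's metadata (modules.alias), which is what lets udev/modprobe
 * autoload igb when a device with a matching vendor/device ID is found.
 */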

static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
static void igb_free_all_rx_resources(struct igb_adapter *);
static void igb_setup_mrqc(struct igb_adapter *);
static int igb_probe(struct pci_dev *, const struct pci_device_id *);
static void igb_remove(struct pci_dev *pdev);
static int igb_sw_init(struct igb_adapter *);
int igb_open(struct net_device *);
int igb_close(struct net_device *);
static void igb_configure(struct igb_adapter *);
static void igb_configure_tx(struct igb_adapter *);
static void igb_configure_rx(struct igb_adapter *);
static void igb_clean_all_tx_rings(struct igb_adapter *);
static void igb_clean_all_rx_rings(struct igb_adapter *);
static void igb_clean_tx_ring(struct igb_ring *);
static void igb_clean_rx_ring(struct igb_ring *);
static void igb_set_rx_mode(struct net_device *);
static void igb_update_phy_info(struct timer_list *);
static void igb_watchdog(struct timer_list *);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static void igb_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter, bool set);
static irqreturn_t igb_intr(int irq, void *);
static irqreturn_t igb_intr_msi(int irq, void *);
static irqreturn_t igb_msix_other(int irq, void *);
static irqreturn_t igb_msix_ring(int irq, void *);
#ifdef CONFIG_IGB_DCA
static void igb_update_dca(struct igb_q_vector *);
static void igb_setup_dca(struct igb_adapter *);
#endif /* CONFIG_IGB_DCA */
static int igb_poll(struct napi_struct *, int);
static bool igb_clean_tx_irq(struct igb_q_vector *, int);
static int igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
static void igb_vlan_mode(struct net_device *netdev,
			  netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
static void igb_restore_vlan(struct igb_adapter *);
static void igb_rar_set_index(struct igb_adapter *, u32);
static void igb_ping_all_vfs(struct igb_adapter *);
static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_flush_mac_table(struct igb_adapter *);
static int igb_available_rars(struct igb_adapter *, u8);
static void igb_set_default_mac_filter(struct igb_adapter *);
static int igb_uc_sync(struct net_device *, const unsigned char *);
static int igb_uc_unsync(struct net_device *, const unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
			       int vf, u16 vlan, u8 qos, __be16 vlan_proto);
static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
				   bool setting);
static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf,
				bool setting);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
				 struct ifla_vf_info *ivi);
static void igb_check_vf_rate_limit(struct igb_adapter *);
static void igb_nfc_filter_exit(struct igb_adapter *adapter);
static void igb_nfc_filter_restore(struct igb_adapter *adapter);

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf);
static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs);
static int igb_disable_sriov(struct pci_dev *dev);
static int igb_pci_disable_sriov(struct pci_dev *dev);
#endif

static int igb_suspend(struct device *);
static int igb_resume(struct device *);
static int igb_runtime_suspend(struct device *dev);
static int igb_runtime_resume(struct device *dev);
static int igb_runtime_idle(struct device *dev);
static const struct dev_pm_ops igb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
			igb_runtime_idle)
};
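
/* Note: SET_SYSTEM_SLEEP_PM_OPS() and SET_RUNTIME_PM_OPS() expand to
 * nothing unless CONFIG_PM_SLEEP / CONFIG_PM are enabled, so igb_pm_ops
 * degrades gracefully on kernels built without power management.
 */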

static void igb_shutdown(struct pci_dev *);
static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
#ifdef CONFIG_IGB_DCA
static int igb_notify_dca(struct notifier_block *, unsigned long, void *);
static struct notifier_block dca_notifier = {
	.notifier_call	= igb_notify_dca,
	.next		= NULL,
	.priority	= 0
};
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void igb_netpoll(struct net_device *);
#endif

#ifdef CONFIG_PCI_IOV
static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */
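
/* Note: on SR-IOV capable parts the igb PF supports at most 7 VFs, so a
 * typical invocation looks like "modprobe igb max_vfs=7". On modern
 * kernels the sysfs sriov_numvfs interface (wired up through the
 * .sriov_configure hook below) is the preferred way to enable VFs.
 */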

static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
		     pci_channel_state_t);
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
static void igb_io_resume(struct pci_dev *);

static const struct pci_error_handlers igb_err_handler = {
	.error_detected = igb_io_error_detected,
	.slot_reset = igb_io_slot_reset,
	.resume = igb_io_resume,
};

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);

static struct pci_driver igb_driver = {
	.name     = igb_driver_name,
	.id_table = igb_pci_tbl,
	.probe    = igb_probe,
	.remove   = igb_remove,
#ifdef CONFIG_PM
	.driver.pm = &igb_pm_ops,
#endif
	.shutdown = igb_shutdown,
	.sriov_configure = igb_pci_sriov_configure,
	.err_handler = &igb_err_handler
};

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
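
/* Note: this value is handed to netif_msg_init() at probe time, which
 * treats a negative value (the -1 default above) as "use the driver
 * default", i.e. DEFAULT_MSG_ENABLE. Otherwise the lowest 'debug' bits
 * of the netif message mask are enabled, e.g. debug=16 turns on all
 * sixteen message types.
 */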

struct igb_reg_info {
	u32 ofs;
	char *name;
};

static const struct igb_reg_info igb_reg_info_tbl[] = {
	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* RX Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},

	/* TX Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{}
};

/* igb_regdump - register printout routine */
static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDLEN(n));
		break;
	case E1000_RDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDH(n));
		break;
	case E1000_RDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDT(n));
		break;
	case E1000_RXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RXDCTL(n));
		break;
	case E1000_RDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAL(n));
		break;
	case E1000_RDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_RDBAH(n));
		break;
	case E1000_TDBAL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAL(n));
		break;
	case E1000_TDBAH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDBAH(n));
		break;
	case E1000_TDLEN(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDLEN(n));
		break;
	case E1000_TDH(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDH(n));
		break;
	case E1000_TDT(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TDT(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 4; n++)
			regs[n] = rd32(E1000_TXDCTL(n));
		break;
	default:
		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
		regs[2], regs[3]);
}
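
/* Note: for the queue-indexed descriptor registers in igb_reg_info_tbl,
 * igb_regdump() prints the first four per-queue instances on one line
 * under a "NAME[0-3]" label; every other entry is dumped as a single
 * 32-bit register value.
 */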

/* igb_dump - Print registers, Tx-rings and Rx-rings */
static void igb_dump(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct igb_reg_info *reginfo;
	struct igb_ring *tx_ring;
	union e1000_adv_tx_desc *tx_desc;
	struct my_u0 { u64 a; u64 b; } *u0;
	struct igb_ring *rx_ring;
	union e1000_adv_rx_desc *rx_desc;
	u32 staterr;
	u16 i, n;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start\n");
		pr_info("%-15s %016lX %016lX\n", netdev->name,
			netdev->state, dev_trans_start(netdev));
	}

	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
	     reginfo->name; reginfo++) {
		igb_regdump(hw, reginfo);
	}

	/* Print TX Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	for (n = 0; n < adapter->num_tx_queues; n++) {
		struct igb_tx_buffer *buffer_info;
		tx_ring = adapter->tx_ring[n];
		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
		pr_info(" %5d %5X %5X %016llX %04X %p %016llX\n",
			n, tx_ring->next_to_use, tx_ring->next_to_clean,
			(u64)dma_unmap_addr(buffer_info, dma),
			dma_unmap_len(buffer_info, len),
			buffer_info->next_to_watch,
			(u64)buffer_info->time_stamp);
	}

	/* Print TX Rings */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");

	/* Transmit Descriptor Formats
	 *
	 * Advanced Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0]                                |
	 *   +--------------------------------------------------------------+
	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
	 *   +--------------------------------------------------------------+
	 *   63      46 45    40 39 38 36 35 32 31  24             15       0
	 */

	for (n = 0; n < adapter->num_tx_queues; n++) {
		tx_ring = adapter->tx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] [bi->dma       ] leng  ntw timestamp        bi->skb\n");

		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
			const char *next_desc;
			struct igb_tx_buffer *buffer_info;
			tx_desc = IGB_TX_DESC(tx_ring, i);
			buffer_info = &tx_ring->tx_buffer_info[i];
			u0 = (struct my_u0 *)tx_desc;
			if (i == tx_ring->next_to_use &&
			    i == tx_ring->next_to_clean)
				next_desc = " NTC/U";
			else if (i == tx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == tx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			pr_info("T [0x%03X]    %016llX %016llX %016llX %04X  %p %016llX %p%s\n",
				i, le64_to_cpu(u0->a),
				le64_to_cpu(u0->b),
				(u64)dma_unmap_addr(buffer_info, dma),
				dma_unmap_len(buffer_info, len),
				buffer_info->next_to_watch,
				(u64)buffer_info->time_stamp,
				buffer_info->skb, next_desc);

			if (netif_msg_pktdata(adapter) && buffer_info->skb)
				print_hex_dump(KERN_INFO, "",
					DUMP_PREFIX_ADDRESS,
					16, 1, buffer_info->skb->data,
					dma_unmap_len(buffer_info, len),
					true);
		}
	}

	/* Print RX Rings Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info(" %5d %5X %5X\n",
			n, rx_ring->next_to_use, rx_ring->next_to_clean);
	}

	/* Print RX Rings */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");

	/* Advanced Receive Descriptor (Read) Format
	 *    63                                           1        0
	 *    +-----------------------------------------------------+
	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
	 *    +----------------------------------------------+------+
	 *  8 |       Header Buffer Address [63:1]           |  DD  |
	 *    +-----------------------------------------------------+
	 *
	 *
	 * Advanced Receive Descriptor (Write-Back) Format
	 *
	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
	 *   +------------------------------------------------------+
	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
	 *   | Checksum   Ident  |   |           |    | Type | Type |
	 *   +------------------------------------------------------+
	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
	 *   +------------------------------------------------------+
	 *   63       48 47    32 31            20 19               0
	 */

	for (n = 0; n < adapter->num_rx_queues; n++) {
		rx_ring = adapter->rx_ring[n];
		pr_info("------------------------------------\n");
		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
		pr_info("------------------------------------\n");
		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] [bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");

		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			struct igb_rx_buffer *buffer_info;
			buffer_info = &rx_ring->rx_buffer_info[i];
			rx_desc = IGB_RX_DESC(rx_ring, i);
			u0 = (struct my_u0 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X]     %016llX %016llX ---------------- %s\n",
					"RWB", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					next_desc);
			} else {
				pr_info("%s[0x%03X]     %016llX %016llX %016llX %s\n",
					"R  ", i,
					le64_to_cpu(u0->a),
					le64_to_cpu(u0->b),
					(u64)buffer_info->dma,
					next_desc);

				if (netif_msg_pktdata(adapter) &&
				    buffer_info->dma && buffer_info->page) {
					print_hex_dump(KERN_INFO, "",
						DUMP_PREFIX_ADDRESS,
						16, 1,
						page_address(buffer_info->page) +
						buffer_info->page_offset,
						igb_rx_bufsz(rx_ring), true);
				}
			}
		}
	}

exit:
	return;
}
/**
 * igb_get_i2c_data - Reads the I2C SDA data bit
 * @data: pointer to hardware structure
 *
 * Returns the I2C data bit value
 **/
static int igb_get_i2c_data(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	return !!(i2cctl & E1000_I2C_DATA_IN);
}

/**
 * igb_set_i2c_data - Sets the I2C data bit
 * @data: pointer to hardware structure
 * @state: I2C data value (0 or 1) to set
 *
 * Sets the I2C data bit
 **/
static void igb_set_i2c_data(void *data, int state)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	if (state)
		i2cctl |= E1000_I2C_DATA_OUT;
	else
		i2cctl &= ~E1000_I2C_DATA_OUT;

	i2cctl &= ~E1000_I2C_DATA_OE_N;
	i2cctl |= E1000_I2C_CLK_OE_N;
	wr32(E1000_I2CPARAMS, i2cctl);
	wrfl();
}

/**
 * igb_set_i2c_clk - Sets the I2C SCL clock
 * @data: pointer to hardware structure
 * @state: state to set clock
 *
 * Sets the I2C clock line to state
 **/
static void igb_set_i2c_clk(void *data, int state)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	if (state) {
		i2cctl |= E1000_I2C_CLK_OUT;
		i2cctl &= ~E1000_I2C_CLK_OE_N;
	} else {
		i2cctl &= ~E1000_I2C_CLK_OUT;
		i2cctl &= ~E1000_I2C_CLK_OE_N;
	}
	wr32(E1000_I2CPARAMS, i2cctl);
	wrfl();
}

/**
 * igb_get_i2c_clk - Gets the I2C SCL clock state
 * @data: pointer to hardware structure
 *
 * Gets the I2C clock state
 **/
static int igb_get_i2c_clk(void *data)
{
	struct igb_adapter *adapter = (struct igb_adapter *)data;
	struct e1000_hw *hw = &adapter->hw;
	s32 i2cctl = rd32(E1000_I2CPARAMS);

	return !!(i2cctl & E1000_I2C_CLK_IN);
}

static const struct i2c_algo_bit_data igb_i2c_algo = {
	.setsda		= igb_set_i2c_data,
	.setscl		= igb_set_i2c_clk,
	.getsda		= igb_get_i2c_data,
	.getscl		= igb_get_i2c_clk,
	.udelay		= 5,
	.timeout	= 20,
};

/**
 * igb_get_hw_dev - return device
 * @hw: pointer to hardware structure
 *
 * used by hardware layer to print debugging information
 **/
struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
{
	struct igb_adapter *adapter = hw->back;
	return adapter->netdev;
}

/**
 * igb_init_module - Driver Registration Routine
 *
 * igb_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 **/
static int __init igb_init_module(void)
{
	int ret;

	pr_info("%s - version %s\n",
		igb_driver_string, igb_driver_version);
	pr_info("%s\n", igb_copyright);

#ifdef CONFIG_IGB_DCA
	dca_register_notify(&dca_notifier);
#endif
	ret = pci_register_driver(&igb_driver);
	return ret;
}

module_init(igb_init_module);

/**
 * igb_exit_module - Driver Exit Cleanup Routine
 *
 * igb_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit igb_exit_module(void)
{
#ifdef CONFIG_IGB_DCA
	dca_unregister_notify(&dca_notifier);
#endif
	pci_unregister_driver(&igb_driver);
}

module_exit(igb_exit_module);

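/* Map a linear queue index onto the 82576's interleaved VMDq queue layout
 * (0, 8, 1, 9, 2, 10, ...); see igb_cache_ring_register() below.
 */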
#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))

/**
 * igb_cache_ring_register - Descriptor ring to register mapping
 * @adapter: board private structure to initialize
 *
 * Once we know the feature-set enabled for the device, we'll cache
 * the register offset the descriptor ring is assigned to.
 **/
static void igb_cache_ring_register(struct igb_adapter *adapter)
{
	int i = 0, j = 0;
	u32 rbase_offset = adapter->vfs_allocated_count;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
		/* The queues are allocated for virtualization such that VF 0
		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
		 * In order to avoid collision we start at the first free queue
		 * and continue consuming queues in the same sequence
		 */
		if (adapter->vfs_allocated_count) {
			for (; i < adapter->rss_queues; i++)
				adapter->rx_ring[i]->reg_idx = rbase_offset +
							       Q_IDX_82576(i);
		}
		/* Fall through */
	case e1000_82575:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* Fall through */
	default:
		for (; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
		for (; j < adapter->num_tx_queues; j++)
			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
		break;
	}
}

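/**
 * igb_rd32 - read a device register, tolerating surprise removal
 * @hw: pointer to the HW structure
 * @reg: register offset to read
 *
 * MMIO reads from a PCIe device that has been removed return all 1's.
 * If that pattern is seen and confirmed, the mapping is marked dead so
 * subsequent accesses fail fast.
 */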
u32 igb_rd32(struct e1000_hw *hw, u32 reg)
{
	struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
	u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
	u32 value = 0;

	if (E1000_REMOVED(hw_addr))
		return ~value;

	value = readl(&hw_addr[reg]);

	/* reads should not return all F's */
	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
		struct net_device *netdev = igb->netdev;

		hw->hw_addr = NULL;
		netdev_err(netdev, "PCIe link lost\n");
	}

	return value;
}

/**
 * igb_write_ivar - configure ivar for given MSI-X vector
 * @hw: pointer to the HW structure
 * @msix_vector: vector number we are allocating to a given ring
 * @index: row index of IVAR register to write within IVAR table
 * @offset: column offset in IVAR, should be a multiple of 8
 *
 * This function is intended to handle the writing of the IVAR register
 * for adapters 82576 and newer. The IVAR table consists of 2 columns,
 * each containing a cause allocation for an Rx and Tx ring, and a
 * variable number of rows depending on the number of queues supported.
 **/
static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
			   int index, int offset)
{
	u32 ivar = array_rd32(E1000_IVAR0, index);

	/* clear any bits that are currently set */
	ivar &= ~((u32)0xFF << offset);

	/* write vector and valid bit */
	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;

	array_wr32(E1000_IVAR0, index, ivar);
}

#define IGB_N0_QUEUE -1

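/**
 * igb_assign_vector - map a q_vector's rings to an MSI-X vector
 * @q_vector: queue vector whose rings are being mapped
 * @msix_vector: hardware vector number the rings' interrupts are routed to
 *
 * Programs MSIXBM (82575) or IVAR (82576 and newer) for the Rx/Tx rings
 * owned by @q_vector and records the resulting EIMS bit in
 * q_vector->eims_value.
 */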
static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	struct e1000_hw *hw = &adapter->hw;
	int rx_queue = IGB_N0_QUEUE;
	int tx_queue = IGB_N0_QUEUE;
	u32 msixbm = 0;

	if (q_vector->rx.ring)
		rx_queue = q_vector->rx.ring->reg_idx;
	if (q_vector->tx.ring)
		tx_queue = q_vector->tx.ring->reg_idx;

	switch (hw->mac.type) {
	case e1000_82575:
		/* The 82575 assigns vectors using a bitmask, which matches the
		 * bitmask for the EICR/EIMS/EIMC registers. To assign one
		 * or more queues to a vector, we write the appropriate bits
		 * into the MSIXBM register for that vector.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
		if (tx_queue > IGB_N0_QUEUE)
			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
		if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
			msixbm |= E1000_EIMS_OTHER;
		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
		q_vector->eims_value = msixbm;
		break;
	case e1000_82576:
		/* 82576 uses a table that essentially consists of 2 columns
		 * with 8 rows. The ordering is column-major so we use the
		 * lower 3 bits as the row index, and the 4th bit as the
		 * column offset.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue & 0x7,
				       (rx_queue & 0x8) << 1);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue & 0x7,
				       ((tx_queue & 0x8) << 1) + 8);
		q_vector->eims_value = BIT(msix_vector);
		break;
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* On 82580 and newer adapters the scheme is similar to 82576
		 * however instead of ordering column-major we have things
		 * ordered row-major. So we traverse the table by using
		 * bit 0 as the column offset, and the remaining bits as the
		 * row index.
		 */
		if (rx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       rx_queue >> 1,
				       (rx_queue & 0x1) << 4);
		if (tx_queue > IGB_N0_QUEUE)
			igb_write_ivar(hw, msix_vector,
				       tx_queue >> 1,
				       ((tx_queue & 0x1) << 4) + 8);
		q_vector->eims_value = BIT(msix_vector);
		break;
	default:
		BUG();
		break;
	}

	/* add q_vector eims value to global eims_enable_mask */
	adapter->eims_enable_mask |= q_vector->eims_value;

	/* configure q_vector to set itr on first interrupt */
	q_vector->set_itr = 1;
}

/**
 * igb_configure_msix - Configure MSI-X hardware
 * @adapter: board private structure to initialize
 *
 * igb_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void igb_configure_msix(struct igb_adapter *adapter)
{
	u32 tmp;
	int i, vector = 0;
	struct e1000_hw *hw = &adapter->hw;

	adapter->eims_enable_mask = 0;

	/* set vector for other causes, i.e. link changes */
	switch (hw->mac.type) {
	case e1000_82575:
		tmp = rd32(E1000_CTRL_EXT);
		/* enable MSI-X PBA support */
		tmp |= E1000_CTRL_EXT_PBA_CLR;

		/* Auto-Mask interrupts upon ICR read. */
		tmp |= E1000_CTRL_EXT_EIAME;
		tmp |= E1000_CTRL_EXT_IRCA;

		wr32(E1000_CTRL_EXT, tmp);

		/* enable msix_other interrupt */
		array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
		adapter->eims_other = E1000_EIMS_OTHER;

		break;
	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	case e1000_i211:
		/* Turn on MSI-X capability first, or our settings
		 * won't stick. And it will take days to debug.
		 */
		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
		     E1000_GPIE_NSICR);

		/* enable msix_other interrupt */
		adapter->eims_other = BIT(vector);
		tmp = (vector++ | E1000_IVAR_VALID) << 8;

		wr32(E1000_IVAR_MISC, tmp);
		break;
	default:
		/* do nothing, since nothing else supports MSI-X */
		break;
	} /* switch (hw->mac.type) */

	adapter->eims_enable_mask |= adapter->eims_other;

	for (i = 0; i < adapter->num_q_vectors; i++)
		igb_assign_vector(adapter->q_vector[i], vector++);

	wrfl();
}

/**
 * igb_request_msix - Initialize MSI-X interrupts
 * @adapter: board private structure to initialize
 *
 * igb_request_msix allocates MSI-X vectors and requests interrupts from the
 * kernel.
 **/
static int igb_request_msix(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i, err = 0, vector = 0, free_vector = 0;

	err = request_irq(adapter->msix_entries[vector].vector,
			  igb_msix_other, 0, netdev->name, adapter);
	if (err)
		goto err_out;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		struct igb_q_vector *q_vector = adapter->q_vector[i];

		vector++;

		q_vector->itr_register = adapter->io_addr + E1000_EITR(vector);

		if (q_vector->rx.ring && q_vector->tx.ring)
			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else if (q_vector->tx.ring)
			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
				q_vector->tx.ring->queue_index);
		else if (q_vector->rx.ring)
			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
				q_vector->rx.ring->queue_index);
		else
			sprintf(q_vector->name, "%s-unused", netdev->name);

		err = request_irq(adapter->msix_entries[vector].vector,
				  igb_msix_ring, 0, q_vector->name,
				  q_vector);
		if (err)
			goto err_free;
	}

	igb_configure_msix(adapter);
	return 0;

err_free:
	/* free already assigned IRQs */
	free_irq(adapter->msix_entries[free_vector++].vector, adapter);

	vector--;
	for (i = 0; i < vector; i++) {
		free_irq(adapter->msix_entries[free_vector++].vector,
			 adapter->q_vector[i]);
	}
err_out:
	return err;
}

/**
 * igb_free_q_vector - Free memory allocated for specific interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be freed
 *
 * This function frees the memory allocated to the q_vector.
 **/
static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	adapter->q_vector[v_idx] = NULL;

	/* igb_get_stats64() might access the rings on this vector,
	 * we must wait a grace period before freeing it.
	 */
	if (q_vector)
		kfree_rcu(q_vector, rcu);
}

/**
 * igb_reset_q_vector - Reset config for interrupt vector
 * @adapter: board private structure to initialize
 * @v_idx: Index of vector to be reset
 *
 * If NAPI is enabled it will delete any references to the
 * NAPI struct. This is preparation for igb_free_q_vector.
 **/
static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
{
	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];

	/* Coming from igb_set_interrupt_capability, the vectors are not yet
	 * allocated. So, q_vector is NULL so we should stop here.
	 */
	if (!q_vector)
		return;

	if (q_vector->tx.ring)
		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;

	if (q_vector->rx.ring)
		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;

	netif_napi_del(&q_vector->napi);
}

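/* Disable MSI-X or MSI and unwind the per-vector NAPI/ring references. */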
static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	if (adapter->flags & IGB_FLAG_HAS_MSIX)
		pci_disable_msix(adapter->pdev);
	else if (adapter->flags & IGB_FLAG_HAS_MSI)
		pci_disable_msi(adapter->pdev);

	while (v_idx--)
		igb_reset_q_vector(adapter, v_idx);
}

/**
 * igb_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors. In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void igb_free_q_vectors(struct igb_adapter *adapter)
{
	int v_idx = adapter->num_q_vectors;

	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--) {
		igb_reset_q_vector(adapter, v_idx);
		igb_free_q_vector(adapter, v_idx);
	}
}

/**
 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
 * @adapter: board private structure to initialize
 *
 * This function resets the device so that it has 0 Rx queues, Tx queues, and
 * MSI-X interrupts allocated.
 */
static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
{
	igb_free_q_vectors(adapter);
	igb_reset_interrupt_capability(adapter);
}

/**
 * igb_set_interrupt_capability - set MSI or MSI-X if supported
 * @adapter: board private structure to initialize
 * @msix: boolean value of MSIX capability
 *
 * Attempt to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
{
	int err;
	int numvecs, i;

	if (!msix)
		goto msi_only;
	adapter->flags |= IGB_FLAG_HAS_MSIX;

	/* Number of supported queues. */
	adapter->num_rx_queues = adapter->rss_queues;
	if (adapter->vfs_allocated_count)
		adapter->num_tx_queues = 1;
	else
		adapter->num_tx_queues = adapter->rss_queues;

	/* start with one vector for every Rx queue */
	numvecs = adapter->num_rx_queues;

	/* if Tx handler is separate add 1 for every Tx queue */
	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
		numvecs += adapter->num_tx_queues;

	/* store the number of vectors reserved for queues */
	adapter->num_q_vectors = numvecs;

	/* add 1 vector for link status interrupts */
	numvecs++;
	for (i = 0; i < numvecs; i++)
		adapter->msix_entries[i].entry = i;

	err = pci_enable_msix_range(adapter->pdev,
				    adapter->msix_entries,
				    numvecs,
				    numvecs);
	if (err > 0)
		return;

	igb_reset_interrupt_capability(adapter);

	/* If we can't do MSI-X, try MSI */
msi_only:
	adapter->flags &= ~IGB_FLAG_HAS_MSIX;
#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV for non MSI-X configurations */
	if (adapter->vf_data) {
		struct e1000_hw *hw = &adapter->hw;

		/* disable iov and allow time for transactions to clear */
		pci_disable_sriov(adapter->pdev);
		msleep(500);

		kfree(adapter->vf_mac_list);
		adapter->vf_mac_list = NULL;
		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&adapter->pdev->dev, "IOV Disabled\n");
	}
#endif
	adapter->vfs_allocated_count = 0;
	adapter->rss_queues = 1;
	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
	adapter->num_rx_queues = 1;
	adapter->num_tx_queues = 1;
	adapter->num_q_vectors = 1;
	if (!pci_enable_msi(adapter->pdev))
		adapter->flags |= IGB_FLAG_HAS_MSI;
}

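/* Attach a ring to the head of a q_vector's Tx or Rx container and account
 * for it in the container's ring count.
 */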
static void igb_add_ring(struct igb_ring *ring,
			 struct igb_ring_container *head)
{
	head->ring = ring;
	head->count++;
}

/**
 * igb_alloc_q_vector - Allocate memory for a single interrupt vector
 * @adapter: board private structure to initialize
 * @v_count: q_vectors allocated on adapter, used for ring interleaving
 * @v_idx: index of vector in adapter struct
 * @txr_count: total number of Tx rings to allocate
 * @txr_idx: index of first Tx ring to allocate
 * @rxr_count: total number of Rx rings to allocate
 * @rxr_idx: index of first Rx ring to allocate
 *
 * We allocate one q_vector. If allocation fails we return -ENOMEM.
 **/
static int igb_alloc_q_vector(struct igb_adapter *adapter,
			      int v_count, int v_idx,
			      int txr_count, int txr_idx,
			      int rxr_count, int rxr_idx)
{
	struct igb_q_vector *q_vector;
	struct igb_ring *ring;
	int ring_count, size;

	/* igb only supports 1 Tx and/or 1 Rx queue per vector */
	if (txr_count > 1 || rxr_count > 1)
		return -ENOMEM;

	ring_count = txr_count + rxr_count;
	size = sizeof(struct igb_q_vector) +
	       (sizeof(struct igb_ring) * ring_count);

	/* allocate q_vector and rings */
	q_vector = adapter->q_vector[v_idx];
	if (!q_vector) {
		q_vector = kzalloc(size, GFP_KERNEL);
	} else if (size > ksize(q_vector)) {
		kfree_rcu(q_vector, rcu);
		q_vector = kzalloc(size, GFP_KERNEL);
	} else {
		memset(q_vector, 0, size);
	}
	if (!q_vector)
		return -ENOMEM;

	/* initialize NAPI */
	netif_napi_add(adapter->netdev, &q_vector->napi,
		       igb_poll, 64);

	/* tie q_vector and adapter together */
	adapter->q_vector[v_idx] = q_vector;
	q_vector->adapter = adapter;

	/* initialize work limits */
	q_vector->tx.work_limit = adapter->tx_work_limit;

	/* initialize ITR configuration */
	q_vector->itr_register = adapter->io_addr + E1000_EITR(0);
	q_vector->itr_val = IGB_START_ITR;

	/* initialize pointer to rings */
	ring = q_vector->ring;

	/* initialize ITR */
	if (rxr_count) {
		/* rx or rx/tx vector */
		if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
			q_vector->itr_val = adapter->rx_itr_setting;
	} else {
		/* tx only vector */
		if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
			q_vector->itr_val = adapter->tx_itr_setting;
	}

	if (txr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Tx values */
		igb_add_ring(ring, &q_vector->tx);

		/* For 82575, context index must be unique per ring. */
		if (adapter->hw.mac.type == e1000_82575)
			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);

		/* apply Tx specific ring traits */
		ring->count = adapter->tx_ring_count;
		ring->queue_index = txr_idx;

		ring->cbs_enable = false;
		ring->idleslope = 0;
		ring->sendslope = 0;
		ring->hicredit = 0;
		ring->locredit = 0;

		u64_stats_init(&ring->tx_syncp);
		u64_stats_init(&ring->tx_syncp2);

		/* assign ring to adapter */
		adapter->tx_ring[txr_idx] = ring;

		/* push pointer to next ring */
		ring++;
	}

	if (rxr_count) {
		/* assign generic ring traits */
		ring->dev = &adapter->pdev->dev;
		ring->netdev = adapter->netdev;

		/* configure backlink on ring */
		ring->q_vector = q_vector;

		/* update q_vector Rx values */
		igb_add_ring(ring, &q_vector->rx);

		/* set flag indicating ring supports SCTP checksum offload */
		if (adapter->hw.mac.type >= e1000_82576)
			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);

		/* On i350, i354, i210, and i211, loopback VLAN packets
		 * have the tag byte-swapped.
		 */
		if (adapter->hw.mac.type >= e1000_i350)
			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);

		/* apply Rx specific ring traits */
		ring->count = adapter->rx_ring_count;
		ring->queue_index = rxr_idx;

		u64_stats_init(&ring->rx_syncp);

		/* assign ring to adapter */
		adapter->rx_ring[rxr_idx] = ring;
	}

	return 0;
}

/**
 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt. If allocation fails we
 * return -ENOMEM.
 **/
static int igb_alloc_q_vectors(struct igb_adapter *adapter)
{
	int q_vectors = adapter->num_q_vectors;
	int rxr_remaining = adapter->num_rx_queues;
	int txr_remaining = adapter->num_tx_queues;
	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
	int err;

	if (q_vectors >= (rxr_remaining + txr_remaining)) {
		for (; rxr_remaining; v_idx++) {
			err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
						 0, 0, 1, rxr_idx);
			if (err)
				goto err_out;

			/* update counts and index */
			rxr_remaining--;
			rxr_idx++;
		}
	}

	for (; v_idx < q_vectors; v_idx++) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);

		err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
					 tqpv, txr_idx, rqpv, rxr_idx);
		if (err)
			goto err_out;

		/* update counts and index */
		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
		rxr_idx++;
		txr_idx++;
	}

	return 0;

err_out:
	adapter->num_tx_queues = 0;
	adapter->num_rx_queues = 0;
	adapter->num_q_vectors = 0;

	while (v_idx--)
		igb_free_q_vector(adapter, v_idx);

	return -ENOMEM;
}

/**
 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
 * @adapter: board private structure to initialize
 * @msix: boolean value of MSIX capability
 *
 * This function initializes the interrupts and allocates all of the queues.
 **/
static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
{
	struct pci_dev *pdev = adapter->pdev;
	int err;

	igb_set_interrupt_capability(adapter, msix);

	err = igb_alloc_q_vectors(adapter);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
		goto err_alloc_q_vectors;
	}

	igb_cache_ring_register(adapter);

	return 0;

err_alloc_q_vectors:
	igb_reset_interrupt_capability(adapter);
	return err;
}

/**
 * igb_request_irq - initialize interrupts
 * @adapter: board private structure to initialize
 *
 * Attempts to configure interrupts using the best available
 * capabilities of the hardware and kernel.
 **/
static int igb_request_irq(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	int err = 0;

	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		err = igb_request_msix(adapter);
		if (!err)
			goto request_done;
		/* fall back to MSI */
		igb_free_all_tx_resources(adapter);
		igb_free_all_rx_resources(adapter);

		igb_clear_interrupt_scheme(adapter);
		err = igb_init_interrupt_scheme(adapter, false);
		if (err)
			goto request_done;

		igb_setup_all_tx_resources(adapter);
		igb_setup_all_rx_resources(adapter);
		igb_configure(adapter);
	}

	igb_assign_vector(adapter->q_vector[0], 0);

	if (adapter->flags & IGB_FLAG_HAS_MSI) {
		err = request_irq(pdev->irq, igb_intr_msi, 0,
				  netdev->name, adapter);
		if (!err)
			goto request_done;

		/* fall back to legacy interrupts */
		igb_reset_interrupt_capability(adapter);
		adapter->flags &= ~IGB_FLAG_HAS_MSI;
	}

	err = request_irq(pdev->irq, igb_intr, IRQF_SHARED,
			  netdev->name, adapter);

	if (err)
		dev_err(&pdev->dev, "Error %d getting interrupt\n",
			err);

request_done:
	return err;
}

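/* Release the IRQs requested by igb_request_irq(): the "other" vector plus
 * one per q_vector under MSI-X, otherwise the single MSI/legacy line.
 */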
static void igb_free_irq(struct igb_adapter *adapter)
{
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		int vector = 0, i;

		free_irq(adapter->msix_entries[vector++].vector, adapter);

		for (i = 0; i < adapter->num_q_vectors; i++)
			free_irq(adapter->msix_entries[vector++].vector,
				 adapter->q_vector[i]);
	} else {
		free_irq(adapter->pdev->irq, adapter);
	}
}

/**
 * igb_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void igb_irq_disable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	/* we need to be careful when disabling interrupts. The VFs are also
	 * mapped into these registers and so clearing the bits can cause
	 * issues on the VF drivers so we only need to clear what we set
	 */
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		u32 regval = rd32(E1000_EIAM);

		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
		wr32(E1000_EIMC, adapter->eims_enable_mask);
		regval = rd32(E1000_EIAC);
		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
	}

	wr32(E1000_IAM, 0);
	wr32(E1000_IMC, ~0);
	wrfl();

	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		int i;

		for (i = 0; i < adapter->num_q_vectors; i++)
			synchronize_irq(adapter->msix_entries[i].vector);
	} else {
		synchronize_irq(adapter->pdev->irq);
	}
}

/**
 * igb_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void igb_irq_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;

	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
		u32 regval = rd32(E1000_EIAC);

		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
		regval = rd32(E1000_EIAM);
		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
		wr32(E1000_EIMS, adapter->eims_enable_mask);
		if (adapter->vfs_allocated_count) {
			wr32(E1000_MBVFIMR, 0xFF);
			ims |= E1000_IMS_VMMB;
		}
		wr32(E1000_IMS, ims);
	} else {
		wr32(E1000_IMS, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
		wr32(E1000_IAM, IMS_ENABLE_MASK |
				E1000_IMS_DRSTA);
	}
}

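/* Keep the manageability (DHCP cookie) VLAN present in the VLAN filter
 * table, and drop the previously tracked VLAN once it is no longer used.
 */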
static void igb_update_mng_vlan(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 pf_id = adapter->vfs_allocated_count;
	u16 vid = adapter->hw.mng_cookie.vlan_id;
	u16 old_vid = adapter->mng_vlan_id;

	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
		/* add VID to filter table */
		igb_vfta_set(hw, vid, pf_id, true, true);
		adapter->mng_vlan_id = vid;
	} else {
		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
	}

	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
	    (vid != old_vid) &&
	    !test_bit(old_vid, adapter->active_vlans)) {
		/* remove VID from filter table */
		igb_vfta_set(hw, vid, pf_id, false, true);
	}
}

/**
 * igb_release_hw_control - release control of the h/w to f/w
 * @adapter: address of board private structure
 *
 * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that the
 * driver is no longer loaded.
 **/
static void igb_release_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware take over control of h/w */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
}

/**
 * igb_get_hw_control - get control of the h/w from f/w
 * @adapter: address of board private structure
 *
 * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
 * For ASF and Pass Through versions of f/w this means that
 * the driver is loaded.
 **/
static void igb_get_hw_control(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext;

	/* Let firmware know the driver has taken over */
	ctrl_ext = rd32(E1000_CTRL_EXT);
	wr32(E1000_CTRL_EXT,
	     ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
}

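/* Toggle the FQTSS (Qav transmit mode) flag on an i210; if the interface is
 * running, schedule a reset so igb_setup_tx_mode() applies the new mode.
 */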
static void enable_fqtss(struct igb_adapter *adapter, bool enable)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;

	WARN_ON(hw->mac.type != e1000_i210);

	if (enable)
		adapter->flags |= IGB_FLAG_FQTSS;
	else
		adapter->flags &= ~IGB_FLAG_FQTSS;

	if (netif_running(netdev))
		schedule_work(&adapter->reset_task);
}

static bool is_fqtss_enabled(struct igb_adapter *adapter)
{
	return (adapter->flags & IGB_FLAG_FQTSS) ? true : false;
}

static void set_tx_desc_fetch_prio(struct e1000_hw *hw, int queue,
				   enum tx_queue_prio prio)
{
	u32 val;

	WARN_ON(hw->mac.type != e1000_i210);
	WARN_ON(queue < 0 || queue > 4);

	val = rd32(E1000_I210_TXDCTL(queue));

	if (prio == TX_QUEUE_PRIO_HIGH)
		val |= E1000_TXDCTL_PRIORITY;
	else
		val &= ~E1000_TXDCTL_PRIORITY;

	wr32(E1000_I210_TXDCTL(queue), val);
}

static void set_queue_mode(struct e1000_hw *hw, int queue, enum queue_mode mode)
{
	u32 val;

	WARN_ON(hw->mac.type != e1000_i210);
	WARN_ON(queue < 0 || queue > 1);

	val = rd32(E1000_I210_TQAVCC(queue));

	if (mode == QUEUE_MODE_STREAM_RESERVATION)
		val |= E1000_TQAVCC_QUEUEMODE;
	else
		val &= ~E1000_TQAVCC_QUEUEMODE;

	wr32(E1000_I210_TQAVCC(queue), val);
}

/**
 * igb_configure_cbs - Configure Credit-Based Shaper (CBS)
 * @adapter: pointer to adapter struct
 * @queue: queue number
 * @enable: true = enable CBS, false = disable CBS
 * @idleslope: idleSlope in kbps
 * @sendslope: sendSlope in kbps
 * @hicredit: hiCredit in bytes
 * @locredit: loCredit in bytes
 *
 * Configure CBS for a given hardware queue. When disabling, the idleslope,
 * sendslope, hicredit and locredit arguments are ignored.
 **/
static void igb_configure_cbs(struct igb_adapter *adapter, int queue,
			      bool enable, int idleslope, int sendslope,
			      int hicredit, int locredit)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tqavcc;
	u16 value;

	WARN_ON(hw->mac.type != e1000_i210);
	WARN_ON(queue < 0 || queue > 1);

	if (enable || queue == 0) {
		/* i210 does not allow the queue 0 to be in the Strict
		 * Priority mode while the Qav mode is enabled, so,
		 * instead of disabling strict priority mode, we give
		 * queue 0 the maximum of credits possible.
		 *
		 * See section 8.12.19 of the i210 datasheet, "Note:
		 * Queue0 QueueMode must be set to 1b when
		 * TransmitMode is set to Qav."
		 */
		if (queue == 0 && !enable) {
			/* max "linkspeed" idleslope in kbps */
			idleslope = 1000000;
			hicredit = ETH_FRAME_LEN;
		}

		set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH);
		set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION);

		/* According to i210 datasheet section 7.2.7.7, we should set
		 * the 'idleSlope' field from TQAVCC register following the
		 * equation:
		 *
		 * For 100 Mbps link speed:
		 *
		 *     value = BW * 0x7735 * 0.2                          (E1)
		 *
		 * For 1000Mbps link speed:
		 *
		 *     value = BW * 0x7735 * 2                            (E2)
		 *
		 * E1 and E2 can be merged into one equation as shown below.
		 * Note that 'link-speed' is in Mbps.
		 *
		 *     value = BW * 0x7735 * 2 * link-speed
		 *                           --------------               (E3)
		 *                                1000
		 *
		 * 'BW' is the percentage bandwidth out of full link speed
		 * which can be found with the following equation. Note that
		 * idleSlope here is the parameter from this function which
		 * is in kbps.
		 *
		 *     BW =     idleSlope
		 *          -----------------                             (E4)
		 *          link-speed * 1000
		 *
		 * That said, we can come up with a generic equation to
		 * calculate the value we should set in the TQAVCC register by
		 * replacing 'BW' in E3 by E4. The resulting equation is:
		 *
		 *     value =     idleSlope     * 0x7735 * 2 * link-speed
		 *             -----------------            --------------  (E5)
		 *             link-speed * 1000                 1000
		 *
		 * 'link-speed' is present in both sides of the fraction so
		 * it is canceled out. The final equation is the following:
		 *
		 *     value = idleSlope * 61034
		 *             -----------------                          (E6)
		 *                  1000000
		 *
		 * NOTE: For i210, given the above, we can see that idleslope
		 *       is represented in 16.38431 kbps units by the value at
		 *       the TQAVCC register (1Gbps / 61034), which reduces
		 *       the granularity for idleslope increments.
		 *       For instance, if you want to configure a 2576kbps
		 *       idleslope, the value to be written on the register
		 *       would have to be 157.23. If rounded down, you end
		 *       up with less bandwidth available than originally
		 *       required (~2572 kbps). If rounded up, you end up
		 *       with a higher bandwidth (~2589 kbps). Below the
		 *       approach we take is to always round up the
		 *       calculated value, so the resulting bandwidth might
		 *       be slightly higher for some configurations.
		 */
		value = DIV_ROUND_UP_ULL(idleslope * 61034ULL, 1000000);

		tqavcc = rd32(E1000_I210_TQAVCC(queue));
		tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
		tqavcc |= value;
		wr32(E1000_I210_TQAVCC(queue), tqavcc);

		wr32(E1000_I210_TQAVHC(queue), 0x80000000 + hicredit * 0x7735);
	} else {
		set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_LOW);
		set_queue_mode(hw, queue, QUEUE_MODE_STRICT_PRIORITY);

		/* Set idleSlope to zero. */
		tqavcc = rd32(E1000_I210_TQAVCC(queue));
		tqavcc &= ~E1000_TQAVCC_IDLESLOPE_MASK;
		wr32(E1000_I210_TQAVCC(queue), tqavcc);

		/* Set hiCredit to zero. */
		wr32(E1000_I210_TQAVHC(queue), 0);
	}

	/* XXX: In i210 controller the sendSlope and loCredit parameters from
	 * CBS are not configurable by software so we don't do any 'controller
	 * configuration' in respect to these parameters.
	 */

	netdev_dbg(netdev, "CBS %s: queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n",
		   (enable) ? "enabled" : "disabled", queue,
		   idleslope, sendslope, hicredit, locredit);
}

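/* Cache the CBS parameters on the Tx ring so that igb_setup_tx_mode() can
 * re-apply them after a reset.
 */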
static int igb_save_cbs_params(struct igb_adapter *adapter, int queue,
			       bool enable, int idleslope, int sendslope,
			       int hicredit, int locredit)
{
	struct igb_ring *ring;

	if (queue < 0 || queue > adapter->num_tx_queues)
		return -EINVAL;

	ring = adapter->tx_ring[queue];

	ring->cbs_enable = enable;
	ring->idleslope = idleslope;
	ring->sendslope = sendslope;
	ring->hicredit = hicredit;
	ring->locredit = locredit;

	return 0;
}

static bool is_any_cbs_enabled(struct igb_adapter *adapter)
{
	struct igb_ring *ring;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		ring = adapter->tx_ring[i];

		if (ring->cbs_enable)
			return true;
	}

	return false;
}

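/**
 * igb_setup_tx_mode - select legacy or Qav (FQTSS) transmit mode
 * @adapter: board private structure
 *
 * On i210, programs TQAVCTRL, the Tx/Rx packet buffer sizes and
 * DTXMXPKTSZ for the chosen mode, and re-applies any saved per-queue
 * CBS configuration.
 */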
static void igb_setup_tx_mode(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 val;

	/* Only i210 controller supports changing the transmission mode. */
	if (hw->mac.type != e1000_i210)
		return;

	if (is_fqtss_enabled(adapter)) {
		int i, max_queue;

		/* Configure TQAVCTRL register: set transmit mode to 'Qav',
		 * set data fetch arbitration to 'round robin' and set data
		 * transfer arbitration to 'credit shaper algorithm'.
		 */
		val = rd32(E1000_I210_TQAVCTRL);
		val |= E1000_TQAVCTRL_XMIT_MODE | E1000_TQAVCTRL_DATATRANARB;
		val &= ~E1000_TQAVCTRL_DATAFETCHARB;
		wr32(E1000_I210_TQAVCTRL, val);

		/* Configure Tx and Rx packet buffers sizes as described in
		 * i210 datasheet section 7.2.7.7.
		 */
		val = rd32(E1000_TXPBS);
		val &= ~I210_TXPBSIZE_MASK;
		val |= I210_TXPBSIZE_PB0_8KB | I210_TXPBSIZE_PB1_8KB |
			I210_TXPBSIZE_PB2_4KB | I210_TXPBSIZE_PB3_4KB;
		wr32(E1000_TXPBS, val);

		val = rd32(E1000_RXPBS);
		val &= ~I210_RXPBSIZE_MASK;
		val |= I210_RXPBSIZE_PB_32KB;
		wr32(E1000_RXPBS, val);

		/* Section 8.12.9 states that MAX_TPKT_SIZE from DTXMXPKTSZ
		 * register should not exceed the buffer size programmed in
		 * TXPBS. The smallest buffer size programmed in TXPBS is 4kB
		 * so according to the datasheet we should set MAX_TPKT_SIZE to
		 * 4kB / 64.
		 *
		 * However, when we do so, no frame from queue 2 and 3 are
		 * transmitted. It seems the MAX_TPKT_SIZE should not be
		 * greater than or _equal_ to the buffer size programmed in
		 * TXPBS. For this reason, we set MAX_TPKT_SIZE to
		 * (4kB - 1) / 64.
		 */
		val = (4096 - 1) / 64;
		wr32(E1000_I210_DTXMXPKTSZ, val);

		/* Since FQTSS mode is enabled, apply any CBS configuration
		 * previously set. If no previous CBS configuration has been
		 * done, then the initial configuration is applied, which means
		 * CBS is disabled.
		 */
		max_queue = (adapter->num_tx_queues < I210_SR_QUEUES_NUM) ?
			    adapter->num_tx_queues : I210_SR_QUEUES_NUM;

		for (i = 0; i < max_queue; i++) {
			struct igb_ring *ring = adapter->tx_ring[i];

			igb_configure_cbs(adapter, i, ring->cbs_enable,
					  ring->idleslope, ring->sendslope,
					  ring->hicredit, ring->locredit);
		}
	} else {
		wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
		wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
		wr32(E1000_I210_DTXMXPKTSZ, I210_DTXMXPKTSZ_DEFAULT);

		val = rd32(E1000_I210_TQAVCTRL);
		/* According to Section 8.12.21, the other flags we've set when
		 * enabling FQTSS are not relevant when disabling FQTSS so we
		 * don't set them here.
		 */
		val &= ~E1000_TQAVCTRL_XMIT_MODE;
		wr32(E1000_I210_TQAVCTRL, val);
	}

	netdev_dbg(netdev, "FQTSS %s\n", (is_fqtss_enabled(adapter)) ?
		   "enabled" : "disabled");
}

/**
 * igb_configure - configure the hardware for RX and TX
 * @adapter: private board structure
 **/
static void igb_configure(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int i;

	igb_get_hw_control(adapter);
	igb_set_rx_mode(netdev);
	igb_setup_tx_mode(adapter);

	igb_restore_vlan(adapter);

	igb_setup_tctl(adapter);
	igb_setup_mrqc(adapter);
	igb_setup_rctl(adapter);

	igb_nfc_filter_restore(adapter);
	igb_configure_tx(adapter);
	igb_configure_rx(adapter);

	igb_rx_fifo_flush_82575(&adapter->hw);

	/* call igb_desc_unused which always leaves
	 * at least 1 descriptor unused to make sure
	 * next_to_use != next_to_clean
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
	}
}

/**
 * igb_power_up_link - Power up the phy/serdes link
 * @adapter: address of board private structure
 **/
void igb_power_up_link(struct igb_adapter *adapter)
{
	igb_reset_phy(&adapter->hw);

	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_up_phy_copper(&adapter->hw);
	else
		igb_power_up_serdes_link_82575(&adapter->hw);

	igb_setup_link(&adapter->hw);
}

/**
 * igb_power_down_link - Power down the phy/serdes link
 * @adapter: address of board private structure
 */
static void igb_power_down_link(struct igb_adapter *adapter)
{
	if (adapter->hw.phy.media_type == e1000_media_type_copper)
		igb_power_down_phy_copper_82575(&adapter->hw);
	else
		igb_shutdown_serdes_link_82575(&adapter->hw);
}

/**
 * igb_check_swap_media - Detect and switch function for Media Auto Sense
 * @adapter: address of the board private structure
 **/
static void igb_check_swap_media(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_ext, connsw;
	bool swap_now = false;

	ctrl_ext = rd32(E1000_CTRL_EXT);
	connsw = rd32(E1000_CONNSW);

	/* need to live swap if current media is copper and we have fiber/serdes
	 * to go to.
	 */
	if ((hw->phy.media_type == e1000_media_type_copper) &&
	    (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
		swap_now = true;
	} else if (!(connsw & E1000_CONNSW_SERDESD)) {
		/* copper signal takes time to appear */
		if (adapter->copper_tries < 4) {
			adapter->copper_tries++;
			connsw |= E1000_CONNSW_AUTOSENSE_CONF;
			wr32(E1000_CONNSW, connsw);
			return;
		} else {
			adapter->copper_tries = 0;
			if ((connsw & E1000_CONNSW_PHYSD) &&
			    (!(connsw & E1000_CONNSW_PHY_PDN))) {
				swap_now = true;
				connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
				wr32(E1000_CONNSW, connsw);
			}
		}
	}

	if (!swap_now)
		return;

	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		netdev_info(adapter->netdev,
			"MAS: changing media to fiber/serdes\n");
		ctrl_ext |=
			E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
		adapter->flags |= IGB_FLAG_MEDIA_RESET;
		adapter->copper_tries = 0;
		break;
	case e1000_media_type_internal_serdes:
	case e1000_media_type_fiber:
		netdev_info(adapter->netdev,
			"MAS: changing media to copper\n");
		ctrl_ext &=
			~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
		adapter->flags |= IGB_FLAG_MEDIA_RESET;
		break;
	default:
		/* shouldn't get here during regular operation */
		netdev_err(adapter->netdev,
			"AMS: Invalid media type found, returning\n");
		break;
	}
	wr32(E1000_CTRL_EXT, ctrl_ext);
}

/**
 * igb_up - Open the interface and prepare it to handle traffic
 * @adapter: board private structure
 **/
int igb_up(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	/* hardware has been reset, we need to reload some things */
	igb_configure(adapter);

	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&(adapter->q_vector[i]->napi));

	if (adapter->flags & IGB_FLAG_HAS_MSIX)
		igb_configure_msix(adapter);
	else
		igb_assign_vector(adapter->q_vector[0], 0);

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);

		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(adapter->netdev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	if ((adapter->flags & IGB_FLAG_EEE) &&
	    (!hw->dev_spec._82575.eee_disable))
		adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;

	return 0;
}

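/**
 * igb_down - quiesce the interface
 * @adapter: board private structure
 *
 * Stops Tx/Rx, disables interrupts and NAPI, cancels the watchdog timers,
 * records final stats, resets the hardware and cleans all rings.
 */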
void igb_down(struct igb_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl, rctl;
	int i;

	/* signal that we're down so the interrupt handler does not
	 * reschedule our watchdog timer
	 */
	set_bit(__IGB_DOWN, &adapter->state);

	/* disable receives in the hardware */
	rctl = rd32(E1000_RCTL);
	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
	/* flush and sleep below */

	igb_nfc_filter_exit(adapter);

	netif_carrier_off(netdev);
	netif_tx_stop_all_queues(netdev);

	/* disable transmits in the hardware */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_EN;
	wr32(E1000_TCTL, tctl);

	/* flush both disables and wait for them to finish */
	wrfl();
	usleep_range(10000, 11000);

	igb_irq_disable(adapter);

	adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;

	for (i = 0; i < adapter->num_q_vectors; i++) {
		if (adapter->q_vector[i]) {
			napi_synchronize(&adapter->q_vector[i]->napi);
			napi_disable(&adapter->q_vector[i]->napi);
		}
	}

	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	/* record the stats before reset */
	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter);
	spin_unlock(&adapter->stats64_lock);

	adapter->link_speed = 0;
	adapter->link_duplex = 0;

	if (!pci_channel_offline(adapter->pdev))
		igb_reset(adapter);

	/* clear VLAN promisc flag so VFTA will be updated if necessary */
	adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;

	igb_clean_all_tx_rings(adapter);
	igb_clean_all_rx_rings(adapter);
#ifdef CONFIG_IGB_DCA
	/* since we reset the hardware DCA settings were cleared */
	igb_setup_dca(adapter);
#endif
}

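/* Take the __IGB_RESETTING lock and restart the interface with a full
 * igb_down()/igb_up() cycle.
 */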
void igb_reinit_locked(struct igb_adapter *adapter)
{
	WARN_ON(in_interrupt());
	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		usleep_range(1000, 2000);
	igb_down(adapter);
	igb_up(adapter);
	clear_bit(__IGB_RESETTING, &adapter->state);
}

/**
 * igb_enable_mas - Media Autosense re-enable after swap
 * @adapter: adapter struct
 **/
static void igb_enable_mas(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 connsw = rd32(E1000_CONNSW);

	/* configure for SerDes media detect */
	if ((hw->phy.media_type == e1000_media_type_copper) &&
	    (!(connsw & E1000_CONNSW_SERDESD))) {
		connsw |= E1000_CONNSW_ENRGSRC;
		connsw |= E1000_CONNSW_AUTOSENSE_EN;
		wr32(E1000_CONNSW, connsw);
		wrfl();
	}
}

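/**
 * igb_reset - bring the hardware into a known good state
 * @adapter: board private structure
 *
 * Repartitions the packet buffer, reprograms flow control, resets and
 * reinitializes the MAC, and restores MAC filters, DMA coalescing, EEE
 * and PTP state.
 */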
void igb_reset(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_mac_info *mac = &hw->mac;
	struct e1000_fc_info *fc = &hw->fc;
	u32 pba, hwm;

	/* Repartition Pba for greater than 9k mtu
	 * To take effect CTRL.RST is required.
	 */
	switch (mac->type) {
	case e1000_i350:
	case e1000_i354:
	case e1000_82580:
		pba = rd32(E1000_RXPBS);
		pba = igb_rxpbs_adjust_82580(pba);
		break;
	case e1000_82576:
		pba = rd32(E1000_RXPBS);
		pba &= E1000_RXPBS_SIZE_MASK_82576;
		break;
	case e1000_82575:
	case e1000_i210:
	case e1000_i211:
	default:
		pba = E1000_PBA_34K;
		break;
	}

	if (mac->type == e1000_82575) {
		u32 min_rx_space, min_tx_space, needed_tx_space;

		/* write Rx PBA so that hardware can report correct Tx PBA */
		wr32(E1000_PBA, pba);

		/* To maintain wire speed transmits, the Tx FIFO should be
		 * large enough to accommodate two full transmit packets,
		 * rounded up to the next 1KB and expressed in KB. Likewise,
		 * the Rx FIFO should be large enough to accommodate at least
		 * one full receive packet and is similarly rounded up and
		 * expressed in KB.
		 */
		min_rx_space = DIV_ROUND_UP(MAX_JUMBO_FRAME_SIZE, 1024);

		/* The Tx FIFO also stores 16 bytes of information about the Tx
		 * but don't include Ethernet FCS because hardware appends it.
		 * We only need to round down to the nearest 512 byte block
		 * count since the value we care about is 2 frames, not 1.
		 */
		min_tx_space = adapter->max_frame_size;
		min_tx_space += sizeof(union e1000_adv_tx_desc) - ETH_FCS_LEN;
		min_tx_space = DIV_ROUND_UP(min_tx_space, 512);

		/* upper 16 bits has Tx packet buffer allocation size in KB */
		needed_tx_space = min_tx_space - (rd32(E1000_PBA) >> 16);

		/* If current Tx allocation is less than the min Tx FIFO size,
		 * and the min Tx FIFO size is less than the current Rx FIFO
		 * allocation, take space away from current Rx allocation.
		 */
		if (needed_tx_space < pba) {
			pba -= needed_tx_space;

			/* if short on Rx space, Rx wins and must trump Tx
			 * adjustment
			 */
			if (pba < min_rx_space)
				pba = min_rx_space;
		}

		/* adjust PBA for jumbo frames */
		wr32(E1000_PBA, pba);
	}

	/* flow control settings
	 * The high water mark must be low enough to fit one full frame
	 * after transmitting the pause frame. As such we must have enough
	 * space to allow for us to complete our current transmit and then
	 * receive the frame that is in progress from the link partner.
	 * Set it to:
	 * - the full Rx FIFO size minus one full Tx plus one full Rx frame
	 */
	hwm = (pba << 10) - (adapter->max_frame_size + MAX_JUMBO_FRAME_SIZE);

	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
	fc->low_water = fc->high_water - 16;
	fc->pause_time = 0xFFFF;
	fc->send_xon = 1;
	fc->current_mode = fc->requested_mode;

	/* disable receive for all VFs and wait one second */
	if (adapter->vfs_allocated_count) {
		int i;

		for (i = 0; i < adapter->vfs_allocated_count; i++)
			adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;

		/* ping all the active vfs to let them know we are going down */
		igb_ping_all_vfs(adapter);

		/* disable transmits and receives */
		wr32(E1000_VFRE, 0);
		wr32(E1000_VFTE, 0);
	}

	/* Allow time for pending master requests to run */
	hw->mac.ops.reset_hw(hw);
	wr32(E1000_WUC, 0);

	if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
		/* need to resetup here after media swap */
		adapter->ei.get_invariants(hw);
		adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
	}
	if ((mac->type == e1000_82575) &&
	    (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
		igb_enable_mas(adapter);
	}
	if (hw->mac.ops.init_hw(hw))
		dev_err(&pdev->dev, "Hardware Error\n");

	/* RAR registers were cleared during init_hw, clear mac table */
	igb_flush_mac_table(adapter);
	__dev_uc_unsync(adapter->netdev, NULL);

	/* Recover default RAR entry */
	igb_set_default_mac_filter(adapter);

	/* Flow control settings reset on hardware reset, so guarantee flow
	 * control is off when forcing speed.
	 */
	if (!hw->mac.autoneg)
		igb_force_mac_fc(hw);

	igb_init_dmac(adapter, pba);
#ifdef CONFIG_IGB_HWMON
	/* Re-initialize the thermal sensor on i350 devices. */
	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (mac->type == e1000_i350 && hw->bus.func == 0) {
			/* If present, re-initialize the external thermal sensor
			 * interface.
			 */
			if (adapter->ets)
				mac->ops.init_thermal_sensor_thresh(hw);
		}
	}
#endif
	/* Re-establish EEE setting */
	if (hw->phy.media_type == e1000_media_type_copper) {
		switch (mac->type) {
		case e1000_i350:
		case e1000_i210:
		case e1000_i211:
			igb_set_eee_i350(hw, true, true);
			break;
		case e1000_i354:
			igb_set_eee_i354(hw, true, true);
			break;
		default:
			break;
		}
	}
	if (!netif_running(adapter->netdev))
		igb_power_down_link(adapter);

	igb_update_mng_vlan(adapter);

	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);

	/* Re-enable PTP, where applicable. */
	if (adapter->ptp_flags & IGB_PTP_ENABLED)
		igb_ptp_reset(adapter);

	igb_get_phy_info(hw);
}

static netdev_features_t igb_fix_features(struct net_device *netdev,
					  netdev_features_t features)
{
	/* Since there is no support for separate Rx/Tx vlan accel
	 * enable/disable make sure Tx flag is always in same state as Rx.
	 */
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;
	else
		features &= ~NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}
static int igb_set_features(struct net_device *netdev,
			    netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct igb_adapter *adapter = netdev_priv(netdev);

	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
		igb_vlan_mode(netdev, features);

	if (!(changed & (NETIF_F_RXALL | NETIF_F_NTUPLE)))
		return 0;

	if (!(features & NETIF_F_NTUPLE)) {
		struct hlist_node *node2;
		struct igb_nfc_filter *rule;

		spin_lock(&adapter->nfc_lock);
		hlist_for_each_entry_safe(rule, node2,
					  &adapter->nfc_filter_list, nfc_node) {
			igb_erase_filter(adapter, rule);
			hlist_del(&rule->nfc_node);
			kfree(rule);
		}
		spin_unlock(&adapter->nfc_lock);
		adapter->nfc_filter_count = 0;
	}
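
	/* Only RXALL or NTUPLE changes reach this point, and both require
	 * the receive path to be reprogrammed, so reinitialize a running
	 * interface or fall back to a full reset if it is down.
	 */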
	netdev->features = features;

	if (netif_running(netdev))
		igb_reinit_locked(adapter);
	else
		igb_reset(adapter);

	return 0;
}
static int igb_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev,
			   const unsigned char *addr, u16 vid,
			   u16 flags)
{
	/* guarantee we can provide a unique filter for the unicast address */
	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) {
		struct igb_adapter *adapter = netdev_priv(dev);
		int vfn = adapter->vfs_allocated_count;

		if (netdev_uc_count(dev) >= igb_available_rars(adapter, vfn))
			return -ENOMEM;
	}

	return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags);
}
#define IGB_MAX_MAC_HDR_LEN	127
#define IGB_MAX_NETWORK_HDR_LEN	511

static netdev_features_t
igb_features_check(struct sk_buff *skb, struct net_device *dev,
		   netdev_features_t features)
{
	unsigned int network_hdr_len, mac_hdr_len;

	/* Make certain the headers can be described by a context descriptor */
	mac_hdr_len = skb_network_header(skb) - skb->data;
	if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_HW_VLAN_CTAG_TX |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb);
	if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
		return features & ~(NETIF_F_HW_CSUM |
				    NETIF_F_SCTP_CRC |
				    NETIF_F_TSO |
				    NETIF_F_TSO6);

	/* We can only support IPV4 TSO in tunnels if we can mangle the
	 * inner IP ID field, so strip TSO if MANGLEID is not supported.
	 */
	if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID))
		features &= ~NETIF_F_TSO;

	return features;
}
static int igb_offload_cbs(struct igb_adapter *adapter,
			   struct tc_cbs_qopt_offload *qopt)
{
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* CBS offloading is only supported by i210 controller. */
	if (hw->mac.type != e1000_i210)
		return -EOPNOTSUPP;

	/* CBS offloading is only supported by queue 0 and queue 1. */
	if (qopt->queue < 0 || qopt->queue > 1)
		return -EINVAL;

	err = igb_save_cbs_params(adapter, qopt->queue, qopt->enable,
				  qopt->idleslope, qopt->sendslope,
				  qopt->hicredit, qopt->locredit);
	if (err)
		return err;
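
	/* FQTSS mode should stay enabled only while at least one queue has
	 * CBS configured: apply the new parameters if it is already on, and
	 * switch it off again once the last CBS queue is disabled.
	 */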
	if (is_fqtss_enabled(adapter)) {
		igb_configure_cbs(adapter, qopt->queue, qopt->enable,
				  qopt->idleslope, qopt->sendslope,
				  qopt->hicredit, qopt->locredit);

		if (!is_any_cbs_enabled(adapter))
			enable_fqtss(adapter, false);
	} else {
		enable_fqtss(adapter, true);
	}

	return 0;
}
#define ETHER_TYPE_FULL_MASK ((__force __be16)~0)
#define VLAN_PRIO_FULL_MASK (0x07)

static int igb_parse_cls_flower(struct igb_adapter *adapter,
				struct tc_cls_flower_offload *f,
				int traffic_class,
				struct igb_nfc_filter *input)
{
	struct netlink_ext_ack *extack = f->common.extack;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported key used, only BASIC, CONTROL, ETH_ADDRS and VLAN are supported");
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key, *mask;

		key = skb_flow_dissector_target(f->dissector,
						FLOW_DISSECTOR_KEY_ETH_ADDRS,
						f->key);
		mask = skb_flow_dissector_target(f->dissector,
						 FLOW_DISSECTOR_KEY_ETH_ADDRS,
						 f->mask);

		if (!is_zero_ether_addr(mask->dst)) {
			if (!is_broadcast_ether_addr(mask->dst)) {
				NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for destination MAC address");
				return -EINVAL;
			}

			input->filter.match_flags |=
				IGB_FILTER_FLAG_DST_MAC_ADDR;
			ether_addr_copy(input->filter.dst_addr, key->dst);
		}

		if (!is_zero_ether_addr(mask->src)) {
			if (!is_broadcast_ether_addr(mask->src)) {
				NL_SET_ERR_MSG_MOD(extack, "Only full masks are supported for source MAC address");
				return -EINVAL;
			}

			input->filter.match_flags |=
				IGB_FILTER_FLAG_SRC_MAC_ADDR;
			ether_addr_copy(input->filter.src_addr, key->src);
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key, *mask;

		key = skb_flow_dissector_target(f->dissector,
						FLOW_DISSECTOR_KEY_BASIC,
						f->key);
		mask = skb_flow_dissector_target(f->dissector,
						 FLOW_DISSECTOR_KEY_BASIC,
						 f->mask);

		if (mask->n_proto) {
			if (mask->n_proto != ETHER_TYPE_FULL_MASK) {
				NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for EtherType filter");
				return -EINVAL;
			}

			input->filter.match_flags |= IGB_FILTER_FLAG_ETHER_TYPE;
			input->filter.etype = key->n_proto;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key, *mask;

		key = skb_flow_dissector_target(f->dissector,
						FLOW_DISSECTOR_KEY_VLAN,
						f->key);
		mask = skb_flow_dissector_target(f->dissector,
						 FLOW_DISSECTOR_KEY_VLAN,
						 f->mask);

		if (mask->vlan_priority) {
			if (mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
				NL_SET_ERR_MSG_MOD(extack, "Only full mask is supported for VLAN priority");
				return -EINVAL;
			}

			input->filter.match_flags |= IGB_FILTER_FLAG_VLAN_TCI;
			input->filter.vlan_tci = key->vlan_priority;
		}
	}

	input->action = traffic_class;
	input->cookie = f->cookie;

	return 0;
}
static int igb_configure_clsflower(struct igb_adapter *adapter,
				   struct tc_cls_flower_offload *cls_flower)
{
	struct netlink_ext_ack *extack = cls_flower->common.extack;
	struct igb_nfc_filter *filter, *f;
	int err, tc;

	tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
	if (tc < 0) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid traffic class");
		return -EINVAL;
	}

	filter = kzalloc(sizeof(*filter), GFP_KERNEL);
	if (!filter)
		return -ENOMEM;

	err = igb_parse_cls_flower(adapter, cls_flower, tc, filter);
	if (err < 0)
		goto err_parse;

	spin_lock(&adapter->nfc_lock);

	hlist_for_each_entry(f, &adapter->nfc_filter_list, nfc_node) {
		if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
			err = -EEXIST;
			NL_SET_ERR_MSG_MOD(extack,
					   "This filter is already set in ethtool");
			goto err_locked;
		}
	}

	hlist_for_each_entry(f, &adapter->cls_flower_list, nfc_node) {
		if (!memcmp(&f->filter, &filter->filter, sizeof(f->filter))) {
			err = -EEXIST;
			NL_SET_ERR_MSG_MOD(extack,
					   "This filter is already set in cls_flower");
			goto err_locked;
		}
	}

	err = igb_add_filter(adapter, filter);
	if (err < 0) {
		NL_SET_ERR_MSG_MOD(extack, "Could not add filter to the adapter");
		goto err_locked;
	}

	hlist_add_head(&filter->nfc_node, &adapter->cls_flower_list);

	spin_unlock(&adapter->nfc_lock);

	return 0;

err_locked:
	spin_unlock(&adapter->nfc_lock);

err_parse:
	kfree(filter);

	return err;
}
static int igb_delete_clsflower(struct igb_adapter *adapter,
				struct tc_cls_flower_offload *cls_flower)
{
	struct igb_nfc_filter *filter;
	int err;

	spin_lock(&adapter->nfc_lock);

	hlist_for_each_entry(filter, &adapter->cls_flower_list, nfc_node)
		if (filter->cookie == cls_flower->cookie)
			break;

	if (!filter) {
		err = -ENOENT;
		goto out;
	}

	err = igb_erase_filter(adapter, filter);
	if (err < 0)
		goto out;

	hlist_del(&filter->nfc_node);
	kfree(filter);

out:
	spin_unlock(&adapter->nfc_lock);

	return err;
}
static int igb_setup_tc_cls_flower(struct igb_adapter *adapter,
				   struct tc_cls_flower_offload *cls_flower)
{
	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return igb_configure_clsflower(adapter, cls_flower);
	case TC_CLSFLOWER_DESTROY:
		return igb_delete_clsflower(adapter, cls_flower);
	case TC_CLSFLOWER_STATS:
		return -EOPNOTSUPP;
	default:
		return -EINVAL;
	}
}
static int igb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	struct igb_adapter *adapter = cb_priv;

	if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return igb_setup_tc_cls_flower(adapter, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
static int igb_setup_tc_block(struct igb_adapter *adapter,
			      struct tc_block_offload *f)
{
	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, igb_setup_tc_block_cb,
					     adapter, adapter);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, igb_setup_tc_block_cb,
					adapter);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}
static int igb_setup_tc(struct net_device *dev, enum tc_setup_type type,
			void *type_data)
{
	struct igb_adapter *adapter = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_QDISC_CBS:
		return igb_offload_cbs(adapter, type_data);
	case TC_SETUP_BLOCK:
		return igb_setup_tc_block(adapter, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
static const struct net_device_ops igb_netdev_ops = {
	.ndo_open		= igb_open,
	.ndo_stop		= igb_close,
	.ndo_start_xmit		= igb_xmit_frame,
	.ndo_get_stats64	= igb_get_stats64,
	.ndo_set_rx_mode	= igb_set_rx_mode,
	.ndo_set_mac_address	= igb_set_mac,
	.ndo_change_mtu		= igb_change_mtu,
	.ndo_do_ioctl		= igb_ioctl,
	.ndo_tx_timeout		= igb_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_vlan_rx_add_vid	= igb_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= igb_vlan_rx_kill_vid,
	.ndo_set_vf_mac		= igb_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= igb_ndo_set_vf_vlan,
	.ndo_set_vf_rate	= igb_ndo_set_vf_bw,
	.ndo_set_vf_spoofchk	= igb_ndo_set_vf_spoofchk,
	.ndo_set_vf_trust	= igb_ndo_set_vf_trust,
	.ndo_get_vf_config	= igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= igb_netpoll,
#endif
	.ndo_fix_features	= igb_fix_features,
	.ndo_set_features	= igb_set_features,
	.ndo_fdb_add		= igb_ndo_fdb_add,
	.ndo_features_check	= igb_features_check,
	.ndo_setup_tc		= igb_setup_tc,
};
/**
 * igb_set_fw_version - Configure version string for ethtool
 * @adapter: adapter struct
 **/
void igb_set_fw_version(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_fw_version fw;

	igb_get_fw_version(hw, &fw);

	switch (hw->mac.type) {
	case e1000_i210:
	case e1000_i211:
		if (!(igb_get_flash_presence_i210(hw))) {
			snprintf(adapter->fw_version,
				 sizeof(adapter->fw_version),
				 "%2d.%2d-%d",
				 fw.invm_major, fw.invm_minor,
				 fw.invm_img_type);
			break;
		}
		/* fall through */
	default:
		/* if option rom is valid, display its version too */
		if (fw.or_valid) {
			snprintf(adapter->fw_version,
				 sizeof(adapter->fw_version),
				 "%d.%d, 0x%08x, %d.%d.%d",
				 fw.eep_major, fw.eep_minor, fw.etrack_id,
				 fw.or_major, fw.or_build, fw.or_patch);
		/* no option rom */
		} else if (fw.etrack_id != 0x0000) {
			snprintf(adapter->fw_version,
				 sizeof(adapter->fw_version),
				 "%d.%d, 0x%08x",
				 fw.eep_major, fw.eep_minor, fw.etrack_id);
		} else {
			snprintf(adapter->fw_version,
				 sizeof(adapter->fw_version),
				 "%d.%d.%d",
				 fw.eep_major, fw.eep_minor, fw.eep_build);
		}
		break;
	}
}
/**
 * igb_init_mas - init Media Autosense feature if enabled in the NVM
 * @adapter: adapter struct
 **/
static void igb_init_mas(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 eeprom_data;

	hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
	switch (hw->bus.func) {
	case E1000_FUNC_0:
		if (eeprom_data & IGB_MAS_ENABLE_0) {
			adapter->flags |= IGB_FLAG_MAS_ENABLE;
			netdev_info(adapter->netdev,
				    "MAS: Enabling Media Autosense for port %d\n",
				    hw->bus.func);
		}
		break;
	case E1000_FUNC_1:
		if (eeprom_data & IGB_MAS_ENABLE_1) {
			adapter->flags |= IGB_FLAG_MAS_ENABLE;
			netdev_info(adapter->netdev,
				    "MAS: Enabling Media Autosense for port %d\n",
				    hw->bus.func);
		}
		break;
	case E1000_FUNC_2:
		if (eeprom_data & IGB_MAS_ENABLE_2) {
			adapter->flags |= IGB_FLAG_MAS_ENABLE;
			netdev_info(adapter->netdev,
				    "MAS: Enabling Media Autosense for port %d\n",
				    hw->bus.func);
		}
		break;
	case E1000_FUNC_3:
		if (eeprom_data & IGB_MAS_ENABLE_3) {
			adapter->flags |= IGB_FLAG_MAS_ENABLE;
			netdev_info(adapter->netdev,
				    "MAS: Enabling Media Autosense for port %d\n",
				    hw->bus.func);
		}
		break;
	default:
		/* Shouldn't get here */
		netdev_err(adapter->netdev,
			   "MAS: Invalid port configuration, returning\n");
		break;
	}
}
/**
 * igb_init_i2c - Init I2C interface
 * @adapter: pointer to adapter structure
 **/
static s32 igb_init_i2c(struct igb_adapter *adapter)
{
	s32 status = 0;

	/* I2C interface supported on i350 devices */
	if (adapter->hw.mac.type != e1000_i350)
		return 0;

	/* Initialize the i2c bus which is controlled by the registers.
	 * This bus will use the i2c_algo_bit structure that implements
	 * the protocol through toggling of the 4 bits in the register.
	 */
	adapter->i2c_adap.owner = THIS_MODULE;
	adapter->i2c_algo = igb_i2c_algo;
	adapter->i2c_algo.data = adapter;
	adapter->i2c_adap.algo_data = &adapter->i2c_algo;
	adapter->i2c_adap.dev.parent = &adapter->pdev->dev;
	strlcpy(adapter->i2c_adap.name, "igb BB",
		sizeof(adapter->i2c_adap.name));
	status = i2c_bit_add_bus(&adapter->i2c_adap);
	return status;
}
/**
 * igb_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in igb_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * igb_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 **/
static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct igb_adapter *adapter;
	struct e1000_hw *hw;
	u16 eeprom_data = 0;
	s32 ret_val;
	static int global_quad_port_a; /* global quad port a indication */
	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
	int err, pci_using_dac;
	u8 part_str[E1000_PBANUM_LENGTH];

	/* Catch broken hardware that put the wrong VF device ID in
	 * the PCIe SR-IOV capability.
	 */
	if (pdev->is_virtfn) {
		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
		     pci_name(pdev), pdev->vendor, pdev->device);
		return -EINVAL;
	}

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_dma;
		}
	}

	err = pci_request_mem_regions(pdev, igb_driver_name);
	if (err)
		goto err_pci_reg;

	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);
	pci_save_state(pdev);

	err = -ENOMEM;
	netdev = alloc_etherdev_mq(sizeof(struct igb_adapter),
				   IGB_MAX_TX_QUEUES);
	if (!netdev)
		goto err_alloc_etherdev;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	hw = &adapter->hw;
	hw->back = adapter;
	adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);

	err = -EIO;
	adapter->io_addr = pci_iomap(pdev, 0, 0);
	if (!adapter->io_addr)
		goto err_ioremap;
	/* hw->hw_addr can be altered, we'll use adapter->io_addr for unmap */
	hw->hw_addr = adapter->io_addr;

	netdev->netdev_ops = &igb_netdev_ops;
	igb_set_ethtool_ops(netdev);
	netdev->watchdog_timeo = 5 * HZ;

	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);

	netdev->mem_start = pci_resource_start(pdev, 0);
	netdev->mem_end = pci_resource_end(pdev, 0);

	/* PCI config space info */
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	hw->revision_id = pdev->revision;
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;

	/* Copy the default MAC, PHY and NVM function pointers */
	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
	/* Initialize skew-specific constants */
	err = ei->get_invariants(hw);
	if (err)
		goto err_sw_init;

	/* setup the private structure */
	err = igb_sw_init(adapter);
	if (err)
		goto err_sw_init;

	igb_get_bus_info_pcie(hw);

	hw->phy.autoneg_wait_to_complete = false;

	/* Copper options */
	if (hw->phy.media_type == e1000_media_type_copper) {
		hw->phy.mdix = AUTO_ALL_MODES;
		hw->phy.disable_polarity_correction = false;
		hw->phy.ms_type = e1000_ms_hw_default;
	}

	if (igb_check_reset_block(hw))
		dev_info(&pdev->dev,
			 "PHY reset is blocked due to SOL/IDER session.\n");

	/* features is initialized to 0 in allocation, it might have bits
	 * set by igb_sw_init so we should use an or instead of an
	 * assignment.
	 */
	netdev->features |= NETIF_F_SG |
			    NETIF_F_TSO |
			    NETIF_F_TSO6 |
			    NETIF_F_RXHASH |
			    NETIF_F_RXCSUM |
			    NETIF_F_HW_CSUM;

	if (hw->mac.type >= e1000_82576)
		netdev->features |= NETIF_F_SCTP_CRC;

	if (hw->mac.type >= e1000_i350)
		netdev->features |= NETIF_F_HW_TC;

#define IGB_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \
				  NETIF_F_GSO_GRE_CSUM | \
				  NETIF_F_GSO_IPXIP4 | \
				  NETIF_F_GSO_IPXIP6 | \
				  NETIF_F_GSO_UDP_TUNNEL | \
				  NETIF_F_GSO_UDP_TUNNEL_CSUM)
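
	/* These tunnel offloads are advertised via GSO partial: roughly, the
	 * stack pre-adjusts the outer headers once so that every segment can
	 * carry an identical copy, leaving only the inner segmentation to the
	 * hardware. (A summary of NETIF_F_GSO_PARTIAL semantics, not
	 * igb-specific behavior.)
	 */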
	netdev->gso_partial_features = IGB_GSO_PARTIAL_FEATURES;
	netdev->features |= NETIF_F_GSO_PARTIAL | IGB_GSO_PARTIAL_FEATURES;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features |
			       NETIF_F_HW_VLAN_CTAG_RX |
			       NETIF_F_HW_VLAN_CTAG_TX |
			       NETIF_F_RXALL;

	if (hw->mac.type >= e1000_i350)
		netdev->hw_features |= NETIF_F_NTUPLE;

	if (pci_using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID;
	netdev->mpls_features |= NETIF_F_HW_CSUM;
	netdev->hw_enc_features |= netdev->vlan_features;

	/* set this bit last since it cannot be part of vlan_features */
	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_TX;

	netdev->priv_flags |= IFF_SUPP_NOFCS;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* MTU range: 68 - 9216 */
	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = MAX_STD_JUMBO_FRAME_SIZE;

	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);

	/* before reading the NVM, reset the controller to put the device in a
	 * known good starting state
	 */
	hw->mac.ops.reset_hw(hw);

	/* make sure the NVM is good, i211/i210 parts can have special NVM
	 * that doesn't contain a checksum
	 */
	switch (hw->mac.type) {
	case e1000_i210:
	case e1000_i211:
		if (igb_get_flash_presence_i210(hw)) {
			if (hw->nvm.ops.validate(hw) < 0) {
				dev_err(&pdev->dev,
					"The NVM Checksum Is Not Valid\n");
				err = -EIO;
				goto err_eeprom;
			}
		}
		break;
	default:
		if (hw->nvm.ops.validate(hw) < 0) {
			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_eeprom;
		}
		break;
	}

	if (eth_platform_get_mac_address(&pdev->dev, hw->mac.addr)) {
		/* copy the MAC address out of the NVM */
		if (hw->mac.ops.read_mac_addr(hw))
			dev_err(&pdev->dev, "NVM Read Error\n");
	}

	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address\n");
		err = -EIO;
		goto err_eeprom;
	}

	igb_set_default_mac_filter(adapter);

	/* get firmware version for ethtool -i */
	igb_set_fw_version(adapter);

	/* configure RXPBSIZE and TXPBSIZE */
	if (hw->mac.type == e1000_i210) {
		wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
		wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
	}

	timer_setup(&adapter->watchdog_timer, igb_watchdog, 0);
	timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0);

	INIT_WORK(&adapter->reset_task, igb_reset_task);
	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);

	/* Initialize link properties that are user-changeable */
	adapter->fc_autoneg = true;
	hw->mac.autoneg = true;
	hw->phy.autoneg_advertised = 0x2f;

	hw->fc.requested_mode = e1000_fc_default;
	hw->fc.current_mode = e1000_fc_default;

	igb_validate_mdi_setting(hw);

	/* By default, support wake on port A */
	if (hw->bus.func == 0)
		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;

	/* Check the NVM for wake support on non-port A ports */
	if (hw->mac.type >= e1000_82580)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
				 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
				 &eeprom_data);
	else if (hw->bus.func == 1)
		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);

	if (eeprom_data & IGB_EEPROM_APME)
		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;

	/* now that we have the eeprom settings, apply the special cases where
	 * the eeprom may be wrong or the board simply won't support wake on
	 * lan on a particular port
	 */
	switch (pdev->device) {
	case E1000_DEV_ID_82575GB_QUAD_COPPER:
		adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
		break;
	case E1000_DEV_ID_82575EB_FIBER_SERDES:
	case E1000_DEV_ID_82576_FIBER:
	case E1000_DEV_ID_82576_SERDES:
		/* Wake events only supported on port A for dual fiber
		 * regardless of eeprom setting
		 */
		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
		break;
	case E1000_DEV_ID_82576_QUAD_COPPER:
	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
		/* if quad port adapter, disable WoL on all but port A */
		if (global_quad_port_a != 0)
			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
		else
			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
		/* Reset for multiple quad port adapters */
		if (++global_quad_port_a == 4)
			global_quad_port_a = 0;
		break;
	default:
		/* If the device can't wake, don't set software support */
		if (!device_can_wakeup(&adapter->pdev->dev))
			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
	}

	/* initialize the wol settings based on the eeprom settings */
	if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
		adapter->wol |= E1000_WUFC_MAG;

	/* Some vendors want WoL disabled by default, but still supported */
	if ((hw->mac.type == e1000_i350) &&
	    (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
		adapter->wol = 0;
	}

	/* Some vendors want the ability to Use the EEPROM setting as
	 * enable/disable only, and not for capability
	 */
	if (((hw->mac.type == e1000_i350) ||
	     (hw->mac.type == e1000_i354)) &&
	    (pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)) {
		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
		adapter->wol = 0;
	}
	if (hw->mac.type == e1000_i350) {
		if (((pdev->subsystem_device == 0x5001) ||
		     (pdev->subsystem_device == 0x5002)) &&
		    (hw->bus.func == 0)) {
			adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
			adapter->wol = 0;
		}
		if (pdev->subsystem_device == 0x1F52)
			adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
	}

	device_set_wakeup_enable(&adapter->pdev->dev,
				 adapter->flags & IGB_FLAG_WOL_SUPPORTED);

	/* reset the hardware with the new settings */
	igb_reset(adapter);

	/* Init the I2C interface */
	err = igb_init_i2c(adapter);
	if (err) {
		dev_err(&pdev->dev, "failed to init i2c interface\n");
		goto err_eeprom;
	}

	/* let the f/w know that the h/w is now under the control of the
	 * driver.
	 */
	igb_get_hw_control(adapter);

	strcpy(netdev->name, "eth%d");
	err = register_netdev(netdev);
	if (err)
		goto err_register;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

#ifdef CONFIG_IGB_DCA
	if (dca_add_requester(&pdev->dev) == 0) {
		adapter->flags |= IGB_FLAG_DCA_ENABLED;
		dev_info(&pdev->dev, "DCA enabled\n");
		igb_setup_dca(adapter);
	}

#endif
#ifdef CONFIG_IGB_HWMON
	/* Initialize the thermal sensor on i350 devices. */
	if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
		u16 ets_word;

		/* Read the NVM to determine if this i350 device supports an
		 * external thermal sensor.
		 */
		hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
		if (ets_word != 0x0000 && ets_word != 0xFFFF)
			adapter->ets = true;
		else
			adapter->ets = false;
		if (igb_sysfs_init(adapter))
			dev_err(&pdev->dev,
				"failed to allocate sysfs resources\n");
	} else {
		adapter->ets = false;
	}
#endif
	/* Check if Media Autosense is enabled */
	adapter->ei = *ei;
	if (hw->dev_spec._82575.mas_capable)
		igb_init_mas(adapter);

	/* do hw tstamp init after resetting */
	igb_ptp_init(adapter);

	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
	/* print bus type/speed/width info, not applicable to i354 */
	if (hw->mac.type != e1000_i354) {
		dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
			 netdev->name,
			 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
			  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
			  "unknown"),
			 ((hw->bus.width == e1000_bus_width_pcie_x4) ?
			  "Width x4" :
			  (hw->bus.width == e1000_bus_width_pcie_x2) ?
			  "Width x2" :
			  (hw->bus.width == e1000_bus_width_pcie_x1) ?
			  "Width x1" : "unknown"), netdev->dev_addr);
	}

	if ((hw->mac.type >= e1000_i210 ||
	     igb_get_flash_presence_i210(hw))) {
		ret_val = igb_read_part_string(hw, part_str,
					       E1000_PBANUM_LENGTH);
	} else {
		ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
	}

	if (ret_val)
		strcpy(part_str, "Unknown");
	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
	dev_info(&pdev->dev,
		 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
		 (adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
		 (adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
		 adapter->num_rx_queues, adapter->num_tx_queues);
	if (hw->phy.media_type == e1000_media_type_copper) {
		switch (hw->mac.type) {
		case e1000_i350:
		case e1000_i210:
		case e1000_i211:
			/* Enable EEE for internal copper PHY devices */
			err = igb_set_eee_i350(hw, true, true);
			if ((!err) &&
			    (!hw->dev_spec._82575.eee_disable)) {
				adapter->eee_advert =
					MDIO_EEE_100TX | MDIO_EEE_1000T;
				adapter->flags |= IGB_FLAG_EEE;
			}
			break;
		case e1000_i354:
			if ((rd32(E1000_CTRL_EXT) &
			    E1000_CTRL_EXT_LINK_MODE_SGMII)) {
				err = igb_set_eee_i354(hw, true, true);
				if ((!err) &&
				    (!hw->dev_spec._82575.eee_disable)) {
					adapter->eee_advert =
						MDIO_EEE_100TX | MDIO_EEE_1000T;
					adapter->flags |= IGB_FLAG_EEE;
				}
			}
			break;
		default:
			break;
		}
	}
	pm_runtime_put_noidle(&pdev->dev);
	return 0;

err_register:
	igb_release_hw_control(adapter);
	memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
err_eeprom:
	if (!igb_check_reset_block(hw))
		igb_reset_phy(hw);

	if (hw->flash_address)
		iounmap(hw->flash_address);
err_sw_init:
	kfree(adapter->mac_table);
	kfree(adapter->shadow_vfta);
	igb_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PCI_IOV
	igb_disable_sriov(pdev);
#endif
	pci_iounmap(pdev, adapter->io_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
#ifdef CONFIG_PCI_IOV
static int igb_disable_sriov(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* reclaim resources allocated to VFs */
	if (adapter->vf_data) {
		/* disable iov and allow time for transactions to clear */
		if (pci_vfs_assigned(pdev)) {
			dev_warn(&pdev->dev,
				 "Cannot deallocate SR-IOV virtual functions while they are assigned - VFs will not be deallocated\n");
			return -EPERM;
		} else {
			pci_disable_sriov(pdev);
			msleep(500);
		}
		kfree(adapter->vf_mac_list);
		adapter->vf_mac_list = NULL;
		kfree(adapter->vf_data);
		adapter->vf_data = NULL;
		adapter->vfs_allocated_count = 0;
		wr32(E1000_IOVCTL, E1000_IOVCTL_REUSE_VFQ);
		wrfl();
		msleep(100);
		dev_info(&pdev->dev, "IOV Disabled\n");

		/* Re-enable DMA Coalescing flag since IOV is turned off */
		adapter->flags |= IGB_FLAG_DMAC;
	}

	return 0;
}
static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	int old_vfs = pci_num_vf(pdev);
	struct vf_mac_filter *mac_list;
	int err = 0;
	int num_vf_mac_filters, i;

	if (!(adapter->flags & IGB_FLAG_HAS_MSIX) || num_vfs > 7) {
		err = -EPERM;
		goto out;
	}
	if (!num_vfs)
		goto out;

	if (old_vfs) {
		dev_info(&pdev->dev, "%d pre-allocated VFs found - override max_vfs setting of %d\n",
			 old_vfs, max_vfs);
		adapter->vfs_allocated_count = old_vfs;
	} else
		adapter->vfs_allocated_count = num_vfs;

	adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
				   sizeof(struct vf_data_storage), GFP_KERNEL);

	/* if allocation failed then we do not support SR-IOV */
	if (!adapter->vf_data) {
		adapter->vfs_allocated_count = 0;
		err = -ENOMEM;
		goto out;
	}

	/* Due to the limited number of RAR entries calculate potential
	 * number of MAC filters available for the VFs. Reserve entries
	 * for PF default MAC, PF MAC filters and at least one RAR entry
	 * for each VF for VF MAC.
	 */
	num_vf_mac_filters = adapter->hw.mac.rar_entry_count -
			     (1 + IGB_PF_MAC_FILTERS_RESERVED +
			      adapter->vfs_allocated_count);
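
	/* Illustrative sizing (assuming a 24-entry RAR table and
	 * IGB_PF_MAC_FILTERS_RESERVED == 3): with 7 VFs allocated, the pool
	 * left for extra VF MAC filters is 24 - (1 + 3 + 7) = 13 entries.
	 */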
	adapter->vf_mac_list = kcalloc(num_vf_mac_filters,
				       sizeof(struct vf_mac_filter),
				       GFP_KERNEL);

	mac_list = adapter->vf_mac_list;
	INIT_LIST_HEAD(&adapter->vf_macs.l);

	if (adapter->vf_mac_list) {
		/* Initialize list of VF MAC filters */
		for (i = 0; i < num_vf_mac_filters; i++) {
			mac_list->vf = -1;
			mac_list->free = true;
			list_add(&mac_list->l, &adapter->vf_macs.l);
			mac_list++;
		}
	} else {
		/* If we could not allocate memory for the VF MAC filters
		 * we can continue without this feature but warn user.
		 */
		dev_err(&pdev->dev,
			"Unable to allocate memory for VF MAC filter list\n");
	}

	/* only call pci_enable_sriov() if no VFs are allocated already */
	if (!old_vfs) {
		err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
		if (err)
			goto err_out;
	}
	dev_info(&pdev->dev, "%d VFs allocated\n",
		 adapter->vfs_allocated_count);
	for (i = 0; i < adapter->vfs_allocated_count; i++)
		igb_vf_configure(adapter, i);

	/* DMA Coalescing is not supported in IOV mode. */
	adapter->flags &= ~IGB_FLAG_DMAC;
	goto out;

err_out:
	kfree(adapter->vf_mac_list);
	adapter->vf_mac_list = NULL;
	kfree(adapter->vf_data);
	adapter->vf_data = NULL;
	adapter->vfs_allocated_count = 0;
out:
	return err;
}
#endif

/**
 * igb_remove_i2c - Cleanup I2C interface
 * @adapter: pointer to adapter structure
 **/
static void igb_remove_i2c(struct igb_adapter *adapter)
{
	/* free the adapter bus structure */
	i2c_del_adapter(&adapter->i2c_adap);
}
/**
 * igb_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * igb_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device. This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 **/
static void igb_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	pm_runtime_get_noresume(&pdev->dev);
#ifdef CONFIG_IGB_HWMON
	igb_sysfs_exit(adapter);
#endif
	igb_remove_i2c(adapter);
	igb_ptp_stop(adapter);
	/* The watchdog timer may be rescheduled, so explicitly
	 * disable watchdog from being rescheduled.
	 */
	set_bit(__IGB_DOWN, &adapter->state);
	del_timer_sync(&adapter->watchdog_timer);
	del_timer_sync(&adapter->phy_info_timer);

	cancel_work_sync(&adapter->reset_task);
	cancel_work_sync(&adapter->watchdog_task);

#ifdef CONFIG_IGB_DCA
	if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
		dev_info(&pdev->dev, "DCA disabled\n");
		dca_remove_requester(&pdev->dev);
		adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
		wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
	}
#endif

	/* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
	igb_release_hw_control(adapter);

#ifdef CONFIG_PCI_IOV
	igb_disable_sriov(pdev);
#endif

	unregister_netdev(netdev);

	igb_clear_interrupt_scheme(adapter);

	pci_iounmap(pdev, adapter->io_addr);
	if (hw->flash_address)
		iounmap(hw->flash_address);
	pci_release_mem_regions(pdev);

	kfree(adapter->mac_table);
	kfree(adapter->shadow_vfta);
	free_netdev(netdev);

	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
/**
 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
 * @adapter: board private structure to initialize
 *
 * This function initializes the vf specific data storage and then attempts to
 * allocate the VFs. The reason for ordering it this way is because it is much
 * more expensive time-wise to disable SR-IOV than it is to allocate and free
 * the memory for the VFs.
 **/
static void igb_probe_vfs(struct igb_adapter *adapter)
{
#ifdef CONFIG_PCI_IOV
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;

	/* Virtualization features not supported on i210 family. */
	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
		return;

	/* Of the below we really only want the effect of getting
	 * IGB_FLAG_HAS_MSIX set (if available), without which
	 * igb_enable_sriov() has no effect.
	 */
	igb_set_interrupt_capability(adapter, true);
	igb_reset_interrupt_capability(adapter);

	pci_sriov_set_totalvfs(pdev, 7);
	igb_enable_sriov(pdev, max_vfs);

#endif /* CONFIG_PCI_IOV */
}
unsigned int igb_get_max_rss_queues(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned int max_rss_queues;

	/* Determine the maximum number of RSS queues supported. */
	switch (hw->mac.type) {
	case e1000_i211:
		max_rss_queues = IGB_MAX_RX_QUEUES_I211;
		break;
	case e1000_82575:
	case e1000_i210:
		max_rss_queues = IGB_MAX_RX_QUEUES_82575;
		break;
	case e1000_i350:
		/* I350 cannot do RSS and SR-IOV at the same time */
		if (!!adapter->vfs_allocated_count) {
			max_rss_queues = 1;
			break;
		}
		/* fall through */
	case e1000_82576:
		if (!!adapter->vfs_allocated_count) {
			max_rss_queues = 2;
			break;
		}
		/* fall through */
	case e1000_82580:
	case e1000_i354:
	default:
		max_rss_queues = IGB_MAX_RX_QUEUES;
		break;
	}

	return max_rss_queues;
}
static void igb_init_queue_configuration(struct igb_adapter *adapter)
{
	u32 max_rss_queues;

	max_rss_queues = igb_get_max_rss_queues(adapter);
	adapter->rss_queues = min_t(u32, max_rss_queues, num_online_cpus());

	igb_set_flag_queue_pairs(adapter, max_rss_queues);
}
void igb_set_flag_queue_pairs(struct igb_adapter *adapter,
			      const u32 max_rss_queues)
{
	struct e1000_hw *hw = &adapter->hw;

	/* Determine if we need to pair queues. */
	switch (hw->mac.type) {
	case e1000_82575:
	case e1000_i211:
		/* Device supports enough interrupts without queue pairing. */
		break;
	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
	case e1000_i210:
	default:
		/* If rss_queues > half of max_rss_queues, pair the queues in
		 * order to conserve interrupts due to limited supply.
		 */
		if (adapter->rss_queues > (max_rss_queues / 2))
			adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
		else
			adapter->flags &= ~IGB_FLAG_QUEUE_PAIRS;
		break;
	}
}
/**
 * igb_sw_init - Initialize general software structures (struct igb_adapter)
 * @adapter: board private structure to initialize
 *
 * igb_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 **/
static int igb_sw_init(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;

	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);

	/* set default ring sizes */
	adapter->tx_ring_count = IGB_DEFAULT_TXD;
	adapter->rx_ring_count = IGB_DEFAULT_RXD;

	/* set default ITR values */
	adapter->rx_itr_setting = IGB_DEFAULT_ITR;
	adapter->tx_itr_setting = IGB_DEFAULT_ITR;

	/* set default work limits */
	adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;

	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
				  VLAN_HLEN;
	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
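
	/* For example, a default 1500-byte MTU yields a 1522-byte max frame
	 * (14-byte Ethernet header + 4-byte FCS + 4-byte VLAN tag), while
	 * min_frame_size is the 64-byte Ethernet minimum including FCS.
	 */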
	spin_lock_init(&adapter->nfc_lock);
	spin_lock_init(&adapter->stats64_lock);
#ifdef CONFIG_PCI_IOV
	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		if (max_vfs > 7) {
			dev_warn(&pdev->dev,
				 "Maximum of 7 VFs per PF, using max\n");
			max_vfs = adapter->vfs_allocated_count = 7;
		} else
			adapter->vfs_allocated_count = max_vfs;
		if (adapter->vfs_allocated_count)
			dev_warn(&pdev->dev,
				 "Enabling SR-IOV VFs using the module parameter is deprecated - please use the pci sysfs interface.\n");
		break;
	default:
		break;
	}
#endif /* CONFIG_PCI_IOV */

	/* Assume MSI-X interrupts, will be checked during IRQ allocation */
	adapter->flags |= IGB_FLAG_HAS_MSIX;

	adapter->mac_table = kzalloc(sizeof(struct igb_mac_addr) *
				     hw->mac.rar_entry_count, GFP_ATOMIC);
	if (!adapter->mac_table)
		return -ENOMEM;

	igb_probe_vfs(adapter);

	igb_init_queue_configuration(adapter);

	/* Setup and initialize a copy of the hw vlan table array */
	adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
				       GFP_ATOMIC);
	if (!adapter->shadow_vfta)
		return -ENOMEM;

	/* This call may decrease the number of queues */
	if (igb_init_interrupt_scheme(adapter, true)) {
		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
		return -ENOMEM;
	}

	/* Explicitly disable IRQ since the NIC can be in any state. */
	igb_irq_disable(adapter);

	if (hw->mac.type >= e1000_i350)
		adapter->flags &= ~IGB_FLAG_DMAC;

	set_bit(__IGB_DOWN, &adapter->state);
	return 0;
}
/**
 * igb_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP). At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
static int __igb_open(struct net_device *netdev, bool resuming)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	int err;
	int i;

	/* disallow open during test */
	if (test_bit(__IGB_TESTING, &adapter->state)) {
		WARN_ON(resuming);
		return -EBUSY;
	}

	if (!resuming)
		pm_runtime_get_sync(&pdev->dev);

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = igb_setup_all_tx_resources(adapter);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = igb_setup_all_rx_resources(adapter);
	if (err)
		goto err_setup_rx;

	igb_power_up_link(adapter);

	/* before we allocate an interrupt, we must be ready to handle it.
	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
	 * as soon as we call pci_request_irq, so we have to setup our
	 * clean_rx handler before we do so.
	 */
	igb_configure(adapter);

	err = igb_request_irq(adapter);
	if (err)
		goto err_req_irq;

	/* Notify the stack of the actual queue counts. */
	err = netif_set_real_num_tx_queues(adapter->netdev,
					   adapter->num_tx_queues);
	if (err)
		goto err_set_queues;

	err = netif_set_real_num_rx_queues(adapter->netdev,
					   adapter->num_rx_queues);
	if (err)
		goto err_set_queues;

	/* From here on the code is the same as igb_up() */
	clear_bit(__IGB_DOWN, &adapter->state);

	for (i = 0; i < adapter->num_q_vectors; i++)
		napi_enable(&(adapter->q_vector[i]->napi));

	/* Clear any pending interrupts. */
	rd32(E1000_ICR);
	igb_irq_enable(adapter);

	/* notify VFs that reset has been completed */
	if (adapter->vfs_allocated_count) {
		u32 reg_data = rd32(E1000_CTRL_EXT);

		reg_data |= E1000_CTRL_EXT_PFRSTD;
		wr32(E1000_CTRL_EXT, reg_data);
	}

	netif_tx_start_all_queues(netdev);

	if (!resuming)
		pm_runtime_put(&pdev->dev);

	/* start the watchdog. */
	hw->mac.get_link_status = 1;
	schedule_work(&adapter->watchdog_task);

	return 0;

err_set_queues:
	igb_free_irq(adapter);
err_req_irq:
	igb_release_hw_control(adapter);
	igb_power_down_link(adapter);
	igb_free_all_rx_resources(adapter);
err_setup_rx:
	igb_free_all_tx_resources(adapter);
err_setup_tx:
	igb_reset(adapter);
	if (!resuming)
		pm_runtime_put(&pdev->dev);

	return err;
}

int igb_open(struct net_device *netdev)
{
	return __igb_open(netdev, false);
}
/**
 * igb_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS. The hardware is still under the driver's control, but
 * needs to be disabled. A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
static int __igb_close(struct net_device *netdev, bool suspending)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;

	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));

	if (!suspending)
		pm_runtime_get_sync(&pdev->dev);

	igb_down(adapter);
	igb_free_irq(adapter);

	igb_free_all_tx_resources(adapter);
	igb_free_all_rx_resources(adapter);

	if (!suspending)
		pm_runtime_put_sync(&pdev->dev);
	return 0;
}

int igb_close(struct net_device *netdev)
{
	if (netif_device_present(netdev) || netdev->dismantle)
		return __igb_close(netdev, false);
	return 0;
}
/**
 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring: tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int igb_setup_tx_resources(struct igb_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int size;

	size = sizeof(struct igb_tx_buffer) * tx_ring->count;

	tx_ring->tx_buffer_info = vmalloc(size);
	if (!tx_ring->tx_buffer_info)
		goto err;

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;

	return 0;

err:
	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
	return -ENOMEM;
}
/**
 * igb_setup_all_tx_resources - wrapper to allocate Tx resources
 *				(Descriptors) for all queues
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		err = igb_setup_tx_resources(adapter->tx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Tx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_tx_resources(adapter->tx_ring[i]);
			break;
		}
	}

	return err;
}
/**
 * igb_setup_tctl - configure the transmit control registers
 * @adapter: Board private structure
 **/
void igb_setup_tctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 tctl;

	/* disable queue 0 which is enabled by default on 82575 and 82576 */
	wr32(E1000_TXDCTL(0), 0);

	/* Program the Transmit Control Register */
	tctl = rd32(E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);

	igb_config_collision_dist(hw);

	/* Enable transmits */
	tctl |= E1000_TCTL_EN;

	wr32(E1000_TCTL, tctl);
}
/**
 * igb_configure_tx_ring - Configure transmit ring after Reset
 * @adapter: board private structure
 * @ring: tx ring to configure
 *
 * Configure a transmit ring after a reset.
 **/
void igb_configure_tx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 txdctl = 0;
	u64 tdba = ring->dma;
	int reg_idx = ring->reg_idx;

	/* disable the queue */
	wr32(E1000_TXDCTL(reg_idx), 0);
	wrfl();
	mdelay(10);

	wr32(E1000_TDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_tx_desc));
	wr32(E1000_TDBAL(reg_idx),
	     tdba & 0x00000000ffffffffULL);
	wr32(E1000_TDBAH(reg_idx), tdba >> 32);

	ring->tail = adapter->io_addr + E1000_TDT(reg_idx);
	wr32(E1000_TDH(reg_idx), 0);
	writel(0, ring->tail);

	txdctl |= IGB_TX_PTHRESH;
	txdctl |= IGB_TX_HTHRESH << 8;
	txdctl |= IGB_TX_WTHRESH << 16;
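
	/* The three values above land in successive byte-aligned fields of
	 * TXDCTL (hence the shifts by 8 and 16); they set the descriptor
	 * prefetch, host and write-back thresholds respectively.
	 */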
  3473. /* reinitialize tx_buffer_info */
  3474. memset(ring->tx_buffer_info, 0,
  3475. sizeof(struct igb_tx_buffer) * ring->count);
  3476. txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
  3477. wr32(E1000_TXDCTL(reg_idx), txdctl);
  3478. }
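
/* Editor's note (assumption based on the 82575/82576 register layout):
 * TXDCTL keeps its prefetch threshold in bits 5:0, host threshold in
 * bits 13:8 and write-back threshold in bits 21:16, which is why HTHRESH
 * and WTHRESH are shifted by 8 and 16 above before the queue-enable bit
 * is set.
 */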

/**
 *  igb_configure_tx - Configure transmit Unit after Reset
 *  @adapter: board private structure
 *
 *  Configure the Tx unit of the MAC after a reset.
 **/
static void igb_configure_tx(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
}

/**
 *  igb_setup_rx_resources - allocate Rx resources (Descriptors)
 *  @rx_ring: Rx descriptor ring (for a specific queue) to setup
 *
 *  Returns 0 on success, negative on failure
 **/
int igb_setup_rx_resources(struct igb_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int size;

	size = sizeof(struct igb_rx_buffer) * rx_ring->count;

	rx_ring->rx_buffer_info = vmalloc(size);
	if (!rx_ring->rx_buffer_info)
		goto err;

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;

	return 0;

err:
	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;
	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
	return -ENOMEM;
}

/**
 *  igb_setup_all_rx_resources - wrapper to allocate Rx resources
 *				 (Descriptors) for all queues
 *  @adapter: board private structure
 *
 *  Return 0 on success, negative on failure
 **/
static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
{
	struct pci_dev *pdev = adapter->pdev;
	int i, err = 0;

	for (i = 0; i < adapter->num_rx_queues; i++) {
		err = igb_setup_rx_resources(adapter->rx_ring[i]);
		if (err) {
			dev_err(&pdev->dev,
				"Allocation for Rx Queue %u failed\n", i);
			for (i--; i >= 0; i--)
				igb_free_rx_resources(adapter->rx_ring[i]);
			break;
		}
	}

	return err;
}

/**
 *  igb_setup_mrqc - configure the multiple receive queue control registers
 *  @adapter: Board private structure
 **/
static void igb_setup_mrqc(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 mrqc, rxcsum;
	u32 j, num_rx_queues;
	u32 rss_key[10];

	netdev_rss_key_fill(rss_key, sizeof(rss_key));
	for (j = 0; j < 10; j++)
		wr32(E1000_RSSRK(j), rss_key[j]);

	num_rx_queues = adapter->rss_queues;

	switch (hw->mac.type) {
	case e1000_82576:
		/* 82576 supports 2 RSS queues for SR-IOV */
		if (adapter->vfs_allocated_count)
			num_rx_queues = 2;
		break;
	default:
		break;
	}

	if (adapter->rss_indir_tbl_init != num_rx_queues) {
		for (j = 0; j < IGB_RETA_SIZE; j++)
			adapter->rss_indir_tbl[j] =
				(j * num_rx_queues) / IGB_RETA_SIZE;
		adapter->rss_indir_tbl_init = num_rx_queues;
	}
	igb_write_rss_indir_tbl(adapter);

	/* Disable raw packet checksumming so that RSS hash is placed in
	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
	 * offloads as they are enabled by default
	 */
	rxcsum = rd32(E1000_RXCSUM);
	rxcsum |= E1000_RXCSUM_PCSD;

	if (adapter->hw.mac.type >= e1000_82576)
		/* Enable Receive Checksum Offload for SCTP */
		rxcsum |= E1000_RXCSUM_CRCOFL;

	/* Don't need to set TUOFL or IPOFL, they default to 1 */
	wr32(E1000_RXCSUM, rxcsum);

	/* Generate RSS hash based on packet types, TCP/UDP
	 * port numbers and/or IPv4/v6 src and dst addresses
	 */
	mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
	       E1000_MRQC_RSS_FIELD_IPV4_TCP |
	       E1000_MRQC_RSS_FIELD_IPV6 |
	       E1000_MRQC_RSS_FIELD_IPV6_TCP |
	       E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;

	if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
	if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;

	/* If VMDq is enabled then we set the appropriate mode for that, else
	 * we default to RSS so that an RSS hash is calculated per packet even
	 * if we are only using one queue
	 */
	if (adapter->vfs_allocated_count) {
		if (hw->mac.type > e1000_82575) {
			/* Set the default pool for the PF's first queue */
			u32 vtctl = rd32(E1000_VT_CTL);

			vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
				   E1000_VT_CTL_DISABLE_DEF_POOL);
			vtctl |= adapter->vfs_allocated_count <<
				E1000_VT_CTL_DEFAULT_POOL_SHIFT;
			wr32(E1000_VT_CTL, vtctl);
		}
		if (adapter->rss_queues > 1)
			mrqc |= E1000_MRQC_ENABLE_VMDQ_RSS_MQ;
		else
			mrqc |= E1000_MRQC_ENABLE_VMDQ;
	} else {
		if (hw->mac.type != e1000_i211)
			mrqc |= E1000_MRQC_ENABLE_RSS_MQ;
	}
	igb_vmm_control(adapter);

	wr32(E1000_MRQC, mrqc);
}
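
/* Editor's note (illustrative, not in the original source): IGB_RETA_SIZE is
 * 128, so with num_rx_queues == 4 the expression
 * (j * num_rx_queues) / IGB_RETA_SIZE fills indirection entries 0-31 with
 * queue 0, entries 32-63 with queue 1, and so on, spreading RSS hash buckets
 * evenly across the enabled queues.
 */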

/**
 *  igb_setup_rctl - configure the receive control registers
 *  @adapter: Board private structure
 **/
void igb_setup_rctl(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	rctl = rd32(E1000_RCTL);

	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);

	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);

	/* enable stripping of CRC.  It's unlikely this will break BMC
	 * redirection as it did with e1000.  Newer features require
	 * that the HW strips the CRC.
	 */
	rctl |= E1000_RCTL_SECRC;

	/* disable store bad packets and clear size bits. */
	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);

	/* enable LPE to allow for reception of jumbo frames */
	rctl |= E1000_RCTL_LPE;

	/* disable queue 0 to prevent tail write w/o re-config */
	wr32(E1000_RXDCTL(0), 0);

	/* Attention!!!  For SR-IOV PF driver operations you must enable
	 * queue drop for all VF and PF queues to prevent head of line blocking
	 * if an un-trusted VF does not provide descriptors to hardware.
	 */
	if (adapter->vfs_allocated_count) {
		/* set all queue drop enable bits */
		wr32(E1000_QDE, ALL_QUEUES);
	}

	/* This is useful for sniffing bad packets. */
	if (adapter->netdev->features & NETIF_F_RXALL) {
		/* UPE and MPE will be handled by normal PROMISC logic
		 * in igb_set_rx_mode
		 */
		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */

		rctl &= ~(E1000_RCTL_DPF | /* Allow filtered pause */
			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
		/* Do not mess with E1000_CTRL_VME, it affects transmit as
		 * well, and that breaks VLANs.
		 */
	}

	wr32(E1000_RCTL, rctl);
}

static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
				   int vfn)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	if (size > MAX_JUMBO_FRAME_SIZE)
		size = MAX_JUMBO_FRAME_SIZE;

	vmolr = rd32(E1000_VMOLR(vfn));
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
	vmolr |= size | E1000_VMOLR_LPE;
	wr32(E1000_VMOLR(vfn), vmolr);

	return 0;
}

static inline void igb_set_vf_vlan_strip(struct igb_adapter *adapter,
					 int vfn, bool enable)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 val, reg;

	if (hw->mac.type < e1000_82576)
		return;

	if (hw->mac.type == e1000_i350)
		reg = E1000_DVMOLR(vfn);
	else
		reg = E1000_VMOLR(vfn);

	val = rd32(reg);
	if (enable)
		val |= E1000_VMOLR_STRVLAN;
	else
		val &= ~(E1000_VMOLR_STRVLAN);
	wr32(reg, val);
}

static inline void igb_set_vmolr(struct igb_adapter *adapter,
				 int vfn, bool aupe)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr;

	/* This register exists only on 82576 and newer, so if we are older
	 * than that we should exit and do nothing
	 */
	if (hw->mac.type < e1000_82576)
		return;

	vmolr = rd32(E1000_VMOLR(vfn));
	if (aupe)
		vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
	else
		vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */

	/* clear all bits that might not be set */
	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);

	if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */

	/* for VMDq only allow the VFs and pool 0 to accept broadcast and
	 * multicast packets
	 */
	if (vfn <= adapter->vfs_allocated_count)
		vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */

	wr32(E1000_VMOLR(vfn), vmolr);
}

/**
 *  igb_configure_rx_ring - Configure a receive ring after Reset
 *  @adapter: board private structure
 *  @ring: receive ring to be configured
 *
 *  Configure the Rx unit of the MAC after a reset.
 **/
void igb_configure_rx_ring(struct igb_adapter *adapter,
			   struct igb_ring *ring)
{
	struct e1000_hw *hw = &adapter->hw;
	union e1000_adv_rx_desc *rx_desc;
	u64 rdba = ring->dma;
	int reg_idx = ring->reg_idx;
	u32 srrctl = 0, rxdctl = 0;

	/* disable the queue */
	wr32(E1000_RXDCTL(reg_idx), 0);

	/* Set DMA base address registers */
	wr32(E1000_RDBAL(reg_idx),
	     rdba & 0x00000000ffffffffULL);
	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
	wr32(E1000_RDLEN(reg_idx),
	     ring->count * sizeof(union e1000_adv_rx_desc));

	/* initialize head and tail */
	ring->tail = adapter->io_addr + E1000_RDT(reg_idx);
	wr32(E1000_RDH(reg_idx), 0);
	writel(0, ring->tail);

	/* set descriptor configuration */
	srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
	if (ring_uses_large_buffer(ring))
		srrctl |= IGB_RXBUFFER_3072 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
	else
		srrctl |= IGB_RXBUFFER_2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
	srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
	if (hw->mac.type >= e1000_82580)
		srrctl |= E1000_SRRCTL_TIMESTAMP;
	/* Only set Drop Enable if we are supporting multiple queues */
	if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
		srrctl |= E1000_SRRCTL_DROP_EN;

	wr32(E1000_SRRCTL(reg_idx), srrctl);

	/* set filtering for VMDQ pools */
	igb_set_vmolr(adapter, reg_idx & 0x7, true);

	rxdctl |= IGB_RX_PTHRESH;
	rxdctl |= IGB_RX_HTHRESH << 8;
	rxdctl |= IGB_RX_WTHRESH << 16;

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct igb_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = IGB_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* enable receive descriptor fetching */
	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
	wr32(E1000_RXDCTL(reg_idx), rxdctl);
}
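
/* Editor's note (assumption): the SRRCTL BSIZEPKT field is programmed in
 * 1 KB units (E1000_SRRCTL_BSIZEPKT_SHIFT is 10), so the shifts above write
 * 2 for the 2048-byte buffer and 3 for the 3072-byte buffer.
 */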

static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
				  struct igb_ring *rx_ring)
{
	/* set build_skb and buffer size flags */
	clear_ring_build_skb_enabled(rx_ring);
	clear_ring_uses_large_buffer(rx_ring);

	if (adapter->flags & IGB_FLAG_RX_LEGACY)
		return;

	set_ring_build_skb_enabled(rx_ring);

#if (PAGE_SIZE < 8192)
	if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
		return;

	set_ring_uses_large_buffer(rx_ring);
#endif
}
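
/* Editor's note (assumption about the buffer scheme): with PAGE_SIZE < 8192
 * each page is normally split into two 2 KB halves for build_skb; frames
 * larger than IGB_MAX_FRAME_BUILD_SKB no longer fit in a half page once
 * skb_shared_info and headroom are added, so such rings switch to 3 KB
 * buffers backed by higher-order pages instead.
 */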

/**
 *  igb_configure_rx - Configure receive Unit after Reset
 *  @adapter: board private structure
 *
 *  Configure the Rx unit of the MAC after a reset.
 **/
static void igb_configure_rx(struct igb_adapter *adapter)
{
	int i;

	/* set the correct pool for the PF default MAC address in entry 0 */
	igb_set_default_mac_filter(adapter);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *rx_ring = adapter->rx_ring[i];

		igb_set_rx_buffer_len(adapter, rx_ring);
		igb_configure_rx_ring(adapter, rx_ring);
	}
}

/**
 *  igb_free_tx_resources - Free Tx Resources per Queue
 *  @tx_ring: Tx descriptor ring for a specific queue
 *
 *  Free all transmit software resources
 **/
void igb_free_tx_resources(struct igb_ring *tx_ring)
{
	igb_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer_info);
	tx_ring->tx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);

	tx_ring->desc = NULL;
}

/**
 *  igb_free_all_tx_resources - Free Tx Resources for All Queues
 *  @adapter: board private structure
 *
 *  Free all transmit software resources
 **/
static void igb_free_all_tx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i])
			igb_free_tx_resources(adapter->tx_ring[i]);
}

/**
 *  igb_clean_tx_ring - Free Tx Buffers
 *  @tx_ring: ring to be cleaned
 **/
static void igb_clean_tx_ring(struct igb_ring *tx_ring)
{
	u16 i = tx_ring->next_to_clean;
	struct igb_tx_buffer *tx_buffer = &tx_ring->tx_buffer_info[i];

	while (i != tx_ring->next_to_use) {
		union e1000_adv_tx_desc *eop_desc, *tx_desc;

		/* Free all the Tx ring sk_buffs */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);

		/* check for eop_desc to determine the end of the packet */
		eop_desc = tx_buffer->next_to_watch;
		tx_desc = IGB_TX_DESC(tx_ring, i);

		/* unmap remaining buffers */
		while (tx_desc != eop_desc) {
			tx_buffer++;
			tx_desc++;
			i++;
			if (unlikely(i == tx_ring->count)) {
				i = 0;
				tx_buffer = tx_ring->tx_buffer_info;
				tx_desc = IGB_TX_DESC(tx_ring, 0);
			}

			/* unmap any remaining paged data */
			if (dma_unmap_len(tx_buffer, len))
				dma_unmap_page(tx_ring->dev,
					       dma_unmap_addr(tx_buffer, dma),
					       dma_unmap_len(tx_buffer, len),
					       DMA_TO_DEVICE);
		}

		/* move us one more past the eop_desc for start of next pkt */
		tx_buffer++;
		i++;
		if (unlikely(i == tx_ring->count)) {
			i = 0;
			tx_buffer = tx_ring->tx_buffer_info;
		}
	}

	/* reset BQL for queue */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* reset next_to_use and next_to_clean */
	tx_ring->next_to_use = 0;
	tx_ring->next_to_clean = 0;
}
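
/* Editor's note (not in the original source): the loop above only walks the
 * span between next_to_clean and next_to_use, i.e. buffers that were handed
 * to hardware but never completed; everything outside that span was already
 * unmapped and freed by the normal completion path.
 */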

/**
 *  igb_clean_all_tx_rings - Free Tx Buffers for all queues
 *  @adapter: board private structure
 **/
static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++)
		if (adapter->tx_ring[i])
			igb_clean_tx_ring(adapter->tx_ring[i]);
}

/**
 *  igb_free_rx_resources - Free Rx Resources
 *  @rx_ring: ring to clean the resources from
 *
 *  Free all receive software resources
 **/
void igb_free_rx_resources(struct igb_ring *rx_ring)
{
	igb_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer_info);
	rx_ring->rx_buffer_info = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 *  igb_free_all_rx_resources - Free Rx Resources for All Queues
 *  @adapter: board private structure
 *
 *  Free all receive software resources
 **/
static void igb_free_all_rx_resources(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i])
			igb_free_rx_resources(adapter->rx_ring[i]);
}

/**
 *  igb_clean_rx_ring - Free Rx Buffers per Queue
 *  @rx_ring: ring to free buffers from
 **/
static void igb_clean_rx_ring(struct igb_ring *rx_ring)
{
	u16 i = rx_ring->next_to_clean;

	if (rx_ring->skb)
		dev_kfree_skb(rx_ring->skb);
	rx_ring->skb = NULL;

	/* Free all the Rx ring sk_buffs */
	while (i != rx_ring->next_to_alloc) {
		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];

		/* Invalidate cache lines that may have been written to by
		 * device so that we avoid corrupting memory.
		 */
		dma_sync_single_range_for_cpu(rx_ring->dev,
					      buffer_info->dma,
					      buffer_info->page_offset,
					      igb_rx_bufsz(rx_ring),
					      DMA_FROM_DEVICE);

		/* free resources associated with mapping */
		dma_unmap_page_attrs(rx_ring->dev,
				     buffer_info->dma,
				     igb_rx_pg_size(rx_ring),
				     DMA_FROM_DEVICE,
				     IGB_RX_DMA_ATTR);
		__page_frag_cache_drain(buffer_info->page,
					buffer_info->pagecnt_bias);

		i++;
		if (i == rx_ring->count)
			i = 0;
	}

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 *  igb_clean_all_rx_rings - Free Rx Buffers for all queues
 *  @adapter: board private structure
 **/
static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
{
	int i;

	for (i = 0; i < adapter->num_rx_queues; i++)
		if (adapter->rx_ring[i])
			igb_clean_rx_ring(adapter->rx_ring[i]);
}

/**
 *  igb_set_mac - Change the Ethernet Address of the NIC
 *  @netdev: network interface device structure
 *  @p: pointer to an address structure
 *
 *  Returns 0 on success, negative on failure
 **/
static int igb_set_mac(struct net_device *netdev, void *p)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);

	/* set the correct pool for the new PF MAC address in entry 0 */
	igb_set_default_mac_filter(adapter);

	return 0;
}

/**
 *  igb_write_mc_addr_list - write multicast addresses to MTA
 *  @netdev: network interface device structure
 *
 *  Writes multicast address list to the MTA hash table.
 *  Returns: -ENOMEM on failure
 *           0 on no addresses written
 *           X on writing X addresses to MTA
 **/
static int igb_write_mc_addr_list(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct netdev_hw_addr *ha;
	u8 *mta_list;
	int i;

	if (netdev_mc_empty(netdev)) {
		/* nothing to program, so clear mc list */
		igb_update_mc_addr_list(hw, NULL, 0);
		igb_restore_vf_multicasts(adapter);
		return 0;
	}

	mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
	if (!mta_list)
		return -ENOMEM;

	/* The shared function expects a packed array of only addresses. */
	i = 0;
	netdev_for_each_mc_addr(ha, netdev)
		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);

	igb_update_mc_addr_list(hw, mta_list, i);
	kfree(mta_list);

	return netdev_mc_count(netdev);
}

static int igb_vlan_promisc_enable(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 i, pf_id;

	switch (hw->mac.type) {
	case e1000_i210:
	case e1000_i211:
	case e1000_i350:
		/* VLAN filtering needed for VLAN prio filter */
		if (adapter->netdev->features & NETIF_F_NTUPLE)
			break;
		/* fall through */
	case e1000_82576:
	case e1000_82580:
	case e1000_i354:
		/* VLAN filtering needed for pool filtering */
		if (adapter->vfs_allocated_count)
			break;
		/* fall through */
	default:
		return 1;
	}

	/* We are already in VLAN promisc, nothing to do */
	if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
		return 0;

	if (!adapter->vfs_allocated_count)
		goto set_vfta;

	/* Add PF to all active pools */
	pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;

	for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
		u32 vlvf = rd32(E1000_VLVF(i));

		vlvf |= BIT(pf_id);
		wr32(E1000_VLVF(i), vlvf);
	}

set_vfta:
	/* Set all bits in the VLAN filter table array */
	for (i = E1000_VLAN_FILTER_TBL_SIZE; i--;)
		hw->mac.ops.write_vfta(hw, i, ~0U);

	/* Set flag so we don't redo unnecessary work */
	adapter->flags |= IGB_FLAG_VLAN_PROMISC;

	return 0;
}

#define VFTA_BLOCK_SIZE 8
static void igb_scrub_vfta(struct igb_adapter *adapter, u32 vfta_offset)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vfta[VFTA_BLOCK_SIZE] = { 0 };
	u32 vid_start = vfta_offset * 32;
	u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32);
	u32 i, vid, word, bits, pf_id;

	/* guarantee that we don't scrub out management VLAN */
	vid = adapter->mng_vlan_id;
	if (vid >= vid_start && vid < vid_end)
		vfta[(vid - vid_start) / 32] |= BIT(vid % 32);

	if (!adapter->vfs_allocated_count)
		goto set_vfta;

	pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;

	for (i = E1000_VLVF_ARRAY_SIZE; --i;) {
		u32 vlvf = rd32(E1000_VLVF(i));

		/* pull VLAN ID from VLVF */
		vid = vlvf & VLAN_VID_MASK;

		/* only concern ourselves with a certain range */
		if (vid < vid_start || vid >= vid_end)
			continue;

		if (vlvf & E1000_VLVF_VLANID_ENABLE) {
			/* record VLAN ID in VFTA */
			vfta[(vid - vid_start) / 32] |= BIT(vid % 32);

			/* if PF is part of this then continue */
			if (test_bit(vid, adapter->active_vlans))
				continue;
		}

		/* remove PF from the pool */
		bits = ~BIT(pf_id);
		bits &= rd32(E1000_VLVF(i));
		wr32(E1000_VLVF(i), bits);
	}

set_vfta:
	/* extract values from active_vlans and write back to VFTA */
	for (i = VFTA_BLOCK_SIZE; i--;) {
		vid = (vfta_offset + i) * 32;
		word = vid / BITS_PER_LONG;
		bits = vid % BITS_PER_LONG;

		vfta[i] |= adapter->active_vlans[word] >> bits;

		hw->mac.ops.write_vfta(hw, vfta_offset + i, vfta[i]);
	}
}
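
/* Editor's note (not in the original source): the VLAN filter table is 128
 * 32-bit words covering all 4096 VLAN IDs, so each VFTA_BLOCK_SIZE pass of
 * igb_scrub_vfta rewrites a 256-VID window while preserving the management
 * VLAN and any IDs still referenced by active VLVF pool entries.
 */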

static void igb_vlan_promisc_disable(struct igb_adapter *adapter)
{
	u32 i;

	/* We are not in VLAN promisc, nothing to do */
	if (!(adapter->flags & IGB_FLAG_VLAN_PROMISC))
		return;

	/* Set flag so we don't redo unnecessary work */
	adapter->flags &= ~IGB_FLAG_VLAN_PROMISC;

	for (i = 0; i < E1000_VLAN_FILTER_TBL_SIZE; i += VFTA_BLOCK_SIZE)
		igb_scrub_vfta(adapter, i);
}

/**
 *  igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
 *  @netdev: network interface device structure
 *
 *  The set_rx_mode entry point is called whenever the unicast or multicast
 *  address lists or the network interface flags are updated.  This routine is
 *  responsible for configuring the hardware for proper unicast, multicast,
 *  promiscuous mode, and all-multi behavior.
 **/
static void igb_set_rx_mode(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	unsigned int vfn = adapter->vfs_allocated_count;
	u32 rctl = 0, vmolr = 0, rlpml = MAX_JUMBO_FRAME_SIZE;
	int count;

	/* Check for Promiscuous and All Multicast modes */
	if (netdev->flags & IFF_PROMISC) {
		rctl |= E1000_RCTL_UPE | E1000_RCTL_MPE;
		vmolr |= E1000_VMOLR_MPME;

		/* enable use of UTA filter to force packets to default pool */
		if (hw->mac.type == e1000_82576)
			vmolr |= E1000_VMOLR_ROPE;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			rctl |= E1000_RCTL_MPE;
			vmolr |= E1000_VMOLR_MPME;
		} else {
			/* Write addresses to the MTA, if the attempt fails
			 * then we should just turn on promiscuous mode so
			 * that we can at least receive multicast traffic
			 */
			count = igb_write_mc_addr_list(netdev);
			if (count < 0) {
				rctl |= E1000_RCTL_MPE;
				vmolr |= E1000_VMOLR_MPME;
			} else if (count) {
				vmolr |= E1000_VMOLR_ROMPE;
			}
		}
	}

	/* Write addresses to available RAR registers, if there is not
	 * sufficient space to store all the addresses then enable
	 * unicast promiscuous mode
	 */
	if (__dev_uc_sync(netdev, igb_uc_sync, igb_uc_unsync)) {
		rctl |= E1000_RCTL_UPE;
		vmolr |= E1000_VMOLR_ROPE;
	}

	/* enable VLAN filtering by default */
	rctl |= E1000_RCTL_VFE;

	/* disable VLAN filtering for modes that require it */
	if ((netdev->flags & IFF_PROMISC) ||
	    (netdev->features & NETIF_F_RXALL)) {
		/* if we fail to set all rules then just clear VFE */
		if (igb_vlan_promisc_enable(adapter))
			rctl &= ~E1000_RCTL_VFE;
	} else {
		igb_vlan_promisc_disable(adapter);
	}

	/* update state of unicast, multicast, and VLAN filtering modes */
	rctl |= rd32(E1000_RCTL) & ~(E1000_RCTL_UPE | E1000_RCTL_MPE |
				     E1000_RCTL_VFE);
	wr32(E1000_RCTL, rctl);

#if (PAGE_SIZE < 8192)
	if (!adapter->vfs_allocated_count) {
		if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
			rlpml = IGB_MAX_FRAME_BUILD_SKB;
	}
#endif
	wr32(E1000_RLPML, rlpml);

	/* In order to support SR-IOV and eventually VMDq it is necessary to set
	 * the VMOLR to enable the appropriate modes.  Without this workaround
	 * we will have issues with VLAN tag stripping not being done for frames
	 * that are only arriving because we are the default pool
	 */
	if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
		return;

	/* set UTA to appropriate mode */
	igb_set_uta(adapter, !!(vmolr & E1000_VMOLR_ROPE));

	vmolr |= rd32(E1000_VMOLR(vfn)) &
		 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);

	/* enable Rx jumbo frames, restrict as needed to support build_skb */
	vmolr &= ~E1000_VMOLR_RLPML_MASK;
#if (PAGE_SIZE < 8192)
	if (adapter->max_frame_size <= IGB_MAX_FRAME_BUILD_SKB)
		vmolr |= IGB_MAX_FRAME_BUILD_SKB;
	else
#endif
		vmolr |= MAX_JUMBO_FRAME_SIZE;
	vmolr |= E1000_VMOLR_LPE;

	wr32(E1000_VMOLR(vfn), vmolr);

	igb_restore_vf_multicasts(adapter);
}

static void igb_check_wvbr(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 wvbr = 0;

	switch (hw->mac.type) {
	case e1000_82576:
	case e1000_i350:
		wvbr = rd32(E1000_WVBR);
		if (!wvbr)
			return;
		break;
	default:
		break;
	}

	adapter->wvbr |= wvbr;
}

#define IGB_STAGGERED_QUEUE_OFFSET 8

static void igb_spoof_check(struct igb_adapter *adapter)
{
	int j;

	if (!adapter->wvbr)
		return;

	for (j = 0; j < adapter->vfs_allocated_count; j++) {
		if (adapter->wvbr & BIT(j) ||
		    adapter->wvbr & BIT(j + IGB_STAGGERED_QUEUE_OFFSET)) {
			dev_warn(&adapter->pdev->dev,
				 "Spoof event(s) detected on VF %d\n", j);
			adapter->wvbr &=
				~(BIT(j) |
				  BIT(j + IGB_STAGGERED_QUEUE_OFFSET));
		}
	}
}
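
/* Editor's note (assumption): in the WVBR register each VF owns two queue
 * bits, bit j and bit j + IGB_STAGGERED_QUEUE_OFFSET, which is why the check
 * above tests and clears both positions for every allocated VF.
 */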

/* Need to wait a few seconds after link up to get diagnostic information from
 * the phy
 */
static void igb_update_phy_info(struct timer_list *t)
{
	struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);

	igb_get_phy_info(&adapter->hw);
}

/**
 *  igb_has_link - check shared code for link and determine up/down
 *  @adapter: pointer to driver private info
 **/
bool igb_has_link(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	bool link_active = false;

	/* get_link_status is set on LSC (link status) interrupt or
	 * rx sequence error interrupt.  get_link_status will stay
	 * false until the e1000_check_for_link establishes link
	 * for copper adapters ONLY
	 */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (!hw->mac.get_link_status)
			return true;
		/* fall through */
	case e1000_media_type_internal_serdes:
		hw->mac.ops.check_for_link(hw);
		link_active = !hw->mac.get_link_status;
		break;
	default:
	case e1000_media_type_unknown:
		break;
	}

	if (((hw->mac.type == e1000_i210) ||
	     (hw->mac.type == e1000_i211)) &&
	    (hw->phy.id == I210_I_PHY_ID)) {
		if (!netif_carrier_ok(adapter->netdev)) {
			adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
		} else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
			adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
			adapter->link_check_timeout = jiffies;
		}
	}

	return link_active;
}

static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
{
	bool ret = false;
	u32 ctrl_ext, thstat;

	/* check for thermal sensor event on i350 copper only */
	if (hw->mac.type == e1000_i350) {
		thstat = rd32(E1000_THSTAT);
		ctrl_ext = rd32(E1000_CTRL_EXT);

		if ((hw->phy.media_type == e1000_media_type_copper) &&
		    !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
			ret = !!(thstat & event);
	}

	return ret;
}

/**
 *  igb_check_lvmmc - check for malformed packets received
 *  and indicated in LVMMC register
 *  @adapter: pointer to adapter
 **/
static void igb_check_lvmmc(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 lvmmc;

	lvmmc = rd32(E1000_LVMMC);
	if (lvmmc) {
		if (unlikely(net_ratelimit())) {
			netdev_warn(adapter->netdev,
				    "malformed Tx packet detected and dropped, LVMMC:0x%08x\n",
				    lvmmc);
		}
	}
}

/**
 *  igb_watchdog - Timer Call-back
 *  @t: pointer to the timer_list embedded in our private adapter structure
 **/
static void igb_watchdog(struct timer_list *t)
{
	struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);

	/* Do the rest outside of interrupt context */
	schedule_work(&adapter->watchdog_task);
}

static void igb_watchdog_task(struct work_struct *work)
{
	struct igb_adapter *adapter = container_of(work,
						   struct igb_adapter,
						   watchdog_task);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_phy_info *phy = &hw->phy;
	struct net_device *netdev = adapter->netdev;
	u32 link;
	int i;
	u32 connsw;
	u16 phy_data, retry_count = 20;

	link = igb_has_link(adapter);

	if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
		if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
			adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
		else
			link = false;
	}

	/* Force link down if we have fiber to swap to */
	if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
		if (hw->phy.media_type == e1000_media_type_copper) {
			connsw = rd32(E1000_CONNSW);
			if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
				link = 0;
		}
	}
	if (link) {
		/* Perform a reset if the media type changed. */
		if (hw->dev_spec._82575.media_changed) {
			hw->dev_spec._82575.media_changed = false;
			adapter->flags |= IGB_FLAG_MEDIA_RESET;
			igb_reset(adapter);
		}
		/* Cancel scheduled suspend requests. */
		pm_runtime_resume(netdev->dev.parent);

		if (!netif_carrier_ok(netdev)) {
			u32 ctrl;

			hw->mac.ops.get_speed_and_duplex(hw,
							 &adapter->link_speed,
							 &adapter->link_duplex);

			ctrl = rd32(E1000_CTRL);
			/* Link status message must follow this format */
			netdev_info(netdev,
				    "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
				    netdev->name,
				    adapter->link_speed,
				    adapter->link_duplex == FULL_DUPLEX ?
				    "Full" : "Half",
				    (ctrl & E1000_CTRL_TFCE) &&
				    (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
				    (ctrl & E1000_CTRL_RFCE) ?  "RX" :
				    (ctrl & E1000_CTRL_TFCE) ?  "TX" : "None");

			/* disable EEE if enabled */
			if ((adapter->flags & IGB_FLAG_EEE) &&
			    (adapter->link_duplex == HALF_DUPLEX)) {
				dev_info(&adapter->pdev->dev,
					 "EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
				adapter->hw.dev_spec._82575.eee_disable = true;
				adapter->flags &= ~IGB_FLAG_EEE;
			}

			/* check if SmartSpeed worked */
			igb_check_downshift(hw);
			if (phy->speed_downgraded)
				netdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw,
			    E1000_THSTAT_LINK_THROTTLE))
				netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");

			/* adjust timeout factor according to speed/duplex */
			adapter->tx_timeout_factor = 1;
			switch (adapter->link_speed) {
			case SPEED_10:
				adapter->tx_timeout_factor = 14;
				break;
			case SPEED_100:
				/* maybe add some timeout factor ? */
				break;
			}

			if (adapter->link_speed != SPEED_1000)
				goto no_wait;

			/* wait for Remote receiver status OK */
retry_read_status:
			if (!igb_read_phy_reg(hw, PHY_1000T_STATUS,
					      &phy_data)) {
				if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) &&
				    retry_count) {
					msleep(100);
					retry_count--;
					goto retry_read_status;
				} else if (!retry_count) {
					dev_err(&adapter->pdev->dev, "exceeded max 2 second wait for remote receiver status\n");
				}
			} else {
				dev_err(&adapter->pdev->dev, "failed to read 1000Base-T Status register\n");
			}
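			/* Editor's note: retry_count starts at 20 and each
			 * retry sleeps 100 ms, which is where the 2 second
			 * ceiling reported above comes from.
			 */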
no_wait:
			netif_carrier_on(netdev);

			igb_ping_all_vfs(adapter);
			igb_check_vf_rate_limit(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));
		}
	} else {
		if (netif_carrier_ok(netdev)) {
			adapter->link_speed = 0;
			adapter->link_duplex = 0;

			/* check for thermal sensor event */
			if (igb_thermal_sensor_event(hw,
			    E1000_THSTAT_PWR_DOWN)) {
				netdev_err(netdev, "The network adapter was stopped because it overheated\n");
			}

			/* Link status message must follow this format */
			netdev_info(netdev, "igb: %s NIC Link is Down\n",
				    netdev->name);
			netif_carrier_off(netdev);

			igb_ping_all_vfs(adapter);

			/* link state has changed, schedule phy info update */
			if (!test_bit(__IGB_DOWN, &adapter->state))
				mod_timer(&adapter->phy_info_timer,
					  round_jiffies(jiffies + 2 * HZ));

			/* link is down, time to check for alternate media */
			if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
				igb_check_swap_media(adapter);
				if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
					schedule_work(&adapter->reset_task);
					/* return immediately */
					return;
				}
			}
			pm_schedule_suspend(netdev->dev.parent,
					    MSEC_PER_SEC * 5);

		/* also check for alternate media here */
		} else if (!netif_carrier_ok(netdev) &&
			   (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
			igb_check_swap_media(adapter);
			if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
				schedule_work(&adapter->reset_task);
				/* return immediately */
				return;
			}
		}
	}

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter);
	spin_unlock(&adapter->stats64_lock);

	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *tx_ring = adapter->tx_ring[i];

		if (!netif_carrier_ok(netdev)) {
			/* We've lost link, so the controller stops DMA,
			 * but we've got queued Tx work that's never going
			 * to get done, so reset controller to flush Tx.
			 * (Do the reset outside of interrupt context).
			 */
			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
				adapter->tx_timeout_count++;
				schedule_work(&adapter->reset_task);
				/* return immediately since reset is imminent */
				return;
			}
		}

		/* Force detection of hung controller every watchdog period */
		set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
	}

	/* Cause software interrupt to ensure Rx ring is cleaned */
	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
		u32 eics = 0;

		for (i = 0; i < adapter->num_q_vectors; i++)
			eics |= adapter->q_vector[i]->eims_value;
		wr32(E1000_EICS, eics);
	} else {
		wr32(E1000_ICS, E1000_ICS_RXDMT0);
	}

	igb_spoof_check(adapter);
	igb_ptp_rx_hang(adapter);
	igb_ptp_tx_hang(adapter);

	/* Check LVMMC register on i350/i354 only */
	if ((adapter->hw.mac.type == e1000_i350) ||
	    (adapter->hw.mac.type == e1000_i354))
		igb_check_lvmmc(adapter);

	/* Reset the timer */
	if (!test_bit(__IGB_DOWN, &adapter->state)) {
		if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies + HZ));
		else
			mod_timer(&adapter->watchdog_timer,
				  round_jiffies(jiffies + 2 * HZ));
	}
}

enum latency_range {
	lowest_latency = 0,
	low_latency = 1,
	bulk_latency = 2,
	latency_invalid = 255
};

/**
 *  igb_update_ring_itr - update the dynamic ITR value based on packet size
 *  @q_vector: pointer to q_vector
 *
 *  Stores a new ITR value based strictly on packet size.  This
 *  algorithm is less sophisticated than that used in igb_update_itr,
 *  due to the difficulty of synchronizing statistics across multiple
 *  receive rings.  The divisors and thresholds used by this function
 *  were determined based on theoretical maximum wire speed and testing
 *  data, in order to minimize response time while increasing bulk
 *  throughput.
 *  This functionality is controlled by ethtool's coalescing settings.
 *  NOTE: This function is called only when operating in a multiqueue
 *  receive environment.
 **/
static void igb_update_ring_itr(struct igb_q_vector *q_vector)
{
	int new_val = q_vector->itr_val;
	int avg_wire_size = 0;
	struct igb_adapter *adapter = q_vector->adapter;
	unsigned int packets;

	/* For non-gigabit speeds, just fix the interrupt rate at 4000
	 * ints/sec - ITR timer value of 120 ticks.
	 */
	if (adapter->link_speed != SPEED_1000) {
		new_val = IGB_4K_ITR;
		goto set_itr_val;
	}

	packets = q_vector->rx.total_packets;
	if (packets)
		avg_wire_size = q_vector->rx.total_bytes / packets;

	packets = q_vector->tx.total_packets;
	if (packets)
		avg_wire_size = max_t(u32, avg_wire_size,
				      q_vector->tx.total_bytes / packets);

	/* if avg_wire_size isn't set no work was done */
	if (!avg_wire_size)
		goto clear_counts;

	/* Add 24 bytes to size to account for CRC, preamble, and gap */
	avg_wire_size += 24;

	/* Don't starve jumbo frames */
	avg_wire_size = min(avg_wire_size, 3000);

	/* Give a little boost to mid-size frames */
	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
		new_val = avg_wire_size / 3;
	else
		new_val = avg_wire_size / 2;

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (new_val < IGB_20K_ITR &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		new_val = IGB_20K_ITR;

set_itr_val:
	if (new_val != q_vector->itr_val) {
		q_vector->itr_val = new_val;
		q_vector->set_itr = 1;
	}
clear_counts:
	q_vector->rx.total_bytes = 0;
	q_vector->rx.total_packets = 0;
	q_vector->tx.total_bytes = 0;
	q_vector->tx.total_packets = 0;
}
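
/* Editor's note (rough numbers, assuming the ~0.25 us EITR granularity
 * implied by IGB_20K_ITR == 196): full-size 1500-byte frames give
 * new_val = (1500 + 24) / 2 = 762, roughly 190 us between interrupts or
 * about 5k ints/sec, while small frames drive the value, and hence the
 * interrupt rate, toward the 20k ints/sec bound applied in conservative
 * mode above.
 */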

/**
 *  igb_update_itr - update the dynamic ITR value based on statistics
 *  @q_vector: pointer to q_vector
 *  @ring_container: ring info to update the itr for
 *
 *  Stores a new ITR value based on packets and byte
 *  counts during the last interrupt.  The advantage of per interrupt
 *  computation is faster updates and more accurate ITR for the current
 *  traffic pattern.  Constants in this function were computed
 *  based on theoretical maximum wire speed and thresholds were set based
 *  on testing data as well as attempting to minimize response time
 *  while increasing bulk throughput.
 *  This functionality is controlled by ethtool's coalescing settings.
 *  NOTE: These calculations are only valid when operating in a single-
 *  queue environment.
 **/
static void igb_update_itr(struct igb_q_vector *q_vector,
			   struct igb_ring_container *ring_container)
{
	unsigned int packets = ring_container->total_packets;
	unsigned int bytes = ring_container->total_bytes;
	u8 itrval = ring_container->itr;

	/* no packets, exit with status unchanged */
	if (packets == 0)
		return;

	switch (itrval) {
	case lowest_latency:
		/* handle TSO and jumbo frames */
		if (bytes/packets > 8000)
			itrval = bulk_latency;
		else if ((packets < 5) && (bytes > 512))
			itrval = low_latency;
		break;
	case low_latency:  /* 50 usec aka 20000 ints/s */
		if (bytes > 10000) {
			/* this if handles the TSO accounting */
			if (bytes/packets > 8000)
				itrval = bulk_latency;
			else if ((packets < 10) || ((bytes/packets) > 1200))
				itrval = bulk_latency;
			else if ((packets > 35))
				itrval = lowest_latency;
		} else if (bytes/packets > 2000) {
			itrval = bulk_latency;
		} else if (packets <= 2 && bytes < 512) {
			itrval = lowest_latency;
		}
		break;
	case bulk_latency: /* 250 usec aka 4000 ints/s */
		if (bytes > 25000) {
			if (packets > 35)
				itrval = low_latency;
		} else if (bytes < 1500) {
			itrval = low_latency;
		}
		break;
	}

	/* clear work counters since we have the values we need */
	ring_container->total_bytes = 0;
	ring_container->total_packets = 0;

	/* write updated itr to ring container */
	ring_container->itr = itrval;
}

static void igb_set_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 new_itr = q_vector->itr_val;
	u8 current_itr = 0;

	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
	if (adapter->link_speed != SPEED_1000) {
		current_itr = 0;
		new_itr = IGB_4K_ITR;
		goto set_itr_now;
	}

	igb_update_itr(q_vector, &q_vector->tx);
	igb_update_itr(q_vector, &q_vector->rx);

	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);

	/* conservative mode (itr 3) eliminates the lowest_latency setting */
	if (current_itr == lowest_latency &&
	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
		current_itr = low_latency;

	switch (current_itr) {
	/* counts and packets in update_itr are dependent on these numbers */
	case lowest_latency:
		new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
		break;
	case low_latency:
		new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
		break;
	case bulk_latency:
		new_itr = IGB_4K_ITR; /* 4,000 ints/sec */
		break;
	default:
		break;
	}

set_itr_now:
	if (new_itr != q_vector->itr_val) {
		/* this attempts to bias the interrupt rate towards Bulk
		 * by adding intermediate steps when interrupt rate is
		 * increasing
		 */
		new_itr = new_itr > q_vector->itr_val ?
			  max((new_itr * q_vector->itr_val) /
			      (new_itr + (q_vector->itr_val >> 2)),
			      new_itr) : new_itr;
		/* Don't write the value here; it resets the adapter's
		 * internal timer, and causes us to delay far longer than
		 * we should between interrupts.  Instead, we write the ITR
		 * value at the beginning of the next interrupt so the timing
		 * ends up being correct.
		 */
		q_vector->itr_val = new_itr;
		q_vector->set_itr = 1;
	}
}

static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
			    u32 type_tucmd, u32 mss_l4len_idx)
{
	struct e1000_adv_tx_context_desc *context_desc;
	u16 i = tx_ring->next_to_use;

	context_desc = IGB_TX_CTXTDESC(tx_ring, i);

	i++;
	tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;

	/* set bits to identify this as an advanced context descriptor */
	type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	/* For 82575, context index must be unique per ring. */
	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		mss_l4len_idx |= tx_ring->reg_idx << 4;

	context_desc->vlan_macip_lens	= cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed	= 0;
	context_desc->type_tucmd_mlhl	= cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx	= cpu_to_le32(mss_l4len_idx);
}

static int igb_tso(struct igb_ring *tx_ring,
		   struct igb_tx_buffer *first,
		   u8 *hdr_len)
{
	u32 vlan_macip_lens, type_tucmd, mss_l4len_idx;
	struct sk_buff *skb = first->skb;
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} ip;
	union {
		struct tcphdr *tcp;
		unsigned char *hdr;
	} l4;
	u32 paylen, l4_offset;
	int err;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (!skb_is_gso(skb))
		return 0;

	err = skb_cow_head(skb, 0);
	if (err < 0)
		return err;

	ip.hdr = skb_network_header(skb);
	l4.hdr = skb_checksum_start(skb);

	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
	type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;

	/* initialize outer IP header fields */
	if (ip.v4->version == 4) {
		unsigned char *csum_start = skb_checksum_start(skb);
		unsigned char *trans_start = ip.hdr + (ip.v4->ihl * 4);

		/* IP header will have to cancel out any data that
		 * is not a part of the outer IP header
		 */
		ip.v4->check = csum_fold(csum_partial(trans_start,
						      csum_start - trans_start,
						      0));
		type_tucmd |= E1000_ADVTXD_TUCMD_IPV4;

		ip.v4->tot_len = 0;
		first->tx_flags |= IGB_TX_FLAGS_TSO |
				   IGB_TX_FLAGS_CSUM |
				   IGB_TX_FLAGS_IPV4;
	} else {
		ip.v6->payload_len = 0;
		first->tx_flags |= IGB_TX_FLAGS_TSO |
				   IGB_TX_FLAGS_CSUM;
	}

	/* determine offset of inner transport header */
	l4_offset = l4.hdr - skb->data;

	/* compute length of segmentation header */
	*hdr_len = (l4.tcp->doff * 4) + l4_offset;

	/* remove payload length from inner checksum */
	paylen = skb->len - l4_offset;
	csum_replace_by_diff(&l4.tcp->check, htonl(paylen));

	/* update gso size and bytecount with header size */
	first->gso_segs = skb_shinfo(skb)->gso_segs;
	first->bytecount += (first->gso_segs - 1) * *hdr_len;

	/* MSS L4LEN IDX */
	mss_l4len_idx = (*hdr_len - l4_offset) << E1000_ADVTXD_L4LEN_SHIFT;
	mss_l4len_idx |= skb_shinfo(skb)->gso_size << E1000_ADVTXD_MSS_SHIFT;

	/* VLAN MACLEN IPLEN */
	vlan_macip_lens = l4.hdr - ip.hdr;
	vlan_macip_lens |= (ip.hdr - skb->data) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;

	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, mss_l4len_idx);

	return 1;
}

static inline bool igb_ipv6_csum_is_sctp(struct sk_buff *skb)
{
	unsigned int offset = 0;

	ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL);

	return offset == skb_checksum_start_offset(skb);
}

static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
{
	struct sk_buff *skb = first->skb;
	u32 vlan_macip_lens = 0;
	u32 type_tucmd = 0;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
csum_failed:
		if (!(first->tx_flags & IGB_TX_FLAGS_VLAN))
			return;
		goto no_csum;
	}

	switch (skb->csum_offset) {
	case offsetof(struct tcphdr, check):
		type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
		/* fall through */
	case offsetof(struct udphdr, check):
		break;
	case offsetof(struct sctphdr, checksum):
		/* validate that this is actually an SCTP request */
		if (((first->protocol == htons(ETH_P_IP)) &&
		     (ip_hdr(skb)->protocol == IPPROTO_SCTP)) ||
		    ((first->protocol == htons(ETH_P_IPV6)) &&
		     igb_ipv6_csum_is_sctp(skb))) {
			type_tucmd = E1000_ADVTXD_TUCMD_L4T_SCTP;
			break;
		}
		/* fall through */
	default:
		skb_checksum_help(skb);
		goto csum_failed;
	}

	/* update TX checksum flag */
	first->tx_flags |= IGB_TX_FLAGS_CSUM;
	vlan_macip_lens = skb_checksum_start_offset(skb) -
			  skb_network_offset(skb);

no_csum:
	vlan_macip_lens |= skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IGB_TX_FLAGS_VLAN_MASK;

	igb_tx_ctxtdesc(tx_ring, vlan_macip_lens, type_tucmd, 0);
}

#define IGB_SET_FLAG(_input, _flag, _result) \
	((_flag <= _result) ? \
	 ((u32)(_input & _flag) * (_result / _flag)) : \
	 ((u32)(_input & _flag) / (_flag / _result)))
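
/* Editor's note (not in the original source): _flag and _result are
 * single-bit masks known at compile time, so the multiply or divide by
 * their power-of-two ratio compiles down to a shift; the macro is a
 * branch-free way of translating one flag bit position into another.
 */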
static u32 igb_tx_cmd_type(struct sk_buff *skb, u32 tx_flags)
{
	/* set type for advanced descriptor with frame checksum insertion */
	u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
		       E1000_ADVTXD_DCMD_DEXT |
		       E1000_ADVTXD_DCMD_IFCS;

	/* set HW vlan bit if vlan is present */
	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_VLAN,
				 (E1000_ADVTXD_DCMD_VLE));

	/* set segmentation bits for TSO */
	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSO,
				 (E1000_ADVTXD_DCMD_TSE));

	/* set timestamp bit if present */
	cmd_type |= IGB_SET_FLAG(tx_flags, IGB_TX_FLAGS_TSTAMP,
				 (E1000_ADVTXD_MAC_TSTAMP));

	/* clear the frame-checksum-insertion bit again when the skb
	 * requested no FCS (the XOR undoes the IFCS default set above)
	 */
	cmd_type ^= IGB_SET_FLAG(skb->no_fcs, 1, E1000_ADVTXD_DCMD_IFCS);

	return cmd_type;
}

static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
				 union e1000_adv_tx_desc *tx_desc,
				 u32 tx_flags, unsigned int paylen)
{
	u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;

	/* 82575 requires a unique index per ring */
	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
		olinfo_status |= tx_ring->reg_idx << 4;

	/* insert L4 checksum */
	olinfo_status |= IGB_SET_FLAG(tx_flags,
				      IGB_TX_FLAGS_CSUM,
				      (E1000_TXD_POPTS_TXSM << 8));

	/* insert IPv4 checksum */
	olinfo_status |= IGB_SET_FLAG(tx_flags,
				      IGB_TX_FLAGS_IPV4,
				      (E1000_TXD_POPTS_IXSM << 8));

	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
}

static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
	struct net_device *netdev = tx_ring->netdev;

	netif_stop_subqueue(netdev, tx_ring->queue_index);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();

	/* We need to check again in a case another CPU has just
	 * made room available.
	 */
	if (igb_desc_unused(tx_ring) < size)
		return -EBUSY;

	/* A reprieve! */
	netif_wake_subqueue(netdev, tx_ring->queue_index);

	u64_stats_update_begin(&tx_ring->tx_syncp2);
	tx_ring->tx_stats.restart_queue2++;
	u64_stats_update_end(&tx_ring->tx_syncp2);

	return 0;
}

static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
{
	if (igb_desc_unused(tx_ring) >= size)
		return 0;
	return __igb_maybe_stop_tx(tx_ring, size);
}
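
/* Editor's note (assumption about the pairing): the smp_mb() in
 * __igb_maybe_stop_tx orders the queue-stop against the final free-count
 * check so it pairs with the cleanup path that frees descriptors and wakes
 * the queue; without the barrier a race could leave the queue stopped with
 * room available and no wake ever issued.
 */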
static int igb_tx_map(struct igb_ring *tx_ring,
		      struct igb_tx_buffer *first,
		      const u8 hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct igb_tx_buffer *tx_buffer;
	union e1000_adv_tx_desc *tx_desc;
	struct skb_frag_struct *frag;
	dma_addr_t dma;
	unsigned int data_len, size;
	u32 tx_flags = first->tx_flags;
	u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
	u16 i = tx_ring->next_to_use;

	tx_desc = IGB_TX_DESC(tx_ring, i);

	igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);

	size = skb_headlen(skb);
	data_len = skb->data_len;

	dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE);

	tx_buffer = first;

	for (frag = &skb_shinfo(skb)->frags[0];; frag++) {
		if (dma_mapping_error(tx_ring->dev, dma))
			goto dma_error;

		/* record length, and DMA address */
		dma_unmap_len_set(tx_buffer, len, size);
		dma_unmap_addr_set(tx_buffer, dma, dma);

		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		while (unlikely(size > IGB_MAX_DATA_PER_TXD)) {
			tx_desc->read.cmd_type_len =
				cpu_to_le32(cmd_type ^ IGB_MAX_DATA_PER_TXD);

			i++;
			tx_desc++;
			if (i == tx_ring->count) {
				tx_desc = IGB_TX_DESC(tx_ring, 0);
				i = 0;
			}
			tx_desc->read.olinfo_status = 0;

			dma += IGB_MAX_DATA_PER_TXD;
			size -= IGB_MAX_DATA_PER_TXD;

			tx_desc->read.buffer_addr = cpu_to_le64(dma);
		}

		if (likely(!data_len))
			break;

		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);

		i++;
		tx_desc++;
		if (i == tx_ring->count) {
			tx_desc = IGB_TX_DESC(tx_ring, 0);
			i = 0;
		}
		tx_desc->read.olinfo_status = 0;

		size = skb_frag_size(frag);
		data_len -= size;

		dma = skb_frag_dma_map(tx_ring->dev, frag, 0,
				       size, DMA_TO_DEVICE);

		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	/* write last descriptor with RS and EOP bits */
	cmd_type |= size | IGB_TXD_DCMD;
	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);

	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);

	/* set the timestamp */
	first->time_stamp = jiffies;

	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.  (Only applicable for weak-ordered
	 * memory model archs, such as IA-64).
	 *
	 * We also need this memory barrier to make certain all of the
	 * status bits have been updated before next_to_watch is written.
	 */
	wmb();

	/* set next_to_watch value indicating a packet is present */
	first->next_to_watch = tx_desc;

	i++;
	if (i == tx_ring->count)
		i = 0;

	tx_ring->next_to_use = i;

	/* Make sure there is space in the ring for the next send. */
	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);

	if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) {
		writel(i, tx_ring->tail);

		/* we need this if more than one processor can write to our tail
		 * at a time, it synchronizes IO on IA64/Altix systems
		 */
		mmiowb();
	}
	return 0;

dma_error:
	dev_err(tx_ring->dev, "TX DMA map failed\n");
	tx_buffer = &tx_ring->tx_buffer_info[i];

	/* clear dma mappings for failed tx_buffer_info map */
	while (tx_buffer != first) {
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_page(tx_ring->dev,
				       dma_unmap_addr(tx_buffer, dma),
				       dma_unmap_len(tx_buffer, len),
				       DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buffer, len, 0);

		if (i-- == 0)
			i += tx_ring->count;
		tx_buffer = &tx_ring->tx_buffer_info[i];
	}

	if (dma_unmap_len(tx_buffer, len))
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);
	dma_unmap_len_set(tx_buffer, len, 0);

	dev_kfree_skb_any(tx_buffer->skb);
	tx_buffer->skb = NULL;

	tx_ring->next_to_use = i;

	return -1;
}
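
/**
 * igb_xmit_frame_ring - prepare and queue one skb on a specific Tx ring
 * @skb: buffer handed down from the stack
 * @tx_ring: ring the buffer should be transmitted on
 *
 * Checks that the ring has room for the worst-case descriptor count,
 * records any hardware Tx timestamp request and VLAN tag, runs the
 * TSO/checksum offload setup, and hands the buffer to igb_tx_map().
 * Returns NETDEV_TX_BUSY only when the ring is too full to even try.
 **/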
netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
				struct igb_ring *tx_ring)
{
	struct igb_tx_buffer *first;
	int tso;
	u32 tx_flags = 0;
	unsigned short f;
	u16 count = TXD_USE_COUNT(skb_headlen(skb));
	__be16 protocol = vlan_get_protocol(skb);
	u8 hdr_len = 0;

	/* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
	 *       + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
	 *       + 2 desc gap to keep tail from touching head,
	 *       + 1 desc for context descriptor,
	 * otherwise try next time
	 */
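	/* Worked example (illustrative only, assuming the driver's usual
	 * 32K IGB_MAX_DATA_PER_TXD and 4K pages): a non-TSO skb with a
	 * 256-byte linear area and three page fragments needs
	 * 1 + 3 = 4 data descriptors, so the check below asks for
	 * 4 + 3 = 7 free entries before committing to the send.
	 */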
	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);

	if (igb_maybe_stop_tx(tx_ring, count + 3)) {
		/* this is a hard error */
		return NETDEV_TX_BUSY;
	}

	/* record the location of the first descriptor for this packet */
	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
	first->skb = skb;
	first->bytecount = skb->len;
	first->gso_segs = 1;

	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);

		if (adapter->tstamp_config.tx_type == HWTSTAMP_TX_ON &&
		    !test_and_set_bit_lock(__IGB_PTP_TX_IN_PROGRESS,
					   &adapter->state)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			tx_flags |= IGB_TX_FLAGS_TSTAMP;

			adapter->ptp_tx_skb = skb_get(skb);
			adapter->ptp_tx_start = jiffies;
			if (adapter->hw.mac.type == e1000_82576)
				schedule_work(&adapter->ptp_tx_work);
		} else {
			adapter->tx_hwtstamp_skipped++;
		}
	}

	skb_tx_timestamp(skb);

	if (skb_vlan_tag_present(skb)) {
		tx_flags |= IGB_TX_FLAGS_VLAN;
		tx_flags |= (skb_vlan_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
	}

	/* record initial flags and protocol */
	first->tx_flags = tx_flags;
	first->protocol = protocol;

	tso = igb_tso(tx_ring, first, &hdr_len);
	if (tso < 0)
		goto out_drop;
	else if (!tso)
		igb_tx_csum(tx_ring, first);

	if (igb_tx_map(tx_ring, first, hdr_len))
		goto cleanup_tx_tstamp;

	return NETDEV_TX_OK;

out_drop:
	dev_kfree_skb_any(first->skb);
	first->skb = NULL;
cleanup_tx_tstamp:
	if (unlikely(tx_flags & IGB_TX_FLAGS_TSTAMP)) {
		struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);

		dev_kfree_skb_any(adapter->ptp_tx_skb);
		adapter->ptp_tx_skb = NULL;
		if (adapter->hw.mac.type == e1000_82576)
			cancel_work_sync(&adapter->ptp_tx_work);
		clear_bit_unlock(__IGB_PTP_TX_IN_PROGRESS, &adapter->state);
	}

	return NETDEV_TX_OK;
}
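
/**
 * igb_tx_queue_mapping - select the Tx ring for an skb
 * @adapter: board private structure
 * @skb: buffer being transmitted
 *
 * Uses the queue mapping chosen by the stack, folding any out-of-range
 * index back into the number of Tx queues actually allocated.
 **/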
static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int r_idx = skb->queue_mapping;

	if (r_idx >= adapter->num_tx_queues)
		r_idx = r_idx % adapter->num_tx_queues;

	return adapter->tx_ring[r_idx];
}

static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (skb_put_padto(skb, 17))
		return NETDEV_TX_OK;

	return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
}
/**
 * igb_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 **/
static void igb_tx_timeout(struct net_device *netdev)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	/* Do the reset outside of interrupt context */
	adapter->tx_timeout_count++;

	if (hw->mac.type >= e1000_82580)
		hw->dev_spec._82575.global_device_reset = true;

	schedule_work(&adapter->reset_task);
	wr32(E1000_EICS,
	     (adapter->eims_enable_mask & ~adapter->eims_other));
}
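
/**
 * igb_reset_task - reset the adapter outside of interrupt context
 * @work: work_struct embedded in the igb_adapter
 *
 * Scheduled from igb_tx_timeout() and the interrupt handlers; dumps
 * adapter state for debugging and then reinitializes the device.
 **/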
static void igb_reset_task(struct work_struct *work)
{
	struct igb_adapter *adapter;

	adapter = container_of(work, struct igb_adapter, reset_task);

	igb_dump(adapter);
	netdev_err(adapter->netdev, "Reset adapter\n");
	igb_reinit_locked(adapter);
}

/**
 * igb_get_stats64 - Get System Network Statistics
 * @netdev: network interface device structure
 * @stats: rtnl_link_stats64 pointer
 **/
static void igb_get_stats64(struct net_device *netdev,
			    struct rtnl_link_stats64 *stats)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	spin_lock(&adapter->stats64_lock);
	igb_update_stats(adapter);
	memcpy(stats, &adapter->stats64, sizeof(*stats));
	spin_unlock(&adapter->stats64_lock);
}

/**
 * igb_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
static int igb_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	/* adjust max frame to be at least the size of a standard frame */
	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
		max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;

	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	/* igb_down has a dependency on max_frame_size */
	adapter->max_frame_size = max_frame;

	if (netif_running(netdev))
		igb_down(adapter);

	dev_info(&pdev->dev, "changing MTU from %d to %d\n",
		 netdev->mtu, new_mtu);
	netdev->mtu = new_mtu;

	if (netif_running(netdev))
		igb_up(adapter);
	else
		igb_reset(adapter);

	clear_bit(__IGB_RESETTING, &adapter->state);

	return 0;
}

/**
 * igb_update_stats - Update the board statistics counters
 * @adapter: board private structure
 **/
void igb_update_stats(struct igb_adapter *adapter)
{
	struct rtnl_link_stats64 *net_stats = &adapter->stats64;
	struct e1000_hw *hw = &adapter->hw;
	struct pci_dev *pdev = adapter->pdev;
	u32 reg, mpc;
	int i;
	u64 bytes, packets;
	unsigned int start;
	u64 _bytes, _packets;

	/* Prevent stats update while adapter is being reset, or if the pci
	 * connection is down.
	 */
	if (adapter->link_speed == 0)
		return;
	if (pci_channel_offline(pdev))
		return;

	bytes = 0;
	packets = 0;

	rcu_read_lock();
	for (i = 0; i < adapter->num_rx_queues; i++) {
		struct igb_ring *ring = adapter->rx_ring[i];
		u32 rqdpc = rd32(E1000_RQDPC(i));

		if (hw->mac.type >= e1000_i210)
			wr32(E1000_RQDPC(i), 0);

		if (rqdpc) {
			ring->rx_stats.drops += rqdpc;
			net_stats->rx_fifo_errors += rqdpc;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->rx_syncp);
			_bytes = ring->rx_stats.bytes;
			_packets = ring->rx_stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}

	net_stats->rx_bytes = bytes;
	net_stats->rx_packets = packets;

	bytes = 0;
	packets = 0;
	for (i = 0; i < adapter->num_tx_queues; i++) {
		struct igb_ring *ring = adapter->tx_ring[i];

		do {
			start = u64_stats_fetch_begin_irq(&ring->tx_syncp);
			_bytes = ring->tx_stats.bytes;
			_packets = ring->tx_stats.packets;
		} while (u64_stats_fetch_retry_irq(&ring->tx_syncp, start));
		bytes += _bytes;
		packets += _packets;
	}
	net_stats->tx_bytes = bytes;
	net_stats->tx_packets = packets;
	rcu_read_unlock();

	/* read stats registers */
	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
	adapter->stats.gprc += rd32(E1000_GPRC);
	adapter->stats.gorc += rd32(E1000_GORCL);
	rd32(E1000_GORCH); /* clear GORCL */
	adapter->stats.bprc += rd32(E1000_BPRC);
	adapter->stats.mprc += rd32(E1000_MPRC);
	adapter->stats.roc += rd32(E1000_ROC);

	adapter->stats.prc64 += rd32(E1000_PRC64);
	adapter->stats.prc127 += rd32(E1000_PRC127);
	adapter->stats.prc255 += rd32(E1000_PRC255);
	adapter->stats.prc511 += rd32(E1000_PRC511);
	adapter->stats.prc1023 += rd32(E1000_PRC1023);
	adapter->stats.prc1522 += rd32(E1000_PRC1522);
	adapter->stats.symerrs += rd32(E1000_SYMERRS);
	adapter->stats.sec += rd32(E1000_SEC);

	mpc = rd32(E1000_MPC);
	adapter->stats.mpc += mpc;
	net_stats->rx_fifo_errors += mpc;
	adapter->stats.scc += rd32(E1000_SCC);
	adapter->stats.ecol += rd32(E1000_ECOL);
	adapter->stats.mcc += rd32(E1000_MCC);
	adapter->stats.latecol += rd32(E1000_LATECOL);
	adapter->stats.dc += rd32(E1000_DC);
	adapter->stats.rlec += rd32(E1000_RLEC);
	adapter->stats.xonrxc += rd32(E1000_XONRXC);
	adapter->stats.xontxc += rd32(E1000_XONTXC);
	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
	adapter->stats.fcruc += rd32(E1000_FCRUC);
	adapter->stats.gptc += rd32(E1000_GPTC);
	adapter->stats.gotc += rd32(E1000_GOTCL);
	rd32(E1000_GOTCH); /* clear GOTCL */
	adapter->stats.rnbc += rd32(E1000_RNBC);
	adapter->stats.ruc += rd32(E1000_RUC);
	adapter->stats.rfc += rd32(E1000_RFC);
	adapter->stats.rjc += rd32(E1000_RJC);
	adapter->stats.tor += rd32(E1000_TORH);
	adapter->stats.tot += rd32(E1000_TOTH);
	adapter->stats.tpr += rd32(E1000_TPR);

	adapter->stats.ptc64 += rd32(E1000_PTC64);
	adapter->stats.ptc127 += rd32(E1000_PTC127);
	adapter->stats.ptc255 += rd32(E1000_PTC255);
	adapter->stats.ptc511 += rd32(E1000_PTC511);
	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
	adapter->stats.ptc1522 += rd32(E1000_PTC1522);

	adapter->stats.mptc += rd32(E1000_MPTC);
	adapter->stats.bptc += rd32(E1000_BPTC);

	adapter->stats.tpt += rd32(E1000_TPT);
	adapter->stats.colc += rd32(E1000_COLC);

	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
	/* read internal phy specific stats */
	reg = rd32(E1000_CTRL_EXT);
	if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
		adapter->stats.rxerrc += rd32(E1000_RXERRC);

		/* this stat has invalid values on i210/i211 */
		if ((hw->mac.type != e1000_i210) &&
		    (hw->mac.type != e1000_i211))
			adapter->stats.tncrs += rd32(E1000_TNCRS);
	}

	adapter->stats.tsctc += rd32(E1000_TSCTC);
	adapter->stats.tsctfc += rd32(E1000_TSCTFC);

	adapter->stats.iac += rd32(E1000_IAC);
	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);

	/* Fill out the OS statistics structure */
	net_stats->multicast = adapter->stats.mprc;
	net_stats->collisions = adapter->stats.colc;

	/* Rx Errors */

	/* RLEC on some newer hardware can be incorrect so build
	 * our own version based on RUC and ROC
	 */
	net_stats->rx_errors = adapter->stats.rxerrc +
		adapter->stats.crcerrs + adapter->stats.algnerrc +
		adapter->stats.ruc + adapter->stats.roc +
		adapter->stats.cexterr;
	net_stats->rx_length_errors = adapter->stats.ruc +
				      adapter->stats.roc;
	net_stats->rx_crc_errors = adapter->stats.crcerrs;
	net_stats->rx_frame_errors = adapter->stats.algnerrc;
	net_stats->rx_missed_errors = adapter->stats.mpc;

	/* Tx Errors */
	net_stats->tx_errors = adapter->stats.ecol +
			       adapter->stats.latecol;
	net_stats->tx_aborted_errors = adapter->stats.ecol;
	net_stats->tx_window_errors = adapter->stats.latecol;
	net_stats->tx_carrier_errors = adapter->stats.tncrs;

	/* Tx Dropped needs to be maintained elsewhere */

	/* Management Stats */
	adapter->stats.mgptc += rd32(E1000_MGTPTC);
	adapter->stats.mgprc += rd32(E1000_MGTPRC);
	adapter->stats.mgpdc += rd32(E1000_MGTPDC);

	/* OS2BMC Stats */
	reg = rd32(E1000_MANC);
	if (reg & E1000_MANC_EN_BMC2OS) {
		adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
		adapter->stats.o2bspc += rd32(E1000_O2BSPC);
		adapter->stats.b2ospc += rd32(E1000_B2OSPC);
		adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
	}
}
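
/**
 * igb_tsync_interrupt - handle the time sync interrupt causes
 * @adapter: board private structure
 *
 * Services whatever TSICR reports: a system-time wrap (raised as a PPS
 * event), a Tx timestamp becoming available, expiry of either target
 * time register (re-arming the periodic output for the next period),
 * and the two auxiliary timestamp latches (raised as external timestamp
 * events).  Each handled cause is acknowledged back to TSICR.
 **/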
static void igb_tsync_interrupt(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct ptp_clock_event event;
	struct timespec64 ts;
	u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);

	if (tsicr & TSINTR_SYS_WRAP) {
		event.type = PTP_CLOCK_PPS;
		if (adapter->ptp_caps.pps)
			ptp_clock_event(adapter->ptp_clock, &event);
		ack |= TSINTR_SYS_WRAP;
	}

	if (tsicr & E1000_TSICR_TXTS) {
		/* retrieve hardware timestamp */
		schedule_work(&adapter->ptp_tx_work);
		ack |= E1000_TSICR_TXTS;
	}

	if (tsicr & TSINTR_TT0) {
		spin_lock(&adapter->tmreg_lock);
		ts = timespec64_add(adapter->perout[0].start,
				    adapter->perout[0].period);
		/* u32 conversion of tv_sec is safe until y2106 */
		wr32(E1000_TRGTTIML0, ts.tv_nsec);
		wr32(E1000_TRGTTIMH0, (u32)ts.tv_sec);
		tsauxc = rd32(E1000_TSAUXC);
		tsauxc |= TSAUXC_EN_TT0;
		wr32(E1000_TSAUXC, tsauxc);
		adapter->perout[0].start = ts;
		spin_unlock(&adapter->tmreg_lock);
		ack |= TSINTR_TT0;
	}

	if (tsicr & TSINTR_TT1) {
		spin_lock(&adapter->tmreg_lock);
		ts = timespec64_add(adapter->perout[1].start,
				    adapter->perout[1].period);
		wr32(E1000_TRGTTIML1, ts.tv_nsec);
		wr32(E1000_TRGTTIMH1, (u32)ts.tv_sec);
		tsauxc = rd32(E1000_TSAUXC);
		tsauxc |= TSAUXC_EN_TT1;
		wr32(E1000_TSAUXC, tsauxc);
		adapter->perout[1].start = ts;
		spin_unlock(&adapter->tmreg_lock);
		ack |= TSINTR_TT1;
	}

	if (tsicr & TSINTR_AUTT0) {
		nsec = rd32(E1000_AUXSTMPL0);
		sec  = rd32(E1000_AUXSTMPH0);
		event.type = PTP_CLOCK_EXTTS;
		event.index = 0;
		event.timestamp = sec * 1000000000ULL + nsec;
		ptp_clock_event(adapter->ptp_clock, &event);
		ack |= TSINTR_AUTT0;
	}

	if (tsicr & TSINTR_AUTT1) {
		nsec = rd32(E1000_AUXSTMPL1);
		sec  = rd32(E1000_AUXSTMPH1);
		event.type = PTP_CLOCK_EXTTS;
		event.index = 1;
		event.timestamp = sec * 1000000000ULL + nsec;
		ptp_clock_event(adapter->ptp_clock, &event);
		ack |= TSINTR_AUTT1;
	}

	/* acknowledge the interrupts */
	wr32(E1000_TSICR, ack);
}
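
/**
 * igb_msix_other - MSI-X handler for causes not tied to a queue vector
 * @irq: interrupt number
 * @data: pointer to the igb_adapter
 *
 * Handles device reset assertion, DMA out-of-sync (which in IOV mode can
 * indicate a spoof attempt), VF mailbox events, link state changes and
 * time sync interrupts, then re-enables the "other" cause in EIMS.
 **/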
static irqreturn_t igb_msix_other(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = rd32(E1000_ICR);
	/* reading ICR causes bit 31 of EICR to be cleared */

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
		/* The DMA Out of Sync is also indication of a spoof event
		 * in IOV mode. Check the Wrong VM Behavior register to
		 * see if it is really a spoof event.
		 */
		igb_check_wvbr(adapter);
	}

	/* Check for a mailbox event */
	if (icr & E1000_ICR_VMMB)
		igb_msg_task(adapter);

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (icr & E1000_ICR_TS)
		igb_tsync_interrupt(adapter);

	wr32(E1000_EIMS, adapter->eims_other);

	return IRQ_HANDLED;
}
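
/**
 * igb_write_itr - apply the interrupt throttle rate calculated for a vector
 * @q_vector: vector whose EITR register should be updated
 *
 * Writes the pending itr_val, substituting a minimal non-zero value
 * (0x4) when the computed value is zero; on 82575 the rate must be
 * mirrored into the upper half of the register, on later parts the
 * counter-ignore bit is set instead.
 **/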
static void igb_write_itr(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	u32 itr_val = q_vector->itr_val & 0x7FFC;

	if (!q_vector->set_itr)
		return;

	if (!itr_val)
		itr_val = 0x4;

	if (adapter->hw.mac.type == e1000_82575)
		itr_val |= itr_val << 16;
	else
		itr_val |= E1000_EITR_CNT_IGNR;

	writel(itr_val, q_vector->itr_register);
	q_vector->set_itr = 0;
}

static irqreturn_t igb_msix_ring(int irq, void *data)
{
	struct igb_q_vector *q_vector = data;

	/* Write the ITR value calculated from the previous interrupt. */
	igb_write_itr(q_vector);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

#ifdef CONFIG_IGB_DCA
static void igb_update_tx_dca(struct igb_adapter *adapter,
			      struct igb_ring *tx_ring,
			      int cpu)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 txctrl = dca3_get_tag(tx_ring->dev, cpu);

	if (hw->mac.type != e1000_82575)
		txctrl <<= E1000_DCA_TXCTRL_CPUID_SHIFT;

	/* We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |
		  E1000_DCA_TXCTRL_DATA_RRO_EN |
		  E1000_DCA_TXCTRL_DESC_DCA_EN;

	wr32(E1000_DCA_TXCTRL(tx_ring->reg_idx), txctrl);
}

static void igb_update_rx_dca(struct igb_adapter *adapter,
			      struct igb_ring *rx_ring,
			      int cpu)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rxctrl = dca3_get_tag(&adapter->pdev->dev, cpu);

	if (hw->mac.type != e1000_82575)
		rxctrl <<= E1000_DCA_RXCTRL_CPUID_SHIFT;

	/* We can enable relaxed ordering for reads, but not writes when
	 * DCA is enabled.  This is due to a known issue in some chipsets
	 * which will cause the DCA tag to be cleared.
	 */
	rxctrl |= E1000_DCA_RXCTRL_DESC_RRO_EN |
		  E1000_DCA_RXCTRL_DESC_DCA_EN;

	wr32(E1000_DCA_RXCTRL(rx_ring->reg_idx), rxctrl);
}

static void igb_update_dca(struct igb_q_vector *q_vector)
{
	struct igb_adapter *adapter = q_vector->adapter;
	int cpu = get_cpu();

	if (q_vector->cpu == cpu)
		goto out_no_update;

	if (q_vector->tx.ring)
		igb_update_tx_dca(adapter, q_vector->tx.ring, cpu);

	if (q_vector->rx.ring)
		igb_update_rx_dca(adapter, q_vector->rx.ring, cpu);

	q_vector->cpu = cpu;
out_no_update:
	put_cpu();
}

static void igb_setup_dca(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	if (!(adapter->flags & IGB_FLAG_DCA_ENABLED))
		return;

	/* Always use CB2 mode, difference is masked in the CB driver. */
	wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		adapter->q_vector[i]->cpu = -1;
		igb_update_dca(adapter->q_vector[i]);
	}
}

static int __igb_notify_dca(struct device *dev, void *data)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	unsigned long event = *(unsigned long *)data;

	switch (event) {
	case DCA_PROVIDER_ADD:
		/* if already enabled, don't do it again */
		if (adapter->flags & IGB_FLAG_DCA_ENABLED)
			break;
		if (dca_add_requester(dev) == 0) {
			adapter->flags |= IGB_FLAG_DCA_ENABLED;
			dev_info(&pdev->dev, "DCA enabled\n");
			igb_setup_dca(adapter);
			break;
		}
		/* Fall Through since DCA is disabled. */
	case DCA_PROVIDER_REMOVE:
		if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
			/* without this a class_device is left
			 * hanging around in the sysfs model
			 */
			dca_remove_requester(dev);
			dev_info(&pdev->dev, "DCA disabled\n");
			adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
			wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
		}
		break;
	}

	return 0;
}

static int igb_notify_dca(struct notifier_block *nb, unsigned long event,
			  void *p)
{
	int ret_val;

	ret_val = driver_for_each_device(&igb_driver.driver, NULL, &event,
					 __igb_notify_dca);

	return ret_val ? NOTIFY_BAD : NOTIFY_DONE;
}
#endif /* CONFIG_IGB_DCA */

#ifdef CONFIG_PCI_IOV
static int igb_vf_configure(struct igb_adapter *adapter, int vf)
{
	unsigned char mac_addr[ETH_ALEN];

	eth_zero_addr(mac_addr);
	igb_set_vf_mac(adapter, vf, mac_addr);

	/* By default spoof check is enabled for all VFs */
	adapter->vf_data[vf].spoofchk_enabled = true;

	/* By default VFs are not trusted */
	adapter->vf_data[vf].trusted = false;

	return 0;
}

#endif
static void igb_ping_all_vfs(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ping;
	int i;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		ping = E1000_PF_CONTROL_MSG;
		if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
			ping |= E1000_VT_MSGTYPE_CTS;
		igb_write_mbx(hw, &ping, 1, i);
	}
}

static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vmolr = rd32(E1000_VMOLR(vf));
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	vf_data->flags &= ~(IGB_VF_FLAG_UNI_PROMISC |
			    IGB_VF_FLAG_MULTI_PROMISC);
	vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

	if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
		vmolr |= E1000_VMOLR_MPME;
		vf_data->flags |= IGB_VF_FLAG_MULTI_PROMISC;
		*msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
	} else {
		/* if we have hashes and we are clearing a multicast promisc
		 * flag we need to write the hashes to the MTA as this step
		 * was previously skipped
		 */
		if (vf_data->num_vf_mc_hashes > 30) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			int j;

			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
	}

	wr32(E1000_VMOLR(vf), vmolr);

	/* there are flags left unprocessed, likely not supported */
	if (*msgbuf & E1000_VT_MSGINFO_MASK)
		return -EINVAL;

	return 0;
}

static int igb_set_vf_multicasts(struct igb_adapter *adapter,
				 u32 *msgbuf, u32 vf)
{
	int n = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	u16 *hash_list = (u16 *)&msgbuf[1];
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	int i;

	/* salt away the number of multicast addresses assigned
	 * to this VF for later use to restore when the PF multi cast
	 * list changes
	 */
	vf_data->num_vf_mc_hashes = n;

	/* only up to 30 hash values supported */
	if (n > 30)
		n = 30;

	/* store the hashes for later use */
	for (i = 0; i < n; i++)
		vf_data->vf_mc_hashes[i] = hash_list[i];

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);

	return 0;
}

static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data;
	int i, j;

	for (i = 0; i < adapter->vfs_allocated_count; i++) {
		u32 vmolr = rd32(E1000_VMOLR(i));

		vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);

		vf_data = &adapter->vf_data[i];

		if ((vf_data->num_vf_mc_hashes > 30) ||
		    (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
			vmolr |= E1000_VMOLR_MPME;
		} else if (vf_data->num_vf_mc_hashes) {
			vmolr |= E1000_VMOLR_ROMPE;
			for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
				igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
		}
		wr32(E1000_VMOLR(i), vmolr);
	}
}

static void igb_clear_vf_vfta(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 pool_mask, vlvf_mask, i;

	/* create mask for VF and other pools */
	pool_mask = E1000_VLVF_POOLSEL_MASK;
	vlvf_mask = BIT(E1000_VLVF_POOLSEL_SHIFT + vf);

	/* drop PF from pool bits */
	pool_mask &= ~BIT(E1000_VLVF_POOLSEL_SHIFT +
			  adapter->vfs_allocated_count);

	/* Find the vlan filter for this id */
	for (i = E1000_VLVF_ARRAY_SIZE; i--;) {
		u32 vlvf = rd32(E1000_VLVF(i));
		u32 vfta_mask, vid, vfta;

		/* remove the vf from the pool */
		if (!(vlvf & vlvf_mask))
			continue;

		/* clear out bit from VLVF */
		vlvf ^= vlvf_mask;

		/* if other pools are present, just remove ourselves */
		if (vlvf & pool_mask)
			goto update_vlvfb;

		/* if PF is present, leave VFTA */
		if (vlvf & E1000_VLVF_POOLSEL_MASK)
			goto update_vlvf;

		vid = vlvf & E1000_VLVF_VLANID_MASK;
		vfta_mask = BIT(vid % 32);

		/* clear bit from VFTA */
		vfta = adapter->shadow_vfta[vid / 32];
		if (vfta & vfta_mask)
			hw->mac.ops.write_vfta(hw, vid / 32, vfta ^ vfta_mask);
update_vlvf:
		/* clear pool selection enable */
		if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
			vlvf &= E1000_VLVF_POOLSEL_MASK;
		else
			vlvf = 0;
update_vlvfb:
		/* clear pool bits */
		wr32(E1000_VLVF(i), vlvf);
	}
}
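
/**
 * igb_find_vlvf_entry - locate the VLVF entry holding a given VLAN
 * @hw: pointer to the HW structure
 * @vlan: VLAN id to look up
 *
 * Walks the VLVF array from the top down and returns the index of the
 * matching entry; VLAN 0 short-circuits to index 0, which also doubles
 * as the "not found" return since entry 0 is never scanned.
 **/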
static int igb_find_vlvf_entry(struct e1000_hw *hw, u32 vlan)
{
	u32 vlvf;
	int idx;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/* Search for the VLAN id in the VLVF entries */
	for (idx = E1000_VLVF_ARRAY_SIZE; --idx;) {
		vlvf = rd32(E1000_VLVF(idx));
		if ((vlvf & VLAN_VID_MASK) == vlan)
			break;
	}

	return idx;
}

static void igb_update_pf_vlvf(struct igb_adapter *adapter, u32 vid)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 bits, pf_id;
	int idx;

	idx = igb_find_vlvf_entry(hw, vid);
	if (!idx)
		return;

	/* See if any other pools are set for this VLAN filter
	 * entry other than the PF.
	 */
	pf_id = adapter->vfs_allocated_count + E1000_VLVF_POOLSEL_SHIFT;
	bits = ~BIT(pf_id) & E1000_VLVF_POOLSEL_MASK;
	bits &= rd32(E1000_VLVF(idx));

	/* Disable the filter so this falls into the default pool. */
	if (!bits) {
		if (adapter->flags & IGB_FLAG_VLAN_PROMISC)
			wr32(E1000_VLVF(idx), BIT(pf_id));
		else
			wr32(E1000_VLVF(idx), 0);
	}
}

static s32 igb_set_vf_vlan(struct igb_adapter *adapter, u32 vid,
			   bool add, u32 vf)
{
	int pf_id = adapter->vfs_allocated_count;
	struct e1000_hw *hw = &adapter->hw;
	int err;

	/* If VLAN overlaps with one the PF is currently monitoring make
	 * sure that we are able to allocate a VLVF entry.  This may be
	 * redundant but it guarantees PF will maintain visibility to
	 * the VLAN.
	 */
	if (add && test_bit(vid, adapter->active_vlans)) {
		err = igb_vfta_set(hw, vid, pf_id, true, false);
		if (err)
			return err;
	}

	err = igb_vfta_set(hw, vid, vf, add, false);

	if (add && !err)
		return err;

	/* If we failed to add the VF VLAN or we are removing the VF VLAN
	 * we may need to drop the PF pool bit in order to allow us to free
	 * up the VLVF resources.
	 */
	if (test_bit(vid, adapter->active_vlans) ||
	    (adapter->flags & IGB_FLAG_VLAN_PROMISC))
		igb_update_pf_vlvf(adapter, vid);

	return err;
}

static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;

	if (vid)
		wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
	else
		wr32(E1000_VMVIR(vf), 0);
}

static int igb_enable_port_vlan(struct igb_adapter *adapter, int vf,
				u16 vlan, u8 qos)
{
	int err;

	err = igb_set_vf_vlan(adapter, vlan, true, vf);
	if (err)
		return err;

	igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
	igb_set_vmolr(adapter, vf, !vlan);

	/* revoke access to previous VLAN */
	if (vlan != adapter->vf_data[vf].pf_vlan)
		igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
				false, vf);

	adapter->vf_data[vf].pf_vlan = vlan;
	adapter->vf_data[vf].pf_qos = qos;
	igb_set_vf_vlan_strip(adapter, vf, true);
	dev_info(&adapter->pdev->dev,
		 "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
	if (test_bit(__IGB_DOWN, &adapter->state)) {
		dev_warn(&adapter->pdev->dev,
			 "The VF VLAN has been set, but the PF device is not up.\n");
		dev_warn(&adapter->pdev->dev,
			 "Bring the PF device up before attempting to use the VF device.\n");
	}

	return err;
}

static int igb_disable_port_vlan(struct igb_adapter *adapter, int vf)
{
	/* Restore tagless access via VLAN 0 */
	igb_set_vf_vlan(adapter, 0, true, vf);

	igb_set_vmvir(adapter, 0, vf);
	igb_set_vmolr(adapter, vf, true);

	/* Remove any PF assigned VLAN */
	if (adapter->vf_data[vf].pf_vlan)
		igb_set_vf_vlan(adapter, adapter->vf_data[vf].pf_vlan,
				false, vf);

	adapter->vf_data[vf].pf_vlan = 0;
	adapter->vf_data[vf].pf_qos = 0;
	igb_set_vf_vlan_strip(adapter, vf, false);

	return 0;
}

static int igb_ndo_set_vf_vlan(struct net_device *netdev, int vf,
			       u16 vlan, u8 qos, __be16 vlan_proto)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
		return -EINVAL;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return (vlan || qos) ? igb_enable_port_vlan(adapter, vf, vlan, qos) :
			       igb_disable_port_vlan(adapter, vf);
}

static int igb_set_vf_vlan_msg(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
{
	int add = (msgbuf[0] & E1000_VT_MSGINFO_MASK) >> E1000_VT_MSGINFO_SHIFT;
	int vid = (msgbuf[1] & E1000_VLVF_VLANID_MASK);
	int ret;

	if (adapter->vf_data[vf].pf_vlan)
		return -1;

	/* VLAN 0 is a special case, don't allow it to be removed */
	if (!vid && !add)
		return 0;

	ret = igb_set_vf_vlan(adapter, vid, !!add, vf);
	if (!ret)
		igb_set_vf_vlan_strip(adapter, vf, !!vid);
	return ret;
}

static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];

	/* clear flags - except flag that indicates PF has set the MAC */
	vf_data->flags &= IGB_VF_FLAG_PF_SET_MAC;
	vf_data->last_nack = jiffies;

	/* reset vlans for device */
	igb_clear_vf_vfta(adapter, vf);
	igb_set_vf_vlan(adapter, vf_data->pf_vlan, true, vf);
	igb_set_vmvir(adapter, vf_data->pf_vlan |
			       (vf_data->pf_qos << VLAN_PRIO_SHIFT), vf);
	igb_set_vmolr(adapter, vf, !vf_data->pf_vlan);
	igb_set_vf_vlan_strip(adapter, vf, !!(vf_data->pf_vlan));

	/* reset multicast table array for vf */
	adapter->vf_data[vf].num_vf_mc_hashes = 0;

	/* Flush and reset the mta with the new values */
	igb_set_rx_mode(adapter->netdev);
}

static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
{
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;

	/* clear mac address as we were hotplug removed/added */
	if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
		eth_zero_addr(vf_mac);

	/* process remaining reset events */
	igb_vf_reset(adapter, vf);
}

static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
	u32 reg, msgbuf[3];
	u8 *addr = (u8 *)(&msgbuf[1]);

	/* process all the same items cleared in a function level reset */
	igb_vf_reset(adapter, vf);

	/* set vf mac address */
	igb_set_vf_mac(adapter, vf, vf_mac);

	/* enable transmit and receive for vf */
	reg = rd32(E1000_VFTE);
	wr32(E1000_VFTE, reg | BIT(vf));
	reg = rd32(E1000_VFRE);
	wr32(E1000_VFRE, reg | BIT(vf));

	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;

	/* reply to reset with ack and vf mac address */
	if (!is_zero_ether_addr(vf_mac)) {
		msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
		memcpy(addr, vf_mac, ETH_ALEN);
	} else {
		msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
	}
	igb_write_mbx(hw, msgbuf, 3, vf);
}

static void igb_flush_mac_table(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int i;

	for (i = 0; i < hw->mac.rar_entry_count; i++) {
		adapter->mac_table[i].state &= ~IGB_MAC_STATE_IN_USE;
		memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
		adapter->mac_table[i].queue = 0;
		igb_rar_set_index(adapter, i);
	}
}

static int igb_available_rars(struct igb_adapter *adapter, u8 queue)
{
	struct e1000_hw *hw = &adapter->hw;
	/* do not count rar entries reserved for VFs MAC addresses */
	int rar_entries = hw->mac.rar_entry_count -
			  adapter->vfs_allocated_count;
	int i, count = 0;

	for (i = 0; i < rar_entries; i++) {
		/* do not count default entries */
		if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT)
			continue;

		/* do not count "in use" entries for different queues */
		if ((adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE) &&
		    (adapter->mac_table[i].queue != queue))
			continue;

		count++;
	}

	return count;
}

/* Set default MAC address for the PF in the first RAR entry */
static void igb_set_default_mac_filter(struct igb_adapter *adapter)
{
	struct igb_mac_addr *mac_table = &adapter->mac_table[0];

	ether_addr_copy(mac_table->addr, adapter->hw.mac.addr);
	mac_table->queue = adapter->vfs_allocated_count;
	mac_table->state = IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;

	igb_rar_set_index(adapter, 0);
}

/* If the filter to be added and an already existing filter express
 * the same address and address type, it should be possible to only
 * override the other configurations, for example the queue to steer
 * traffic.
 */
static bool igb_mac_entry_can_be_used(const struct igb_mac_addr *entry,
				      const u8 *addr, const u8 flags)
{
	if (!(entry->state & IGB_MAC_STATE_IN_USE))
		return true;

	if ((entry->state & IGB_MAC_STATE_SRC_ADDR) !=
	    (flags & IGB_MAC_STATE_SRC_ADDR))
		return false;

	if (!ether_addr_equal(addr, entry->addr))
		return false;

	return true;
}

/* Add a MAC filter for 'addr' directing matching traffic to 'queue',
 * 'flags' is used to indicate what kind of match is made, match is by
 * default for the destination address, if matching by source address
 * is desired the flag IGB_MAC_STATE_SRC_ADDR can be used.
 */
static int igb_add_mac_filter_flags(struct igb_adapter *adapter,
				    const u8 *addr, const u8 queue,
				    const u8 flags)
{
	struct e1000_hw *hw = &adapter->hw;
	int rar_entries = hw->mac.rar_entry_count -
			  adapter->vfs_allocated_count;
	int i;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	/* Search for the first empty entry in the MAC table.
	 * Do not touch entries at the end of the table reserved for the VF MAC
	 * addresses.
	 */
	for (i = 0; i < rar_entries; i++) {
		if (!igb_mac_entry_can_be_used(&adapter->mac_table[i],
					       addr, flags))
			continue;

		ether_addr_copy(adapter->mac_table[i].addr, addr);
		adapter->mac_table[i].queue = queue;
		adapter->mac_table[i].state |= IGB_MAC_STATE_IN_USE | flags;

		igb_rar_set_index(adapter, i);
		return i;
	}

	return -ENOSPC;
}

static int igb_add_mac_filter(struct igb_adapter *adapter, const u8 *addr,
			      const u8 queue)
{
	return igb_add_mac_filter_flags(adapter, addr, queue, 0);
}

/* Remove a MAC filter for 'addr' directing matching traffic to
 * 'queue', 'flags' is used to indicate what kind of match need to be
 * removed, match is by default for the destination address, if
 * matching by source address is to be removed the flag
 * IGB_MAC_STATE_SRC_ADDR can be used.
 */
static int igb_del_mac_filter_flags(struct igb_adapter *adapter,
				    const u8 *addr, const u8 queue,
				    const u8 flags)
{
	struct e1000_hw *hw = &adapter->hw;
	int rar_entries = hw->mac.rar_entry_count -
			  adapter->vfs_allocated_count;
	int i;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	/* Search for matching entry in the MAC table based on given address
	 * and queue.  Do not touch entries at the end of the table reserved
	 * for the VF MAC addresses.
	 */
	for (i = 0; i < rar_entries; i++) {
		if (!(adapter->mac_table[i].state & IGB_MAC_STATE_IN_USE))
			continue;
		if ((adapter->mac_table[i].state & flags) != flags)
			continue;
		if (adapter->mac_table[i].queue != queue)
			continue;
		if (!ether_addr_equal(adapter->mac_table[i].addr, addr))
			continue;

		/* When a filter for the default address is "deleted",
		 * we return it to its initial configuration
		 */
		if (adapter->mac_table[i].state & IGB_MAC_STATE_DEFAULT) {
			adapter->mac_table[i].state =
				IGB_MAC_STATE_DEFAULT | IGB_MAC_STATE_IN_USE;
			adapter->mac_table[i].queue =
				adapter->vfs_allocated_count;
		} else {
			adapter->mac_table[i].state = 0;
			adapter->mac_table[i].queue = 0;
			memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
		}

		igb_rar_set_index(adapter, i);
		return 0;
	}

	return -ENOENT;
}

static int igb_del_mac_filter(struct igb_adapter *adapter, const u8 *addr,
			      const u8 queue)
{
	return igb_del_mac_filter_flags(adapter, addr, queue, 0);
}

int igb_add_mac_steering_filter(struct igb_adapter *adapter,
				const u8 *addr, u8 queue, u8 flags)
{
	struct e1000_hw *hw = &adapter->hw;

	/* In theory, this should be supported on 82575 as well, but
	 * that part wasn't easily accessible during development.
	 */
	if (hw->mac.type != e1000_i210)
		return -EOPNOTSUPP;

	return igb_add_mac_filter_flags(adapter, addr, queue,
					IGB_MAC_STATE_QUEUE_STEERING | flags);
}

int igb_del_mac_steering_filter(struct igb_adapter *adapter,
				const u8 *addr, u8 queue, u8 flags)
{
	return igb_del_mac_filter_flags(adapter, addr, queue,
					IGB_MAC_STATE_QUEUE_STEERING | flags);
}

static int igb_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	int ret;

	ret = igb_add_mac_filter(adapter, addr, adapter->vfs_allocated_count);

	return min_t(int, ret, 0);
}

static int igb_uc_unsync(struct net_device *netdev, const unsigned char *addr)
{
	struct igb_adapter *adapter = netdev_priv(netdev);

	igb_del_mac_filter(adapter, addr, adapter->vfs_allocated_count);

	return 0;
}

static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
				 const u32 info, const u8 *addr)
{
	struct pci_dev *pdev = adapter->pdev;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	struct list_head *pos;
	struct vf_mac_filter *entry = NULL;
	int ret = 0;

	switch (info) {
	case E1000_VF_MAC_FILTER_CLR:
		/* remove all unicast MAC filters related to the current VF */
		list_for_each(pos, &adapter->vf_macs.l) {
			entry = list_entry(pos, struct vf_mac_filter, l);
			if (entry->vf == vf) {
				entry->vf = -1;
				entry->free = true;
				igb_del_mac_filter(adapter, entry->vf_mac, vf);
			}
		}
		break;
	case E1000_VF_MAC_FILTER_ADD:
		if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
		    !vf_data->trusted) {
			dev_warn(&pdev->dev,
				 "VF %d requested MAC filter but is administratively denied\n",
				 vf);
			return -EINVAL;
		}
		if (!is_valid_ether_addr(addr)) {
			dev_warn(&pdev->dev,
				 "VF %d attempted to set invalid MAC filter\n",
				 vf);
			return -EINVAL;
		}

		/* try to find empty slot in the list */
		list_for_each(pos, &adapter->vf_macs.l) {
			entry = list_entry(pos, struct vf_mac_filter, l);
			if (entry->free)
				break;
		}

		if (entry && entry->free) {
			entry->free = false;
			entry->vf = vf;
			ether_addr_copy(entry->vf_mac, addr);

			ret = igb_add_mac_filter(adapter, addr, vf);
			ret = min_t(int, ret, 0);
		} else {
			ret = -ENOSPC;
		}

		if (ret == -ENOSPC)
			dev_warn(&pdev->dev,
				 "VF %d has requested MAC filter but there is no space for it\n",
				 vf);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
{
	struct pci_dev *pdev = adapter->pdev;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 info = msg[0] & E1000_VT_MSGINFO_MASK;

	/* The VF MAC Address is stored in a packed array of bytes
	 * starting at the second 32 bit word of the msg array
	 */
	unsigned char *addr = (unsigned char *)&msg[1];
	int ret = 0;

	if (!info) {
		if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
		    !vf_data->trusted) {
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively set MAC address\nReload the VF driver to resume operations\n",
				 vf);
			return -EINVAL;
		}

		if (!is_valid_ether_addr(addr)) {
			dev_warn(&pdev->dev,
				 "VF %d attempted to set invalid MAC\n",
				 vf);
			return -EINVAL;
		}

		ret = igb_set_vf_mac(adapter, vf, addr);
	} else {
		ret = igb_set_vf_mac_filter(adapter, vf, info, addr);
	}

	return ret;
}

static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	u32 msg = E1000_VT_MSGTYPE_NACK;

	/* if device isn't clear to send it shouldn't be reading either */
	if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
	    time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
		igb_write_mbx(hw, &msg, 1, vf);
		vf_data->last_nack = jiffies;
	}
}

static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
{
	struct pci_dev *pdev = adapter->pdev;
	u32 msgbuf[E1000_VFMAILBOX_SIZE];
	struct e1000_hw *hw = &adapter->hw;
	struct vf_data_storage *vf_data = &adapter->vf_data[vf];
	s32 retval;

	retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf, false);

	if (retval) {
		/* if receive failed revoke VF CTS stats and restart init */
		dev_err(&pdev->dev, "Error receiving message from VF\n");
		vf_data->flags &= ~IGB_VF_FLAG_CTS;
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			goto unlock;
		goto out;
	}

	/* this is a message we already processed, do nothing */
	if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
		goto unlock;

	/* until the vf completes a reset it should not be
	 * allowed to start any configuration.
	 */
	if (msgbuf[0] == E1000_VF_RESET) {
		/* unlocks mailbox */
		igb_vf_reset_msg(adapter, vf);
		return;
	}

	if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
		if (!time_after(jiffies, vf_data->last_nack + (2 * HZ)))
			goto unlock;
		retval = -1;
		goto out;
	}

	switch ((msgbuf[0] & 0xFFFF)) {
	case E1000_VF_SET_MAC_ADDR:
		retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_PROMISC:
		retval = igb_set_vf_promisc(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_MULTICAST:
		retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
		break;
	case E1000_VF_SET_LPE:
		retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
		break;
	case E1000_VF_SET_VLAN:
		retval = -1;
		if (vf_data->pf_vlan)
			dev_warn(&pdev->dev,
				 "VF %d attempted to override administratively set VLAN tag\nReload the VF driver to resume operations\n",
				 vf);
		else
			retval = igb_set_vf_vlan_msg(adapter, msgbuf, vf);
		break;
	default:
		dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
		retval = -1;
		break;
	}

	msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
out:
	/* notify the VF of the results of what it sent us */
	if (retval)
		msgbuf[0] |= E1000_VT_MSGTYPE_NACK;
	else
		msgbuf[0] |= E1000_VT_MSGTYPE_ACK;

	/* unlocks mailbox */
	igb_write_mbx(hw, msgbuf, 1, vf);
	return;
unlock:
	igb_unlock_mbx(hw, vf);
}
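
/**
 * igb_msg_task - poll the mailbox for every allocated VF
 * @adapter: board private structure
 *
 * For each VF, services any pending reset request, then any pending
 * message, then any pending ack, using the mailbox check helpers to
 * avoid reading boxes with nothing outstanding.
 **/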
static void igb_msg_task(struct igb_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 vf;

	for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
		/* process any reset requests */
		if (!igb_check_for_rst(hw, vf))
			igb_vf_reset_event(adapter, vf);

		/* process any messages pending */
		if (!igb_check_for_msg(hw, vf))
			igb_rcv_msg_from_vf(adapter, vf);

		/* process any acks */
		if (!igb_check_for_ack(hw, vf))
			igb_rcv_ack_from_vf(adapter, vf);
	}
}

/**
 * igb_set_uta - Set unicast filter table address
 * @adapter: board private structure
 * @set: boolean indicating if we are setting or clearing bits
 *
 * The unicast table address is a register array of 32-bit registers.
 * The table is meant to be used in a way similar to how the MTA is used
 * however due to certain limitations in the hardware it is necessary to
 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscuous
 * enable bit to allow vlan tag stripping when promiscuous mode is enabled
 **/
static void igb_set_uta(struct igb_adapter *adapter, bool set)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 uta = set ? ~0 : 0;
	int i;

	/* we only need to do this if VMDq is enabled */
	if (!adapter->vfs_allocated_count)
		return;

	for (i = hw->mac.uta_reg_count; i--;)
		array_wr32(E1000_UTA, i, uta);
}

/**
 * igb_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr_msi(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* read ICR disables interrupts using IAM */
	u32 icr = rd32(E1000_ICR);

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (icr & E1000_ICR_TS)
		igb_tsync_interrupt(adapter);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}

/**
 * igb_intr - Legacy Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t igb_intr(int irq, void *data)
{
	struct igb_adapter *adapter = data;
	struct igb_q_vector *q_vector = adapter->q_vector[0];
	struct e1000_hw *hw = &adapter->hw;
	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
	 * need for the IMC write
	 */
	u32 icr = rd32(E1000_ICR);

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	igb_write_itr(q_vector);

	if (icr & E1000_ICR_DRSTA)
		schedule_work(&adapter->reset_task);

	if (icr & E1000_ICR_DOUTSYNC) {
		/* HW is reporting DMA is out of sync */
		adapter->stats.doosync++;
	}

	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
		hw->mac.get_link_status = 1;
		/* guard against interrupt when we're going down */
		if (!test_bit(__IGB_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	if (icr & E1000_ICR_TS)
		igb_tsync_interrupt(adapter);

	napi_schedule(&q_vector->napi);

	return IRQ_HANDLED;
}
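
/**
 * igb_ring_irq_enable - re-arm interrupts for a vector after NAPI polling
 * @q_vector: vector that has finished its poll
 *
 * Updates the adaptive ITR for the vector when interrupt moderation is
 * in use, then re-enables its EIMS bit (MSI-X) or the shared interrupt,
 * unless the adapter is going down.
 **/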
  6376. static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
  6377. {
  6378. struct igb_adapter *adapter = q_vector->adapter;
  6379. struct e1000_hw *hw = &adapter->hw;
  6380. if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
  6381. (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
  6382. if ((adapter->num_q_vectors == 1) && !adapter->vf_data)
  6383. igb_set_itr(q_vector);
  6384. else
  6385. igb_update_ring_itr(q_vector);
  6386. }
  6387. if (!test_bit(__IGB_DOWN, &adapter->state)) {
  6388. if (adapter->flags & IGB_FLAG_HAS_MSIX)
  6389. wr32(E1000_EIMS, q_vector->eims_value);
  6390. else
  6391. igb_irq_enable(adapter);
  6392. }
  6393. }
  6394. /**
  6395. * igb_poll - NAPI Rx polling callback
  6396. * @napi: napi polling structure
  6397. * @budget: count of how many packets we should handle
  6398. **/
static int igb_poll(struct napi_struct *napi, int budget)
{
    struct igb_q_vector *q_vector = container_of(napi,
                                                 struct igb_q_vector,
                                                 napi);
    bool clean_complete = true;
    int work_done = 0;

#ifdef CONFIG_IGB_DCA
    if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
        igb_update_dca(q_vector);
#endif
    if (q_vector->tx.ring)
        clean_complete = igb_clean_tx_irq(q_vector, budget);

    if (q_vector->rx.ring) {
        int cleaned = igb_clean_rx_irq(q_vector, budget);

        work_done += cleaned;
        if (cleaned >= budget)
            clean_complete = false;
    }

    /* If all work not completed, return budget and keep polling */
    if (!clean_complete)
        return budget;

    /* If not enough Rx work done, exit the polling mode */
    napi_complete_done(napi, work_done);
    igb_ring_irq_enable(q_vector);

    return 0;
}

/**
 * igb_clean_tx_irq - Reclaim resources after transmit completes
 * @q_vector: pointer to q_vector containing needed info
 * @napi_budget: Used to determine if we are in netpoll
 *
 * returns true if ring is completely cleaned
 **/
static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
{
    struct igb_adapter *adapter = q_vector->adapter;
    struct igb_ring *tx_ring = q_vector->tx.ring;
    struct igb_tx_buffer *tx_buffer;
    union e1000_adv_tx_desc *tx_desc;
    unsigned int total_bytes = 0, total_packets = 0;
    unsigned int budget = q_vector->tx.work_limit;
    unsigned int i = tx_ring->next_to_clean;

    if (test_bit(__IGB_DOWN, &adapter->state))
        return true;

    tx_buffer = &tx_ring->tx_buffer_info[i];
    tx_desc = IGB_TX_DESC(tx_ring, i);
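
    /* Bias the index negative (i stays in [-count, 0) from here on) so
     * that the ring-wrap checks below reduce to testing for !i: when i
     * climbs back to zero we have walked off the end of the ring and
     * must rewind to entry 0.
     */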
    i -= tx_ring->count;

    do {
        union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;

        /* if next_to_watch is not set then there is no work pending */
        if (!eop_desc)
            break;

        /* prevent any other reads prior to eop_desc */
        smp_rmb();

        /* if DD is not set pending work has not been completed */
        if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
            break;

        /* clear next_to_watch to prevent false hangs */
        tx_buffer->next_to_watch = NULL;

        /* update the statistics for this packet */
        total_bytes += tx_buffer->bytecount;
        total_packets += tx_buffer->gso_segs;

        /* free the skb */
        napi_consume_skb(tx_buffer->skb, napi_budget);

        /* unmap skb header data */
        dma_unmap_single(tx_ring->dev,
                         dma_unmap_addr(tx_buffer, dma),
                         dma_unmap_len(tx_buffer, len),
                         DMA_TO_DEVICE);

        /* clear tx_buffer data */
        dma_unmap_len_set(tx_buffer, len, 0);

        /* clear last DMA location and unmap remaining buffers */
        while (tx_desc != eop_desc) {
            tx_buffer++;
            tx_desc++;
            i++;
            if (unlikely(!i)) {
                i -= tx_ring->count;
                tx_buffer = tx_ring->tx_buffer_info;
                tx_desc = IGB_TX_DESC(tx_ring, 0);
            }

            /* unmap any remaining paged data */
            if (dma_unmap_len(tx_buffer, len)) {
                dma_unmap_page(tx_ring->dev,
                               dma_unmap_addr(tx_buffer, dma),
                               dma_unmap_len(tx_buffer, len),
                               DMA_TO_DEVICE);
                dma_unmap_len_set(tx_buffer, len, 0);
            }
        }

        /* move us one more past the eop_desc for start of next pkt */
        tx_buffer++;
        tx_desc++;
        i++;
        if (unlikely(!i)) {
            i -= tx_ring->count;
            tx_buffer = tx_ring->tx_buffer_info;
            tx_desc = IGB_TX_DESC(tx_ring, 0);
        }

        /* issue prefetch for next Tx descriptor */
        prefetch(tx_desc);

        /* update budget accounting */
        budget--;
    } while (likely(budget));
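
    /* Report the completed work to Byte Queue Limits so the stack can
     * keep the amount of data queued on this Tx ring appropriately
     * sized.
     */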
    netdev_tx_completed_queue(txring_txq(tx_ring),
                              total_packets, total_bytes);

    i += tx_ring->count;
    tx_ring->next_to_clean = i;

    u64_stats_update_begin(&tx_ring->tx_syncp);
    tx_ring->tx_stats.bytes += total_bytes;
    tx_ring->tx_stats.packets += total_packets;
    u64_stats_update_end(&tx_ring->tx_syncp);

    q_vector->tx.total_bytes += total_bytes;
    q_vector->tx.total_packets += total_packets;

    if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
        struct e1000_hw *hw = &adapter->hw;

        /* Detect a transmit hang in hardware; this serializes the
         * check with the clearing of time_stamp and movement of i
         */
        clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
        if (tx_buffer->next_to_watch &&
            time_after(jiffies, tx_buffer->time_stamp +
                       (adapter->tx_timeout_factor * HZ)) &&
            !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {

            /* detected Tx unit hang */
            dev_err(tx_ring->dev,
                    "Detected Tx Unit Hang\n"
                    "  Tx Queue             <%d>\n"
                    "  TDH                  <%x>\n"
                    "  TDT                  <%x>\n"
                    "  next_to_use          <%x>\n"
                    "  next_to_clean        <%x>\n"
                    "buffer_info[next_to_clean]\n"
                    "  time_stamp           <%lx>\n"
                    "  next_to_watch        <%p>\n"
                    "  jiffies              <%lx>\n"
                    "  desc.status          <%x>\n",
                    tx_ring->queue_index,
                    rd32(E1000_TDH(tx_ring->reg_idx)),
                    readl(tx_ring->tail),
                    tx_ring->next_to_use,
                    tx_ring->next_to_clean,
                    tx_buffer->time_stamp,
                    tx_buffer->next_to_watch,
                    jiffies,
                    tx_buffer->next_to_watch->wb.status);
            netif_stop_subqueue(tx_ring->netdev,
                                tx_ring->queue_index);

            /* we are about to reset, no point in enabling stuff */
            return true;
        }
    }
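
    /* Wake the queue only once there is room for at least two more
     * worst-case packets, so the queue does not flap between stopped
     * and running on every completion.
     */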
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
    if (unlikely(total_packets &&
                 netif_carrier_ok(tx_ring->netdev) &&
                 igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
        /* Make sure that anybody stopping the queue after this
         * sees the new next_to_clean.
         */
        smp_mb();
        if (__netif_subqueue_stopped(tx_ring->netdev,
                                     tx_ring->queue_index) &&
            !(test_bit(__IGB_DOWN, &adapter->state))) {
            netif_wake_subqueue(tx_ring->netdev,
                                tx_ring->queue_index);

            u64_stats_update_begin(&tx_ring->tx_syncp);
            tx_ring->tx_stats.restart_queue++;
            u64_stats_update_end(&tx_ring->tx_syncp);
        }
    }

    return !!budget;
}

/**
 * igb_reuse_rx_page - page flip buffer and store it back on the ring
 * @rx_ring: rx descriptor ring to store buffers on
 * @old_buff: donor buffer to have page reused
 *
 * Synchronizes page for reuse by the adapter
 **/
static void igb_reuse_rx_page(struct igb_ring *rx_ring,
                              struct igb_rx_buffer *old_buff)
{
    struct igb_rx_buffer *new_buff;
    u16 nta = rx_ring->next_to_alloc;

    new_buff = &rx_ring->rx_buffer_info[nta];

    /* update, and store next to alloc */
    nta++;
    rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;

    /* Transfer page from old buffer to new buffer.
     * Move each member individually to avoid possible store
     * forwarding stalls.
     */
    new_buff->dma = old_buff->dma;
    new_buff->page = old_buff->page;
    new_buff->page_offset = old_buff->page_offset;
    new_buff->pagecnt_bias = old_buff->pagecnt_bias;
}

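/* A received page may only be recycled when it is local to this NUMA
 * node and was not handed out from the pfmemalloc emergency reserves;
 * remote or reserved pages go back to the page allocator instead.
 */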
static inline bool igb_page_is_reserved(struct page *page)
{
    return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}

static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer)
{
    unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
    struct page *page = rx_buffer->page;

    /* avoid re-using remote pages */
    if (unlikely(igb_page_is_reserved(page)))
        return false;

#if (PAGE_SIZE < 8192)
    /* if we are only owner of page we can reuse it */
    if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
        return false;
#else
#define IGB_LAST_OFFSET \
    (SKB_WITH_OVERHEAD(PAGE_SIZE) - IGB_RXBUFFER_2048)

    if (rx_buffer->page_offset > IGB_LAST_OFFSET)
        return false;
#endif

    /* If we have drained the page fragment pool we need to update
     * the pagecnt_bias and page count so that we fully restock the
     * number of references the driver holds.
     */
    if (unlikely(!pagecnt_bias)) {
        page_ref_add(page, USHRT_MAX);
        rx_buffer->pagecnt_bias = USHRT_MAX;
    }

    return true;
}

/**
 * igb_add_rx_frag - Add contents of Rx buffer to sk_buff
 * @rx_ring: rx descriptor ring to transact packets on
 * @rx_buffer: buffer containing page to add
 * @skb: sk_buff to place the data into
 * @size: size of buffer to be added
 *
 * This function will add the data contained in rx_buffer->page to the skb.
 **/
static void igb_add_rx_frag(struct igb_ring *rx_ring,
                            struct igb_rx_buffer *rx_buffer,
                            struct sk_buff *skb,
                            unsigned int size)
{
#if (PAGE_SIZE < 8192)
    unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else
    unsigned int truesize = ring_uses_build_skb(rx_ring) ?
                            SKB_DATA_ALIGN(IGB_SKB_PAD + size) :
                            SKB_DATA_ALIGN(size);
#endif

    skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
                    rx_buffer->page_offset, size, truesize);
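
    /* On 4K-page systems each page is split into two half-page buffers,
     * so XORing the offset with truesize flips to the other half for
     * the next use; on larger pages the offset simply advances through
     * the page instead.
     */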
#if (PAGE_SIZE < 8192)
    rx_buffer->page_offset ^= truesize;
#else
    rx_buffer->page_offset += truesize;
#endif
}

static struct sk_buff *igb_construct_skb(struct igb_ring *rx_ring,
                                         struct igb_rx_buffer *rx_buffer,
                                         union e1000_adv_rx_desc *rx_desc,
                                         unsigned int size)
{
    void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
    unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else
    unsigned int truesize = SKB_DATA_ALIGN(size);
#endif
    unsigned int headlen;
    struct sk_buff *skb;

    /* prefetch first cache line of first page */
    prefetch(va);
#if L1_CACHE_BYTES < 128
    prefetch(va + L1_CACHE_BYTES);
#endif

    /* allocate a skb to store the frags */
    skb = napi_alloc_skb(&rx_ring->q_vector->napi, IGB_RX_HDR_LEN);
    if (unlikely(!skb))
        return NULL;

    if (unlikely(igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))) {
        igb_ptp_rx_pktstamp(rx_ring->q_vector, va, skb);
        va += IGB_TS_HDR_LEN;
        size -= IGB_TS_HDR_LEN;
    }

    /* Determine available headroom for copy */
    headlen = size;
    if (headlen > IGB_RX_HDR_LEN)
        headlen = eth_get_headlen(va, IGB_RX_HDR_LEN);

    /* align pull length to size of long to optimize memcpy performance */
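    /* Note: rounding up may copy a few bytes past headlen; this stays
     * in bounds here because headlen is capped at IGB_RX_HDR_LEN and
     * the skb allocation leaves alignment slack past the tail.
     */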
    memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));

    /* update all of the pointers */
    size -= headlen;
    if (size) {
        skb_add_rx_frag(skb, 0, rx_buffer->page,
                        (va + headlen) - page_address(rx_buffer->page),
                        size, truesize);
#if (PAGE_SIZE < 8192)
        rx_buffer->page_offset ^= truesize;
#else
        rx_buffer->page_offset += truesize;
#endif
    } else {
        rx_buffer->pagecnt_bias++;
    }

    return skb;
}

static struct sk_buff *igb_build_skb(struct igb_ring *rx_ring,
                                     struct igb_rx_buffer *rx_buffer,
                                     union e1000_adv_rx_desc *rx_desc,
                                     unsigned int size)
{
    void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
    unsigned int truesize = igb_rx_pg_size(rx_ring) / 2;
#else
    unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
                            SKB_DATA_ALIGN(IGB_SKB_PAD + size);
#endif
    struct sk_buff *skb;

    /* prefetch first cache line of first page */
    prefetch(va);
#if L1_CACHE_BYTES < 128
    prefetch(va + L1_CACHE_BYTES);
#endif

    /* build an skb around the page buffer */
    skb = build_skb(va - IGB_SKB_PAD, truesize);
    if (unlikely(!skb))
        return NULL;

    /* update pointers within the skb to store the data */
    skb_reserve(skb, IGB_SKB_PAD);
    __skb_put(skb, size);

    /* pull timestamp out of packet data */
    if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
        igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
        __skb_pull(skb, IGB_TS_HDR_LEN);
    }

    /* update buffer offset */
#if (PAGE_SIZE < 8192)
    rx_buffer->page_offset ^= truesize;
#else
    rx_buffer->page_offset += truesize;
#endif

    return skb;
}

static inline void igb_rx_checksum(struct igb_ring *ring,
                                   union e1000_adv_rx_desc *rx_desc,
                                   struct sk_buff *skb)
{
    skb_checksum_none_assert(skb);

    /* Ignore Checksum bit is set */
    if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
        return;

    /* Rx checksum disabled via ethtool */
    if (!(ring->netdev->features & NETIF_F_RXCSUM))
        return;

    /* TCP/UDP checksum error bit is set */
    if (igb_test_staterr(rx_desc,
                         E1000_RXDEXT_STATERR_TCPE |
                         E1000_RXDEXT_STATERR_IPE)) {
        /* Work around errata with SCTP packets where the TCPE aka
         * L4E bit is set incorrectly on 64 byte (60 byte w/o CRC)
         * packets, i.e. let the stack verify the crc32c for those.
         */
        if (!((skb->len == 60) &&
              test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
            u64_stats_update_begin(&ring->rx_syncp);
            ring->rx_stats.csum_err++;
            u64_stats_update_end(&ring->rx_syncp);
        }
        /* let the stack verify checksum errors */
        return;
    }

    /* It must be a TCP or UDP packet with a valid checksum */
    if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
                                  E1000_RXD_STAT_UDPCS))
        skb->ip_summed = CHECKSUM_UNNECESSARY;

    dev_dbg(ring->dev, "cksum success: bits %08X\n",
            le32_to_cpu(rx_desc->wb.upper.status_error));
}

static inline void igb_rx_hash(struct igb_ring *ring,
                               union e1000_adv_rx_desc *rx_desc,
                               struct sk_buff *skb)
{
    if (ring->netdev->features & NETIF_F_RXHASH)
        skb_set_hash(skb,
                     le32_to_cpu(rx_desc->wb.lower.hi_dword.rss),
                     PKT_HASH_TYPE_L3);
}

/**
 * igb_is_non_eop - process handling of non-EOP buffers
 * @rx_ring: Rx ring being processed
 * @rx_desc: Rx descriptor for current buffer
 *
 * This function updates next to clean.  If the buffer is an EOP buffer
 * this function exits returning false, otherwise it returns true
 * indicating that this is in fact a non-EOP buffer.
 **/
static bool igb_is_non_eop(struct igb_ring *rx_ring,
                           union e1000_adv_rx_desc *rx_desc)
{
    u32 ntc = rx_ring->next_to_clean + 1;

    /* fetch, update, and store next to clean */
    ntc = (ntc < rx_ring->count) ? ntc : 0;
    rx_ring->next_to_clean = ntc;

    prefetch(IGB_RX_DESC(rx_ring, ntc));

    if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
        return false;

    return true;
}

/**
 * igb_cleanup_headers - Correct corrupted or empty headers
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being fixed
 *
 * Address the case where we are pulling data in on pages only
 * and as such no data is present in the skb header.
 *
 * In addition if skb is not at least 60 bytes we need to pad it so that
 * it is large enough to qualify as a valid Ethernet frame.
 *
 * Returns true if an error was encountered and skb was freed.
 **/
static bool igb_cleanup_headers(struct igb_ring *rx_ring,
                                union e1000_adv_rx_desc *rx_desc,
                                struct sk_buff *skb)
{
    if (unlikely((igb_test_staterr(rx_desc,
                                   E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
        struct net_device *netdev = rx_ring->netdev;

        if (!(netdev->features & NETIF_F_RXALL)) {
            dev_kfree_skb_any(skb);
            return true;
        }
    }

    /* if eth_skb_pad returns an error the skb was freed */
    if (eth_skb_pad(skb))
        return true;

    return false;
}

/**
 * igb_process_skb_fields - Populate skb header fields from Rx descriptor
 * @rx_ring: rx descriptor ring packet is being transacted on
 * @rx_desc: pointer to the EOP Rx descriptor
 * @skb: pointer to current skb being populated
 *
 * This function checks the ring, descriptor, and packet information in
 * order to populate the hash, checksum, VLAN, timestamp, protocol, and
 * other fields within the skb.
 **/
static void igb_process_skb_fields(struct igb_ring *rx_ring,
                                   union e1000_adv_rx_desc *rx_desc,
                                   struct sk_buff *skb)
{
    struct net_device *dev = rx_ring->netdev;

    igb_rx_hash(rx_ring, rx_desc, skb);

    igb_rx_checksum(rx_ring, rx_desc, skb);

    if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
        !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
        igb_ptp_rx_rgtstamp(rx_ring->q_vector, skb);

    if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
        igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
        u16 vid;

        if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
            test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
            vid = be16_to_cpu(rx_desc->wb.upper.vlan);
        else
            vid = le16_to_cpu(rx_desc->wb.upper.vlan);

        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
    }

    skb_record_rx_queue(skb, rx_ring->queue_index);

    skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}

static struct igb_rx_buffer *igb_get_rx_buffer(struct igb_ring *rx_ring,
                                               const unsigned int size)
{
    struct igb_rx_buffer *rx_buffer;

    rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
    prefetchw(rx_buffer->page);

    /* we are reusing so sync this buffer for CPU use */
    dma_sync_single_range_for_cpu(rx_ring->dev,
                                  rx_buffer->dma,
                                  rx_buffer->page_offset,
                                  size,
                                  DMA_FROM_DEVICE);
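
    /* Spend one of the page references the driver holds (tracked in
     * pagecnt_bias); it is either handed to the stack along with the
     * data or refunded on the reuse and error paths.
     */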
    rx_buffer->pagecnt_bias--;

    return rx_buffer;
}

static void igb_put_rx_buffer(struct igb_ring *rx_ring,
                              struct igb_rx_buffer *rx_buffer)
{
    if (igb_can_reuse_rx_page(rx_buffer)) {
        /* hand second half of page back to the ring */
        igb_reuse_rx_page(rx_ring, rx_buffer);
    } else {
        /* We are not reusing the buffer so unmap it and free
         * any references we are holding to it
         */
        dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
                             igb_rx_pg_size(rx_ring), DMA_FROM_DEVICE,
                             IGB_RX_DMA_ATTR);
        __page_frag_cache_drain(rx_buffer->page,
                                rx_buffer->pagecnt_bias);
    }

    /* clear contents of rx_buffer */
    rx_buffer->page = NULL;
}

static int igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
{
    struct igb_ring *rx_ring = q_vector->rx.ring;
    struct sk_buff *skb = rx_ring->skb;
    unsigned int total_bytes = 0, total_packets = 0;
    u16 cleaned_count = igb_desc_unused(rx_ring);
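
    /* cleaned_count tracks how many descriptors we owe the hardware;
     * restocking is batched (IGB_RX_BUFFER_WRITE at a time) because a
     * tail write per buffer would be far too slow.
     */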
    while (likely(total_packets < budget)) {
        union e1000_adv_rx_desc *rx_desc;
        struct igb_rx_buffer *rx_buffer;
        unsigned int size;

        /* return some buffers to hardware, one at a time is too slow */
        if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
            igb_alloc_rx_buffers(rx_ring, cleaned_count);
            cleaned_count = 0;
        }

        rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
        size = le16_to_cpu(rx_desc->wb.upper.length);
        if (!size)
            break;

        /* This memory barrier is needed to keep us from reading
         * any other fields out of the rx_desc until we know the
         * descriptor has been written back
         */
        dma_rmb();

        rx_buffer = igb_get_rx_buffer(rx_ring, size);

        /* retrieve a buffer from the ring */
        if (skb)
            igb_add_rx_frag(rx_ring, rx_buffer, skb, size);
        else if (ring_uses_build_skb(rx_ring))
            skb = igb_build_skb(rx_ring, rx_buffer, rx_desc, size);
        else
            skb = igb_construct_skb(rx_ring, rx_buffer,
                                    rx_desc, size);

        /* exit if we failed to retrieve a buffer */
        if (!skb) {
            rx_ring->rx_stats.alloc_failed++;
            rx_buffer->pagecnt_bias++;
            break;
        }

        igb_put_rx_buffer(rx_ring, rx_buffer);
        cleaned_count++;

        /* fetch next buffer in frame if non-eop */
        if (igb_is_non_eop(rx_ring, rx_desc))
            continue;

        /* verify the packet layout is correct */
        if (igb_cleanup_headers(rx_ring, rx_desc, skb)) {
            skb = NULL;
            continue;
        }

        /* probably a little skewed due to removing CRC */
        total_bytes += skb->len;

        /* populate checksum, timestamp, VLAN, and protocol */
        igb_process_skb_fields(rx_ring, rx_desc, skb);

        napi_gro_receive(&q_vector->napi, skb);

        /* reset skb pointer */
        skb = NULL;

        /* update budget accounting */
        total_packets++;
    }

    /* place incomplete frames back on ring for completion */
    rx_ring->skb = skb;

    u64_stats_update_begin(&rx_ring->rx_syncp);
    rx_ring->rx_stats.packets += total_packets;
    rx_ring->rx_stats.bytes += total_bytes;
    u64_stats_update_end(&rx_ring->rx_syncp);

    q_vector->rx.total_packets += total_packets;
    q_vector->rx.total_bytes += total_bytes;

    if (cleaned_count)
        igb_alloc_rx_buffers(rx_ring, cleaned_count);

    return total_packets;
}

static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
{
    return ring_uses_build_skb(rx_ring) ? IGB_SKB_PAD : 0;
}

static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
                                  struct igb_rx_buffer *bi)
{
    struct page *page = bi->page;
    dma_addr_t dma;

    /* since we are recycling buffers we should seldom need to alloc */
    if (likely(page))
        return true;

    /* alloc new page for storage */
    page = dev_alloc_pages(igb_rx_pg_order(rx_ring));
    if (unlikely(!page)) {
        rx_ring->rx_stats.alloc_failed++;
        return false;
    }

    /* map page for use */
    dma = dma_map_page_attrs(rx_ring->dev, page, 0,
                             igb_rx_pg_size(rx_ring),
                             DMA_FROM_DEVICE,
                             IGB_RX_DMA_ATTR);

    /* if mapping failed free memory back to system since
     * there isn't much point in holding memory we can't use
     */
    if (dma_mapping_error(rx_ring->dev, dma)) {
        __free_pages(page, igb_rx_pg_order(rx_ring));

        rx_ring->rx_stats.alloc_failed++;
        return false;
    }

    bi->dma = dma;
    bi->page = page;
    bi->page_offset = igb_rx_offset(rx_ring);
    bi->pagecnt_bias = 1;

    return true;
}

/**
 * igb_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: rx descriptor ring to allocate new receive buffers on
 * @cleaned_count: number of buffers to replace
 **/
void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
{
    union e1000_adv_rx_desc *rx_desc;
    struct igb_rx_buffer *bi;
    u16 i = rx_ring->next_to_use;
    u16 bufsz;

    /* nothing to do */
    if (!cleaned_count)
        return;

    rx_desc = IGB_RX_DESC(rx_ring, i);
    bi = &rx_ring->rx_buffer_info[i];
    i -= rx_ring->count;

    bufsz = igb_rx_bufsz(rx_ring);

    do {
        if (!igb_alloc_mapped_page(rx_ring, bi))
            break;

        /* sync the buffer for use by the device */
        dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
                                         bi->page_offset, bufsz,
                                         DMA_FROM_DEVICE);

        /* Refresh the desc even if buffer_addrs didn't change
         * because each write-back erases this info.
         */
        rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

        rx_desc++;
        bi++;
        i++;
        if (unlikely(!i)) {
            rx_desc = IGB_RX_DESC(rx_ring, 0);
            bi = rx_ring->rx_buffer_info;
            i -= rx_ring->count;
        }

        /* clear the length for the next_to_use descriptor */
        rx_desc->wb.upper.length = 0;

        cleaned_count--;
    } while (cleaned_count);

    i += rx_ring->count;

    if (rx_ring->next_to_use != i) {
        /* record the next descriptor to use */
        rx_ring->next_to_use = i;

        /* update next to alloc since we have filled the ring */
        rx_ring->next_to_alloc = i;

        /* Force memory writes to complete before letting h/w
         * know there are new descriptors to fetch.  (Only
         * applicable for weak-ordered memory model archs,
         * such as IA-64).
         */
        wmb();
        writel(i, rx_ring->tail);
    }
}

/**
 * igb_mii_ioctl - handle MII ioctls
 * @netdev: pointer to the netdev the ioctl targets
 * @ifr: interface request structure carrying the MII data
 * @cmd: the ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
 **/
static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
    struct igb_adapter *adapter = netdev_priv(netdev);
    struct mii_ioctl_data *data = if_mii(ifr);

    if (adapter->hw.phy.media_type != e1000_media_type_copper)
        return -EOPNOTSUPP;

    switch (cmd) {
    case SIOCGMIIPHY:
        data->phy_id = adapter->hw.phy.addr;
        break;
    case SIOCGMIIREG:
        if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
                             &data->val_out))
            return -EIO;
        break;
    case SIOCSMIIREG:
    default:
        return -EOPNOTSUPP;
    }
    return 0;
}

/**
 * igb_ioctl - dispatch device ioctls
 * @netdev: pointer to the netdev the ioctl targets
 * @ifr: interface request structure
 * @cmd: the ioctl command
 **/
static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
    switch (cmd) {
    case SIOCGMIIPHY:
    case SIOCGMIIREG:
    case SIOCSMIIREG:
        return igb_mii_ioctl(netdev, ifr, cmd);
    case SIOCGHWTSTAMP:
        return igb_ptp_get_ts_config(netdev, ifr);
    case SIOCSHWTSTAMP:
        return igb_ptp_set_ts_config(netdev, ifr);
    default:
        return -EOPNOTSUPP;
    }
}

void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
{
    struct igb_adapter *adapter = hw->back;

    pci_read_config_word(adapter->pdev, reg, value);
}

void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
{
    struct igb_adapter *adapter = hw->back;

    pci_write_config_word(adapter->pdev, reg, *value);
}

s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
    struct igb_adapter *adapter = hw->back;

    if (pcie_capability_read_word(adapter->pdev, reg, value))
        return -E1000_ERR_CONFIG;

    return 0;
}

s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
    struct igb_adapter *adapter = hw->back;

    if (pcie_capability_write_word(adapter->pdev, reg, *value))
        return -E1000_ERR_CONFIG;

    return 0;
}

static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features)
{
    struct igb_adapter *adapter = netdev_priv(netdev);
    struct e1000_hw *hw = &adapter->hw;
    u32 ctrl, rctl;
    bool enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);

    if (enable) {
        /* enable VLAN tag insert/strip */
        ctrl = rd32(E1000_CTRL);
        ctrl |= E1000_CTRL_VME;
        wr32(E1000_CTRL, ctrl);

        /* Disable CFI check */
        rctl = rd32(E1000_RCTL);
        rctl &= ~E1000_RCTL_CFIEN;
        wr32(E1000_RCTL, rctl);
    } else {
        /* disable VLAN tag insert/strip */
        ctrl = rd32(E1000_CTRL);
        ctrl &= ~E1000_CTRL_VME;
        wr32(E1000_CTRL, ctrl);
    }

    igb_set_vf_vlan_strip(adapter, adapter->vfs_allocated_count, enable);
}

static int igb_vlan_rx_add_vid(struct net_device *netdev,
                               __be16 proto, u16 vid)
{
    struct igb_adapter *adapter = netdev_priv(netdev);
    struct e1000_hw *hw = &adapter->hw;
    int pf_id = adapter->vfs_allocated_count;

    /* add the filter since PF can receive vlans w/o entry in vlvf */
    if (!vid || !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
        igb_vfta_set(hw, vid, pf_id, true, !!vid);

    set_bit(vid, adapter->active_vlans);

    return 0;
}

static int igb_vlan_rx_kill_vid(struct net_device *netdev,
                                __be16 proto, u16 vid)
{
    struct igb_adapter *adapter = netdev_priv(netdev);
    int pf_id = adapter->vfs_allocated_count;
    struct e1000_hw *hw = &adapter->hw;

    /* remove VID from filter table */
    if (vid && !(adapter->flags & IGB_FLAG_VLAN_PROMISC))
        igb_vfta_set(hw, vid, pf_id, false, true);

    clear_bit(vid, adapter->active_vlans);

    return 0;
}

static void igb_restore_vlan(struct igb_adapter *adapter)
{
    u16 vid = 1;

    igb_vlan_mode(adapter->netdev, adapter->netdev->features);
    igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);

    for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID)
        igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
}

int igb_set_spd_dplx(struct igb_adapter *adapter, u32 spd, u8 dplx)
{
    struct pci_dev *pdev = adapter->pdev;
    struct e1000_mac_info *mac = &adapter->hw.mac;

    mac->autoneg = 0;

    /* Make sure dplx is at most 1 bit and lsb of speed is not set
     * for the switch() below to work
     */
    if ((spd & 1) || (dplx & ~1))
        goto err_inval;

    /* Fiber NICs only allow 1000 Mbps Full duplex, plus 100 Mbps Full
     * duplex for 100BaseFX SFPs.
     */
    if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
        switch (spd + dplx) {
        case SPEED_10 + DUPLEX_HALF:
        case SPEED_10 + DUPLEX_FULL:
        case SPEED_100 + DUPLEX_HALF:
            goto err_inval;
        default:
            break;
        }
    }

    switch (spd + dplx) {
    case SPEED_10 + DUPLEX_HALF:
        mac->forced_speed_duplex = ADVERTISE_10_HALF;
        break;
    case SPEED_10 + DUPLEX_FULL:
        mac->forced_speed_duplex = ADVERTISE_10_FULL;
        break;
    case SPEED_100 + DUPLEX_HALF:
        mac->forced_speed_duplex = ADVERTISE_100_HALF;
        break;
    case SPEED_100 + DUPLEX_FULL:
        mac->forced_speed_duplex = ADVERTISE_100_FULL;
        break;
    case SPEED_1000 + DUPLEX_FULL:
        mac->autoneg = 1;
        adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
        break;
    case SPEED_1000 + DUPLEX_HALF: /* not supported */
    default:
        goto err_inval;
    }

    /* clear MDI, MDI(-X) override is only allowed when autoneg enabled */
    adapter->hw.phy.mdix = AUTO_ALL_MODES;

    return 0;

err_inval:
    dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
    return -EINVAL;
}

static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
                          bool runtime)
{
    struct net_device *netdev = pci_get_drvdata(pdev);
    struct igb_adapter *adapter = netdev_priv(netdev);
    struct e1000_hw *hw = &adapter->hw;
    u32 ctrl, rctl, status;
    u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
#ifdef CONFIG_PM
    int retval = 0;
#endif

    rtnl_lock();
    netif_device_detach(netdev);

    if (netif_running(netdev))
        __igb_close(netdev, true);

    igb_ptp_suspend(adapter);

    igb_clear_interrupt_scheme(adapter);
    rtnl_unlock();

#ifdef CONFIG_PM
    retval = pci_save_state(pdev);
    if (retval)
        return retval;
#endif

    status = rd32(E1000_STATUS);
    if (status & E1000_STATUS_LU)
        wufc &= ~E1000_WUFC_LNKC;

    if (wufc) {
        igb_setup_rctl(adapter);
        igb_set_rx_mode(netdev);

        /* turn on all-multi mode if wake on multicast is enabled */
        if (wufc & E1000_WUFC_MC) {
            rctl = rd32(E1000_RCTL);
            rctl |= E1000_RCTL_MPE;
            wr32(E1000_RCTL, rctl);
        }

        ctrl = rd32(E1000_CTRL);
        /* advertise wake from D3Cold */
#define E1000_CTRL_ADVD3WUC 0x00100000
        /* phy power management enable */
#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
        ctrl |= E1000_CTRL_ADVD3WUC;
        wr32(E1000_CTRL, ctrl);

        /* Allow time for pending master requests to run */
        igb_disable_pcie_master(hw);

        wr32(E1000_WUC, E1000_WUC_PME_EN);
        wr32(E1000_WUFC, wufc);
    } else {
        wr32(E1000_WUC, 0);
        wr32(E1000_WUFC, 0);
    }

    *enable_wake = wufc || adapter->en_mng_pt;
    if (!*enable_wake)
        igb_power_down_link(adapter);
    else
        igb_power_up_link(adapter);

    /* Release control of h/w to f/w.  If f/w is AMT enabled, this
     * would have already happened in close and is redundant.
     */
    igb_release_hw_control(adapter);

    pci_disable_device(pdev);

    return 0;
}

static void igb_deliver_wake_packet(struct net_device *netdev)
{
    struct igb_adapter *adapter = netdev_priv(netdev);
    struct e1000_hw *hw = &adapter->hw;
    struct sk_buff *skb;
    u32 wupl;

    wupl = rd32(E1000_WUPL) & E1000_WUPL_MASK;

    /* WUPM stores only the first 128 bytes of the wake packet.
     * Read the packet only if we have the whole thing.
     */
    if ((wupl == 0) || (wupl > E1000_WUPM_BYTES))
        return;

    skb = netdev_alloc_skb_ip_align(netdev, E1000_WUPM_BYTES);
    if (!skb)
        return;

    skb_put(skb, wupl);

    /* Ensure reads are 32-bit aligned */
    wupl = roundup(wupl, 4);

    memcpy_fromio(skb->data, hw->hw_addr + E1000_WUPM_REG(0), wupl);

    skb->protocol = eth_type_trans(skb, netdev);
    netif_rx(skb);
}

static int __maybe_unused igb_suspend(struct device *dev)
{
    int retval;
    bool wake;
    struct pci_dev *pdev = to_pci_dev(dev);

    retval = __igb_shutdown(pdev, &wake, 0);
    if (retval)
        return retval;

    if (wake) {
        pci_prepare_to_sleep(pdev);
    } else {
        pci_wake_from_d3(pdev, false);
        pci_set_power_state(pdev, PCI_D3hot);
    }

    return 0;
}

static int __maybe_unused igb_resume(struct device *dev)
{
    struct pci_dev *pdev = to_pci_dev(dev);
    struct net_device *netdev = pci_get_drvdata(pdev);
    struct igb_adapter *adapter = netdev_priv(netdev);
    struct e1000_hw *hw = &adapter->hw;
    u32 err, val;

    pci_set_power_state(pdev, PCI_D0);
    pci_restore_state(pdev);
    pci_save_state(pdev);

    if (!pci_device_is_present(pdev))
        return -ENODEV;
    err = pci_enable_device_mem(pdev);
    if (err) {
        dev_err(&pdev->dev,
                "igb: Cannot enable PCI device from suspend\n");
        return err;
    }
    pci_set_master(pdev);

    pci_enable_wake(pdev, PCI_D3hot, 0);
    pci_enable_wake(pdev, PCI_D3cold, 0);

    if (igb_init_interrupt_scheme(adapter, true)) {
        dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
        return -ENOMEM;
    }

    igb_reset(adapter);

    /* let the f/w know that the h/w is now under the control of the
     * driver.
     */
    igb_get_hw_control(adapter);
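
    /* If we were woken by a packet, WUPM still holds a copy of it;
     * deliver it to the stack before clearing the Wake Up Status
     * register, which is write-1-to-clear.
     */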
    val = rd32(E1000_WUS);
    if (val & WAKE_PKT_WUS)
        igb_deliver_wake_packet(netdev);

    wr32(E1000_WUS, ~0);

    rtnl_lock();
    if (!err && netif_running(netdev))
        err = __igb_open(netdev, true);

    if (!err)
        netif_device_attach(netdev);
    rtnl_unlock();

    return err;
}

static int __maybe_unused igb_runtime_idle(struct device *dev)
{
    struct pci_dev *pdev = to_pci_dev(dev);
    struct net_device *netdev = pci_get_drvdata(pdev);
    struct igb_adapter *adapter = netdev_priv(netdev);

    if (!igb_has_link(adapter))
        pm_schedule_suspend(dev, MSEC_PER_SEC * 5);

    return -EBUSY;
}

static int __maybe_unused igb_runtime_suspend(struct device *dev)
{
    struct pci_dev *pdev = to_pci_dev(dev);
    int retval;
    bool wake;

    retval = __igb_shutdown(pdev, &wake, 1);
    if (retval)
        return retval;

    if (wake) {
        pci_prepare_to_sleep(pdev);
    } else {
        pci_wake_from_d3(pdev, false);
        pci_set_power_state(pdev, PCI_D3hot);
    }

    return 0;
}

static int __maybe_unused igb_runtime_resume(struct device *dev)
{
    return igb_resume(dev);
}

static void igb_shutdown(struct pci_dev *pdev)
{
    bool wake;

    __igb_shutdown(pdev, &wake, 0);

    if (system_state == SYSTEM_POWER_OFF) {
        pci_wake_from_d3(pdev, wake);
        pci_set_power_state(pdev, PCI_D3hot);
    }
}

#ifdef CONFIG_PCI_IOV
static int igb_sriov_reinit(struct pci_dev *dev)
{
    struct net_device *netdev = pci_get_drvdata(dev);
    struct igb_adapter *adapter = netdev_priv(netdev);
    struct pci_dev *pdev = adapter->pdev;

    rtnl_lock();

    if (netif_running(netdev))
        igb_close(netdev);
    else
        igb_reset(adapter);

    igb_clear_interrupt_scheme(adapter);

    igb_init_queue_configuration(adapter);

    if (igb_init_interrupt_scheme(adapter, true)) {
        rtnl_unlock();
        dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
        return -ENOMEM;
    }

    if (netif_running(netdev))
        igb_open(netdev);

    rtnl_unlock();

    return 0;
}

static int igb_pci_disable_sriov(struct pci_dev *dev)
{
    int err = igb_disable_sriov(dev);

    if (!err)
        err = igb_sriov_reinit(dev);

    return err;
}

static int igb_pci_enable_sriov(struct pci_dev *dev, int num_vfs)
{
    int err = igb_enable_sriov(dev, num_vfs);

    if (err)
        goto out;

    err = igb_sriov_reinit(dev);
    if (!err)
        return num_vfs;

out:
    return err;
}

#endif
static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
{
#ifdef CONFIG_PCI_IOV
    if (num_vfs == 0)
        return igb_pci_disable_sriov(dev);
    else
        return igb_pci_enable_sriov(dev, num_vfs);
#endif
    return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polling 'interrupt' - used by things like netconsole to send skbs
 * without having to re-enable interrupts.  It's not called while
 * the interrupt routine is executing.
 */
static void igb_netpoll(struct net_device *netdev)
{
    struct igb_adapter *adapter = netdev_priv(netdev);
    struct e1000_hw *hw = &adapter->hw;
    struct igb_q_vector *q_vector;
    int i;

    for (i = 0; i < adapter->num_q_vectors; i++) {
        q_vector = adapter->q_vector[i];
        if (adapter->flags & IGB_FLAG_HAS_MSIX)
            wr32(E1000_EIMC, q_vector->eims_value);
        else
            igb_irq_disable(adapter);
        napi_schedule(&q_vector->napi);
    }
}
#endif /* CONFIG_NET_POLL_CONTROLLER */

/**
 * igb_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 **/
static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
    struct net_device *netdev = pci_get_drvdata(pdev);
    struct igb_adapter *adapter = netdev_priv(netdev);

    netif_device_detach(netdev);

    if (state == pci_channel_io_perm_failure)
        return PCI_ERS_RESULT_DISCONNECT;

    if (netif_running(netdev))
        igb_down(adapter);
    pci_disable_device(pdev);

    /* Request a slot reset. */
    return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * igb_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.  Implementation
 * resembles the first-half of the igb_resume routine.
 **/
static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
{
    struct net_device *netdev = pci_get_drvdata(pdev);
    struct igb_adapter *adapter = netdev_priv(netdev);
    struct e1000_hw *hw = &adapter->hw;
    pci_ers_result_t result;
    int err;

    if (pci_enable_device_mem(pdev)) {
        dev_err(&pdev->dev,
                "Cannot re-enable PCI device after reset.\n");
        result = PCI_ERS_RESULT_DISCONNECT;
    } else {
        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);

        pci_enable_wake(pdev, PCI_D3hot, 0);
        pci_enable_wake(pdev, PCI_D3cold, 0);

        /* In case of PCI error, the adapter loses its HW address,
         * so we should re-assign it here.
         */
        hw->hw_addr = adapter->io_addr;

        igb_reset(adapter);
        wr32(E1000_WUS, ~0);
        result = PCI_ERS_RESULT_RECOVERED;
    }

    err = pci_cleanup_aer_uncorrect_error_status(pdev);
    if (err) {
        dev_err(&pdev->dev,
                "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
                err);
        /* non-fatal, continue */
    }

    return result;
}

/**
 * igb_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.  Implementation resembles the
 * second-half of the igb_resume routine.
 */
static void igb_io_resume(struct pci_dev *pdev)
{
    struct net_device *netdev = pci_get_drvdata(pdev);
    struct igb_adapter *adapter = netdev_priv(netdev);

    if (netif_running(netdev)) {
        if (igb_up(adapter)) {
            dev_err(&pdev->dev, "igb_up failed after reset\n");
            return;
        }
    }

    netif_device_attach(netdev);

    /* let the f/w know that the h/w is now under the control of the
     * driver.
     */
    igb_get_hw_control(adapter);
}

/**
 * igb_rar_set_index - Sync RAL[index] and RAH[index] registers with MAC table
 * @adapter: Pointer to adapter structure
 * @index: Index of the RAR entry which needs to be synced with MAC table
 **/
static void igb_rar_set_index(struct igb_adapter *adapter, u32 index)
{
    struct e1000_hw *hw = &adapter->hw;
    u32 rar_low, rar_high;
    u8 *addr = adapter->mac_table[index].addr;

    /* HW expects these to be in network order when they are plugged
     * into the registers which are little endian.  To guarantee that
     * ordering, we need to do a leXX_to_cpup here so that we are ready
     * for the byteswap that occurs with writel.
     */
    rar_low = le32_to_cpup((__le32 *)(addr));
    rar_high = le16_to_cpup((__le16 *)(addr + 4));

    /* Indicate to hardware the Address is Valid. */
    if (adapter->mac_table[index].state & IGB_MAC_STATE_IN_USE) {
        if (is_valid_ether_addr(addr))
            rar_high |= E1000_RAH_AV;

        if (adapter->mac_table[index].state & IGB_MAC_STATE_SRC_ADDR)
            rar_high |= E1000_RAH_ASEL_SRC_ADDR;

        switch (hw->mac.type) {
        case e1000_82575:
        case e1000_i210:
            if (adapter->mac_table[index].state &
                IGB_MAC_STATE_QUEUE_STEERING)
                rar_high |= E1000_RAH_QSEL_ENABLE;

            rar_high |= E1000_RAH_POOL_1 *
                        adapter->mac_table[index].queue;
            break;
        default:
            rar_high |= E1000_RAH_POOL_1 <<
                        adapter->mac_table[index].queue;
            break;
        }
    }

    wr32(E1000_RAL(index), rar_low);
    wrfl();
    wr32(E1000_RAH(index), rar_high);
    wrfl();
}

static int igb_set_vf_mac(struct igb_adapter *adapter,
                          int vf, unsigned char *mac_addr)
{
    struct e1000_hw *hw = &adapter->hw;
    /* VF MAC addresses start at the end of the receive addresses and
     * move towards the first; as a result a collision should not be
     * possible.
     */
    int rar_entry = hw->mac.rar_entry_count - (vf + 1);
    unsigned char *vf_mac_addr = adapter->vf_data[vf].vf_mac_addresses;

    ether_addr_copy(vf_mac_addr, mac_addr);
    ether_addr_copy(adapter->mac_table[rar_entry].addr, mac_addr);
    adapter->mac_table[rar_entry].queue = vf;
    adapter->mac_table[rar_entry].state |= IGB_MAC_STATE_IN_USE;
    igb_rar_set_index(adapter, rar_entry);

    return 0;
}

static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
    struct igb_adapter *adapter = netdev_priv(netdev);

    if (vf >= adapter->vfs_allocated_count)
        return -EINVAL;

    /* Setting the VF MAC to 0 reverts the IGB_VF_FLAG_PF_SET_MAC
     * flag and allows overwriting the MAC via the VF netdev.  This
     * is necessary to allow libvirt a way to restore the original
     * MAC after unbinding vfio-pci and reloading igbvf after shutting
     * down a VM.
     */
    if (is_zero_ether_addr(mac)) {
        adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC;
        dev_info(&adapter->pdev->dev,
                 "remove administratively set MAC on VF %d\n",
                 vf);
    } else if (is_valid_ether_addr(mac)) {
        adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
        dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
                 mac, vf);
        dev_info(&adapter->pdev->dev,
                 "Reload the VF driver to make this change effective.\n");

        /* Generate additional warning if PF is down */
        if (test_bit(__IGB_DOWN, &adapter->state)) {
            dev_warn(&adapter->pdev->dev,
                     "The VF MAC address has been set, but the PF device is not up.\n");
            dev_warn(&adapter->pdev->dev,
                     "Bring the PF device up before attempting to use the VF device.\n");
        }
    } else {
        return -EINVAL;
    }

    return igb_set_vf_mac(adapter, vf, mac);
}

static int igb_link_mbps(int internal_link_speed)
{
    switch (internal_link_speed) {
    case SPEED_100:
        return 100;
    case SPEED_1000:
        return 1000;
    default:
        return 0;
    }
}

static void igb_set_vf_rate_limit(struct e1000_hw *hw, int vf, int tx_rate,
                                  int link_speed)
{
    int rf_dec, rf_int;
    u32 bcnrc_val;
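
    /* The hardware rate limiter takes the ratio link_speed / tx_rate as
     * a fixed-point "rate factor": rf_int holds the integer part and
     * rf_dec the remainder scaled by 2^E1000_RTTBCNRC_RF_INT_SHIFT.
     * For example, limiting a 1000 Mbps link to 300 Mbps gives an
     * integer part of 3 plus a fraction of 100/300 encoded in rf_dec.
     */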
    if (tx_rate != 0) {
        /* Calculate the rate factor values to set */
        rf_int = link_speed / tx_rate;
        rf_dec = (link_speed - (rf_int * tx_rate));
        rf_dec = (rf_dec * BIT(E1000_RTTBCNRC_RF_INT_SHIFT)) /
                 tx_rate;

        bcnrc_val = E1000_RTTBCNRC_RS_ENA;
        bcnrc_val |= ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
                      E1000_RTTBCNRC_RF_INT_MASK);
        bcnrc_val |= (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
    } else {
        bcnrc_val = 0;
    }

    wr32(E1000_RTTDQSEL, vf); /* vf X uses queue X */
    /* Set global transmit compensation time to the MMW_SIZE in RTTBCNRM
     * register.  MMW_SIZE=0x014 if 9728-byte jumbo is supported.
     */
    wr32(E1000_RTTBCNRM, 0x14);
    wr32(E1000_RTTBCNRC, bcnrc_val);
}

static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
{
    int actual_link_speed, i;
    bool reset_rate = false;

    /* VF TX rate limit was not set or not supported */
    if ((adapter->vf_rate_link_speed == 0) ||
        (adapter->hw.mac.type != e1000_82576))
        return;

    actual_link_speed = igb_link_mbps(adapter->link_speed);
    if (actual_link_speed != adapter->vf_rate_link_speed) {
        reset_rate = true;
        adapter->vf_rate_link_speed = 0;
        dev_info(&adapter->pdev->dev,
                 "Link speed has been changed. VF Transmit rate is disabled\n");
    }

    for (i = 0; i < adapter->vfs_allocated_count; i++) {
        if (reset_rate)
            adapter->vf_data[i].tx_rate = 0;

        igb_set_vf_rate_limit(&adapter->hw, i,
                              adapter->vf_data[i].tx_rate,
                              actual_link_speed);
    }
}

static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
                             int min_tx_rate, int max_tx_rate)
{
    struct igb_adapter *adapter = netdev_priv(netdev);
    struct e1000_hw *hw = &adapter->hw;
    int actual_link_speed;

    if (hw->mac.type != e1000_82576)
        return -EOPNOTSUPP;

    if (min_tx_rate)
        return -EINVAL;

    actual_link_speed = igb_link_mbps(adapter->link_speed);
    if ((vf >= adapter->vfs_allocated_count) ||
        (!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
        (max_tx_rate < 0) ||
        (max_tx_rate > actual_link_speed))
        return -EINVAL;

    adapter->vf_rate_link_speed = actual_link_speed;
    adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
    igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);

    return 0;
}

static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
                                   bool setting)
{
    struct igb_adapter *adapter = netdev_priv(netdev);
    struct e1000_hw *hw = &adapter->hw;
    u32 reg_val, reg_offset;

    if (!adapter->vfs_allocated_count)
        return -EOPNOTSUPP;

    if (vf >= adapter->vfs_allocated_count)
        return -EINVAL;

    reg_offset = (hw->mac.type == e1000_82576) ? E1000_DTXSWC : E1000_TXSWC;
    reg_val = rd32(reg_offset);
    if (setting)
        reg_val |= (BIT(vf) |
                    BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
    else
        reg_val &= ~(BIT(vf) |
                     BIT(vf + E1000_DTXSWC_VLAN_SPOOF_SHIFT));
    wr32(reg_offset, reg_val);

    adapter->vf_data[vf].spoofchk_enabled = setting;
    return 0;
}

static int igb_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting)
{
    struct igb_adapter *adapter = netdev_priv(netdev);

    if (vf >= adapter->vfs_allocated_count)
        return -EINVAL;
    if (adapter->vf_data[vf].trusted == setting)
        return 0;

    adapter->vf_data[vf].trusted = setting;

    dev_info(&adapter->pdev->dev, "VF %u is %strusted\n",
             vf, setting ? "" : "not ");
    return 0;
}

static int igb_ndo_get_vf_config(struct net_device *netdev,
                                 int vf, struct ifla_vf_info *ivi)
{
    struct igb_adapter *adapter = netdev_priv(netdev);

    if (vf >= adapter->vfs_allocated_count)
        return -EINVAL;
    ivi->vf = vf;
    memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
    ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
    ivi->min_tx_rate = 0;
    ivi->vlan = adapter->vf_data[vf].pf_vlan;
    ivi->qos = adapter->vf_data[vf].pf_qos;
    ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
    ivi->trusted = adapter->vf_data[vf].trusted;
    return 0;
}

static void igb_vmm_control(struct igb_adapter *adapter)
{
    struct e1000_hw *hw = &adapter->hw;
    u32 reg;

    switch (hw->mac.type) {
    case e1000_82575:
    case e1000_i210:
    case e1000_i211:
    case e1000_i354:
    default:
        /* replication is not supported for 82575 */
        return;
    case e1000_82576:
        /* notify HW that the MAC is adding vlan tags */
        reg = rd32(E1000_DTXCTL);
        reg |= E1000_DTXCTL_VLAN_ADDED;
        wr32(E1000_DTXCTL, reg);
        /* Fall through */
    case e1000_82580:
        /* enable replication vlan tag stripping */
        reg = rd32(E1000_RPLOLR);
        reg |= E1000_RPLOLR_STRVLAN;
        wr32(E1000_RPLOLR, reg);
        /* Fall through */
    case e1000_i350:
        /* none of the above registers are supported by i350 */
        break;
    }

    if (adapter->vfs_allocated_count) {
        igb_vmdq_set_loopback_pf(hw, true);
        igb_vmdq_set_replication_pf(hw, true);
        igb_vmdq_set_anti_spoofing_pf(hw, true,
                                      adapter->vfs_allocated_count);
    } else {
        igb_vmdq_set_loopback_pf(hw, false);
        igb_vmdq_set_replication_pf(hw, false);
    }
}

static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
{
    struct e1000_hw *hw = &adapter->hw;
    u32 dmac_thr;
    u16 hwm;

    if (hw->mac.type > e1000_82580) {
        if (adapter->flags & IGB_FLAG_DMAC) {
            u32 reg;

            /* force threshold to 0. */
            wr32(E1000_DMCTXTH, 0);

            /* DMA Coalescing high water mark needs to be greater
             * than the Rx threshold.  Set hwm to PBA - max frame
             * size in 16B units, capping it at PBA - 6KB.
             */
            hwm = 64 * (pba - 6);
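
            /* pba is in KB, and one KB is 64 sixteen-byte units;
             * e.g. pba = 34 gives hwm = 64 * 28 = 1792 units, which
             * is 28 KB, i.e. exactly PBA - 6KB.
             */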
            reg = rd32(E1000_FCRTC);
            reg &= ~E1000_FCRTC_RTH_COAL_MASK;
            reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
                    & E1000_FCRTC_RTH_COAL_MASK);
            wr32(E1000_FCRTC, reg);

            /* Set the DMA Coalescing Rx threshold to PBA - 2 * max
             * frame size, capping it at PBA - 10KB.
             */
            dmac_thr = pba - 10;
            reg = rd32(E1000_DMACR);
            reg &= ~E1000_DMACR_DMACTHR_MASK;
            reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
                    & E1000_DMACR_DMACTHR_MASK);

            /* transition to L0s or L1 if available */
            reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);

            /* watchdog timer = ~1000 usec, in 32 usec intervals */
            reg |= (1000 >> 5);

            /* Disable BMC-to-OS Watchdog Enable */
            if (hw->mac.type != e1000_i354)
                reg &= ~E1000_DMACR_DC_BMC2OSW_EN;

            wr32(E1000_DMACR, reg);

            /* no lower threshold to disable coalescing
             * (smart FIFO); UTRESH = 0
             */
            wr32(E1000_DMCRTRH, 0);

            reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);

            wr32(E1000_DMCTLX, reg);

            /* free space in tx packet buffer to wake from
             * DMA coal
             */
            wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
                 (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);

            /* make low power state decision controlled
             * by DMA coal
             */
            reg = rd32(E1000_PCIEMISC);
            reg &= ~E1000_PCIEMISC_LX_DECISION;
            wr32(E1000_PCIEMISC, reg);
        } /* endif adapter->dmac is not disabled */
    } else if (hw->mac.type == e1000_82580) {
        u32 reg = rd32(E1000_PCIEMISC);

        wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
        wr32(E1000_DMACR, 0);
    }
}

/**
 * igb_read_i2c_byte - Reads 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to read
 * @dev_addr: device address
 * @data: value read
 *
 * Performs byte read operation over I2C interface at
 * a specified device address.
 **/
s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
                      u8 dev_addr, u8 *data)
{
    struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
    struct i2c_client *this_client = adapter->i2c_client;
    s32 status;
    u16 swfw_mask = 0;

    if (!this_client)
        return E1000_ERR_I2C;

    swfw_mask = E1000_SWFW_PHY0_SM;

    if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
        return E1000_ERR_SWFW_SYNC;

    status = i2c_smbus_read_byte_data(this_client, byte_offset);
    hw->mac.ops.release_swfw_sync(hw, swfw_mask);

    if (status < 0)
        return E1000_ERR_I2C;

    *data = status;
    return 0;
}

/**
 * igb_write_i2c_byte - Writes 8 bit word over I2C
 * @hw: pointer to hardware structure
 * @byte_offset: byte offset to write
 * @dev_addr: device address
 * @data: value to write
 *
 * Performs byte write operation over I2C interface at
 * a specified device address.
 **/
s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
                       u8 dev_addr, u8 data)
{
    struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
    struct i2c_client *this_client = adapter->i2c_client;
    s32 status;
    u16 swfw_mask = E1000_SWFW_PHY0_SM;

    if (!this_client)
        return E1000_ERR_I2C;

    if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
        return E1000_ERR_SWFW_SYNC;

    status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
    hw->mac.ops.release_swfw_sync(hw, swfw_mask);

    if (status)
        return E1000_ERR_I2C;

    return 0;
}

int igb_reinit_queues(struct igb_adapter *adapter)
{
    struct net_device *netdev = adapter->netdev;
    struct pci_dev *pdev = adapter->pdev;
    int err = 0;

    if (netif_running(netdev))
        igb_close(netdev);

    igb_reset_interrupt_capability(adapter);

    if (igb_init_interrupt_scheme(adapter, true)) {
        dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
        return -ENOMEM;
    }

    if (netif_running(netdev))
        err = igb_open(netdev);

    return err;
}

static void igb_nfc_filter_exit(struct igb_adapter *adapter)
{
    struct igb_nfc_filter *rule;

    spin_lock(&adapter->nfc_lock);

    hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
        igb_erase_filter(adapter, rule);

    hlist_for_each_entry(rule, &adapter->cls_flower_list, nfc_node)
        igb_erase_filter(adapter, rule);

    spin_unlock(&adapter->nfc_lock);
}

static void igb_nfc_filter_restore(struct igb_adapter *adapter)
{
    struct igb_nfc_filter *rule;

    spin_lock(&adapter->nfc_lock);

    hlist_for_each_entry(rule, &adapter->nfc_filter_list, nfc_node)
        igb_add_filter(adapter, rule);

    spin_unlock(&adapter->nfc_lock);
}

/* igb_main.c */