bnxt.c

/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)

static const char version[] =
	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57417_NPAR,
	BCM58700,
	BCM57311,
	BCM57312,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57402_NPAR,
	BCM57407,
	BCM57412,
	BCM57414,
	BCM57416,
	BCM57417,
	BCM57412_NPAR,
	BCM57314,
	BCM57417_SFP,
	BCM57416_SFP,
	BCM57404_NPAR,
	BCM57406_NPAR,
	BCM57407_SFP,
	BCM57407_NPAR,
	BCM57414_NPAR,
	BCM57416_NPAR,
	BCM57452,
	BCM57454,
	BCM5745x_NPAR,
	BCM58802,
	BCM58804,
	BCM58808,
	NETXTREME_E_VF,
	NETXTREME_C_VF,
	NETXTREME_S_VF,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_FUNC_VF_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};
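
/* Firmware asynchronous event completions that the driver handles. */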
static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF);
}
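
/* Completion ring doorbell flag combinations: DB_CP_REARM_FLAGS re-arms the
 * IRQ while acknowledging the consumer index, DB_CP_FLAGS acknowledges it
 * with the IRQ kept disabled, and DB_CP_IRQ_DIS_FLAGS only disables the IRQ.
 */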
#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_REARM(db, raw_cons)					\
		writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB(db, raw_cons)					\
		writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB_IRQ_DIS(db)						\
		writel(DB_CP_IRQ_DIS_FLAGS, db)
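
/* TX length hint values, indexed by packet length in 512-byte units
 * (see the "length >>= 9" lookup in bnxt_start_xmit()).
 */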
const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};
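
/* Return the CFA action (destination switch port) carried in the skb's
 * HW port mux metadata dst, or 0 if none is attached.
 */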
static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}
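
/* Main transmit routine.  Small packets on an otherwise empty ring that fit
 * within tx_push_thresh are copied straight into the push buffer and written
 * through the doorbell; all other packets are DMA-mapped and described with
 * long TX BDs.
 */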
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
				    TX_BD_TYPE_LONG_TX_BD |
				    TX_BD_FLAGS_LHINT_512_AND_SMALLER |
				    TX_BD_FLAGS_COAL_NOW |
				    TX_BD_FLAGS_PACKET_END |
				    (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
					cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;

		if (push_len > 16) {
			__iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
			__iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(txr->tx_doorbell, tx_push_buf,
					 push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
			tx_buf->skb = NULL;
			return NETDEV_TX_OK;
		}
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		tx_buf->skb = NULL;
		return NETDEV_TX_OK;
	}

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = 0;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags =
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	if (!skb->xmit_more || netif_xmit_stopped(txq))
		bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);

tx_done:

	mmiowb();

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (skb->xmit_more && !tx_buf->is_push)
			bnxt_db_write(bp, txr->tx_doorbell, DB_KEY_TX | prod);

		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnxt_tx_avail() below, because in
		 * bnxt_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();

		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
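
/* Reclaim nr_pkts completed TX packets: unmap their DMA buffers, free the
 * skbs, and wake the queue once enough descriptors are available again.
 */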
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				PCI_DMA_TODEVICE);
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
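
/* Allocate and DMA-map one full page for page-mode RX; returns NULL on
 * allocation or mapping failure.
 */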
  530. static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
  531. gfp_t gfp)
  532. {
  533. struct device *dev = &bp->pdev->dev;
  534. struct page *page;
  535. page = alloc_page(gfp);
  536. if (!page)
  537. return NULL;
  538. *mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
  539. DMA_ATTR_WEAK_ORDERING);
  540. if (dma_mapping_error(dev, *mapping)) {
  541. __free_page(page);
  542. return NULL;
  543. }
  544. *mapping += bp->rx_dma_offset;
  545. return page;
  546. }
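/* Allocate a normal (non page-mode) RX buffer with kmalloc() and map
 * rx_buf_use_size bytes starting at rx_dma_offset.
 */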
  547. static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
  548. gfp_t gfp)
  549. {
  550. u8 *data;
  551. struct pci_dev *pdev = bp->pdev;
  552. data = kmalloc(bp->rx_buf_size, gfp);
  553. if (!data)
  554. return NULL;
  555. *mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
  556. bp->rx_buf_use_size, bp->rx_dir,
  557. DMA_ATTR_WEAK_ORDERING);
  558. if (dma_mapping_error(&pdev->dev, *mapping)) {
  559. kfree(data);
  560. data = NULL;
  561. }
  562. return data;
  563. }
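/* Fill one RX BD at @prod with a fresh buffer: a page in page mode,
 * otherwise a kmalloc'ed data buffer.
 */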
  564. int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
  565. u16 prod, gfp_t gfp)
  566. {
  567. struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
  568. struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
  569. dma_addr_t mapping;
  570. if (BNXT_RX_PAGE_MODE(bp)) {
  571. struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);
  572. if (!page)
  573. return -ENOMEM;
  574. rx_buf->data = page;
  575. rx_buf->data_ptr = page_address(page) + bp->rx_offset;
  576. } else {
  577. u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
  578. if (!data)
  579. return -ENOMEM;
  580. rx_buf->data = data;
  581. rx_buf->data_ptr = data + bp->rx_offset;
  582. }
  583. rx_buf->mapping = mapping;
  584. rxbd->rx_bd_haddr = cpu_to_le64(mapping);
  585. return 0;
  586. }
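/* Recycle the buffer from the consumer slot into the current producer
 * slot so the hardware can reuse it without a new allocation.
 */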
  587. void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
  588. {
  589. u16 prod = rxr->rx_prod;
  590. struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
  591. struct rx_bd *cons_bd, *prod_bd;
  592. prod_rx_buf = &rxr->rx_buf_ring[prod];
  593. cons_rx_buf = &rxr->rx_buf_ring[cons];
  594. prod_rx_buf->data = data;
  595. prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;
  596. prod_rx_buf->mapping = cons_rx_buf->mapping;
  597. prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
  598. cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
  599. prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
  600. }
  601. static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
  602. {
  603. u16 next, max = rxr->rx_agg_bmap_size;
  604. next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
  605. if (next >= max)
  606. next = find_first_zero_bit(rxr->rx_agg_bmap, max);
  607. return next;
  608. }
  609. static inline int bnxt_alloc_rx_page(struct bnxt *bp,
  610. struct bnxt_rx_ring_info *rxr,
  611. u16 prod, gfp_t gfp)
  612. {
  613. struct rx_bd *rxbd =
  614. &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
  615. struct bnxt_sw_rx_agg_bd *rx_agg_buf;
  616. struct pci_dev *pdev = bp->pdev;
  617. struct page *page;
  618. dma_addr_t mapping;
  619. u16 sw_prod = rxr->rx_sw_agg_prod;
  620. unsigned int offset = 0;
  621. if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
  622. page = rxr->rx_page;
  623. if (!page) {
  624. page = alloc_page(gfp);
  625. if (!page)
  626. return -ENOMEM;
  627. rxr->rx_page = page;
  628. rxr->rx_page_offset = 0;
  629. }
  630. offset = rxr->rx_page_offset;
  631. rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
  632. if (rxr->rx_page_offset == PAGE_SIZE)
  633. rxr->rx_page = NULL;
  634. else
  635. get_page(page);
  636. } else {
  637. page = alloc_page(gfp);
  638. if (!page)
  639. return -ENOMEM;
  640. }
  641. mapping = dma_map_page_attrs(&pdev->dev, page, offset,
  642. BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
  643. DMA_ATTR_WEAK_ORDERING);
  644. if (dma_mapping_error(&pdev->dev, mapping)) {
  645. __free_page(page);
  646. return -EIO;
  647. }
  648. if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
  649. sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
  650. __set_bit(sw_prod, rxr->rx_agg_bmap);
  651. rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
  652. rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);
  653. rx_agg_buf->page = page;
  654. rx_agg_buf->offset = offset;
  655. rx_agg_buf->mapping = mapping;
  656. rxbd->rx_bd_haddr = cpu_to_le64(mapping);
  657. rxbd->rx_bd_opaque = sw_prod;
  658. return 0;
  659. }
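/* Return @agg_bufs aggregation buffers described by the completion
 * ring entries starting at @cp_cons back to the aggregation ring.
 */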
  660. static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
  661. u32 agg_bufs)
  662. {
  663. struct bnxt *bp = bnapi->bp;
  664. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  665. struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
  666. u16 prod = rxr->rx_agg_prod;
  667. u16 sw_prod = rxr->rx_sw_agg_prod;
  668. u32 i;
  669. for (i = 0; i < agg_bufs; i++) {
  670. u16 cons;
  671. struct rx_agg_cmp *agg;
  672. struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
  673. struct rx_bd *prod_bd;
  674. struct page *page;
  675. agg = (struct rx_agg_cmp *)
  676. &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
  677. cons = agg->rx_agg_cmp_opaque;
  678. __clear_bit(cons, rxr->rx_agg_bmap);
  679. if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
  680. sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);
  681. __set_bit(sw_prod, rxr->rx_agg_bmap);
  682. prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
  683. cons_rx_buf = &rxr->rx_agg_ring[cons];
  684. /* It is possible for sw_prod to be equal to cons, so
  685. * set cons_rx_buf->page to NULL first.
  686. */
  687. page = cons_rx_buf->page;
  688. cons_rx_buf->page = NULL;
  689. prod_rx_buf->page = page;
  690. prod_rx_buf->offset = cons_rx_buf->offset;
  691. prod_rx_buf->mapping = cons_rx_buf->mapping;
  692. prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
  693. prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
  694. prod_bd->rx_bd_opaque = sw_prod;
  695. prod = NEXT_RX_AGG(prod);
  696. sw_prod = NEXT_RX_AGG(sw_prod);
  697. cp_cons = NEXT_CMP(cp_cons);
  698. }
  699. rxr->rx_agg_prod = prod;
  700. rxr->rx_sw_agg_prod = sw_prod;
  701. }
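/* Page-mode receive: replenish the ring, unmap the page, copy the
 * packet headers into the skb linear area, and attach the remainder of
 * the page as a fragment.
 */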
  702. static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
  703. struct bnxt_rx_ring_info *rxr,
  704. u16 cons, void *data, u8 *data_ptr,
  705. dma_addr_t dma_addr,
  706. unsigned int offset_and_len)
  707. {
  708. unsigned int payload = offset_and_len >> 16;
  709. unsigned int len = offset_and_len & 0xffff;
  710. struct skb_frag_struct *frag;
  711. struct page *page = data;
  712. u16 prod = rxr->rx_prod;
  713. struct sk_buff *skb;
  714. int off, err;
  715. err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
  716. if (unlikely(err)) {
  717. bnxt_reuse_rx_data(rxr, cons, data);
  718. return NULL;
  719. }
  720. dma_addr -= bp->rx_dma_offset;
  721. dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
  722. DMA_ATTR_WEAK_ORDERING);
  723. if (unlikely(!payload))
  724. payload = eth_get_headlen(data_ptr, len);
  725. skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
  726. if (!skb) {
  727. __free_page(page);
  728. return NULL;
  729. }
  730. off = (void *)data_ptr - page_address(page);
  731. skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
  732. memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
  733. payload + NET_IP_ALIGN);
  734. frag = &skb_shinfo(skb)->frags[0];
  735. skb_frag_size_sub(frag, payload);
  736. frag->page_offset += payload;
  737. skb->data_len -= payload;
  738. skb->tail += payload;
  739. return skb;
  740. }
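/* Normal receive path: replenish the ring, then build an skb directly
 * around the received data buffer.
 */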
  741. static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
  742. struct bnxt_rx_ring_info *rxr, u16 cons,
  743. void *data, u8 *data_ptr,
  744. dma_addr_t dma_addr,
  745. unsigned int offset_and_len)
  746. {
  747. u16 prod = rxr->rx_prod;
  748. struct sk_buff *skb;
  749. int err;
  750. err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
  751. if (unlikely(err)) {
  752. bnxt_reuse_rx_data(rxr, cons, data);
  753. return NULL;
  754. }
  755. skb = build_skb(data, 0);
  756. dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
  757. bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
  758. if (!skb) {
  759. kfree(data);
  760. return NULL;
  761. }
  762. skb_reserve(skb, bp->rx_offset);
  763. skb_put(skb, offset_and_len & 0xffff);
  764. return skb;
  765. }
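/* Append the aggregation buffers of a jumbo/TPA packet to @skb as page
 * fragments, replenishing the aggregation ring as we go.  On allocation
 * failure the skb is freed and the remaining buffers are recycled.
 */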
  766. static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
  767. struct sk_buff *skb, u16 cp_cons,
  768. u32 agg_bufs)
  769. {
  770. struct pci_dev *pdev = bp->pdev;
  771. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  772. struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
  773. u16 prod = rxr->rx_agg_prod;
  774. u32 i;
  775. for (i = 0; i < agg_bufs; i++) {
  776. u16 cons, frag_len;
  777. struct rx_agg_cmp *agg;
  778. struct bnxt_sw_rx_agg_bd *cons_rx_buf;
  779. struct page *page;
  780. dma_addr_t mapping;
  781. agg = (struct rx_agg_cmp *)
  782. &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
  783. cons = agg->rx_agg_cmp_opaque;
  784. frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
  785. RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;
  786. cons_rx_buf = &rxr->rx_agg_ring[cons];
  787. skb_fill_page_desc(skb, i, cons_rx_buf->page,
  788. cons_rx_buf->offset, frag_len);
  789. __clear_bit(cons, rxr->rx_agg_bmap);
  790. /* It is possible for bnxt_alloc_rx_page() to allocate
  791. * a sw_prod index that equals the cons index, so we
  792. * need to clear the cons entry now.
  793. */
  794. mapping = cons_rx_buf->mapping;
  795. page = cons_rx_buf->page;
  796. cons_rx_buf->page = NULL;
  797. if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
  798. struct skb_shared_info *shinfo;
  799. unsigned int nr_frags;
  800. shinfo = skb_shinfo(skb);
  801. nr_frags = --shinfo->nr_frags;
  802. __skb_frag_set_page(&shinfo->frags[nr_frags], NULL);
  803. dev_kfree_skb(skb);
  804. cons_rx_buf->page = page;
  805. /* Update prod since possibly some pages have been
  806. * allocated already.
  807. */
  808. rxr->rx_agg_prod = prod;
  809. bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
  810. return NULL;
  811. }
  812. dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
  813. PCI_DMA_FROMDEVICE,
  814. DMA_ATTR_WEAK_ORDERING);
  815. skb->data_len += frag_len;
  816. skb->len += frag_len;
  817. skb->truesize += PAGE_SIZE;
  818. prod = NEXT_RX_AGG(prod);
  819. cp_cons = NEXT_CMP(cp_cons);
  820. }
  821. rxr->rx_agg_prod = prod;
  822. return skb;
  823. }
  824. static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
  825. u8 agg_bufs, u32 *raw_cons)
  826. {
  827. u16 last;
  828. struct rx_agg_cmp *agg;
  829. *raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
  830. last = RING_CMP(*raw_cons);
  831. agg = (struct rx_agg_cmp *)
  832. &cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
  833. return RX_AGG_CMP_VALID(agg, *raw_cons);
  834. }
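/* Copy a small packet into a freshly allocated skb so that the
 * original RX buffer can be reused in place by the caller.
 */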
  835. static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
  836. unsigned int len,
  837. dma_addr_t mapping)
  838. {
  839. struct bnxt *bp = bnapi->bp;
  840. struct pci_dev *pdev = bp->pdev;
  841. struct sk_buff *skb;
  842. skb = napi_alloc_skb(&bnapi->napi, len);
  843. if (!skb)
  844. return NULL;
  845. dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
  846. bp->rx_dir);
  847. memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
  848. len + NET_IP_ALIGN);
  849. dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
  850. bp->rx_dir);
  851. skb_put(skb, len);
  852. return skb;
  853. }
  854. static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
  855. u32 *raw_cons, void *cmp)
  856. {
  857. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  858. struct rx_cmp *rxcmp = cmp;
  859. u32 tmp_raw_cons = *raw_cons;
  860. u8 cmp_type, agg_bufs = 0;
  861. cmp_type = RX_CMP_TYPE(rxcmp);
  862. if (cmp_type == CMP_TYPE_RX_L2_CMP) {
  863. agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
  864. RX_CMP_AGG_BUFS) >>
  865. RX_CMP_AGG_BUFS_SHIFT;
  866. } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
  867. struct rx_tpa_end_cmp *tpa_end = cmp;
  868. agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
  869. RX_TPA_END_CMP_AGG_BUFS) >>
  870. RX_TPA_END_CMP_AGG_BUFS_SHIFT;
  871. }
  872. if (agg_bufs) {
  873. if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
  874. return -EBUSY;
  875. }
  876. *raw_cons = tmp_raw_cons;
  877. return 0;
  878. }
  879. static void bnxt_queue_sp_work(struct bnxt *bp)
  880. {
  881. if (BNXT_PF(bp))
  882. queue_work(bnxt_pf_wq, &bp->sp_task);
  883. else
  884. schedule_work(&bp->sp_task);
  885. }
  886. static void bnxt_cancel_sp_work(struct bnxt *bp)
  887. {
  888. if (BNXT_PF(bp))
  889. flush_workqueue(bnxt_pf_wq);
  890. else
  891. cancel_work_sync(&bp->sp_task);
  892. }
  893. static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
  894. {
  895. if (!rxr->bnapi->in_reset) {
  896. rxr->bnapi->in_reset = true;
  897. set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
  898. bnxt_queue_sp_work(bp);
  899. }
  900. rxr->rx_next_cons = 0xffff;
  901. }
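/* Handle a TPA_START completion: save the flow's buffer, hash and
 * metadata in the per-agg_id bnxt_tpa_info, and give the hardware a
 * replacement buffer at the current producer index.
 */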
  902. static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
  903. struct rx_tpa_start_cmp *tpa_start,
  904. struct rx_tpa_start_cmp_ext *tpa_start1)
  905. {
  906. u8 agg_id = TPA_START_AGG_ID(tpa_start);
  907. u16 cons, prod;
  908. struct bnxt_tpa_info *tpa_info;
  909. struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
  910. struct rx_bd *prod_bd;
  911. dma_addr_t mapping;
  912. cons = tpa_start->rx_tpa_start_cmp_opaque;
  913. prod = rxr->rx_prod;
  914. cons_rx_buf = &rxr->rx_buf_ring[cons];
  915. prod_rx_buf = &rxr->rx_buf_ring[prod];
  916. tpa_info = &rxr->rx_tpa[agg_id];
  917. if (unlikely(cons != rxr->rx_next_cons)) {
  918. bnxt_sched_reset(bp, rxr);
  919. return;
  920. }
  921. /* Store cfa_code in tpa_info to use in tpa_end
  922. * completion processing.
  923. */
  924. tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
  925. prod_rx_buf->data = tpa_info->data;
  926. prod_rx_buf->data_ptr = tpa_info->data_ptr;
  927. mapping = tpa_info->mapping;
  928. prod_rx_buf->mapping = mapping;
  929. prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
  930. prod_bd->rx_bd_haddr = cpu_to_le64(mapping);
  931. tpa_info->data = cons_rx_buf->data;
  932. tpa_info->data_ptr = cons_rx_buf->data_ptr;
  933. cons_rx_buf->data = NULL;
  934. tpa_info->mapping = cons_rx_buf->mapping;
  935. tpa_info->len =
  936. le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
  937. RX_TPA_START_CMP_LEN_SHIFT;
  938. if (likely(TPA_START_HASH_VALID(tpa_start))) {
  939. u32 hash_type = TPA_START_HASH_TYPE(tpa_start);
  940. tpa_info->hash_type = PKT_HASH_TYPE_L4;
  941. tpa_info->gso_type = SKB_GSO_TCPV4;
  942. /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
  943. if (hash_type == 3)
  944. tpa_info->gso_type = SKB_GSO_TCPV6;
  945. tpa_info->rss_hash =
  946. le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
  947. } else {
  948. tpa_info->hash_type = PKT_HASH_TYPE_NONE;
  949. tpa_info->gso_type = 0;
  950. if (netif_msg_rx_err(bp))
  951. netdev_warn(bp->dev, "TPA packet without valid hash\n");
  952. }
  953. tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
  954. tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
  955. tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
  956. rxr->rx_prod = NEXT_RX(prod);
  957. cons = NEXT_RX(cons);
  958. rxr->rx_next_cons = NEXT_RX(cons);
  959. cons_rx_buf = &rxr->rx_buf_ring[cons];
  960. bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
  961. rxr->rx_prod = NEXT_RX(rxr->rx_prod);
  962. cons_rx_buf->data = NULL;
  963. }
  964. static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
  965. u16 cp_cons, u32 agg_bufs)
  966. {
  967. if (agg_bufs)
  968. bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
  969. }
  970. static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
  971. int payload_off, int tcp_ts,
  972. struct sk_buff *skb)
  973. {
  974. #ifdef CONFIG_INET
  975. struct tcphdr *th;
  976. int len, nw_off;
  977. u16 outer_ip_off, inner_ip_off, inner_mac_off;
  978. u32 hdr_info = tpa_info->hdr_info;
  979. bool loopback = false;
  980. inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
  981. inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
  982. outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);
  983. /* If the packet is an internal loopback packet, the offsets will
  984. * have an extra 4 bytes.
  985. */
  986. if (inner_mac_off == 4) {
  987. loopback = true;
  988. } else if (inner_mac_off > 4) {
  989. __be16 proto = *((__be16 *)(skb->data + inner_ip_off -
  990. ETH_HLEN - 2));
991. /* We only support inner IPv4/IPv6. If we don't see the
  992. * correct protocol ID, it must be a loopback packet where
  993. * the offsets are off by 4.
  994. */
  995. if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
  996. loopback = true;
  997. }
  998. if (loopback) {
  999. /* internal loopback packet, subtract all offsets by 4 */
  1000. inner_ip_off -= 4;
  1001. inner_mac_off -= 4;
  1002. outer_ip_off -= 4;
  1003. }
  1004. nw_off = inner_ip_off - ETH_HLEN;
  1005. skb_set_network_header(skb, nw_off);
  1006. if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
  1007. struct ipv6hdr *iph = ipv6_hdr(skb);
  1008. skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
  1009. len = skb->len - skb_transport_offset(skb);
  1010. th = tcp_hdr(skb);
  1011. th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
  1012. } else {
  1013. struct iphdr *iph = ip_hdr(skb);
  1014. skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
  1015. len = skb->len - skb_transport_offset(skb);
  1016. th = tcp_hdr(skb);
  1017. th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
  1018. }
  1019. if (inner_mac_off) { /* tunnel */
  1020. struct udphdr *uh = NULL;
  1021. __be16 proto = *((__be16 *)(skb->data + outer_ip_off -
  1022. ETH_HLEN - 2));
  1023. if (proto == htons(ETH_P_IP)) {
  1024. struct iphdr *iph = (struct iphdr *)skb->data;
  1025. if (iph->protocol == IPPROTO_UDP)
  1026. uh = (struct udphdr *)(iph + 1);
  1027. } else {
  1028. struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
  1029. if (iph->nexthdr == IPPROTO_UDP)
  1030. uh = (struct udphdr *)(iph + 1);
  1031. }
  1032. if (uh) {
  1033. if (uh->check)
  1034. skb_shinfo(skb)->gso_type |=
  1035. SKB_GSO_UDP_TUNNEL_CSUM;
  1036. else
  1037. skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
  1038. }
  1039. }
  1040. #endif
  1041. return skb;
  1042. }
  1043. #define BNXT_IPV4_HDR_SIZE (sizeof(struct iphdr) + sizeof(struct tcphdr))
  1044. #define BNXT_IPV6_HDR_SIZE (sizeof(struct ipv6hdr) + sizeof(struct tcphdr))
  1045. static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
  1046. int payload_off, int tcp_ts,
  1047. struct sk_buff *skb)
  1048. {
  1049. #ifdef CONFIG_INET
  1050. struct tcphdr *th;
  1051. int len, nw_off, tcp_opt_len = 0;
  1052. if (tcp_ts)
  1053. tcp_opt_len = 12;
  1054. if (tpa_info->gso_type == SKB_GSO_TCPV4) {
  1055. struct iphdr *iph;
  1056. nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
  1057. ETH_HLEN;
  1058. skb_set_network_header(skb, nw_off);
  1059. iph = ip_hdr(skb);
  1060. skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
  1061. len = skb->len - skb_transport_offset(skb);
  1062. th = tcp_hdr(skb);
  1063. th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
  1064. } else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
  1065. struct ipv6hdr *iph;
  1066. nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
  1067. ETH_HLEN;
  1068. skb_set_network_header(skb, nw_off);
  1069. iph = ipv6_hdr(skb);
  1070. skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
  1071. len = skb->len - skb_transport_offset(skb);
  1072. th = tcp_hdr(skb);
  1073. th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
  1074. } else {
  1075. dev_kfree_skb_any(skb);
  1076. return NULL;
  1077. }
  1078. if (nw_off) { /* tunnel */
  1079. struct udphdr *uh = NULL;
  1080. if (skb->protocol == htons(ETH_P_IP)) {
  1081. struct iphdr *iph = (struct iphdr *)skb->data;
  1082. if (iph->protocol == IPPROTO_UDP)
  1083. uh = (struct udphdr *)(iph + 1);
  1084. } else {
  1085. struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
  1086. if (iph->nexthdr == IPPROTO_UDP)
  1087. uh = (struct udphdr *)(iph + 1);
  1088. }
  1089. if (uh) {
  1090. if (uh->check)
  1091. skb_shinfo(skb)->gso_type |=
  1092. SKB_GSO_UDP_TUNNEL_CSUM;
  1093. else
  1094. skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
  1095. }
  1096. }
  1097. #endif
  1098. return skb;
  1099. }
  1100. static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
  1101. struct bnxt_tpa_info *tpa_info,
  1102. struct rx_tpa_end_cmp *tpa_end,
  1103. struct rx_tpa_end_cmp_ext *tpa_end1,
  1104. struct sk_buff *skb)
  1105. {
  1106. #ifdef CONFIG_INET
  1107. int payload_off;
  1108. u16 segs;
  1109. segs = TPA_END_TPA_SEGS(tpa_end);
  1110. if (segs == 1)
  1111. return skb;
  1112. NAPI_GRO_CB(skb)->count = segs;
  1113. skb_shinfo(skb)->gso_size =
  1114. le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
  1115. skb_shinfo(skb)->gso_type = tpa_info->gso_type;
  1116. payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
  1117. RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
  1118. RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
  1119. skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
  1120. if (likely(skb))
  1121. tcp_gro_complete(skb);
  1122. #endif
  1123. return skb;
  1124. }
1125. /* Given the cfa_code of a received packet, determine which
  1126. * netdev (vf-rep or PF) the packet is destined to.
  1127. */
  1128. static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
  1129. {
  1130. struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
1131. /* if vf-rep dev is NULL, the packet must belong to the PF */
  1132. return dev ? dev : bp->dev;
  1133. }
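/* Handle a TPA_END completion: build the skb for the coalesced packet
 * (copying small packets, otherwise swapping in a new buffer), attach
 * any aggregation pages, then apply VLAN, checksum and GRO handling.
 */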
  1134. static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
  1135. struct bnxt_napi *bnapi,
  1136. u32 *raw_cons,
  1137. struct rx_tpa_end_cmp *tpa_end,
  1138. struct rx_tpa_end_cmp_ext *tpa_end1,
  1139. u8 *event)
  1140. {
  1141. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  1142. struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
  1143. u8 agg_id = TPA_END_AGG_ID(tpa_end);
  1144. u8 *data_ptr, agg_bufs;
  1145. u16 cp_cons = RING_CMP(*raw_cons);
  1146. unsigned int len;
  1147. struct bnxt_tpa_info *tpa_info;
  1148. dma_addr_t mapping;
  1149. struct sk_buff *skb;
  1150. void *data;
  1151. if (unlikely(bnapi->in_reset)) {
  1152. int rc = bnxt_discard_rx(bp, bnapi, raw_cons, tpa_end);
  1153. if (rc < 0)
  1154. return ERR_PTR(-EBUSY);
  1155. return NULL;
  1156. }
  1157. tpa_info = &rxr->rx_tpa[agg_id];
  1158. data = tpa_info->data;
  1159. data_ptr = tpa_info->data_ptr;
  1160. prefetch(data_ptr);
  1161. len = tpa_info->len;
  1162. mapping = tpa_info->mapping;
  1163. agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
  1164. RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;
  1165. if (agg_bufs) {
  1166. if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
  1167. return ERR_PTR(-EBUSY);
  1168. *event |= BNXT_AGG_EVENT;
  1169. cp_cons = NEXT_CMP(cp_cons);
  1170. }
  1171. if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
  1172. bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
  1173. if (agg_bufs > MAX_SKB_FRAGS)
  1174. netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
  1175. agg_bufs, (int)MAX_SKB_FRAGS);
  1176. return NULL;
  1177. }
  1178. if (len <= bp->rx_copy_thresh) {
  1179. skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
  1180. if (!skb) {
  1181. bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
  1182. return NULL;
  1183. }
  1184. } else {
  1185. u8 *new_data;
  1186. dma_addr_t new_mapping;
  1187. new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
  1188. if (!new_data) {
  1189. bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
  1190. return NULL;
  1191. }
  1192. tpa_info->data = new_data;
  1193. tpa_info->data_ptr = new_data + bp->rx_offset;
  1194. tpa_info->mapping = new_mapping;
  1195. skb = build_skb(data, 0);
  1196. dma_unmap_single_attrs(&bp->pdev->dev, mapping,
  1197. bp->rx_buf_use_size, bp->rx_dir,
  1198. DMA_ATTR_WEAK_ORDERING);
  1199. if (!skb) {
  1200. kfree(data);
  1201. bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
  1202. return NULL;
  1203. }
  1204. skb_reserve(skb, bp->rx_offset);
  1205. skb_put(skb, len);
  1206. }
  1207. if (agg_bufs) {
  1208. skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
  1209. if (!skb) {
  1210. /* Page reuse already handled by bnxt_rx_pages(). */
  1211. return NULL;
  1212. }
  1213. }
  1214. skb->protocol =
  1215. eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
  1216. if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
  1217. skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
  1218. if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
  1219. (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
  1220. u16 vlan_proto = tpa_info->metadata >>
  1221. RX_CMP_FLAGS2_METADATA_TPID_SFT;
  1222. u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;
  1223. __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
  1224. }
  1225. skb_checksum_none_assert(skb);
  1226. if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
  1227. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1228. skb->csum_level =
  1229. (tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
  1230. }
  1231. if (TPA_END_GRO(tpa_end))
  1232. skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);
  1233. return skb;
  1234. }
  1235. static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
  1236. struct sk_buff *skb)
  1237. {
  1238. if (skb->dev != bp->dev) {
  1239. /* this packet belongs to a vf-rep */
  1240. bnxt_vf_rep_rx(bp, skb);
  1241. return;
  1242. }
  1243. skb_record_rx_queue(skb, bnapi->index);
  1244. napi_gro_receive(&bnapi->napi, skb);
  1245. }
  1246. /* returns the following:
  1247. * 1 - 1 packet successfully received
  1248. * 0 - successful TPA_START, packet not completed yet
  1249. * -EBUSY - completion ring does not have all the agg buffers yet
  1250. * -ENOMEM - packet aborted due to out of memory
  1251. * -EIO - packet aborted due to hw error indicated in BD
  1252. */
  1253. static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
  1254. u8 *event)
  1255. {
  1256. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  1257. struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
  1258. struct net_device *dev = bp->dev;
  1259. struct rx_cmp *rxcmp;
  1260. struct rx_cmp_ext *rxcmp1;
  1261. u32 tmp_raw_cons = *raw_cons;
  1262. u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
  1263. struct bnxt_sw_rx_bd *rx_buf;
  1264. unsigned int len;
  1265. u8 *data_ptr, agg_bufs, cmp_type;
  1266. dma_addr_t dma_addr;
  1267. struct sk_buff *skb;
  1268. void *data;
  1269. int rc = 0;
  1270. u32 misc;
  1271. rxcmp = (struct rx_cmp *)
  1272. &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
  1273. tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
  1274. cp_cons = RING_CMP(tmp_raw_cons);
  1275. rxcmp1 = (struct rx_cmp_ext *)
  1276. &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
  1277. if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
  1278. return -EBUSY;
  1279. cmp_type = RX_CMP_TYPE(rxcmp);
  1280. prod = rxr->rx_prod;
  1281. if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
  1282. bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
  1283. (struct rx_tpa_start_cmp_ext *)rxcmp1);
  1284. *event |= BNXT_RX_EVENT;
  1285. goto next_rx_no_prod_no_len;
  1286. } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
  1287. skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
  1288. (struct rx_tpa_end_cmp *)rxcmp,
  1289. (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
  1290. if (IS_ERR(skb))
  1291. return -EBUSY;
  1292. rc = -ENOMEM;
  1293. if (likely(skb)) {
  1294. bnxt_deliver_skb(bp, bnapi, skb);
  1295. rc = 1;
  1296. }
  1297. *event |= BNXT_RX_EVENT;
  1298. goto next_rx_no_prod_no_len;
  1299. }
  1300. cons = rxcmp->rx_cmp_opaque;
  1301. rx_buf = &rxr->rx_buf_ring[cons];
  1302. data = rx_buf->data;
  1303. data_ptr = rx_buf->data_ptr;
  1304. if (unlikely(cons != rxr->rx_next_cons)) {
  1305. int rc1 = bnxt_discard_rx(bp, bnapi, raw_cons, rxcmp);
  1306. bnxt_sched_reset(bp, rxr);
  1307. return rc1;
  1308. }
  1309. prefetch(data_ptr);
  1310. misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
  1311. agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;
  1312. if (agg_bufs) {
  1313. if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
  1314. return -EBUSY;
  1315. cp_cons = NEXT_CMP(cp_cons);
  1316. *event |= BNXT_AGG_EVENT;
  1317. }
  1318. *event |= BNXT_RX_EVENT;
  1319. rx_buf->data = NULL;
  1320. if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
  1321. bnxt_reuse_rx_data(rxr, cons, data);
  1322. if (agg_bufs)
  1323. bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
  1324. rc = -EIO;
  1325. goto next_rx;
  1326. }
  1327. len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
  1328. dma_addr = rx_buf->mapping;
  1329. if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
  1330. rc = 1;
  1331. goto next_rx;
  1332. }
  1333. if (len <= bp->rx_copy_thresh) {
  1334. skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
  1335. bnxt_reuse_rx_data(rxr, cons, data);
  1336. if (!skb) {
  1337. rc = -ENOMEM;
  1338. goto next_rx;
  1339. }
  1340. } else {
  1341. u32 payload;
  1342. if (rx_buf->data_ptr == data_ptr)
  1343. payload = misc & RX_CMP_PAYLOAD_OFFSET;
  1344. else
  1345. payload = 0;
  1346. skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
  1347. payload | len);
  1348. if (!skb) {
  1349. rc = -ENOMEM;
  1350. goto next_rx;
  1351. }
  1352. }
  1353. if (agg_bufs) {
  1354. skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
  1355. if (!skb) {
  1356. rc = -ENOMEM;
  1357. goto next_rx;
  1358. }
  1359. }
  1360. if (RX_CMP_HASH_VALID(rxcmp)) {
  1361. u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
  1362. enum pkt_hash_types type = PKT_HASH_TYPE_L4;
  1363. /* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
  1364. if (hash_type != 1 && hash_type != 3)
  1365. type = PKT_HASH_TYPE_L3;
  1366. skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
  1367. }
  1368. cfa_code = RX_CMP_CFA_CODE(rxcmp1);
  1369. skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
  1370. if ((rxcmp1->rx_cmp_flags2 &
  1371. cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
  1372. (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
  1373. u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
  1374. u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
  1375. u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;
  1376. __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
  1377. }
  1378. skb_checksum_none_assert(skb);
  1379. if (RX_CMP_L4_CS_OK(rxcmp1)) {
  1380. if (dev->features & NETIF_F_RXCSUM) {
  1381. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1382. skb->csum_level = RX_CMP_ENCAP(rxcmp1);
  1383. }
  1384. } else {
  1385. if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
  1386. if (dev->features & NETIF_F_RXCSUM)
  1387. cpr->rx_l4_csum_errors++;
  1388. }
  1389. }
  1390. bnxt_deliver_skb(bp, bnapi, skb);
  1391. rc = 1;
  1392. next_rx:
  1393. rxr->rx_prod = NEXT_RX(prod);
  1394. rxr->rx_next_cons = NEXT_RX(cons);
  1395. cpr->rx_packets += 1;
  1396. cpr->rx_bytes += len;
  1397. next_rx_no_prod_no_len:
  1398. *raw_cons = tmp_raw_cons;
  1399. return rc;
  1400. }
  1401. /* In netpoll mode, if we are using a combined completion ring, we need to
  1402. * discard the rx packets and recycle the buffers.
  1403. */
  1404. static int bnxt_force_rx_discard(struct bnxt *bp, struct bnxt_napi *bnapi,
  1405. u32 *raw_cons, u8 *event)
  1406. {
  1407. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  1408. u32 tmp_raw_cons = *raw_cons;
  1409. struct rx_cmp_ext *rxcmp1;
  1410. struct rx_cmp *rxcmp;
  1411. u16 cp_cons;
  1412. u8 cmp_type;
  1413. cp_cons = RING_CMP(tmp_raw_cons);
  1414. rxcmp = (struct rx_cmp *)
  1415. &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
  1416. tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
  1417. cp_cons = RING_CMP(tmp_raw_cons);
  1418. rxcmp1 = (struct rx_cmp_ext *)
  1419. &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
  1420. if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
  1421. return -EBUSY;
  1422. cmp_type = RX_CMP_TYPE(rxcmp);
  1423. if (cmp_type == CMP_TYPE_RX_L2_CMP) {
  1424. rxcmp1->rx_cmp_cfa_code_errors_v2 |=
  1425. cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
  1426. } else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
  1427. struct rx_tpa_end_cmp_ext *tpa_end1;
  1428. tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
  1429. tpa_end1->rx_tpa_end_cmp_errors_v2 |=
  1430. cpu_to_le32(RX_TPA_END_CMP_ERRORS);
  1431. }
  1432. return bnxt_rx_pkt(bp, bnapi, raw_cons, event);
  1433. }
  1434. #define BNXT_GET_EVENT_PORT(data) \
  1435. ((data) & \
  1436. ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)
  1437. static int bnxt_async_event_process(struct bnxt *bp,
  1438. struct hwrm_async_event_cmpl *cmpl)
  1439. {
  1440. u16 event_id = le16_to_cpu(cmpl->event_id);
1441. /* TODO CHIMP_FW: Define event IDs for link change, error, etc. */
  1442. switch (event_id) {
  1443. case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
  1444. u32 data1 = le32_to_cpu(cmpl->event_data1);
  1445. struct bnxt_link_info *link_info = &bp->link_info;
  1446. if (BNXT_VF(bp))
  1447. goto async_event_process_exit;
  1448. /* print unsupported speed warning in forced speed mode only */
  1449. if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
  1450. (data1 & 0x20000)) {
  1451. u16 fw_speed = link_info->force_link_speed;
  1452. u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);
  1453. if (speed != SPEED_UNKNOWN)
  1454. netdev_warn(bp->dev, "Link speed %d no longer supported\n",
  1455. speed);
  1456. }
  1457. set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
1458. /* fall through */
  1459. }
  1460. case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
  1461. set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
  1462. break;
  1463. case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
  1464. set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
  1465. break;
  1466. case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
  1467. u32 data1 = le32_to_cpu(cmpl->event_data1);
  1468. u16 port_id = BNXT_GET_EVENT_PORT(data1);
  1469. if (BNXT_VF(bp))
  1470. break;
  1471. if (bp->pf.port_id != port_id)
  1472. break;
  1473. set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
  1474. break;
  1475. }
  1476. case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
  1477. if (BNXT_PF(bp))
  1478. goto async_event_process_exit;
  1479. set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
  1480. break;
  1481. default:
  1482. goto async_event_process_exit;
  1483. }
  1484. bnxt_queue_sp_work(bp);
  1485. async_event_process_exit:
  1486. bnxt_ulp_async_events(bp, cmpl);
  1487. return 0;
  1488. }
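/* Dispatch HWRM completions seen on the completion ring: command done
 * notifications, forwarded VF requests and asynchronous events.
 */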
  1489. static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
  1490. {
  1491. u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
  1492. struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
  1493. struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
  1494. (struct hwrm_fwd_req_cmpl *)txcmp;
  1495. switch (cmpl_type) {
  1496. case CMPL_BASE_TYPE_HWRM_DONE:
  1497. seq_id = le16_to_cpu(h_cmpl->sequence_id);
  1498. if (seq_id == bp->hwrm_intr_seq_id)
  1499. bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
  1500. else
  1501. netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
  1502. break;
  1503. case CMPL_BASE_TYPE_HWRM_FWD_REQ:
  1504. vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
  1505. if ((vf_id < bp->pf.first_vf_id) ||
  1506. (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
  1507. netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
  1508. vf_id);
  1509. return -EINVAL;
  1510. }
  1511. set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
  1512. set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
  1513. bnxt_queue_sp_work(bp);
  1514. break;
  1515. case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
  1516. bnxt_async_event_process(bp,
  1517. (struct hwrm_async_event_cmpl *)txcmp);
  1518. default:
  1519. break;
  1520. }
  1521. return 0;
  1522. }
  1523. static irqreturn_t bnxt_msix(int irq, void *dev_instance)
  1524. {
  1525. struct bnxt_napi *bnapi = dev_instance;
  1526. struct bnxt *bp = bnapi->bp;
  1527. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  1528. u32 cons = RING_CMP(cpr->cp_raw_cons);
  1529. cpr->event_ctr++;
  1530. prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
  1531. napi_schedule(&bnapi->napi);
  1532. return IRQ_HANDLED;
  1533. }
  1534. static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
  1535. {
  1536. u32 raw_cons = cpr->cp_raw_cons;
  1537. u16 cons = RING_CMP(raw_cons);
  1538. struct tx_cmp *txcmp;
  1539. txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
  1540. return TX_CMP_VALID(txcmp, raw_cons);
  1541. }
  1542. static irqreturn_t bnxt_inta(int irq, void *dev_instance)
  1543. {
  1544. struct bnxt_napi *bnapi = dev_instance;
  1545. struct bnxt *bp = bnapi->bp;
  1546. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  1547. u32 cons = RING_CMP(cpr->cp_raw_cons);
  1548. u32 int_status;
  1549. prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
  1550. if (!bnxt_has_work(bp, cpr)) {
  1551. int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
  1552. /* return if erroneous interrupt */
  1553. if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
  1554. return IRQ_NONE;
  1555. }
  1556. /* disable ring IRQ */
  1557. BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
  1558. /* Return here if interrupt is shared and is disabled. */
  1559. if (unlikely(atomic_read(&bp->intr_sem) != 0))
  1560. return IRQ_HANDLED;
  1561. napi_schedule(&bnapi->napi);
  1562. return IRQ_HANDLED;
  1563. }
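/* Core NAPI work loop: walk the completion ring up to @budget,
 * handling TX completions, RX packets and HWRM events, then ring the
 * doorbells for the completion, TX and RX rings.
 */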
  1564. static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
  1565. {
  1566. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  1567. u32 raw_cons = cpr->cp_raw_cons;
  1568. u32 cons;
  1569. int tx_pkts = 0;
  1570. int rx_pkts = 0;
  1571. u8 event = 0;
  1572. struct tx_cmp *txcmp;
  1573. while (1) {
  1574. int rc;
  1575. cons = RING_CMP(raw_cons);
  1576. txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
  1577. if (!TX_CMP_VALID(txcmp, raw_cons))
  1578. break;
  1579. /* The valid test of the entry must be done first before
  1580. * reading any further.
  1581. */
  1582. dma_rmb();
  1583. if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
  1584. tx_pkts++;
  1585. /* return full budget so NAPI will complete. */
  1586. if (unlikely(tx_pkts > bp->tx_wake_thresh))
  1587. rx_pkts = budget;
  1588. } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
  1589. if (likely(budget))
  1590. rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
  1591. else
  1592. rc = bnxt_force_rx_discard(bp, bnapi, &raw_cons,
  1593. &event);
  1594. if (likely(rc >= 0))
  1595. rx_pkts += rc;
  1596. /* Increment rx_pkts when rc is -ENOMEM to count towards
  1597. * the NAPI budget. Otherwise, we may potentially loop
  1598. * here forever if we consistently cannot allocate
  1599. * buffers.
  1600. */
  1601. else if (rc == -ENOMEM && budget)
  1602. rx_pkts++;
  1603. else if (rc == -EBUSY) /* partial completion */
  1604. break;
  1605. } else if (unlikely((TX_CMP_TYPE(txcmp) ==
  1606. CMPL_BASE_TYPE_HWRM_DONE) ||
  1607. (TX_CMP_TYPE(txcmp) ==
  1608. CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
  1609. (TX_CMP_TYPE(txcmp) ==
  1610. CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
  1611. bnxt_hwrm_handler(bp, txcmp);
  1612. }
  1613. raw_cons = NEXT_RAW_CMP(raw_cons);
  1614. if (rx_pkts == budget)
  1615. break;
  1616. }
  1617. if (event & BNXT_TX_EVENT) {
  1618. struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
  1619. void __iomem *db = txr->tx_doorbell;
  1620. u16 prod = txr->tx_prod;
  1621. /* Sync BD data before updating doorbell */
  1622. wmb();
  1623. bnxt_db_write(bp, db, DB_KEY_TX | prod);
  1624. }
  1625. cpr->cp_raw_cons = raw_cons;
  1626. /* ACK completion ring before freeing tx ring and producing new
  1627. * buffers in rx/agg rings to prevent overflowing the completion
  1628. * ring.
  1629. */
  1630. BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
  1631. if (tx_pkts)
  1632. bnapi->tx_int(bp, bnapi, tx_pkts);
  1633. if (event & BNXT_RX_EVENT) {
  1634. struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
  1635. bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
  1636. if (event & BNXT_AGG_EVENT)
  1637. bnxt_db_write(bp, rxr->rx_agg_doorbell,
  1638. DB_KEY_RX | rxr->rx_agg_prod);
  1639. }
  1640. return rx_pkts;
  1641. }
  1642. static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
  1643. {
  1644. struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
  1645. struct bnxt *bp = bnapi->bp;
  1646. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  1647. struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
  1648. struct tx_cmp *txcmp;
  1649. struct rx_cmp_ext *rxcmp1;
  1650. u32 cp_cons, tmp_raw_cons;
  1651. u32 raw_cons = cpr->cp_raw_cons;
  1652. u32 rx_pkts = 0;
  1653. u8 event = 0;
  1654. while (1) {
  1655. int rc;
  1656. cp_cons = RING_CMP(raw_cons);
  1657. txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
  1658. if (!TX_CMP_VALID(txcmp, raw_cons))
  1659. break;
  1660. if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
  1661. tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
  1662. cp_cons = RING_CMP(tmp_raw_cons);
  1663. rxcmp1 = (struct rx_cmp_ext *)
  1664. &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
  1665. if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
  1666. break;
  1667. /* force an error to recycle the buffer */
  1668. rxcmp1->rx_cmp_cfa_code_errors_v2 |=
  1669. cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
  1670. rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
  1671. if (likely(rc == -EIO) && budget)
  1672. rx_pkts++;
  1673. else if (rc == -EBUSY) /* partial completion */
  1674. break;
  1675. } else if (unlikely(TX_CMP_TYPE(txcmp) ==
  1676. CMPL_BASE_TYPE_HWRM_DONE)) {
  1677. bnxt_hwrm_handler(bp, txcmp);
  1678. } else {
  1679. netdev_err(bp->dev,
  1680. "Invalid completion received on special ring\n");
  1681. }
  1682. raw_cons = NEXT_RAW_CMP(raw_cons);
  1683. if (rx_pkts == budget)
  1684. break;
  1685. }
  1686. cpr->cp_raw_cons = raw_cons;
  1687. BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
  1688. bnxt_db_write(bp, rxr->rx_doorbell, DB_KEY_RX | rxr->rx_prod);
  1689. if (event & BNXT_AGG_EVENT)
  1690. bnxt_db_write(bp, rxr->rx_agg_doorbell,
  1691. DB_KEY_RX | rxr->rx_agg_prod);
  1692. if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
  1693. napi_complete_done(napi, rx_pkts);
  1694. BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
  1695. }
  1696. return rx_pkts;
  1697. }
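/* Main NAPI poll handler: run bnxt_poll_work() until the budget is
 * exhausted or no work remains, then re-arm the completion ring
 * doorbell and feed the dynamic interrupt moderation (DIM) sampler.
 */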
  1698. static int bnxt_poll(struct napi_struct *napi, int budget)
  1699. {
  1700. struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
  1701. struct bnxt *bp = bnapi->bp;
  1702. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  1703. int work_done = 0;
  1704. while (1) {
  1705. work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
  1706. if (work_done >= budget)
  1707. break;
  1708. if (!bnxt_has_work(bp, cpr)) {
  1709. if (napi_complete_done(napi, work_done))
  1710. BNXT_CP_DB_REARM(cpr->cp_doorbell,
  1711. cpr->cp_raw_cons);
  1712. break;
  1713. }
  1714. }
  1715. if (bp->flags & BNXT_FLAG_DIM) {
  1716. struct net_dim_sample dim_sample;
  1717. net_dim_sample(cpr->event_ctr,
  1718. cpr->rx_packets,
  1719. cpr->rx_bytes,
  1720. &dim_sample);
  1721. net_dim(&cpr->dim, dim_sample);
  1722. }
  1723. mmiowb();
  1724. return work_done;
  1725. }
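/* Unmap and free every skb still sitting on the TX rings, typically
 * during ring teardown or reset.
 */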
  1726. static void bnxt_free_tx_skbs(struct bnxt *bp)
  1727. {
  1728. int i, max_idx;
  1729. struct pci_dev *pdev = bp->pdev;
  1730. if (!bp->tx_ring)
  1731. return;
  1732. max_idx = bp->tx_nr_pages * TX_DESC_CNT;
  1733. for (i = 0; i < bp->tx_nr_rings; i++) {
  1734. struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
  1735. int j;
  1736. for (j = 0; j < max_idx;) {
  1737. struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
  1738. struct sk_buff *skb = tx_buf->skb;
  1739. int k, last;
  1740. if (!skb) {
  1741. j++;
  1742. continue;
  1743. }
  1744. tx_buf->skb = NULL;
  1745. if (tx_buf->is_push) {
  1746. dev_kfree_skb(skb);
  1747. j += 2;
  1748. continue;
  1749. }
  1750. dma_unmap_single(&pdev->dev,
  1751. dma_unmap_addr(tx_buf, mapping),
  1752. skb_headlen(skb),
  1753. PCI_DMA_TODEVICE);
  1754. last = tx_buf->nr_frags;
  1755. j += 2;
  1756. for (k = 0; k < last; k++, j++) {
  1757. int ring_idx = j & bp->tx_ring_mask;
  1758. skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
  1759. tx_buf = &txr->tx_buf_ring[ring_idx];
  1760. dma_unmap_page(
  1761. &pdev->dev,
  1762. dma_unmap_addr(tx_buf, mapping),
  1763. skb_frag_size(frag), PCI_DMA_TODEVICE);
  1764. }
  1765. dev_kfree_skb(skb);
  1766. }
  1767. netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
  1768. }
  1769. }
  1770. static void bnxt_free_rx_skbs(struct bnxt *bp)
  1771. {
  1772. int i, max_idx, max_agg_idx;
  1773. struct pci_dev *pdev = bp->pdev;
  1774. if (!bp->rx_ring)
  1775. return;
  1776. max_idx = bp->rx_nr_pages * RX_DESC_CNT;
  1777. max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
  1778. for (i = 0; i < bp->rx_nr_rings; i++) {
  1779. struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
  1780. int j;
  1781. if (rxr->rx_tpa) {
  1782. for (j = 0; j < MAX_TPA; j++) {
  1783. struct bnxt_tpa_info *tpa_info =
  1784. &rxr->rx_tpa[j];
  1785. u8 *data = tpa_info->data;
  1786. if (!data)
  1787. continue;
  1788. dma_unmap_single_attrs(&pdev->dev,
  1789. tpa_info->mapping,
  1790. bp->rx_buf_use_size,
  1791. bp->rx_dir,
  1792. DMA_ATTR_WEAK_ORDERING);
  1793. tpa_info->data = NULL;
  1794. kfree(data);
  1795. }
  1796. }
  1797. for (j = 0; j < max_idx; j++) {
  1798. struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
  1799. dma_addr_t mapping = rx_buf->mapping;
  1800. void *data = rx_buf->data;
  1801. if (!data)
  1802. continue;
  1803. rx_buf->data = NULL;
  1804. if (BNXT_RX_PAGE_MODE(bp)) {
  1805. mapping -= bp->rx_dma_offset;
  1806. dma_unmap_page_attrs(&pdev->dev, mapping,
  1807. PAGE_SIZE, bp->rx_dir,
  1808. DMA_ATTR_WEAK_ORDERING);
  1809. __free_page(data);
  1810. } else {
  1811. dma_unmap_single_attrs(&pdev->dev, mapping,
  1812. bp->rx_buf_use_size,
  1813. bp->rx_dir,
  1814. DMA_ATTR_WEAK_ORDERING);
  1815. kfree(data);
  1816. }
  1817. }
  1818. for (j = 0; j < max_agg_idx; j++) {
  1819. struct bnxt_sw_rx_agg_bd *rx_agg_buf =
  1820. &rxr->rx_agg_ring[j];
  1821. struct page *page = rx_agg_buf->page;
  1822. if (!page)
  1823. continue;
  1824. dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
  1825. BNXT_RX_PAGE_SIZE,
  1826. PCI_DMA_FROMDEVICE,
  1827. DMA_ATTR_WEAK_ORDERING);
  1828. rx_agg_buf->page = NULL;
  1829. __clear_bit(j, rxr->rx_agg_bmap);
  1830. __free_page(page);
  1831. }
  1832. if (rxr->rx_page) {
  1833. __free_page(rxr->rx_page);
  1834. rxr->rx_page = NULL;
  1835. }
  1836. }
  1837. }
  1838. static void bnxt_free_skbs(struct bnxt *bp)
  1839. {
  1840. bnxt_free_tx_skbs(bp);
  1841. bnxt_free_rx_skbs(bp);
  1842. }
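/* Free the DMA-coherent descriptor pages, the optional page table and
 * the software vmem area of a generic ring.
 */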
  1843. static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
  1844. {
  1845. struct pci_dev *pdev = bp->pdev;
  1846. int i;
  1847. for (i = 0; i < ring->nr_pages; i++) {
  1848. if (!ring->pg_arr[i])
  1849. continue;
  1850. dma_free_coherent(&pdev->dev, ring->page_size,
  1851. ring->pg_arr[i], ring->dma_arr[i]);
  1852. ring->pg_arr[i] = NULL;
  1853. }
  1854. if (ring->pg_tbl) {
  1855. dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
  1856. ring->pg_tbl, ring->pg_tbl_map);
  1857. ring->pg_tbl = NULL;
  1858. }
  1859. if (ring->vmem_size && *ring->vmem) {
  1860. vfree(*ring->vmem);
  1861. *ring->vmem = NULL;
  1862. }
  1863. }
  1864. static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
  1865. {
  1866. int i;
  1867. struct pci_dev *pdev = bp->pdev;
  1868. if (ring->nr_pages > 1) {
  1869. ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
  1870. ring->nr_pages * 8,
  1871. &ring->pg_tbl_map,
  1872. GFP_KERNEL);
  1873. if (!ring->pg_tbl)
  1874. return -ENOMEM;
  1875. }
  1876. for (i = 0; i < ring->nr_pages; i++) {
  1877. ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
  1878. ring->page_size,
  1879. &ring->dma_arr[i],
  1880. GFP_KERNEL);
  1881. if (!ring->pg_arr[i])
  1882. return -ENOMEM;
  1883. if (ring->nr_pages > 1)
  1884. ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
  1885. }
  1886. if (ring->vmem_size) {
  1887. *ring->vmem = vzalloc(ring->vmem_size);
  1888. if (!(*ring->vmem))
  1889. return -ENOMEM;
  1890. }
  1891. return 0;
  1892. }
  1893. static void bnxt_free_rx_rings(struct bnxt *bp)
  1894. {
  1895. int i;
  1896. if (!bp->rx_ring)
  1897. return;
  1898. for (i = 0; i < bp->rx_nr_rings; i++) {
  1899. struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
  1900. struct bnxt_ring_struct *ring;
  1901. if (rxr->xdp_prog)
  1902. bpf_prog_put(rxr->xdp_prog);
  1903. if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
  1904. xdp_rxq_info_unreg(&rxr->xdp_rxq);
  1905. kfree(rxr->rx_tpa);
  1906. rxr->rx_tpa = NULL;
  1907. kfree(rxr->rx_agg_bmap);
  1908. rxr->rx_agg_bmap = NULL;
  1909. ring = &rxr->rx_ring_struct;
  1910. bnxt_free_ring(bp, ring);
  1911. ring = &rxr->rx_agg_ring_struct;
  1912. bnxt_free_ring(bp, ring);
  1913. }
  1914. }
  1915. static int bnxt_alloc_rx_rings(struct bnxt *bp)
  1916. {
  1917. int i, rc, agg_rings = 0, tpa_rings = 0;
  1918. if (!bp->rx_ring)
  1919. return -ENOMEM;
  1920. if (bp->flags & BNXT_FLAG_AGG_RINGS)
  1921. agg_rings = 1;
  1922. if (bp->flags & BNXT_FLAG_TPA)
  1923. tpa_rings = 1;
  1924. for (i = 0; i < bp->rx_nr_rings; i++) {
  1925. struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
  1926. struct bnxt_ring_struct *ring;
  1927. ring = &rxr->rx_ring_struct;
  1928. rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
  1929. if (rc < 0)
  1930. return rc;
  1931. rc = bnxt_alloc_ring(bp, ring);
  1932. if (rc)
  1933. return rc;
  1934. if (agg_rings) {
  1935. u16 mem_size;
  1936. ring = &rxr->rx_agg_ring_struct;
  1937. rc = bnxt_alloc_ring(bp, ring);
  1938. if (rc)
  1939. return rc;
  1940. rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
  1941. mem_size = rxr->rx_agg_bmap_size / 8;
  1942. rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
  1943. if (!rxr->rx_agg_bmap)
  1944. return -ENOMEM;
  1945. if (tpa_rings) {
  1946. rxr->rx_tpa = kcalloc(MAX_TPA,
  1947. sizeof(struct bnxt_tpa_info),
  1948. GFP_KERNEL);
  1949. if (!rxr->rx_tpa)
  1950. return -ENOMEM;
  1951. }
  1952. }
  1953. }
  1954. return 0;
  1955. }
  1956. static void bnxt_free_tx_rings(struct bnxt *bp)
  1957. {
  1958. int i;
  1959. struct pci_dev *pdev = bp->pdev;
  1960. if (!bp->tx_ring)
  1961. return;
  1962. for (i = 0; i < bp->tx_nr_rings; i++) {
  1963. struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
  1964. struct bnxt_ring_struct *ring;
  1965. if (txr->tx_push) {
  1966. dma_free_coherent(&pdev->dev, bp->tx_push_size,
  1967. txr->tx_push, txr->tx_push_mapping);
  1968. txr->tx_push = NULL;
  1969. }
  1970. ring = &txr->tx_ring_struct;
  1971. bnxt_free_ring(bp, ring);
  1972. }
  1973. }
  1974. static int bnxt_alloc_tx_rings(struct bnxt *bp)
  1975. {
  1976. int i, j, rc;
  1977. struct pci_dev *pdev = bp->pdev;
  1978. bp->tx_push_size = 0;
  1979. if (bp->tx_push_thresh) {
  1980. int push_size;
  1981. push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
  1982. bp->tx_push_thresh);
  1983. if (push_size > 256) {
  1984. push_size = 0;
  1985. bp->tx_push_thresh = 0;
  1986. }
  1987. bp->tx_push_size = push_size;
  1988. }
  1989. for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
  1990. struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
  1991. struct bnxt_ring_struct *ring;
  1992. ring = &txr->tx_ring_struct;
  1993. rc = bnxt_alloc_ring(bp, ring);
  1994. if (rc)
  1995. return rc;
  1996. if (bp->tx_push_size) {
  1997. dma_addr_t mapping;
1998. /* One pre-allocated DMA buffer to back up the
  1999. * TX push operation
  2000. */
  2001. txr->tx_push = dma_alloc_coherent(&pdev->dev,
  2002. bp->tx_push_size,
  2003. &txr->tx_push_mapping,
  2004. GFP_KERNEL);
  2005. if (!txr->tx_push)
  2006. return -ENOMEM;
  2007. mapping = txr->tx_push_mapping +
  2008. sizeof(struct tx_push_bd);
  2009. txr->data_mapping = cpu_to_le64(mapping);
  2010. memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
  2011. }
  2012. ring->queue_id = bp->q_info[j].queue_id;
  2013. if (i < bp->tx_nr_rings_xdp)
  2014. continue;
  2015. if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
  2016. j++;
  2017. }
  2018. return 0;
  2019. }
  2020. static void bnxt_free_cp_rings(struct bnxt *bp)
  2021. {
  2022. int i;
  2023. if (!bp->bnapi)
  2024. return;
  2025. for (i = 0; i < bp->cp_nr_rings; i++) {
  2026. struct bnxt_napi *bnapi = bp->bnapi[i];
  2027. struct bnxt_cp_ring_info *cpr;
  2028. struct bnxt_ring_struct *ring;
  2029. if (!bnapi)
  2030. continue;
  2031. cpr = &bnapi->cp_ring;
  2032. ring = &cpr->cp_ring_struct;
  2033. bnxt_free_ring(bp, ring);
  2034. }
  2035. }
  2036. static int bnxt_alloc_cp_rings(struct bnxt *bp)
  2037. {
  2038. int i, rc;
  2039. for (i = 0; i < bp->cp_nr_rings; i++) {
  2040. struct bnxt_napi *bnapi = bp->bnapi[i];
  2041. struct bnxt_cp_ring_info *cpr;
  2042. struct bnxt_ring_struct *ring;
  2043. if (!bnapi)
  2044. continue;
  2045. cpr = &bnapi->cp_ring;
  2046. ring = &cpr->cp_ring_struct;
  2047. rc = bnxt_alloc_ring(bp, ring);
  2048. if (rc)
  2049. return rc;
  2050. }
  2051. return 0;
  2052. }
  2053. static void bnxt_init_ring_struct(struct bnxt *bp)
  2054. {
  2055. int i;
  2056. for (i = 0; i < bp->cp_nr_rings; i++) {
  2057. struct bnxt_napi *bnapi = bp->bnapi[i];
  2058. struct bnxt_cp_ring_info *cpr;
  2059. struct bnxt_rx_ring_info *rxr;
  2060. struct bnxt_tx_ring_info *txr;
  2061. struct bnxt_ring_struct *ring;
  2062. if (!bnapi)
  2063. continue;
  2064. cpr = &bnapi->cp_ring;
  2065. ring = &cpr->cp_ring_struct;
  2066. ring->nr_pages = bp->cp_nr_pages;
  2067. ring->page_size = HW_CMPD_RING_SIZE;
  2068. ring->pg_arr = (void **)cpr->cp_desc_ring;
  2069. ring->dma_arr = cpr->cp_desc_mapping;
  2070. ring->vmem_size = 0;
  2071. rxr = bnapi->rx_ring;
  2072. if (!rxr)
  2073. goto skip_rx;
  2074. ring = &rxr->rx_ring_struct;
  2075. ring->nr_pages = bp->rx_nr_pages;
  2076. ring->page_size = HW_RXBD_RING_SIZE;
  2077. ring->pg_arr = (void **)rxr->rx_desc_ring;
  2078. ring->dma_arr = rxr->rx_desc_mapping;
  2079. ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
  2080. ring->vmem = (void **)&rxr->rx_buf_ring;
  2081. ring = &rxr->rx_agg_ring_struct;
  2082. ring->nr_pages = bp->rx_agg_nr_pages;
  2083. ring->page_size = HW_RXBD_RING_SIZE;
  2084. ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
  2085. ring->dma_arr = rxr->rx_agg_desc_mapping;
  2086. ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
  2087. ring->vmem = (void **)&rxr->rx_agg_ring;
  2088. skip_rx:
  2089. txr = bnapi->tx_ring;
  2090. if (!txr)
  2091. continue;
  2092. ring = &txr->tx_ring_struct;
  2093. ring->nr_pages = bp->tx_nr_pages;
  2094. ring->page_size = HW_RXBD_RING_SIZE;
  2095. ring->pg_arr = (void **)txr->tx_desc_ring;
  2096. ring->dma_arr = txr->tx_desc_mapping;
  2097. ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
  2098. ring->vmem = (void **)&txr->tx_buf_ring;
  2099. }
  2100. }
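/* Initialize the static fields (length/flags/type and opaque index) of
 * every RX BD in the ring's descriptor pages.
 */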
  2101. static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
  2102. {
  2103. int i;
  2104. u32 prod;
  2105. struct rx_bd **rx_buf_ring;
  2106. rx_buf_ring = (struct rx_bd **)ring->pg_arr;
  2107. for (i = 0, prod = 0; i < ring->nr_pages; i++) {
  2108. int j;
  2109. struct rx_bd *rxbd;
  2110. rxbd = rx_buf_ring[i];
  2111. if (!rxbd)
  2112. continue;
  2113. for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
  2114. rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
  2115. rxbd->rx_bd_opaque = prod;
  2116. }
  2117. }
  2118. }
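/* Initialize one RX ring: program the BD templates, attach the XDP
 * program if any, and pre-fill the RX, aggregation and TPA buffers.
 */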
  2119. static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
  2120. {
  2121. struct net_device *dev = bp->dev;
  2122. struct bnxt_rx_ring_info *rxr;
  2123. struct bnxt_ring_struct *ring;
  2124. u32 prod, type;
  2125. int i;
  2126. type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
  2127. RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
  2128. if (NET_IP_ALIGN == 2)
  2129. type |= RX_BD_FLAGS_SOP;
  2130. rxr = &bp->rx_ring[ring_nr];
  2131. ring = &rxr->rx_ring_struct;
  2132. bnxt_init_rxbd_pages(ring, type);
  2133. if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
  2134. rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
  2135. if (IS_ERR(rxr->xdp_prog)) {
  2136. int rc = PTR_ERR(rxr->xdp_prog);
  2137. rxr->xdp_prog = NULL;
  2138. return rc;
  2139. }
  2140. }
  2141. prod = rxr->rx_prod;
  2142. for (i = 0; i < bp->rx_ring_size; i++) {
  2143. if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
  2144. netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
  2145. ring_nr, i, bp->rx_ring_size);
  2146. break;
  2147. }
  2148. prod = NEXT_RX(prod);
  2149. }
  2150. rxr->rx_prod = prod;
  2151. ring->fw_ring_id = INVALID_HW_RING_ID;
  2152. ring = &rxr->rx_agg_ring_struct;
  2153. ring->fw_ring_id = INVALID_HW_RING_ID;
  2154. if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
  2155. return 0;
  2156. type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
  2157. RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
  2158. bnxt_init_rxbd_pages(ring, type);
  2159. prod = rxr->rx_agg_prod;
  2160. for (i = 0; i < bp->rx_agg_ring_size; i++) {
  2161. if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
  2162. netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
  2163. ring_nr, i, bp->rx_ring_size);
  2164. break;
  2165. }
  2166. prod = NEXT_RX_AGG(prod);
  2167. }
  2168. rxr->rx_agg_prod = prod;
  2169. if (bp->flags & BNXT_FLAG_TPA) {
  2170. if (rxr->rx_tpa) {
  2171. u8 *data;
  2172. dma_addr_t mapping;
  2173. for (i = 0; i < MAX_TPA; i++) {
  2174. data = __bnxt_alloc_rx_data(bp, &mapping,
  2175. GFP_KERNEL);
  2176. if (!data)
  2177. return -ENOMEM;
  2178. rxr->rx_tpa[i].data = data;
  2179. rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
  2180. rxr->rx_tpa[i].mapping = mapping;
  2181. }
  2182. } else {
  2183. netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
  2184. return -ENOMEM;
  2185. }
  2186. }
  2187. return 0;
  2188. }
  2189. static void bnxt_init_cp_rings(struct bnxt *bp)
  2190. {
  2191. int i;
  2192. for (i = 0; i < bp->cp_nr_rings; i++) {
  2193. struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
  2194. struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
  2195. ring->fw_ring_id = INVALID_HW_RING_ID;
  2196. cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
  2197. cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
  2198. }
  2199. }
  2200. static int bnxt_init_rx_rings(struct bnxt *bp)
  2201. {
  2202. int i, rc = 0;
  2203. if (BNXT_RX_PAGE_MODE(bp)) {
  2204. bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
  2205. bp->rx_dma_offset = XDP_PACKET_HEADROOM;
  2206. } else {
  2207. bp->rx_offset = BNXT_RX_OFFSET;
  2208. bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
  2209. }
  2210. for (i = 0; i < bp->rx_nr_rings; i++) {
  2211. rc = bnxt_init_one_rx_ring(bp, i);
  2212. if (rc)
  2213. break;
  2214. }
  2215. return rc;
  2216. }
  2217. static int bnxt_init_tx_rings(struct bnxt *bp)
  2218. {
  2219. u16 i;
  2220. bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
  2221. MAX_SKB_FRAGS + 1);
  2222. for (i = 0; i < bp->tx_nr_rings; i++) {
  2223. struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
  2224. struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
  2225. ring->fw_ring_id = INVALID_HW_RING_ID;
  2226. }
  2227. return 0;
  2228. }
  2229. static void bnxt_free_ring_grps(struct bnxt *bp)
  2230. {
  2231. kfree(bp->grp_info);
  2232. bp->grp_info = NULL;
  2233. }
  2234. static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
  2235. {
  2236. int i;
  2237. if (irq_re_init) {
  2238. bp->grp_info = kcalloc(bp->cp_nr_rings,
  2239. sizeof(struct bnxt_ring_grp_info),
  2240. GFP_KERNEL);
  2241. if (!bp->grp_info)
  2242. return -ENOMEM;
  2243. }
  2244. for (i = 0; i < bp->cp_nr_rings; i++) {
  2245. if (irq_re_init)
  2246. bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
  2247. bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
  2248. bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
  2249. bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
  2250. bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
  2251. }
  2252. return 0;
  2253. }
  2254. static void bnxt_free_vnics(struct bnxt *bp)
  2255. {
  2256. kfree(bp->vnic_info);
  2257. bp->vnic_info = NULL;
  2258. bp->nr_vnics = 0;
  2259. }
  2260. static int bnxt_alloc_vnics(struct bnxt *bp)
  2261. {
  2262. int num_vnics = 1;
  2263. #ifdef CONFIG_RFS_ACCEL
  2264. if (bp->flags & BNXT_FLAG_RFS)
  2265. num_vnics += bp->rx_nr_rings;
  2266. #endif
  2267. if (BNXT_CHIP_TYPE_NITRO_A0(bp))
  2268. num_vnics++;
  2269. bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
  2270. GFP_KERNEL);
  2271. if (!bp->vnic_info)
  2272. return -ENOMEM;
  2273. bp->nr_vnics = num_vnics;
  2274. return 0;
  2275. }
  2276. static void bnxt_init_vnics(struct bnxt *bp)
  2277. {
  2278. int i;
  2279. for (i = 0; i < bp->nr_vnics; i++) {
  2280. struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
  2281. vnic->fw_vnic_id = INVALID_HW_RING_ID;
  2282. vnic->fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
  2283. vnic->fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
  2284. vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
  2285. if (bp->vnic_info[i].rss_hash_key) {
  2286. if (i == 0)
  2287. prandom_bytes(vnic->rss_hash_key,
  2288. HW_HASH_KEY_SIZE);
  2289. else
  2290. memcpy(vnic->rss_hash_key,
  2291. bp->vnic_info[0].rss_hash_key,
  2292. HW_HASH_KEY_SIZE);
  2293. }
  2294. }
  2295. }
  2296. static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
  2297. {
  2298. int pages;
  2299. pages = ring_size / desc_per_pg;
  2300. if (!pages)
  2301. return 1;
  2302. pages++;
  2303. while (pages & (pages - 1))
  2304. pages++;
  2305. return pages;
  2306. }
  2307. void bnxt_set_tpa_flags(struct bnxt *bp)
  2308. {
  2309. bp->flags &= ~BNXT_FLAG_TPA;
  2310. if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
  2311. return;
  2312. if (bp->dev->features & NETIF_F_LRO)
  2313. bp->flags |= BNXT_FLAG_LRO;
  2314. else if (bp->dev->features & NETIF_F_GRO_HW)
  2315. bp->flags |= BNXT_FLAG_GRO;
  2316. }
  2317. /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
  2318. * be set on entry.
  2319. */
  2320. void bnxt_set_ring_params(struct bnxt *bp)
  2321. {
  2322. u32 ring_size, rx_size, rx_space;
  2323. u32 agg_factor = 0, agg_ring_size = 0;
  2324. /* 8 for CRC and VLAN */
  2325. rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
  2326. rx_space = rx_size + NET_SKB_PAD +
  2327. SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
  2328. bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
  2329. ring_size = bp->rx_ring_size;
  2330. bp->rx_agg_ring_size = 0;
  2331. bp->rx_agg_nr_pages = 0;
  2332. if (bp->flags & BNXT_FLAG_TPA)
  2333. agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
  2334. bp->flags &= ~BNXT_FLAG_JUMBO;
  2335. if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
  2336. u32 jumbo_factor;
  2337. bp->flags |= BNXT_FLAG_JUMBO;
  2338. jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
  2339. if (jumbo_factor > agg_factor)
  2340. agg_factor = jumbo_factor;
  2341. }
  2342. agg_ring_size = ring_size * agg_factor;
  2343. if (agg_ring_size) {
  2344. bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
  2345. RX_DESC_CNT);
  2346. if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
  2347. u32 tmp = agg_ring_size;
  2348. bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
  2349. agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
  2350. netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
  2351. tmp, agg_ring_size);
  2352. }
  2353. bp->rx_agg_ring_size = agg_ring_size;
  2354. bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
  2355. rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
  2356. rx_space = rx_size + NET_SKB_PAD +
  2357. SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
  2358. }
  2359. bp->rx_buf_use_size = rx_size;
  2360. bp->rx_buf_size = rx_space;
  2361. bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
  2362. bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
  2363. ring_size = bp->tx_ring_size;
  2364. bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
  2365. bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
  2366. ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
  2367. bp->cp_ring_size = ring_size;
  2368. bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
  2369. if (bp->cp_nr_pages > MAX_CP_PAGES) {
  2370. bp->cp_nr_pages = MAX_CP_PAGES;
  2371. bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
  2372. netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
  2373. ring_size, bp->cp_ring_size);
  2374. }
  2375. bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
  2376. bp->cp_ring_mask = bp->cp_bit - 1;
  2377. }
  2378. /* Changing allocation mode of RX rings.
  2379. * TODO: Update when extending xdp_rxq_info to support allocation modes.
  2380. */
  2381. int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
  2382. {
  2383. if (page_mode) {
  2384. if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
  2385. return -EOPNOTSUPP;
  2386. bp->dev->max_mtu =
  2387. min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
  2388. bp->flags &= ~BNXT_FLAG_AGG_RINGS;
  2389. bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
  2390. bp->rx_dir = DMA_BIDIRECTIONAL;
  2391. bp->rx_skb_func = bnxt_rx_page_skb;
  2392. /* Disable LRO or GRO_HW */
  2393. netdev_update_features(bp->dev);
  2394. } else {
  2395. bp->dev->max_mtu = bp->max_mtu;
  2396. bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
  2397. bp->rx_dir = DMA_FROM_DEVICE;
  2398. bp->rx_skb_func = bnxt_rx_skb;
  2399. }
  2400. return 0;
  2401. }
  2402. static void bnxt_free_vnic_attributes(struct bnxt *bp)
  2403. {
  2404. int i;
  2405. struct bnxt_vnic_info *vnic;
  2406. struct pci_dev *pdev = bp->pdev;
  2407. if (!bp->vnic_info)
  2408. return;
  2409. for (i = 0; i < bp->nr_vnics; i++) {
  2410. vnic = &bp->vnic_info[i];
  2411. kfree(vnic->fw_grp_ids);
  2412. vnic->fw_grp_ids = NULL;
  2413. kfree(vnic->uc_list);
  2414. vnic->uc_list = NULL;
  2415. if (vnic->mc_list) {
  2416. dma_free_coherent(&pdev->dev, vnic->mc_list_size,
  2417. vnic->mc_list, vnic->mc_list_mapping);
  2418. vnic->mc_list = NULL;
  2419. }
  2420. if (vnic->rss_table) {
  2421. dma_free_coherent(&pdev->dev, PAGE_SIZE,
  2422. vnic->rss_table,
  2423. vnic->rss_table_dma_addr);
  2424. vnic->rss_table = NULL;
  2425. }
  2426. vnic->rss_hash_key = NULL;
  2427. vnic->flags = 0;
  2428. }
  2429. }
  2430. static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
  2431. {
  2432. int i, rc = 0, size;
  2433. struct bnxt_vnic_info *vnic;
  2434. struct pci_dev *pdev = bp->pdev;
  2435. int max_rings;
  2436. for (i = 0; i < bp->nr_vnics; i++) {
  2437. vnic = &bp->vnic_info[i];
  2438. if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
  2439. int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
  2440. if (mem_size > 0) {
  2441. vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
  2442. if (!vnic->uc_list) {
  2443. rc = -ENOMEM;
  2444. goto out;
  2445. }
  2446. }
  2447. }
  2448. if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
  2449. vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
  2450. vnic->mc_list =
  2451. dma_alloc_coherent(&pdev->dev,
  2452. vnic->mc_list_size,
  2453. &vnic->mc_list_mapping,
  2454. GFP_KERNEL);
  2455. if (!vnic->mc_list) {
  2456. rc = -ENOMEM;
  2457. goto out;
  2458. }
  2459. }
  2460. if (vnic->flags & BNXT_VNIC_RSS_FLAG)
  2461. max_rings = bp->rx_nr_rings;
  2462. else
  2463. max_rings = 1;
  2464. vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
  2465. if (!vnic->fw_grp_ids) {
  2466. rc = -ENOMEM;
  2467. goto out;
  2468. }
  2469. if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
  2470. !(vnic->flags & BNXT_VNIC_RSS_FLAG))
  2471. continue;
  2472. /* Allocate rss table and hash key */
  2473. vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
  2474. &vnic->rss_table_dma_addr,
  2475. GFP_KERNEL);
  2476. if (!vnic->rss_table) {
  2477. rc = -ENOMEM;
  2478. goto out;
  2479. }
  2480. size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
  2481. vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
  2482. vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
  2483. }
  2484. return 0;
  2485. out:
  2486. return rc;
  2487. }
  2488. static void bnxt_free_hwrm_resources(struct bnxt *bp)
  2489. {
  2490. struct pci_dev *pdev = bp->pdev;
  2491. dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
  2492. bp->hwrm_cmd_resp_dma_addr);
  2493. bp->hwrm_cmd_resp_addr = NULL;
  2494. if (bp->hwrm_dbg_resp_addr) {
  2495. dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
  2496. bp->hwrm_dbg_resp_addr,
  2497. bp->hwrm_dbg_resp_dma_addr);
  2498. bp->hwrm_dbg_resp_addr = NULL;
  2499. }
  2500. }
  2501. static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
  2502. {
  2503. struct pci_dev *pdev = bp->pdev;
  2504. bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
  2505. &bp->hwrm_cmd_resp_dma_addr,
  2506. GFP_KERNEL);
  2507. if (!bp->hwrm_cmd_resp_addr)
  2508. return -ENOMEM;
  2509. bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
  2510. HWRM_DBG_REG_BUF_SIZE,
  2511. &bp->hwrm_dbg_resp_dma_addr,
  2512. GFP_KERNEL);
  2513. if (!bp->hwrm_dbg_resp_addr)
  2514. netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");
  2515. return 0;
  2516. }
  2517. static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
  2518. {
  2519. if (bp->hwrm_short_cmd_req_addr) {
  2520. struct pci_dev *pdev = bp->pdev;
  2521. dma_free_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
  2522. bp->hwrm_short_cmd_req_addr,
  2523. bp->hwrm_short_cmd_req_dma_addr);
  2524. bp->hwrm_short_cmd_req_addr = NULL;
  2525. }
  2526. }
  2527. static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
  2528. {
  2529. struct pci_dev *pdev = bp->pdev;
  2530. bp->hwrm_short_cmd_req_addr =
  2531. dma_alloc_coherent(&pdev->dev, BNXT_HWRM_MAX_REQ_LEN,
  2532. &bp->hwrm_short_cmd_req_dma_addr,
  2533. GFP_KERNEL);
  2534. if (!bp->hwrm_short_cmd_req_addr)
  2535. return -ENOMEM;
  2536. return 0;
  2537. }
  2538. static void bnxt_free_stats(struct bnxt *bp)
  2539. {
  2540. u32 size, i;
  2541. struct pci_dev *pdev = bp->pdev;
  2542. if (bp->hw_rx_port_stats) {
  2543. dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
  2544. bp->hw_rx_port_stats,
  2545. bp->hw_rx_port_stats_map);
  2546. bp->hw_rx_port_stats = NULL;
  2547. bp->flags &= ~BNXT_FLAG_PORT_STATS;
  2548. }
  2549. if (!bp->bnapi)
  2550. return;
  2551. size = sizeof(struct ctx_hw_stats);
  2552. for (i = 0; i < bp->cp_nr_rings; i++) {
  2553. struct bnxt_napi *bnapi = bp->bnapi[i];
  2554. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  2555. if (cpr->hw_stats) {
  2556. dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
  2557. cpr->hw_stats_map);
  2558. cpr->hw_stats = NULL;
  2559. }
  2560. }
  2561. }
  2562. static int bnxt_alloc_stats(struct bnxt *bp)
  2563. {
  2564. u32 size, i;
  2565. struct pci_dev *pdev = bp->pdev;
  2566. size = sizeof(struct ctx_hw_stats);
  2567. for (i = 0; i < bp->cp_nr_rings; i++) {
  2568. struct bnxt_napi *bnapi = bp->bnapi[i];
  2569. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  2570. cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
  2571. &cpr->hw_stats_map,
  2572. GFP_KERNEL);
  2573. if (!cpr->hw_stats)
  2574. return -ENOMEM;
  2575. cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
  2576. }
  2577. if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
  2578. bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
  2579. sizeof(struct tx_port_stats) + 1024;
  2580. bp->hw_rx_port_stats =
  2581. dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
  2582. &bp->hw_rx_port_stats_map,
  2583. GFP_KERNEL);
  2584. if (!bp->hw_rx_port_stats)
  2585. return -ENOMEM;
  2586. bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
  2587. 512;
  2588. bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
  2589. sizeof(struct rx_port_stats) + 512;
  2590. bp->flags |= BNXT_FLAG_PORT_STATS;
  2591. }
  2592. return 0;
  2593. }
  2594. static void bnxt_clear_ring_indices(struct bnxt *bp)
  2595. {
  2596. int i;
  2597. if (!bp->bnapi)
  2598. return;
  2599. for (i = 0; i < bp->cp_nr_rings; i++) {
  2600. struct bnxt_napi *bnapi = bp->bnapi[i];
  2601. struct bnxt_cp_ring_info *cpr;
  2602. struct bnxt_rx_ring_info *rxr;
  2603. struct bnxt_tx_ring_info *txr;
  2604. if (!bnapi)
  2605. continue;
  2606. cpr = &bnapi->cp_ring;
  2607. cpr->cp_raw_cons = 0;
  2608. txr = bnapi->tx_ring;
  2609. if (txr) {
  2610. txr->tx_prod = 0;
  2611. txr->tx_cons = 0;
  2612. }
  2613. rxr = bnapi->rx_ring;
  2614. if (rxr) {
  2615. rxr->rx_prod = 0;
  2616. rxr->rx_agg_prod = 0;
  2617. rxr->rx_sw_agg_prod = 0;
  2618. rxr->rx_next_cons = 0;
  2619. }
  2620. }
  2621. }
  2622. static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
  2623. {
  2624. #ifdef CONFIG_RFS_ACCEL
  2625. int i;
  2626. /* Under rtnl_lock and all our NAPIs have been disabled. It's
  2627. * safe to delete the hash table.
  2628. */
  2629. for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
  2630. struct hlist_head *head;
  2631. struct hlist_node *tmp;
  2632. struct bnxt_ntuple_filter *fltr;
  2633. head = &bp->ntp_fltr_hash_tbl[i];
  2634. hlist_for_each_entry_safe(fltr, tmp, head, hash) {
  2635. hlist_del(&fltr->hash);
  2636. kfree(fltr);
  2637. }
  2638. }
  2639. if (irq_reinit) {
  2640. kfree(bp->ntp_fltr_bmap);
  2641. bp->ntp_fltr_bmap = NULL;
  2642. }
  2643. bp->ntp_fltr_count = 0;
  2644. #endif
  2645. }
  2646. static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
  2647. {
  2648. #ifdef CONFIG_RFS_ACCEL
  2649. int i, rc = 0;
  2650. if (!(bp->flags & BNXT_FLAG_RFS))
  2651. return 0;
  2652. for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
  2653. INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
  2654. bp->ntp_fltr_count = 0;
  2655. bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
  2656. sizeof(long),
  2657. GFP_KERNEL);
  2658. if (!bp->ntp_fltr_bmap)
  2659. rc = -ENOMEM;
  2660. return rc;
  2661. #else
  2662. return 0;
  2663. #endif
  2664. }
  2665. static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
  2666. {
  2667. bnxt_free_vnic_attributes(bp);
  2668. bnxt_free_tx_rings(bp);
  2669. bnxt_free_rx_rings(bp);
  2670. bnxt_free_cp_rings(bp);
  2671. bnxt_free_ntp_fltrs(bp, irq_re_init);
  2672. if (irq_re_init) {
  2673. bnxt_free_stats(bp);
  2674. bnxt_free_ring_grps(bp);
  2675. bnxt_free_vnics(bp);
  2676. kfree(bp->tx_ring_map);
  2677. bp->tx_ring_map = NULL;
  2678. kfree(bp->tx_ring);
  2679. bp->tx_ring = NULL;
  2680. kfree(bp->rx_ring);
  2681. bp->rx_ring = NULL;
  2682. kfree(bp->bnapi);
  2683. bp->bnapi = NULL;
  2684. } else {
  2685. bnxt_clear_ring_indices(bp);
  2686. }
  2687. }
  2688. static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
  2689. {
  2690. int i, j, rc, size, arr_size;
  2691. void *bnapi;
  2692. if (irq_re_init) {
  2693. /* Allocate bnapi mem pointer array and mem block for
  2694. * all queues
  2695. */
  2696. arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
  2697. bp->cp_nr_rings);
  2698. size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
  2699. bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
  2700. if (!bnapi)
  2701. return -ENOMEM;
  2702. bp->bnapi = bnapi;
  2703. bnapi += arr_size;
  2704. for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
  2705. bp->bnapi[i] = bnapi;
  2706. bp->bnapi[i]->index = i;
  2707. bp->bnapi[i]->bp = bp;
  2708. }
  2709. bp->rx_ring = kcalloc(bp->rx_nr_rings,
  2710. sizeof(struct bnxt_rx_ring_info),
  2711. GFP_KERNEL);
  2712. if (!bp->rx_ring)
  2713. return -ENOMEM;
  2714. for (i = 0; i < bp->rx_nr_rings; i++) {
  2715. bp->rx_ring[i].bnapi = bp->bnapi[i];
  2716. bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
  2717. }
  2718. bp->tx_ring = kcalloc(bp->tx_nr_rings,
  2719. sizeof(struct bnxt_tx_ring_info),
  2720. GFP_KERNEL);
  2721. if (!bp->tx_ring)
  2722. return -ENOMEM;
  2723. bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
  2724. GFP_KERNEL);
  2725. if (!bp->tx_ring_map)
  2726. return -ENOMEM;
  2727. if (bp->flags & BNXT_FLAG_SHARED_RINGS)
  2728. j = 0;
  2729. else
  2730. j = bp->rx_nr_rings;
  2731. for (i = 0; i < bp->tx_nr_rings; i++, j++) {
  2732. bp->tx_ring[i].bnapi = bp->bnapi[j];
  2733. bp->bnapi[j]->tx_ring = &bp->tx_ring[i];
  2734. bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
  2735. if (i >= bp->tx_nr_rings_xdp) {
  2736. bp->tx_ring[i].txq_index = i -
  2737. bp->tx_nr_rings_xdp;
  2738. bp->bnapi[j]->tx_int = bnxt_tx_int;
  2739. } else {
  2740. bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
  2741. bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
  2742. }
  2743. }
  2744. rc = bnxt_alloc_stats(bp);
  2745. if (rc)
  2746. goto alloc_mem_err;
  2747. rc = bnxt_alloc_ntp_fltrs(bp);
  2748. if (rc)
  2749. goto alloc_mem_err;
  2750. rc = bnxt_alloc_vnics(bp);
  2751. if (rc)
  2752. goto alloc_mem_err;
  2753. }
  2754. bnxt_init_ring_struct(bp);
  2755. rc = bnxt_alloc_rx_rings(bp);
  2756. if (rc)
  2757. goto alloc_mem_err;
  2758. rc = bnxt_alloc_tx_rings(bp);
  2759. if (rc)
  2760. goto alloc_mem_err;
  2761. rc = bnxt_alloc_cp_rings(bp);
  2762. if (rc)
  2763. goto alloc_mem_err;
  2764. bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
  2765. BNXT_VNIC_UCAST_FLAG;
  2766. rc = bnxt_alloc_vnic_attributes(bp);
  2767. if (rc)
  2768. goto alloc_mem_err;
  2769. return 0;
  2770. alloc_mem_err:
  2771. bnxt_free_mem(bp, true);
  2772. return rc;
  2773. }
  2774. static void bnxt_disable_int(struct bnxt *bp)
  2775. {
  2776. int i;
  2777. if (!bp->bnapi)
  2778. return;
  2779. for (i = 0; i < bp->cp_nr_rings; i++) {
  2780. struct bnxt_napi *bnapi = bp->bnapi[i];
  2781. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  2782. struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
  2783. if (ring->fw_ring_id != INVALID_HW_RING_ID)
  2784. BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
  2785. }
  2786. }
  2787. static void bnxt_disable_int_sync(struct bnxt *bp)
  2788. {
  2789. int i;
  2790. atomic_inc(&bp->intr_sem);
  2791. bnxt_disable_int(bp);
  2792. for (i = 0; i < bp->cp_nr_rings; i++)
  2793. synchronize_irq(bp->irq_tbl[i].vector);
  2794. }
  2795. static void bnxt_enable_int(struct bnxt *bp)
  2796. {
  2797. int i;
  2798. atomic_set(&bp->intr_sem, 0);
  2799. for (i = 0; i < bp->cp_nr_rings; i++) {
  2800. struct bnxt_napi *bnapi = bp->bnapi[i];
  2801. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  2802. BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
  2803. }
  2804. }
  2805. void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
  2806. u16 cmpl_ring, u16 target_id)
  2807. {
  2808. struct input *req = request;
  2809. req->req_type = cpu_to_le16(req_type);
  2810. req->cmpl_ring = cpu_to_le16(cmpl_ring);
  2811. req->target_id = cpu_to_le16(target_id);
  2812. req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
  2813. }
  2814. static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
  2815. int timeout, bool silent)
  2816. {
  2817. int i, intr_process, rc, tmo_count;
  2818. struct input *req = msg;
  2819. u32 *data = msg;
  2820. __le32 *resp_len, *valid;
  2821. u16 cp_ring_id, len = 0;
  2822. struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
  2823. u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
  2824. struct hwrm_short_input short_input = {0};
  2825. req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
  2826. memset(resp, 0, PAGE_SIZE);
  2827. cp_ring_id = le16_to_cpu(req->cmpl_ring);
  2828. intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
  2829. if (bp->flags & BNXT_FLAG_SHORT_CMD) {
  2830. void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
  2831. memcpy(short_cmd_req, req, msg_len);
  2832. memset(short_cmd_req + msg_len, 0, BNXT_HWRM_MAX_REQ_LEN -
  2833. msg_len);
  2834. short_input.req_type = req->req_type;
  2835. short_input.signature =
  2836. cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
  2837. short_input.size = cpu_to_le16(msg_len);
  2838. short_input.req_addr =
  2839. cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
  2840. data = (u32 *)&short_input;
  2841. msg_len = sizeof(short_input);
  2842. /* Sync memory write before updating doorbell */
  2843. wmb();
  2844. max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
  2845. }
  2846. /* Write request msg to hwrm channel */
  2847. __iowrite32_copy(bp->bar0, data, msg_len / 4);
  2848. for (i = msg_len; i < max_req_len; i += 4)
  2849. writel(0, bp->bar0 + i);
  2850. /* currently supports only one outstanding message */
  2851. if (intr_process)
  2852. bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
  2853. /* Ring channel doorbell */
  2854. writel(1, bp->bar0 + 0x100);
  2855. if (!timeout)
  2856. timeout = DFLT_HWRM_CMD_TIMEOUT;
  2857. i = 0;
  2858. tmo_count = timeout * 40;
  2859. if (intr_process) {
  2860. /* Wait until hwrm response cmpl interrupt is processed */
  2861. while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
  2862. i++ < tmo_count) {
  2863. usleep_range(25, 40);
  2864. }
  2865. if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
  2866. netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
  2867. le16_to_cpu(req->req_type));
  2868. return -1;
  2869. }
  2870. } else {
  2871. /* Check if response len is updated */
  2872. resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
  2873. for (i = 0; i < tmo_count; i++) {
  2874. len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
  2875. HWRM_RESP_LEN_SFT;
  2876. if (len)
  2877. break;
  2878. usleep_range(25, 40);
  2879. }
  2880. if (i >= tmo_count) {
  2881. netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
  2882. timeout, le16_to_cpu(req->req_type),
  2883. le16_to_cpu(req->seq_id), len);
  2884. return -1;
  2885. }
  2886. /* Last word of resp contains valid bit */
  2887. valid = bp->hwrm_cmd_resp_addr + len - 4;
  2888. for (i = 0; i < 5; i++) {
  2889. if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
  2890. break;
  2891. udelay(1);
  2892. }
  2893. if (i >= 5) {
  2894. netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
  2895. timeout, le16_to_cpu(req->req_type),
  2896. le16_to_cpu(req->seq_id), len, *valid);
  2897. return -1;
  2898. }
  2899. }
  2900. rc = le16_to_cpu(resp->error_code);
  2901. if (rc && !silent)
  2902. netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
  2903. le16_to_cpu(resp->req_type),
  2904. le16_to_cpu(resp->seq_id), rc);
  2905. return rc;
  2906. }
  2907. int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
  2908. {
  2909. return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
  2910. }
  2911. int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
  2912. int timeout)
  2913. {
  2914. return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
  2915. }
  2916. int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
  2917. {
  2918. int rc;
  2919. mutex_lock(&bp->hwrm_cmd_lock);
  2920. rc = _hwrm_send_message(bp, msg, msg_len, timeout);
  2921. mutex_unlock(&bp->hwrm_cmd_lock);
  2922. return rc;
  2923. }
  2924. int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
  2925. int timeout)
  2926. {
  2927. int rc;
  2928. mutex_lock(&bp->hwrm_cmd_lock);
  2929. rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
  2930. mutex_unlock(&bp->hwrm_cmd_lock);
  2931. return rc;
  2932. }
  2933. int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
  2934. int bmap_size)
  2935. {
  2936. struct hwrm_func_drv_rgtr_input req = {0};
  2937. DECLARE_BITMAP(async_events_bmap, 256);
  2938. u32 *events = (u32 *)async_events_bmap;
  2939. int i;
  2940. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
  2941. req.enables =
  2942. cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
  2943. memset(async_events_bmap, 0, sizeof(async_events_bmap));
  2944. for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
  2945. __set_bit(bnxt_async_events_arr[i], async_events_bmap);
  2946. if (bmap && bmap_size) {
  2947. for (i = 0; i < bmap_size; i++) {
  2948. if (test_bit(i, bmap))
  2949. __set_bit(i, async_events_bmap);
  2950. }
  2951. }
  2952. for (i = 0; i < 8; i++)
  2953. req.async_event_fwd[i] |= cpu_to_le32(events[i]);
  2954. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2955. }
  2956. static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
  2957. {
  2958. struct hwrm_func_drv_rgtr_input req = {0};
  2959. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
  2960. req.enables =
  2961. cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
  2962. FUNC_DRV_RGTR_REQ_ENABLES_VER);
  2963. req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
  2964. req.ver_maj = DRV_VER_MAJ;
  2965. req.ver_min = DRV_VER_MIN;
  2966. req.ver_upd = DRV_VER_UPD;
  2967. if (BNXT_PF(bp)) {
  2968. u32 data[8];
  2969. int i;
  2970. memset(data, 0, sizeof(data));
  2971. for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
  2972. u16 cmd = bnxt_vf_req_snif[i];
  2973. unsigned int bit, idx;
  2974. idx = cmd / 32;
  2975. bit = cmd % 32;
  2976. data[idx] |= 1 << bit;
  2977. }
  2978. for (i = 0; i < 8; i++)
  2979. req.vf_req_fwd[i] = cpu_to_le32(data[i]);
  2980. req.enables |=
  2981. cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
  2982. }
  2983. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2984. }
  2985. static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
  2986. {
  2987. struct hwrm_func_drv_unrgtr_input req = {0};
  2988. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
  2989. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2990. }
  2991. static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
  2992. {
  2993. u32 rc = 0;
  2994. struct hwrm_tunnel_dst_port_free_input req = {0};
  2995. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
  2996. req.tunnel_type = tunnel_type;
  2997. switch (tunnel_type) {
  2998. case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
  2999. req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
  3000. break;
  3001. case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
  3002. req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
  3003. break;
  3004. default:
  3005. break;
  3006. }
  3007. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3008. if (rc)
  3009. netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
  3010. rc);
  3011. return rc;
  3012. }
  3013. static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
  3014. u8 tunnel_type)
  3015. {
  3016. u32 rc = 0;
  3017. struct hwrm_tunnel_dst_port_alloc_input req = {0};
  3018. struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
  3019. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
  3020. req.tunnel_type = tunnel_type;
  3021. req.tunnel_dst_port_val = port;
  3022. mutex_lock(&bp->hwrm_cmd_lock);
  3023. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3024. if (rc) {
  3025. netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
  3026. rc);
  3027. goto err_out;
  3028. }
  3029. switch (tunnel_type) {
  3030. case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
  3031. bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
  3032. break;
  3033. case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
  3034. bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
  3035. break;
  3036. default:
  3037. break;
  3038. }
  3039. err_out:
  3040. mutex_unlock(&bp->hwrm_cmd_lock);
  3041. return rc;
  3042. }
  3043. static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
  3044. {
  3045. struct hwrm_cfa_l2_set_rx_mask_input req = {0};
  3046. struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
  3047. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
  3048. req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
  3049. req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
  3050. req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
  3051. req.mask = cpu_to_le32(vnic->rx_mask);
  3052. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3053. }
  3054. #ifdef CONFIG_RFS_ACCEL
  3055. static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
  3056. struct bnxt_ntuple_filter *fltr)
  3057. {
  3058. struct hwrm_cfa_ntuple_filter_free_input req = {0};
  3059. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
  3060. req.ntuple_filter_id = fltr->filter_id;
  3061. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3062. }
  3063. #define BNXT_NTP_FLTR_FLAGS \
  3064. (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
  3065. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
  3066. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
  3067. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
  3068. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
  3069. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
  3070. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
  3071. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
  3072. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
  3073. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
  3074. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
  3075. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
  3076. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
  3077. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
  3078. #define BNXT_NTP_TUNNEL_FLTR_FLAG \
  3079. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
  3080. static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
  3081. struct bnxt_ntuple_filter *fltr)
  3082. {
  3083. int rc = 0;
  3084. struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
  3085. struct hwrm_cfa_ntuple_filter_alloc_output *resp =
  3086. bp->hwrm_cmd_resp_addr;
  3087. struct flow_keys *keys = &fltr->fkeys;
  3088. struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
  3089. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
  3090. req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
  3091. req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
  3092. req.ethertype = htons(ETH_P_IP);
  3093. memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
  3094. req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
  3095. req.ip_protocol = keys->basic.ip_proto;
  3096. if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
  3097. int i;
  3098. req.ethertype = htons(ETH_P_IPV6);
  3099. req.ip_addr_type =
  3100. CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
  3101. *(struct in6_addr *)&req.src_ipaddr[0] =
  3102. keys->addrs.v6addrs.src;
  3103. *(struct in6_addr *)&req.dst_ipaddr[0] =
  3104. keys->addrs.v6addrs.dst;
  3105. for (i = 0; i < 4; i++) {
  3106. req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
  3107. req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
  3108. }
  3109. } else {
  3110. req.src_ipaddr[0] = keys->addrs.v4addrs.src;
  3111. req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
  3112. req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
  3113. req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
  3114. }
  3115. if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
  3116. req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
  3117. req.tunnel_type =
  3118. CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
  3119. }
  3120. req.src_port = keys->ports.src;
  3121. req.src_port_mask = cpu_to_be16(0xffff);
  3122. req.dst_port = keys->ports.dst;
  3123. req.dst_port_mask = cpu_to_be16(0xffff);
  3124. req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
  3125. mutex_lock(&bp->hwrm_cmd_lock);
  3126. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3127. if (!rc)
  3128. fltr->filter_id = resp->ntuple_filter_id;
  3129. mutex_unlock(&bp->hwrm_cmd_lock);
  3130. return rc;
  3131. }
  3132. #endif
  3133. static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
  3134. u8 *mac_addr)
  3135. {
  3136. u32 rc = 0;
  3137. struct hwrm_cfa_l2_filter_alloc_input req = {0};
  3138. struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
  3139. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
  3140. req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
  3141. if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
  3142. req.flags |=
  3143. cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
  3144. req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
  3145. req.enables =
  3146. cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
  3147. CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
  3148. CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
  3149. memcpy(req.l2_addr, mac_addr, ETH_ALEN);
  3150. req.l2_addr_mask[0] = 0xff;
  3151. req.l2_addr_mask[1] = 0xff;
  3152. req.l2_addr_mask[2] = 0xff;
  3153. req.l2_addr_mask[3] = 0xff;
  3154. req.l2_addr_mask[4] = 0xff;
  3155. req.l2_addr_mask[5] = 0xff;
  3156. mutex_lock(&bp->hwrm_cmd_lock);
  3157. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3158. if (!rc)
  3159. bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
  3160. resp->l2_filter_id;
  3161. mutex_unlock(&bp->hwrm_cmd_lock);
  3162. return rc;
  3163. }
  3164. static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
  3165. {
  3166. u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
  3167. int rc = 0;
  3168. /* Any associated ntuple filters will also be cleared by firmware. */
  3169. mutex_lock(&bp->hwrm_cmd_lock);
  3170. for (i = 0; i < num_of_vnics; i++) {
  3171. struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
  3172. for (j = 0; j < vnic->uc_filter_count; j++) {
  3173. struct hwrm_cfa_l2_filter_free_input req = {0};
  3174. bnxt_hwrm_cmd_hdr_init(bp, &req,
  3175. HWRM_CFA_L2_FILTER_FREE, -1, -1);
  3176. req.l2_filter_id = vnic->fw_l2_filter_id[j];
  3177. rc = _hwrm_send_message(bp, &req, sizeof(req),
  3178. HWRM_CMD_TIMEOUT);
  3179. }
  3180. vnic->uc_filter_count = 0;
  3181. }
  3182. mutex_unlock(&bp->hwrm_cmd_lock);
  3183. return rc;
  3184. }
  3185. static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
  3186. {
  3187. struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
  3188. struct hwrm_vnic_tpa_cfg_input req = {0};
  3189. if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
  3190. return 0;
  3191. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
  3192. if (tpa_flags) {
  3193. u16 mss = bp->dev->mtu - 40;
  3194. u32 nsegs, n, segs = 0, flags;
  3195. flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
  3196. VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
  3197. VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
  3198. VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
  3199. VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
  3200. if (tpa_flags & BNXT_FLAG_GRO)
  3201. flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
  3202. req.flags = cpu_to_le32(flags);
  3203. req.enables =
  3204. cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
  3205. VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
  3206. VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
  3207. /* Number of segs are log2 units, and first packet is not
  3208. * included as part of this units.
  3209. */
  3210. if (mss <= BNXT_RX_PAGE_SIZE) {
  3211. n = BNXT_RX_PAGE_SIZE / mss;
  3212. nsegs = (MAX_SKB_FRAGS - 1) * n;
  3213. } else {
  3214. n = mss / BNXT_RX_PAGE_SIZE;
  3215. if (mss & (BNXT_RX_PAGE_SIZE - 1))
  3216. n++;
  3217. nsegs = (MAX_SKB_FRAGS - n) / n;
  3218. }
  3219. segs = ilog2(nsegs);
  3220. req.max_agg_segs = cpu_to_le16(segs);
  3221. req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
  3222. req.min_agg_len = cpu_to_le32(512);
  3223. }
  3224. req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
  3225. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3226. }
  3227. static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
  3228. {
  3229. u32 i, j, max_rings;
  3230. struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
  3231. struct hwrm_vnic_rss_cfg_input req = {0};
  3232. if (vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
  3233. return 0;
  3234. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
  3235. if (set_rss) {
  3236. req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
  3237. if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
  3238. if (BNXT_CHIP_TYPE_NITRO_A0(bp))
  3239. max_rings = bp->rx_nr_rings - 1;
  3240. else
  3241. max_rings = bp->rx_nr_rings;
  3242. } else {
  3243. max_rings = 1;
  3244. }
  3245. /* Fill the RSS indirection table with ring group ids */
  3246. for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
  3247. if (j == max_rings)
  3248. j = 0;
  3249. vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
  3250. }
  3251. req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
  3252. req.hash_key_tbl_addr =
  3253. cpu_to_le64(vnic->rss_hash_key_dma_addr);
  3254. }
  3255. req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
  3256. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3257. }
  3258. static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
  3259. {
  3260. struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
  3261. struct hwrm_vnic_plcmodes_cfg_input req = {0};
  3262. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
  3263. req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
  3264. VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
  3265. VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
  3266. req.enables =
  3267. cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
  3268. VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
  3269. /* thresholds not implemented in firmware yet */
  3270. req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
  3271. req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
  3272. req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
  3273. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3274. }
  3275. static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
  3276. u16 ctx_idx)
  3277. {
  3278. struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
  3279. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
  3280. req.rss_cos_lb_ctx_id =
  3281. cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);
  3282. hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3283. bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
  3284. }
  3285. static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
  3286. {
  3287. int i, j;
  3288. for (i = 0; i < bp->nr_vnics; i++) {
  3289. struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
  3290. for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
  3291. if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
  3292. bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
  3293. }
  3294. }
  3295. bp->rsscos_nr_ctxs = 0;
  3296. }
  3297. static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
  3298. {
  3299. int rc;
  3300. struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
  3301. struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
  3302. bp->hwrm_cmd_resp_addr;
  3303. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
  3304. -1);
  3305. mutex_lock(&bp->hwrm_cmd_lock);
  3306. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3307. if (!rc)
  3308. bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
  3309. le16_to_cpu(resp->rss_cos_lb_ctx_id);
  3310. mutex_unlock(&bp->hwrm_cmd_lock);
  3311. return rc;
  3312. }
  3313. int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
  3314. {
  3315. unsigned int ring = 0, grp_idx;
  3316. struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
  3317. struct hwrm_vnic_cfg_input req = {0};
  3318. u16 def_vlan = 0;
  3319. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
  3320. req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
  3321. /* Only RSS support for now TBD: COS & LB */
  3322. if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
  3323. req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
  3324. req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
  3325. VNIC_CFG_REQ_ENABLES_MRU);
  3326. } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
  3327. req.rss_rule =
  3328. cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
  3329. req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
  3330. VNIC_CFG_REQ_ENABLES_MRU);
  3331. req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
  3332. } else {
  3333. req.rss_rule = cpu_to_le16(0xffff);
  3334. }
  3335. if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
  3336. (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
  3337. req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
  3338. req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
  3339. } else {
  3340. req.cos_rule = cpu_to_le16(0xffff);
  3341. }
  3342. if (vnic->flags & BNXT_VNIC_RSS_FLAG)
  3343. ring = 0;
  3344. else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
  3345. ring = vnic_id - 1;
  3346. else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
  3347. ring = bp->rx_nr_rings - 1;
  3348. grp_idx = bp->rx_ring[ring].bnapi->index;
  3349. req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
  3350. req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
  3351. req.lb_rule = cpu_to_le16(0xffff);
  3352. req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
  3353. VLAN_HLEN);
  3354. #ifdef CONFIG_BNXT_SRIOV
  3355. if (BNXT_VF(bp))
  3356. def_vlan = bp->vf.vlan;
  3357. #endif
  3358. if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
  3359. req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
  3360. if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
  3361. req.flags |=
  3362. cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE);
  3363. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3364. }
  3365. static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
  3366. {
  3367. u32 rc = 0;
  3368. if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
  3369. struct hwrm_vnic_free_input req = {0};
  3370. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
  3371. req.vnic_id =
  3372. cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
  3373. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3374. if (rc)
  3375. return rc;
  3376. bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
  3377. }
  3378. return rc;
  3379. }
  3380. static void bnxt_hwrm_vnic_free(struct bnxt *bp)
  3381. {
  3382. u16 i;
  3383. for (i = 0; i < bp->nr_vnics; i++)
  3384. bnxt_hwrm_vnic_free_one(bp, i);
  3385. }
  3386. static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
  3387. unsigned int start_rx_ring_idx,
  3388. unsigned int nr_rings)
  3389. {
  3390. int rc = 0;
  3391. unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
  3392. struct hwrm_vnic_alloc_input req = {0};
  3393. struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
  3394. /* map ring groups to this vnic */
  3395. for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
  3396. grp_idx = bp->rx_ring[i].bnapi->index;
  3397. if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
  3398. netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
  3399. j, nr_rings);
  3400. break;
  3401. }
  3402. bp->vnic_info[vnic_id].fw_grp_ids[j] =
  3403. bp->grp_info[grp_idx].fw_grp_id;
  3404. }
  3405. bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[0] = INVALID_HW_RING_ID;
  3406. bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[1] = INVALID_HW_RING_ID;
  3407. if (vnic_id == 0)
  3408. req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
  3409. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
  3410. mutex_lock(&bp->hwrm_cmd_lock);
  3411. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3412. if (!rc)
  3413. bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
  3414. mutex_unlock(&bp->hwrm_cmd_lock);
  3415. return rc;
  3416. }
  3417. static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
  3418. {
  3419. struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
  3420. struct hwrm_vnic_qcaps_input req = {0};
  3421. int rc;
  3422. if (bp->hwrm_spec_code < 0x10600)
  3423. return 0;
  3424. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
  3425. mutex_lock(&bp->hwrm_cmd_lock);
  3426. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3427. if (!rc) {
  3428. if (resp->flags &
  3429. cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
  3430. bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
  3431. }
  3432. mutex_unlock(&bp->hwrm_cmd_lock);
  3433. return rc;
  3434. }
  3435. static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
  3436. {
  3437. u16 i;
  3438. u32 rc = 0;
  3439. mutex_lock(&bp->hwrm_cmd_lock);
  3440. for (i = 0; i < bp->rx_nr_rings; i++) {
  3441. struct hwrm_ring_grp_alloc_input req = {0};
  3442. struct hwrm_ring_grp_alloc_output *resp =
  3443. bp->hwrm_cmd_resp_addr;
  3444. unsigned int grp_idx = bp->rx_ring[i].bnapi->index;
  3445. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
  3446. req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
  3447. req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
  3448. req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
  3449. req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);
  3450. rc = _hwrm_send_message(bp, &req, sizeof(req),
  3451. HWRM_CMD_TIMEOUT);
  3452. if (rc)
  3453. break;
  3454. bp->grp_info[grp_idx].fw_grp_id =
  3455. le32_to_cpu(resp->ring_group_id);
  3456. }
  3457. mutex_unlock(&bp->hwrm_cmd_lock);
  3458. return rc;
  3459. }
  3460. static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
  3461. {
  3462. u16 i;
  3463. u32 rc = 0;
  3464. struct hwrm_ring_grp_free_input req = {0};
  3465. if (!bp->grp_info)
  3466. return 0;
  3467. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
  3468. mutex_lock(&bp->hwrm_cmd_lock);
  3469. for (i = 0; i < bp->cp_nr_rings; i++) {
  3470. if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
  3471. continue;
  3472. req.ring_group_id =
  3473. cpu_to_le32(bp->grp_info[i].fw_grp_id);
  3474. rc = _hwrm_send_message(bp, &req, sizeof(req),
  3475. HWRM_CMD_TIMEOUT);
  3476. if (rc)
  3477. break;
  3478. bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
  3479. }
  3480. mutex_unlock(&bp->hwrm_cmd_lock);
  3481. return rc;
  3482. }
  3483. static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
  3484. struct bnxt_ring_struct *ring,
  3485. u32 ring_type, u32 map_index,
  3486. u32 stats_ctx_id)
  3487. {
  3488. int rc = 0, err = 0;
  3489. struct hwrm_ring_alloc_input req = {0};
  3490. struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
  3491. u16 ring_id;
  3492. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
  3493. req.enables = 0;
  3494. if (ring->nr_pages > 1) {
  3495. req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
  3496. /* Page size is in log2 units */
  3497. req.page_size = BNXT_PAGE_SHIFT;
  3498. req.page_tbl_depth = 1;
  3499. } else {
  3500. req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
  3501. }
  3502. req.fbo = 0;
  3503. /* Association of ring index with doorbell index and MSIX number */
  3504. req.logical_id = cpu_to_le16(map_index);
  3505. switch (ring_type) {
  3506. case HWRM_RING_ALLOC_TX:
  3507. req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
  3508. /* Association of transmit ring with completion ring */
  3509. req.cmpl_ring_id =
  3510. cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
  3511. req.length = cpu_to_le32(bp->tx_ring_mask + 1);
  3512. req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
  3513. req.queue_id = cpu_to_le16(ring->queue_id);
  3514. break;
  3515. case HWRM_RING_ALLOC_RX:
  3516. req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
  3517. req.length = cpu_to_le32(bp->rx_ring_mask + 1);
  3518. break;
  3519. case HWRM_RING_ALLOC_AGG:
  3520. req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
  3521. req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
  3522. break;
  3523. case HWRM_RING_ALLOC_CMPL:
  3524. req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
  3525. req.length = cpu_to_le32(bp->cp_ring_mask + 1);
  3526. if (bp->flags & BNXT_FLAG_USING_MSIX)
  3527. req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
  3528. break;
  3529. default:
  3530. netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
  3531. ring_type);
  3532. return -1;
  3533. }
  3534. mutex_lock(&bp->hwrm_cmd_lock);
  3535. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3536. err = le16_to_cpu(resp->error_code);
  3537. ring_id = le16_to_cpu(resp->ring_id);
  3538. mutex_unlock(&bp->hwrm_cmd_lock);
  3539. if (rc || err) {
  3540. switch (ring_type) {
  3541. case RING_FREE_REQ_RING_TYPE_L2_CMPL:
  3542. netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
  3543. rc, err);
  3544. return -1;
  3545. case RING_FREE_REQ_RING_TYPE_RX:
  3546. netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
  3547. rc, err);
  3548. return -1;
  3549. case RING_FREE_REQ_RING_TYPE_TX:
  3550. netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
  3551. rc, err);
  3552. return -1;
  3553. default:
  3554. netdev_err(bp->dev, "Invalid ring\n");
  3555. return -1;
  3556. }
  3557. }
  3558. ring->fw_ring_id = ring_id;
  3559. return rc;
  3560. }
  3561. static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
  3562. {
  3563. int rc;
  3564. if (BNXT_PF(bp)) {
  3565. struct hwrm_func_cfg_input req = {0};
  3566. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
  3567. req.fid = cpu_to_le16(0xffff);
  3568. req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
  3569. req.async_event_cr = cpu_to_le16(idx);
  3570. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3571. } else {
  3572. struct hwrm_func_vf_cfg_input req = {0};
  3573. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
  3574. req.enables =
  3575. cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
  3576. req.async_event_cr = cpu_to_le16(idx);
  3577. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3578. }
  3579. return rc;
  3580. }
  3581. static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
  3582. {
  3583. int i, rc = 0;
  3584. for (i = 0; i < bp->cp_nr_rings; i++) {
  3585. struct bnxt_napi *bnapi = bp->bnapi[i];
  3586. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  3587. struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
  3588. cpr->cp_doorbell = bp->bar1 + i * 0x80;
  3589. rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
  3590. INVALID_STATS_CTX_ID);
  3591. if (rc)
  3592. goto err_out;
  3593. BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
  3594. bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
  3595. if (!i) {
  3596. rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
  3597. if (rc)
  3598. netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
  3599. }
  3600. }
  3601. for (i = 0; i < bp->tx_nr_rings; i++) {
  3602. struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
  3603. struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
  3604. u32 map_idx = txr->bnapi->index;
  3605. u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx;
  3606. rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
  3607. map_idx, fw_stats_ctx);
  3608. if (rc)
  3609. goto err_out;
  3610. txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
  3611. }
  3612. for (i = 0; i < bp->rx_nr_rings; i++) {
  3613. struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
  3614. struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
  3615. u32 map_idx = rxr->bnapi->index;
  3616. rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
  3617. map_idx, INVALID_STATS_CTX_ID);
  3618. if (rc)
  3619. goto err_out;
  3620. rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
  3621. writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
  3622. bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
  3623. }
  3624. if (bp->flags & BNXT_FLAG_AGG_RINGS) {
  3625. for (i = 0; i < bp->rx_nr_rings; i++) {
  3626. struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
  3627. struct bnxt_ring_struct *ring =
  3628. &rxr->rx_agg_ring_struct;
  3629. u32 grp_idx = rxr->bnapi->index;
  3630. u32 map_idx = grp_idx + bp->rx_nr_rings;
  3631. rc = hwrm_ring_alloc_send_msg(bp, ring,
  3632. HWRM_RING_ALLOC_AGG,
  3633. map_idx,
  3634. INVALID_STATS_CTX_ID);
  3635. if (rc)
  3636. goto err_out;
  3637. rxr->rx_agg_doorbell = bp->bar1 + map_idx * 0x80;
  3638. writel(DB_KEY_RX | rxr->rx_agg_prod,
  3639. rxr->rx_agg_doorbell);
  3640. bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
  3641. }
  3642. }
  3643. err_out:
  3644. return rc;
  3645. }
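/* Send HWRM_RING_FREE for a single ring and log any firmware error for the
 * given ring type.
 */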
  3646. static int hwrm_ring_free_send_msg(struct bnxt *bp,
  3647. struct bnxt_ring_struct *ring,
  3648. u32 ring_type, int cmpl_ring_id)
  3649. {
  3650. int rc;
  3651. struct hwrm_ring_free_input req = {0};
  3652. struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
  3653. u16 error_code;
  3654. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
  3655. req.ring_type = ring_type;
  3656. req.ring_id = cpu_to_le16(ring->fw_ring_id);
  3657. mutex_lock(&bp->hwrm_cmd_lock);
  3658. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3659. error_code = le16_to_cpu(resp->error_code);
  3660. mutex_unlock(&bp->hwrm_cmd_lock);
  3661. if (rc || error_code) {
  3662. switch (ring_type) {
  3663. case RING_FREE_REQ_RING_TYPE_L2_CMPL:
  3664. netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
  3665. rc);
  3666. return rc;
  3667. case RING_FREE_REQ_RING_TYPE_RX:
  3668. netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
  3669. rc);
  3670. return rc;
  3671. case RING_FREE_REQ_RING_TYPE_TX:
  3672. netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
  3673. rc);
  3674. return rc;
  3675. default:
  3676. netdev_err(bp->dev, "Invalid ring\n");
  3677. return -1;
  3678. }
  3679. }
  3680. return 0;
  3681. }
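/* Free the TX, RX and aggregation rings, then disable interrupts and free
 * the completion rings last, since the IRQ doorbells depend on them.
 */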
  3682. static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
  3683. {
  3684. int i;
  3685. if (!bp->bnapi)
  3686. return;
  3687. for (i = 0; i < bp->tx_nr_rings; i++) {
  3688. struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
  3689. struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
  3690. u32 grp_idx = txr->bnapi->index;
  3691. u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
  3692. if (ring->fw_ring_id != INVALID_HW_RING_ID) {
  3693. hwrm_ring_free_send_msg(bp, ring,
  3694. RING_FREE_REQ_RING_TYPE_TX,
  3695. close_path ? cmpl_ring_id :
  3696. INVALID_HW_RING_ID);
  3697. ring->fw_ring_id = INVALID_HW_RING_ID;
  3698. }
  3699. }
  3700. for (i = 0; i < bp->rx_nr_rings; i++) {
  3701. struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
  3702. struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
  3703. u32 grp_idx = rxr->bnapi->index;
  3704. u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
  3705. if (ring->fw_ring_id != INVALID_HW_RING_ID) {
  3706. hwrm_ring_free_send_msg(bp, ring,
  3707. RING_FREE_REQ_RING_TYPE_RX,
  3708. close_path ? cmpl_ring_id :
  3709. INVALID_HW_RING_ID);
  3710. ring->fw_ring_id = INVALID_HW_RING_ID;
  3711. bp->grp_info[grp_idx].rx_fw_ring_id =
  3712. INVALID_HW_RING_ID;
  3713. }
  3714. }
  3715. for (i = 0; i < bp->rx_nr_rings; i++) {
  3716. struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
  3717. struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
  3718. u32 grp_idx = rxr->bnapi->index;
  3719. u32 cmpl_ring_id = bp->grp_info[grp_idx].cp_fw_ring_id;
  3720. if (ring->fw_ring_id != INVALID_HW_RING_ID) {
  3721. hwrm_ring_free_send_msg(bp, ring,
  3722. RING_FREE_REQ_RING_TYPE_RX,
  3723. close_path ? cmpl_ring_id :
  3724. INVALID_HW_RING_ID);
  3725. ring->fw_ring_id = INVALID_HW_RING_ID;
  3726. bp->grp_info[grp_idx].agg_fw_ring_id =
  3727. INVALID_HW_RING_ID;
  3728. }
  3729. }
  3730. /* The completion rings are about to be freed. After that the
3731. * IRQ doorbell will not work anymore. So we need to disable
3732. * IRQ here.
  3733. */
  3734. bnxt_disable_int_sync(bp);
  3735. for (i = 0; i < bp->cp_nr_rings; i++) {
  3736. struct bnxt_napi *bnapi = bp->bnapi[i];
  3737. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  3738. struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
  3739. if (ring->fw_ring_id != INVALID_HW_RING_ID) {
  3740. hwrm_ring_free_send_msg(bp, ring,
  3741. RING_FREE_REQ_RING_TYPE_L2_CMPL,
  3742. INVALID_HW_RING_ID);
  3743. ring->fw_ring_id = INVALID_HW_RING_ID;
  3744. bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
  3745. }
  3746. }
  3747. }
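/* Query HWRM_FUNC_QCFG for the ring resources currently reserved for this
 * function and cache the counts in bp->hw_resc.
 */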
  3748. static int bnxt_hwrm_get_rings(struct bnxt *bp)
  3749. {
  3750. struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
  3751. struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
  3752. struct hwrm_func_qcfg_input req = {0};
  3753. int rc;
  3754. if (bp->hwrm_spec_code < 0x10601)
  3755. return 0;
  3756. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
  3757. req.fid = cpu_to_le16(0xffff);
  3758. mutex_lock(&bp->hwrm_cmd_lock);
  3759. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3760. if (rc) {
  3761. mutex_unlock(&bp->hwrm_cmd_lock);
  3762. return -EIO;
  3763. }
  3764. hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
  3765. if (bp->flags & BNXT_FLAG_NEW_RM) {
  3766. u16 cp, stats;
  3767. hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
  3768. hw_resc->resv_hw_ring_grps =
  3769. le32_to_cpu(resp->alloc_hw_ring_grps);
  3770. hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
  3771. cp = le16_to_cpu(resp->alloc_cmpl_rings);
  3772. stats = le16_to_cpu(resp->alloc_stat_ctx);
  3773. cp = min_t(u16, cp, stats);
  3774. hw_resc->resv_cp_rings = cp;
  3775. }
  3776. mutex_unlock(&bp->hwrm_cmd_lock);
  3777. return 0;
  3778. }
  3779. /* Caller must hold bp->hwrm_cmd_lock */
  3780. int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
  3781. {
  3782. struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
  3783. struct hwrm_func_qcfg_input req = {0};
  3784. int rc;
  3785. if (bp->hwrm_spec_code < 0x10601)
  3786. return 0;
  3787. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
  3788. req.fid = cpu_to_le16(fid);
  3789. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3790. if (!rc)
  3791. *tx_rings = le16_to_cpu(resp->alloc_tx_rings);
  3792. return rc;
  3793. }
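/* Build a HWRM_FUNC_CFG request reserving TX rings for the PF; RX rings,
 * ring groups, completion rings, stat contexts and VNICs are only requested
 * when the new resource manager (BNXT_FLAG_NEW_RM) is in use.
 */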
  3794. static void
  3795. __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
  3796. int tx_rings, int rx_rings, int ring_grps,
  3797. int cp_rings, int vnics)
  3798. {
  3799. u32 enables = 0;
  3800. bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
  3801. req->fid = cpu_to_le16(0xffff);
  3802. enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
  3803. req->num_tx_rings = cpu_to_le16(tx_rings);
  3804. if (bp->flags & BNXT_FLAG_NEW_RM) {
  3805. enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
  3806. enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
  3807. FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
  3808. enables |= ring_grps ?
  3809. FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
3810. enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
  3811. req->num_rx_rings = cpu_to_le16(rx_rings);
  3812. req->num_hw_ring_grps = cpu_to_le16(ring_grps);
  3813. req->num_cmpl_rings = cpu_to_le16(cp_rings);
  3814. req->num_stat_ctxs = req->num_cmpl_rings;
  3815. req->num_vnics = cpu_to_le16(vnics);
  3816. }
  3817. req->enables = cpu_to_le32(enables);
  3818. }
  3819. static void
  3820. __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
  3821. struct hwrm_func_vf_cfg_input *req, int tx_rings,
  3822. int rx_rings, int ring_grps, int cp_rings,
  3823. int vnics)
  3824. {
  3825. u32 enables = 0;
  3826. bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
  3827. enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
  3828. enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
  3829. enables |= cp_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
  3830. FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
  3831. enables |= ring_grps ? FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
  3832. enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
  3833. req->num_tx_rings = cpu_to_le16(tx_rings);
  3834. req->num_rx_rings = cpu_to_le16(rx_rings);
  3835. req->num_hw_ring_grps = cpu_to_le16(ring_grps);
  3836. req->num_cmpl_rings = cpu_to_le16(cp_rings);
  3837. req->num_stat_ctxs = req->num_cmpl_rings;
  3838. req->num_vnics = cpu_to_le16(vnics);
  3839. req->enables = cpu_to_le32(enables);
  3840. }
  3841. static int
  3842. bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
  3843. int ring_grps, int cp_rings, int vnics)
  3844. {
  3845. struct hwrm_func_cfg_input req = {0};
  3846. int rc;
  3847. __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
  3848. cp_rings, vnics);
  3849. if (!req.enables)
  3850. return 0;
  3851. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3852. if (rc)
  3853. return -ENOMEM;
  3854. if (bp->hwrm_spec_code < 0x10601)
  3855. bp->hw_resc.resv_tx_rings = tx_rings;
  3856. rc = bnxt_hwrm_get_rings(bp);
  3857. return rc;
  3858. }
  3859. static int
  3860. bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
  3861. int ring_grps, int cp_rings, int vnics)
  3862. {
  3863. struct hwrm_func_vf_cfg_input req = {0};
  3864. int rc;
  3865. if (!(bp->flags & BNXT_FLAG_NEW_RM)) {
  3866. bp->hw_resc.resv_tx_rings = tx_rings;
  3867. return 0;
  3868. }
  3869. __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
  3870. cp_rings, vnics);
  3871. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3872. if (rc)
  3873. return -ENOMEM;
  3874. rc = bnxt_hwrm_get_rings(bp);
  3875. return rc;
  3876. }
  3877. static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
  3878. int cp, int vnic)
  3879. {
  3880. if (BNXT_PF(bp))
  3881. return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, vnic);
  3882. else
  3883. return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic);
  3884. }
  3885. static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
  3886. bool shared);
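/* Reconcile the driver's desired ring counts with what the firmware will
 * reserve. On a shortfall, aggregation rings and LRO may be disabled and
 * the RX/TX/completion ring counts trimmed to fit.
 */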
  3887. static int __bnxt_reserve_rings(struct bnxt *bp)
  3888. {
  3889. struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
  3890. int tx = bp->tx_nr_rings;
  3891. int rx = bp->rx_nr_rings;
  3892. int cp = bp->cp_nr_rings;
  3893. int grp, rx_rings, rc;
  3894. bool sh = false;
  3895. int vnic = 1;
  3896. if (bp->hwrm_spec_code < 0x10601)
  3897. return 0;
  3898. if (bp->flags & BNXT_FLAG_SHARED_RINGS)
  3899. sh = true;
  3900. if (bp->flags & BNXT_FLAG_RFS)
  3901. vnic = rx + 1;
  3902. if (bp->flags & BNXT_FLAG_AGG_RINGS)
  3903. rx <<= 1;
  3904. grp = bp->rx_nr_rings;
  3905. if (tx == hw_resc->resv_tx_rings &&
  3906. (!(bp->flags & BNXT_FLAG_NEW_RM) ||
  3907. (rx == hw_resc->resv_rx_rings &&
  3908. grp == hw_resc->resv_hw_ring_grps &&
  3909. cp == hw_resc->resv_cp_rings && vnic == hw_resc->resv_vnics)))
  3910. return 0;
  3911. rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic);
  3912. if (rc)
  3913. return rc;
  3914. tx = hw_resc->resv_tx_rings;
  3915. if (bp->flags & BNXT_FLAG_NEW_RM) {
  3916. rx = hw_resc->resv_rx_rings;
  3917. cp = hw_resc->resv_cp_rings;
  3918. grp = hw_resc->resv_hw_ring_grps;
  3919. vnic = hw_resc->resv_vnics;
  3920. }
  3921. rx_rings = rx;
  3922. if (bp->flags & BNXT_FLAG_AGG_RINGS) {
  3923. if (rx >= 2) {
  3924. rx_rings = rx >> 1;
  3925. } else {
  3926. if (netif_running(bp->dev))
  3927. return -ENOMEM;
  3928. bp->flags &= ~BNXT_FLAG_AGG_RINGS;
  3929. bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
  3930. bp->dev->hw_features &= ~NETIF_F_LRO;
  3931. bp->dev->features &= ~NETIF_F_LRO;
  3932. bnxt_set_ring_params(bp);
  3933. }
  3934. }
  3935. rx_rings = min_t(int, rx_rings, grp);
  3936. rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
  3937. if (bp->flags & BNXT_FLAG_AGG_RINGS)
  3938. rx = rx_rings << 1;
  3939. cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
  3940. bp->tx_nr_rings = tx;
  3941. bp->rx_nr_rings = rx_rings;
  3942. bp->cp_nr_rings = cp;
  3943. if (!tx || !rx || !cp || !grp || !vnic)
  3944. return -ENOMEM;
  3945. return rc;
  3946. }
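/* Return true if the firmware reservations no longer match the driver's
 * ring, completion ring or VNIC requirements.
 */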
  3947. static bool bnxt_need_reserve_rings(struct bnxt *bp)
  3948. {
  3949. struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
  3950. int rx = bp->rx_nr_rings;
  3951. int vnic = 1;
  3952. if (bp->hwrm_spec_code < 0x10601)
  3953. return false;
  3954. if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
  3955. return true;
  3956. if (bp->flags & BNXT_FLAG_RFS)
  3957. vnic = rx + 1;
  3958. if (bp->flags & BNXT_FLAG_AGG_RINGS)
  3959. rx <<= 1;
  3960. if ((bp->flags & BNXT_FLAG_NEW_RM) &&
  3961. (hw_resc->resv_rx_rings != rx ||
  3962. hw_resc->resv_cp_rings != bp->cp_nr_rings ||
  3963. hw_resc->resv_vnics != vnic))
  3964. return true;
  3965. return false;
  3966. }
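/* Ask firmware to test whether the requested VF resources are available,
 * using the *_ASSETS_TEST flags so nothing is actually committed.
 */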
  3967. static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
  3968. int ring_grps, int cp_rings, int vnics)
  3969. {
  3970. struct hwrm_func_vf_cfg_input req = {0};
  3971. u32 flags;
  3972. int rc;
  3973. if (!(bp->flags & BNXT_FLAG_NEW_RM))
  3974. return 0;
  3975. __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
  3976. cp_rings, vnics);
  3977. flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
  3978. FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
  3979. FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
  3980. FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
  3981. FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
  3982. FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
  3983. req.flags = cpu_to_le32(flags);
  3984. rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3985. if (rc)
  3986. return -ENOMEM;
  3987. return 0;
  3988. }
  3989. static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
  3990. int ring_grps, int cp_rings, int vnics)
  3991. {
  3992. struct hwrm_func_cfg_input req = {0};
  3993. u32 flags;
  3994. int rc;
  3995. __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
  3996. cp_rings, vnics);
  3997. flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
  3998. if (bp->flags & BNXT_FLAG_NEW_RM)
  3999. flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
  4000. FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
  4001. FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST |
  4002. FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
  4003. FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
  4004. req.flags = cpu_to_le32(flags);
  4005. rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  4006. if (rc)
  4007. return -ENOMEM;
  4008. return 0;
  4009. }
  4010. static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
  4011. int ring_grps, int cp_rings, int vnics)
  4012. {
  4013. if (bp->hwrm_spec_code < 0x10801)
  4014. return 0;
  4015. if (BNXT_PF(bp))
  4016. return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
  4017. ring_grps, cp_rings, vnics);
  4018. return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
  4019. cp_rings, vnics);
  4020. }
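/* Convert the driver's coalescing settings into HWRM aggint parameters:
 * buffer counts are clamped to hardware limits and the min and DMA buffer
 * timers are derived from the max latency timer.
 */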
  4021. static void bnxt_hwrm_set_coal_params(struct bnxt_coal *hw_coal,
  4022. struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
  4023. {
  4024. u16 val, tmr, max, flags;
  4025. max = hw_coal->bufs_per_record * 128;
  4026. if (hw_coal->budget)
  4027. max = hw_coal->bufs_per_record * hw_coal->budget;
  4028. val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
  4029. req->num_cmpl_aggr_int = cpu_to_le16(val);
  4030. /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */
  4031. val = min_t(u16, val, 63);
  4032. req->num_cmpl_dma_aggr = cpu_to_le16(val);
  4033. /* This is a 6-bit value and must not be 0, or we'll get non stop IRQ */
  4034. val = clamp_t(u16, hw_coal->coal_bufs_irq, 1, 63);
  4035. req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);
  4036. tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks);
  4037. tmr = max_t(u16, tmr, 1);
  4038. req->int_lat_tmr_max = cpu_to_le16(tmr);
  4039. /* min timer set to 1/2 of interrupt timer */
  4040. val = tmr / 2;
  4041. req->int_lat_tmr_min = cpu_to_le16(val);
  4042. /* buf timer set to 1/4 of interrupt timer */
  4043. val = max_t(u16, tmr / 4, 1);
  4044. req->cmpl_aggr_dma_tmr = cpu_to_le16(val);
  4045. tmr = BNXT_USEC_TO_COAL_TIMER(hw_coal->coal_ticks_irq);
  4046. tmr = max_t(u16, tmr, 1);
  4047. req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(tmr);
  4048. flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
  4049. if (hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
  4050. flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
  4051. req->flags = cpu_to_le16(flags);
  4052. }
  4053. int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
  4054. {
  4055. struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
  4056. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  4057. struct bnxt_coal coal;
  4058. unsigned int grp_idx;
  4059. /* Tick values in micro seconds.
  4060. * 1 coal_buf x bufs_per_record = 1 completion record.
  4061. */
  4062. memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
  4063. coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
  4064. coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
  4065. if (!bnapi->rx_ring)
  4066. return -ENODEV;
  4067. bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
  4068. HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
  4069. bnxt_hwrm_set_coal_params(&coal, &req_rx);
  4070. grp_idx = bnapi->index;
  4071. req_rx.ring_id = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
  4072. return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
  4073. HWRM_CMD_TIMEOUT);
  4074. }
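/* Program RX or TX coalescing parameters on every completion ring. */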
  4075. int bnxt_hwrm_set_coal(struct bnxt *bp)
  4076. {
  4077. int i, rc = 0;
  4078. struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
  4079. req_tx = {0}, *req;
  4080. bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
  4081. HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
  4082. bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
  4083. HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
  4084. bnxt_hwrm_set_coal_params(&bp->rx_coal, &req_rx);
  4085. bnxt_hwrm_set_coal_params(&bp->tx_coal, &req_tx);
  4086. mutex_lock(&bp->hwrm_cmd_lock);
  4087. for (i = 0; i < bp->cp_nr_rings; i++) {
  4088. struct bnxt_napi *bnapi = bp->bnapi[i];
  4089. req = &req_rx;
  4090. if (!bnapi->rx_ring)
  4091. req = &req_tx;
  4092. req->ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
  4093. rc = _hwrm_send_message(bp, req, sizeof(*req),
  4094. HWRM_CMD_TIMEOUT);
  4095. if (rc)
  4096. break;
  4097. }
  4098. mutex_unlock(&bp->hwrm_cmd_lock);
  4099. return rc;
  4100. }
  4101. static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
  4102. {
  4103. int rc = 0, i;
  4104. struct hwrm_stat_ctx_free_input req = {0};
  4105. if (!bp->bnapi)
  4106. return 0;
  4107. if (BNXT_CHIP_TYPE_NITRO_A0(bp))
  4108. return 0;
  4109. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
  4110. mutex_lock(&bp->hwrm_cmd_lock);
  4111. for (i = 0; i < bp->cp_nr_rings; i++) {
  4112. struct bnxt_napi *bnapi = bp->bnapi[i];
  4113. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  4114. if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
  4115. req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
  4116. rc = _hwrm_send_message(bp, &req, sizeof(req),
  4117. HWRM_CMD_TIMEOUT);
  4118. if (rc)
  4119. break;
  4120. cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
  4121. }
  4122. }
  4123. mutex_unlock(&bp->hwrm_cmd_lock);
  4124. return rc;
  4125. }
  4126. static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
  4127. {
  4128. int rc = 0, i;
  4129. struct hwrm_stat_ctx_alloc_input req = {0};
  4130. struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
  4131. if (BNXT_CHIP_TYPE_NITRO_A0(bp))
  4132. return 0;
  4133. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
  4134. req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
  4135. mutex_lock(&bp->hwrm_cmd_lock);
  4136. for (i = 0; i < bp->cp_nr_rings; i++) {
  4137. struct bnxt_napi *bnapi = bp->bnapi[i];
  4138. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  4139. req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
  4140. rc = _hwrm_send_message(bp, &req, sizeof(req),
  4141. HWRM_CMD_TIMEOUT);
  4142. if (rc)
  4143. break;
  4144. cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
  4145. bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
  4146. }
  4147. mutex_unlock(&bp->hwrm_cmd_lock);
  4148. return rc;
  4149. }
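/* Query the current function configuration: VF VLAN, LLDP/DCBX agent flags,
 * NPAR partition type, bridge (EVB) mode and the maximum configured MTU.
 */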
  4150. static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
  4151. {
  4152. struct hwrm_func_qcfg_input req = {0};
  4153. struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
  4154. u16 flags;
  4155. int rc;
  4156. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
  4157. req.fid = cpu_to_le16(0xffff);
  4158. mutex_lock(&bp->hwrm_cmd_lock);
  4159. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  4160. if (rc)
  4161. goto func_qcfg_exit;
  4162. #ifdef CONFIG_BNXT_SRIOV
  4163. if (BNXT_VF(bp)) {
  4164. struct bnxt_vf_info *vf = &bp->vf;
  4165. vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
  4166. }
  4167. #endif
  4168. flags = le16_to_cpu(resp->flags);
  4169. if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
  4170. FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
  4171. bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
  4172. if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
  4173. bp->flags |= BNXT_FLAG_FW_DCBX_AGENT;
  4174. }
  4175. if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
  4176. bp->flags |= BNXT_FLAG_MULTI_HOST;
  4177. switch (resp->port_partition_type) {
  4178. case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
  4179. case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
  4180. case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
  4181. bp->port_partition_type = resp->port_partition_type;
  4182. break;
  4183. }
  4184. if (bp->hwrm_spec_code < 0x10707 ||
  4185. resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
  4186. bp->br_mode = BRIDGE_MODE_VEB;
  4187. else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
  4188. bp->br_mode = BRIDGE_MODE_VEPA;
  4189. else
  4190. bp->br_mode = BRIDGE_MODE_UNDEF;
  4191. bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
  4192. if (!bp->max_mtu)
  4193. bp->max_mtu = BNXT_MAX_MTU;
  4194. func_qcfg_exit:
  4195. mutex_unlock(&bp->hwrm_cmd_lock);
  4196. return rc;
  4197. }
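/* Read the min/max resource ranges (rings, ring groups, L2/RSS contexts,
 * VNICs, stat contexts) reported by HWRM_FUNC_RESOURCE_QCAPS, plus the PF's
 * VF reservation strategy.
 */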
  4198. static int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
  4199. {
  4200. struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
  4201. struct hwrm_func_resource_qcaps_input req = {0};
  4202. struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
  4203. int rc;
  4204. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
  4205. req.fid = cpu_to_le16(0xffff);
  4206. mutex_lock(&bp->hwrm_cmd_lock);
  4207. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  4208. if (rc) {
  4209. rc = -EIO;
  4210. goto hwrm_func_resc_qcaps_exit;
  4211. }
  4212. hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
  4213. hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
  4214. hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
  4215. hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
  4216. hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
  4217. hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
  4218. hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
  4219. hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
  4220. hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
  4221. hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
  4222. hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
  4223. hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
  4224. hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
  4225. hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
  4226. hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
  4227. hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
  4228. if (BNXT_PF(bp)) {
  4229. struct bnxt_pf_info *pf = &bp->pf;
  4230. pf->vf_resv_strategy =
  4231. le16_to_cpu(resp->vf_reservation_strategy);
  4232. if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL)
  4233. pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
  4234. }
  4235. hwrm_func_resc_qcaps_exit:
  4236. mutex_unlock(&bp->hwrm_cmd_lock);
  4237. return rc;
  4238. }
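/* Query basic function capabilities: RoCE and TX push support, maximum
 * resource counts, and the PF or VF MAC address and related limits.
 */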
  4239. static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
  4240. {
  4241. int rc = 0;
  4242. struct hwrm_func_qcaps_input req = {0};
  4243. struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
  4244. struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
  4245. u32 flags;
  4246. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
  4247. req.fid = cpu_to_le16(0xffff);
  4248. mutex_lock(&bp->hwrm_cmd_lock);
  4249. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  4250. if (rc)
  4251. goto hwrm_func_qcaps_exit;
  4252. flags = le32_to_cpu(resp->flags);
  4253. if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
  4254. bp->flags |= BNXT_FLAG_ROCEV1_CAP;
  4255. if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
  4256. bp->flags |= BNXT_FLAG_ROCEV2_CAP;
  4257. bp->tx_push_thresh = 0;
  4258. if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
  4259. bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
  4260. hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
  4261. hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
  4262. hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
  4263. hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
  4264. hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
  4265. if (!hw_resc->max_hw_ring_grps)
  4266. hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
  4267. hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
  4268. hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
  4269. hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
  4270. if (BNXT_PF(bp)) {
  4271. struct bnxt_pf_info *pf = &bp->pf;
  4272. pf->fw_fid = le16_to_cpu(resp->fid);
  4273. pf->port_id = le16_to_cpu(resp->port_id);
  4274. bp->dev->dev_port = pf->port_id;
  4275. memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
  4276. pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
  4277. pf->max_vfs = le16_to_cpu(resp->max_vfs);
  4278. pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
  4279. pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
  4280. pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
  4281. pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
  4282. pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
  4283. pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
  4284. if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
  4285. bp->flags |= BNXT_FLAG_WOL_CAP;
  4286. } else {
  4287. #ifdef CONFIG_BNXT_SRIOV
  4288. struct bnxt_vf_info *vf = &bp->vf;
  4289. vf->fw_fid = le16_to_cpu(resp->fid);
  4290. memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
  4291. #endif
  4292. }
  4293. hwrm_func_qcaps_exit:
  4294. mutex_unlock(&bp->hwrm_cmd_lock);
  4295. return rc;
  4296. }
  4297. static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
  4298. {
  4299. int rc;
  4300. rc = __bnxt_hwrm_func_qcaps(bp);
  4301. if (rc)
  4302. return rc;
  4303. if (bp->hwrm_spec_code >= 0x10803) {
  4304. rc = bnxt_hwrm_func_resc_qcaps(bp);
  4305. if (!rc)
  4306. bp->flags |= BNXT_FLAG_NEW_RM;
  4307. }
  4308. return 0;
  4309. }
  4310. static int bnxt_hwrm_func_reset(struct bnxt *bp)
  4311. {
  4312. struct hwrm_func_reset_input req = {0};
  4313. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
  4314. req.enables = 0;
  4315. return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
  4316. }
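/* Discover the configurable CoS queues and record their IDs and profiles
 * in bp->q_info.
 */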
  4317. static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
  4318. {
  4319. int rc = 0;
  4320. struct hwrm_queue_qportcfg_input req = {0};
  4321. struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
  4322. u8 i, *qptr;
  4323. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
  4324. mutex_lock(&bp->hwrm_cmd_lock);
  4325. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  4326. if (rc)
  4327. goto qportcfg_exit;
  4328. if (!resp->max_configurable_queues) {
  4329. rc = -EINVAL;
  4330. goto qportcfg_exit;
  4331. }
  4332. bp->max_tc = resp->max_configurable_queues;
  4333. bp->max_lltc = resp->max_configurable_lossless_queues;
  4334. if (bp->max_tc > BNXT_MAX_QUEUE)
  4335. bp->max_tc = BNXT_MAX_QUEUE;
  4336. if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
  4337. bp->max_tc = 1;
  4338. if (bp->max_lltc > bp->max_tc)
  4339. bp->max_lltc = bp->max_tc;
  4340. qptr = &resp->queue_id0;
  4341. for (i = 0; i < bp->max_tc; i++) {
  4342. bp->q_info[i].queue_id = *qptr++;
  4343. bp->q_info[i].queue_profile = *qptr++;
  4344. }
  4345. qportcfg_exit:
  4346. mutex_unlock(&bp->hwrm_cmd_lock);
  4347. return rc;
  4348. }
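/* Query the HWRM interface and firmware versions; derive the command
 * timeout, maximum request length and short-command support from the
 * response.
 */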
  4349. static int bnxt_hwrm_ver_get(struct bnxt *bp)
  4350. {
  4351. int rc;
  4352. struct hwrm_ver_get_input req = {0};
  4353. struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
  4354. u32 dev_caps_cfg;
  4355. bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
  4356. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
  4357. req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
  4358. req.hwrm_intf_min = HWRM_VERSION_MINOR;
  4359. req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
  4360. mutex_lock(&bp->hwrm_cmd_lock);
  4361. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  4362. if (rc)
  4363. goto hwrm_ver_get_exit;
  4364. memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
  4365. bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
  4366. resp->hwrm_intf_min_8b << 8 |
  4367. resp->hwrm_intf_upd_8b;
  4368. if (resp->hwrm_intf_maj_8b < 1) {
  4369. netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
  4370. resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
  4371. resp->hwrm_intf_upd_8b);
  4372. netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
  4373. }
  4374. snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
  4375. resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
  4376. resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
  4377. bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
  4378. if (!bp->hwrm_cmd_timeout)
  4379. bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
  4380. if (resp->hwrm_intf_maj_8b >= 1)
  4381. bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
  4382. bp->chip_num = le16_to_cpu(resp->chip_num);
  4383. if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
  4384. !resp->chip_metal)
  4385. bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
  4386. dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
  4387. if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
  4388. (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
  4389. bp->flags |= BNXT_FLAG_SHORT_CMD;
  4390. hwrm_ver_get_exit:
  4391. mutex_unlock(&bp->hwrm_cmd_lock);
  4392. return rc;
  4393. }
  4394. int bnxt_hwrm_fw_set_time(struct bnxt *bp)
  4395. {
  4396. struct hwrm_fw_set_time_input req = {0};
  4397. struct tm tm;
  4398. time64_t now = ktime_get_real_seconds();
  4399. if (bp->hwrm_spec_code < 0x10400)
  4400. return -EOPNOTSUPP;
  4401. time64_to_tm(now, 0, &tm);
  4402. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
  4403. req.year = cpu_to_le16(1900 + tm.tm_year);
  4404. req.month = 1 + tm.tm_mon;
  4405. req.day = tm.tm_mday;
  4406. req.hour = tm.tm_hour;
  4407. req.minute = tm.tm_min;
  4408. req.second = tm.tm_sec;
  4409. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  4410. }
  4411. static int bnxt_hwrm_port_qstats(struct bnxt *bp)
  4412. {
  4413. int rc;
  4414. struct bnxt_pf_info *pf = &bp->pf;
  4415. struct hwrm_port_qstats_input req = {0};
  4416. if (!(bp->flags & BNXT_FLAG_PORT_STATS))
  4417. return 0;
  4418. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
  4419. req.port_id = cpu_to_le16(pf->port_id);
  4420. req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
  4421. req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
  4422. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  4423. return rc;
  4424. }
  4425. static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
  4426. {
  4427. if (bp->vxlan_port_cnt) {
  4428. bnxt_hwrm_tunnel_dst_port_free(
  4429. bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
  4430. }
  4431. bp->vxlan_port_cnt = 0;
  4432. if (bp->nge_port_cnt) {
  4433. bnxt_hwrm_tunnel_dst_port_free(
  4434. bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
  4435. }
  4436. bp->nge_port_cnt = 0;
  4437. }
  4438. static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
  4439. {
  4440. int rc, i;
  4441. u32 tpa_flags = 0;
  4442. if (set_tpa)
  4443. tpa_flags = bp->flags & BNXT_FLAG_TPA;
  4444. for (i = 0; i < bp->nr_vnics; i++) {
  4445. rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
  4446. if (rc) {
  4447. netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
  4448. i, rc);
  4449. return rc;
  4450. }
  4451. }
  4452. return 0;
  4453. }
  4454. static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
  4455. {
  4456. int i;
  4457. for (i = 0; i < bp->nr_vnics; i++)
  4458. bnxt_hwrm_vnic_set_rss(bp, i, false);
  4459. }
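/* Tear down firmware resources in order: filters, RSS, VNIC contexts, TPA,
 * VNICs, rings and ring groups, and optionally stat contexts and tunnel
 * ports.
 */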
  4460. static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
  4461. bool irq_re_init)
  4462. {
  4463. if (bp->vnic_info) {
  4464. bnxt_hwrm_clear_vnic_filter(bp);
  4465. /* clear all RSS setting before free vnic ctx */
  4466. bnxt_hwrm_clear_vnic_rss(bp);
  4467. bnxt_hwrm_vnic_ctx_free(bp);
  4468. /* before free the vnic, undo the vnic tpa settings */
  4469. if (bp->flags & BNXT_FLAG_TPA)
  4470. bnxt_set_tpa(bp, false);
  4471. bnxt_hwrm_vnic_free(bp);
  4472. }
  4473. bnxt_hwrm_ring_free(bp, close_path);
  4474. bnxt_hwrm_ring_grp_free(bp);
  4475. if (irq_re_init) {
  4476. bnxt_hwrm_stat_ctx_free(bp);
  4477. bnxt_hwrm_free_tunnel_ports(bp);
  4478. }
  4479. }
  4480. static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
  4481. {
  4482. struct hwrm_func_cfg_input req = {0};
  4483. int rc;
  4484. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
  4485. req.fid = cpu_to_le16(0xffff);
  4486. req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
  4487. if (br_mode == BRIDGE_MODE_VEB)
  4488. req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
  4489. else if (br_mode == BRIDGE_MODE_VEPA)
  4490. req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
  4491. else
  4492. return -EINVAL;
  4493. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  4494. if (rc)
  4495. rc = -EIO;
  4496. return rc;
  4497. }
  4498. static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
  4499. {
  4500. struct hwrm_func_cfg_input req = {0};
  4501. int rc;
  4502. if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
  4503. return 0;
  4504. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
  4505. req.fid = cpu_to_le16(0xffff);
  4506. req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
  4507. req.cache_linesize = FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64;
  4508. if (size == 128)
  4509. req.cache_linesize =
  4510. FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128;
  4511. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  4512. if (rc)
  4513. rc = -EIO;
  4514. return rc;
  4515. }
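/* Set up one VNIC: allocate its RSS/CoS contexts, configure it, enable RSS
 * and, when aggregation rings are used, header-data split.
 */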
  4516. static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
  4517. {
  4518. struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
  4519. int rc;
  4520. if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
  4521. goto skip_rss_ctx;
  4522. /* allocate context for vnic */
  4523. rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
  4524. if (rc) {
  4525. netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
  4526. vnic_id, rc);
  4527. goto vnic_setup_err;
  4528. }
  4529. bp->rsscos_nr_ctxs++;
  4530. if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
  4531. rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
  4532. if (rc) {
  4533. netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
  4534. vnic_id, rc);
  4535. goto vnic_setup_err;
  4536. }
  4537. bp->rsscos_nr_ctxs++;
  4538. }
  4539. skip_rss_ctx:
  4540. /* configure default vnic, ring grp */
  4541. rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
  4542. if (rc) {
  4543. netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
  4544. vnic_id, rc);
  4545. goto vnic_setup_err;
  4546. }
  4547. /* Enable RSS hashing on vnic */
  4548. rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
  4549. if (rc) {
  4550. netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
  4551. vnic_id, rc);
  4552. goto vnic_setup_err;
  4553. }
  4554. if (bp->flags & BNXT_FLAG_AGG_RINGS) {
  4555. rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
  4556. if (rc) {
  4557. netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
  4558. vnic_id, rc);
  4559. }
  4560. }
  4561. vnic_setup_err:
  4562. return rc;
  4563. }
  4564. static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
  4565. {
  4566. #ifdef CONFIG_RFS_ACCEL
  4567. int i, rc = 0;
  4568. for (i = 0; i < bp->rx_nr_rings; i++) {
  4569. struct bnxt_vnic_info *vnic;
  4570. u16 vnic_id = i + 1;
  4571. u16 ring_id = i;
  4572. if (vnic_id >= bp->nr_vnics)
  4573. break;
  4574. vnic = &bp->vnic_info[vnic_id];
  4575. vnic->flags |= BNXT_VNIC_RFS_FLAG;
  4576. if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
  4577. vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
  4578. rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
  4579. if (rc) {
  4580. netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
  4581. vnic_id, rc);
  4582. break;
  4583. }
  4584. rc = bnxt_setup_vnic(bp, vnic_id);
  4585. if (rc)
  4586. break;
  4587. }
  4588. return rc;
  4589. #else
  4590. return 0;
  4591. #endif
  4592. }
  4593. /* Allow PF and VF with default VLAN to be in promiscuous mode */
  4594. static bool bnxt_promisc_ok(struct bnxt *bp)
  4595. {
  4596. #ifdef CONFIG_BNXT_SRIOV
  4597. if (BNXT_VF(bp) && !bp->vf.vlan)
  4598. return false;
  4599. #endif
  4600. return true;
  4601. }
  4602. static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
  4603. {
  4604. unsigned int rc = 0;
  4605. rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
  4606. if (rc) {
  4607. netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
  4608. rc);
  4609. return rc;
  4610. }
  4611. rc = bnxt_hwrm_vnic_cfg(bp, 1);
  4612. if (rc) {
  4613. netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
  4614. rc);
  4615. return rc;
  4616. }
  4617. return rc;
  4618. }
  4619. static int bnxt_cfg_rx_mode(struct bnxt *);
  4620. static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
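/* Bring up the chip: allocate stat contexts, rings, ring groups and VNICs,
 * program the unicast filter and RX mask, and apply coalescing settings.
 */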
  4621. static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
  4622. {
  4623. struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
  4624. int rc = 0;
  4625. unsigned int rx_nr_rings = bp->rx_nr_rings;
  4626. if (irq_re_init) {
  4627. rc = bnxt_hwrm_stat_ctx_alloc(bp);
  4628. if (rc) {
  4629. netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
  4630. rc);
  4631. goto err_out;
  4632. }
  4633. }
  4634. rc = bnxt_hwrm_ring_alloc(bp);
  4635. if (rc) {
  4636. netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
  4637. goto err_out;
  4638. }
  4639. rc = bnxt_hwrm_ring_grp_alloc(bp);
  4640. if (rc) {
  4641. netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
  4642. goto err_out;
  4643. }
  4644. if (BNXT_CHIP_TYPE_NITRO_A0(bp))
  4645. rx_nr_rings--;
  4646. /* default vnic 0 */
  4647. rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
  4648. if (rc) {
  4649. netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
  4650. goto err_out;
  4651. }
  4652. rc = bnxt_setup_vnic(bp, 0);
  4653. if (rc)
  4654. goto err_out;
  4655. if (bp->flags & BNXT_FLAG_RFS) {
  4656. rc = bnxt_alloc_rfs_vnics(bp);
  4657. if (rc)
  4658. goto err_out;
  4659. }
  4660. if (bp->flags & BNXT_FLAG_TPA) {
  4661. rc = bnxt_set_tpa(bp, true);
  4662. if (rc)
  4663. goto err_out;
  4664. }
  4665. if (BNXT_VF(bp))
  4666. bnxt_update_vf_mac(bp);
  4667. /* Filter for default vnic 0 */
  4668. rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
  4669. if (rc) {
  4670. netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
  4671. goto err_out;
  4672. }
  4673. vnic->uc_filter_count = 1;
  4674. vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
  4675. if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
  4676. vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
  4677. if (bp->dev->flags & IFF_ALLMULTI) {
  4678. vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
  4679. vnic->mc_list_count = 0;
  4680. } else {
  4681. u32 mask = 0;
  4682. bnxt_mc_list_updated(bp, &mask);
  4683. vnic->rx_mask |= mask;
  4684. }
  4685. rc = bnxt_cfg_rx_mode(bp);
  4686. if (rc)
  4687. goto err_out;
  4688. rc = bnxt_hwrm_set_coal(bp);
  4689. if (rc)
  4690. netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
  4691. rc);
  4692. if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
  4693. rc = bnxt_setup_nitroa0_vnic(bp);
  4694. if (rc)
  4695. netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
  4696. rc);
  4697. }
  4698. if (BNXT_VF(bp)) {
  4699. bnxt_hwrm_func_qcfg(bp);
  4700. netdev_update_features(bp->dev);
  4701. }
  4702. return 0;
  4703. err_out:
  4704. bnxt_hwrm_resource_free(bp, 0, true);
  4705. return rc;
  4706. }
  4707. static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
  4708. {
  4709. bnxt_hwrm_resource_free(bp, 1, irq_re_init);
  4710. return 0;
  4711. }
  4712. static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
  4713. {
  4714. bnxt_init_cp_rings(bp);
  4715. bnxt_init_rx_rings(bp);
  4716. bnxt_init_tx_rings(bp);
  4717. bnxt_init_ring_grps(bp, irq_re_init);
  4718. bnxt_init_vnics(bp);
  4719. return bnxt_init_chip(bp, irq_re_init);
  4720. }
  4721. static int bnxt_set_real_num_queues(struct bnxt *bp)
  4722. {
  4723. int rc;
  4724. struct net_device *dev = bp->dev;
  4725. rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
  4726. bp->tx_nr_rings_xdp);
  4727. if (rc)
  4728. return rc;
  4729. rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
  4730. if (rc)
  4731. return rc;
  4732. #ifdef CONFIG_RFS_ACCEL
  4733. if (bp->flags & BNXT_FLAG_RFS)
  4734. dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
  4735. #endif
  4736. return rc;
  4737. }
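/* Trim *rx and *tx so they fit within max: with shared rings each count is
 * capped at max, otherwise their sum is reduced until it fits.
 */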
  4738. static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
  4739. bool shared)
  4740. {
  4741. int _rx = *rx, _tx = *tx;
  4742. if (shared) {
  4743. *rx = min_t(int, _rx, max);
  4744. *tx = min_t(int, _tx, max);
  4745. } else {
  4746. if (max < 2)
  4747. return -ENOMEM;
  4748. while (_rx + _tx > max) {
  4749. if (_rx > _tx && _rx > 1)
  4750. _rx--;
  4751. else if (_tx > 1)
  4752. _tx--;
  4753. }
  4754. *rx = _rx;
  4755. *tx = _tx;
  4756. }
  4757. return 0;
  4758. }
  4759. static void bnxt_setup_msix(struct bnxt *bp)
  4760. {
  4761. const int len = sizeof(bp->irq_tbl[0].name);
  4762. struct net_device *dev = bp->dev;
  4763. int tcs, i;
  4764. tcs = netdev_get_num_tc(dev);
  4765. if (tcs > 1) {
  4766. int i, off, count;
  4767. for (i = 0; i < tcs; i++) {
  4768. count = bp->tx_nr_rings_per_tc;
  4769. off = i * count;
  4770. netdev_set_tc_queue(dev, i, count, off);
  4771. }
  4772. }
  4773. for (i = 0; i < bp->cp_nr_rings; i++) {
  4774. char *attr;
  4775. if (bp->flags & BNXT_FLAG_SHARED_RINGS)
  4776. attr = "TxRx";
  4777. else if (i < bp->rx_nr_rings)
  4778. attr = "rx";
  4779. else
  4780. attr = "tx";
  4781. snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr,
  4782. i);
  4783. bp->irq_tbl[i].handler = bnxt_msix;
  4784. }
  4785. }
  4786. static void bnxt_setup_inta(struct bnxt *bp)
  4787. {
  4788. const int len = sizeof(bp->irq_tbl[0].name);
  4789. if (netdev_get_num_tc(bp->dev))
  4790. netdev_reset_tc(bp->dev);
  4791. snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
  4792. 0);
  4793. bp->irq_tbl[0].handler = bnxt_inta;
  4794. }
  4795. static int bnxt_setup_int_mode(struct bnxt *bp)
  4796. {
  4797. int rc;
  4798. if (bp->flags & BNXT_FLAG_USING_MSIX)
  4799. bnxt_setup_msix(bp);
  4800. else
  4801. bnxt_setup_inta(bp);
  4802. rc = bnxt_set_real_num_queues(bp);
  4803. return rc;
  4804. }
  4805. #ifdef CONFIG_RFS_ACCEL
  4806. static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
  4807. {
  4808. return bp->hw_resc.max_rsscos_ctxs;
  4809. }
  4810. static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
  4811. {
  4812. return bp->hw_resc.max_vnics;
  4813. }
  4814. #endif
  4815. unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
  4816. {
  4817. return bp->hw_resc.max_stat_ctxs;
  4818. }
  4819. void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
  4820. {
  4821. bp->hw_resc.max_stat_ctxs = max;
  4822. }
  4823. unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
  4824. {
  4825. return bp->hw_resc.max_cp_rings;
  4826. }
  4827. void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
  4828. {
  4829. bp->hw_resc.max_cp_rings = max;
  4830. }
  4831. static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
  4832. {
  4833. struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
  4834. return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
  4835. }
  4836. void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
  4837. {
  4838. bp->hw_resc.max_irqs = max_irqs;
  4839. }
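/* Enable MSI-X with as many vectors as the function allows and trim the
 * ring counts to the number of vectors actually granted.
 */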
  4840. static int bnxt_init_msix(struct bnxt *bp)
  4841. {
  4842. int i, total_vecs, rc = 0, min = 1;
  4843. struct msix_entry *msix_ent;
  4844. total_vecs = bnxt_get_max_func_irqs(bp);
  4845. msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
  4846. if (!msix_ent)
  4847. return -ENOMEM;
  4848. for (i = 0; i < total_vecs; i++) {
  4849. msix_ent[i].entry = i;
  4850. msix_ent[i].vector = 0;
  4851. }
  4852. if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
  4853. min = 2;
  4854. total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
  4855. if (total_vecs < 0) {
  4856. rc = -ENODEV;
  4857. goto msix_setup_exit;
  4858. }
  4859. bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
  4860. if (bp->irq_tbl) {
  4861. for (i = 0; i < total_vecs; i++)
  4862. bp->irq_tbl[i].vector = msix_ent[i].vector;
  4863. bp->total_irqs = total_vecs;
  4864. /* Trim rings based upon num of vectors allocated */
  4865. rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
  4866. total_vecs, min == 1);
  4867. if (rc)
  4868. goto msix_setup_exit;
  4869. bp->cp_nr_rings = (min == 1) ?
  4870. max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
  4871. bp->tx_nr_rings + bp->rx_nr_rings;
  4872. } else {
  4873. rc = -ENOMEM;
  4874. goto msix_setup_exit;
  4875. }
  4876. bp->flags |= BNXT_FLAG_USING_MSIX;
  4877. kfree(msix_ent);
  4878. return 0;
  4879. msix_setup_exit:
  4880. netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
  4881. kfree(bp->irq_tbl);
  4882. bp->irq_tbl = NULL;
  4883. pci_disable_msix(bp->pdev);
  4884. kfree(msix_ent);
  4885. return rc;
  4886. }
  4887. static int bnxt_init_inta(struct bnxt *bp)
  4888. {
  4889. bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
  4890. if (!bp->irq_tbl)
  4891. return -ENOMEM;
  4892. bp->total_irqs = 1;
  4893. bp->rx_nr_rings = 1;
  4894. bp->tx_nr_rings = 1;
  4895. bp->cp_nr_rings = 1;
  4896. bp->flags |= BNXT_FLAG_SHARED_RINGS;
  4897. bp->irq_tbl[0].vector = bp->pdev->irq;
  4898. return 0;
  4899. }
  4900. static int bnxt_init_int_mode(struct bnxt *bp)
  4901. {
  4902. int rc = 0;
  4903. if (bp->flags & BNXT_FLAG_MSIX_CAP)
  4904. rc = bnxt_init_msix(bp);
  4905. if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
  4906. /* fallback to INTA */
  4907. rc = bnxt_init_inta(bp);
  4908. }
  4909. return rc;
  4910. }
  4911. static void bnxt_clear_int_mode(struct bnxt *bp)
  4912. {
  4913. if (bp->flags & BNXT_FLAG_USING_MSIX)
  4914. pci_disable_msix(bp->pdev);
  4915. kfree(bp->irq_tbl);
  4916. bp->irq_tbl = NULL;
  4917. bp->flags &= ~BNXT_FLAG_USING_MSIX;
  4918. }
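/* Re-reserve rings with the firmware if needed and re-initialize the
 * interrupt mode when more completion rings were granted.
 */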
  4919. static int bnxt_reserve_rings(struct bnxt *bp)
  4920. {
  4921. int orig_cp = bp->hw_resc.resv_cp_rings;
  4922. int tcs = netdev_get_num_tc(bp->dev);
  4923. int rc;
  4924. if (!bnxt_need_reserve_rings(bp))
  4925. return 0;
  4926. rc = __bnxt_reserve_rings(bp);
  4927. if (rc) {
  4928. netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
  4929. return rc;
  4930. }
  4931. if ((bp->flags & BNXT_FLAG_NEW_RM) && bp->cp_nr_rings > orig_cp) {
  4932. bnxt_clear_int_mode(bp);
  4933. rc = bnxt_init_int_mode(bp);
  4934. if (rc)
  4935. return rc;
  4936. }
  4937. if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
  4938. netdev_err(bp->dev, "tx ring reservation failure\n");
  4939. netdev_reset_tc(bp->dev);
  4940. bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
  4941. return -ENOMEM;
  4942. }
  4943. bp->num_stat_ctxs = bp->cp_nr_rings;
  4944. return 0;
  4945. }
  4946. static void bnxt_free_irq(struct bnxt *bp)
  4947. {
  4948. struct bnxt_irq *irq;
  4949. int i;
  4950. #ifdef CONFIG_RFS_ACCEL
  4951. free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
  4952. bp->dev->rx_cpu_rmap = NULL;
  4953. #endif
  4954. if (!bp->irq_tbl)
  4955. return;
  4956. for (i = 0; i < bp->cp_nr_rings; i++) {
  4957. irq = &bp->irq_tbl[i];
  4958. if (irq->requested) {
  4959. if (irq->have_cpumask) {
  4960. irq_set_affinity_hint(irq->vector, NULL);
  4961. free_cpumask_var(irq->cpu_mask);
  4962. irq->have_cpumask = 0;
  4963. }
  4964. free_irq(irq->vector, bp->bnapi[i]);
  4965. }
  4966. irq->requested = 0;
  4967. }
  4968. }
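/* Request all IRQs, add RX vectors to the aRFS CPU rmap and set a
 * NUMA-local affinity hint on each vector.
 */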
  4969. static int bnxt_request_irq(struct bnxt *bp)
  4970. {
  4971. int i, j, rc = 0;
  4972. unsigned long flags = 0;
  4973. #ifdef CONFIG_RFS_ACCEL
  4974. struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
  4975. #endif
  4976. if (!(bp->flags & BNXT_FLAG_USING_MSIX))
  4977. flags = IRQF_SHARED;
  4978. for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
  4979. struct bnxt_irq *irq = &bp->irq_tbl[i];
  4980. #ifdef CONFIG_RFS_ACCEL
  4981. if (rmap && bp->bnapi[i]->rx_ring) {
  4982. rc = irq_cpu_rmap_add(rmap, irq->vector);
  4983. if (rc)
  4984. netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
  4985. j);
  4986. j++;
  4987. }
  4988. #endif
  4989. rc = request_irq(irq->vector, irq->handler, flags, irq->name,
  4990. bp->bnapi[i]);
  4991. if (rc)
  4992. break;
  4993. irq->requested = 1;
  4994. if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
  4995. int numa_node = dev_to_node(&bp->pdev->dev);
  4996. irq->have_cpumask = 1;
  4997. cpumask_set_cpu(cpumask_local_spread(i, numa_node),
  4998. irq->cpu_mask);
  4999. rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
  5000. if (rc) {
  5001. netdev_warn(bp->dev,
  5002. "Set affinity failed, IRQ = %d\n",
  5003. irq->vector);
  5004. break;
  5005. }
  5006. }
  5007. }
  5008. return rc;
  5009. }
  5010. static void bnxt_del_napi(struct bnxt *bp)
  5011. {
  5012. int i;
  5013. if (!bp->bnapi)
  5014. return;
  5015. for (i = 0; i < bp->cp_nr_rings; i++) {
  5016. struct bnxt_napi *bnapi = bp->bnapi[i];
  5017. napi_hash_del(&bnapi->napi);
  5018. netif_napi_del(&bnapi->napi);
  5019. }
  5020. /* We called napi_hash_del() before netif_napi_del(), we need
  5021. * to respect an RCU grace period before freeing napi structures.
  5022. */
  5023. synchronize_net();
  5024. }
  5025. static void bnxt_init_napi(struct bnxt *bp)
  5026. {
  5027. int i;
  5028. unsigned int cp_nr_rings = bp->cp_nr_rings;
  5029. struct bnxt_napi *bnapi;
  5030. if (bp->flags & BNXT_FLAG_USING_MSIX) {
  5031. if (BNXT_CHIP_TYPE_NITRO_A0(bp))
  5032. cp_nr_rings--;
  5033. for (i = 0; i < cp_nr_rings; i++) {
  5034. bnapi = bp->bnapi[i];
  5035. netif_napi_add(bp->dev, &bnapi->napi,
  5036. bnxt_poll, 64);
  5037. }
  5038. if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
  5039. bnapi = bp->bnapi[cp_nr_rings];
  5040. netif_napi_add(bp->dev, &bnapi->napi,
  5041. bnxt_poll_nitroa0, 64);
  5042. }
  5043. } else {
  5044. bnapi = bp->bnapi[0];
  5045. netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
  5046. }
  5047. }
  5048. static void bnxt_disable_napi(struct bnxt *bp)
  5049. {
  5050. int i;
  5051. if (!bp->bnapi)
  5052. return;
  5053. for (i = 0; i < bp->cp_nr_rings; i++) {
  5054. struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
  5055. if (bp->bnapi[i]->rx_ring)
  5056. cancel_work_sync(&cpr->dim.work);
  5057. napi_disable(&bp->bnapi[i]->napi);
  5058. }
  5059. }
  5060. static void bnxt_enable_napi(struct bnxt *bp)
  5061. {
  5062. int i;
  5063. for (i = 0; i < bp->cp_nr_rings; i++) {
  5064. struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
  5065. bp->bnapi[i]->in_reset = false;
  5066. if (bp->bnapi[i]->rx_ring) {
  5067. INIT_WORK(&cpr->dim.work, bnxt_dim_work);
  5068. cpr->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
  5069. }
  5070. napi_enable(&bp->bnapi[i]->napi);
  5071. }
  5072. }
  5073. void bnxt_tx_disable(struct bnxt *bp)
  5074. {
  5075. int i;
  5076. struct bnxt_tx_ring_info *txr;
  5077. if (bp->tx_ring) {
  5078. for (i = 0; i < bp->tx_nr_rings; i++) {
  5079. txr = &bp->tx_ring[i];
  5080. txr->dev_state = BNXT_DEV_STATE_CLOSING;
  5081. }
  5082. }
  5083. /* Stop all TX queues */
  5084. netif_tx_disable(bp->dev);
  5085. netif_carrier_off(bp->dev);
  5086. }
  5087. void bnxt_tx_enable(struct bnxt *bp)
  5088. {
  5089. int i;
  5090. struct bnxt_tx_ring_info *txr;
  5091. for (i = 0; i < bp->tx_nr_rings; i++) {
  5092. txr = &bp->tx_ring[i];
  5093. txr->dev_state = 0;
  5094. }
  5095. netif_tx_wake_all_queues(bp->dev);
  5096. if (bp->link_info.link_up)
  5097. netif_carrier_on(bp->dev);
  5098. }
  5099. static void bnxt_report_link(struct bnxt *bp)
  5100. {
  5101. if (bp->link_info.link_up) {
  5102. const char *duplex;
  5103. const char *flow_ctrl;
  5104. u32 speed;
  5105. u16 fec;
  5106. netif_carrier_on(bp->dev);
  5107. if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
  5108. duplex = "full";
  5109. else
  5110. duplex = "half";
  5111. if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
  5112. flow_ctrl = "ON - receive & transmit";
  5113. else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
  5114. flow_ctrl = "ON - transmit";
  5115. else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
  5116. flow_ctrl = "ON - receive";
  5117. else
  5118. flow_ctrl = "none";
  5119. speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
  5120. netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
  5121. speed, duplex, flow_ctrl);
  5122. if (bp->flags & BNXT_FLAG_EEE_CAP)
  5123. netdev_info(bp->dev, "EEE is %s\n",
  5124. bp->eee.eee_active ? "active" :
  5125. "not active");
  5126. fec = bp->link_info.fec_cfg;
  5127. if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
  5128. netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
  5129. (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
  5130. (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
  5131. (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
  5132. } else {
  5133. netif_carrier_off(bp->dev);
  5134. netdev_err(bp->dev, "NIC Link is Down\n");
  5135. }
  5136. }
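/* Query PHY capabilities: EEE support and LPI timers, the auto-negotiable
 * speed mask and the port count.
 */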
  5137. static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
  5138. {
  5139. int rc = 0;
  5140. struct hwrm_port_phy_qcaps_input req = {0};
  5141. struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
  5142. struct bnxt_link_info *link_info = &bp->link_info;
  5143. if (bp->hwrm_spec_code < 0x10201)
  5144. return 0;
  5145. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
  5146. mutex_lock(&bp->hwrm_cmd_lock);
  5147. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  5148. if (rc)
  5149. goto hwrm_phy_qcaps_exit;
  5150. if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
  5151. struct ethtool_eee *eee = &bp->eee;
  5152. u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
  5153. bp->flags |= BNXT_FLAG_EEE_CAP;
  5154. eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
  5155. bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
  5156. PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
  5157. bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
  5158. PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
  5159. }
  5160. if (resp->supported_speeds_auto_mode)
  5161. link_info->support_auto_speeds =
  5162. le16_to_cpu(resp->supported_speeds_auto_mode);
  5163. bp->port_count = resp->port_cnt;
  5164. hwrm_phy_qcaps_exit:
  5165. mutex_unlock(&bp->hwrm_cmd_lock);
  5166. return rc;
  5167. }
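/* Refresh bp->link_info from HWRM_PORT_PHY_QCFG: link state, speed,
 * duplex, pause, EEE and FEC configuration.  When chng_link_state is
 * true, link_up is updated and a change is reported via
 * bnxt_report_link(); otherwise link_up is forced to 0.  Advertised
 * speeds the PHY no longer supports are pruned at the end.
 */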
  5168. static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
  5169. {
  5170. int rc = 0;
  5171. struct bnxt_link_info *link_info = &bp->link_info;
  5172. struct hwrm_port_phy_qcfg_input req = {0};
  5173. struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
  5174. u8 link_up = link_info->link_up;
  5175. u16 diff;
  5176. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
  5177. mutex_lock(&bp->hwrm_cmd_lock);
  5178. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  5179. if (rc) {
  5180. mutex_unlock(&bp->hwrm_cmd_lock);
  5181. return rc;
  5182. }
  5183. memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
  5184. link_info->phy_link_status = resp->link;
  5185. link_info->duplex = resp->duplex_cfg;
  5186. if (bp->hwrm_spec_code >= 0x10800)
  5187. link_info->duplex = resp->duplex_state;
  5188. link_info->pause = resp->pause;
  5189. link_info->auto_mode = resp->auto_mode;
  5190. link_info->auto_pause_setting = resp->auto_pause;
  5191. link_info->lp_pause = resp->link_partner_adv_pause;
  5192. link_info->force_pause_setting = resp->force_pause;
  5193. link_info->duplex_setting = resp->duplex_cfg;
  5194. if (link_info->phy_link_status == BNXT_LINK_LINK)
  5195. link_info->link_speed = le16_to_cpu(resp->link_speed);
  5196. else
  5197. link_info->link_speed = 0;
  5198. link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
  5199. link_info->support_speeds = le16_to_cpu(resp->support_speeds);
  5200. link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
  5201. link_info->lp_auto_link_speeds =
  5202. le16_to_cpu(resp->link_partner_adv_speeds);
  5203. link_info->preemphasis = le32_to_cpu(resp->preemphasis);
  5204. link_info->phy_ver[0] = resp->phy_maj;
  5205. link_info->phy_ver[1] = resp->phy_min;
  5206. link_info->phy_ver[2] = resp->phy_bld;
  5207. link_info->media_type = resp->media_type;
  5208. link_info->phy_type = resp->phy_type;
  5209. link_info->transceiver = resp->xcvr_pkg_type;
  5210. link_info->phy_addr = resp->eee_config_phy_addr &
  5211. PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
  5212. link_info->module_status = resp->module_status;
  5213. if (bp->flags & BNXT_FLAG_EEE_CAP) {
  5214. struct ethtool_eee *eee = &bp->eee;
  5215. u16 fw_speeds;
  5216. eee->eee_active = 0;
  5217. if (resp->eee_config_phy_addr &
  5218. PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
  5219. eee->eee_active = 1;
  5220. fw_speeds = le16_to_cpu(
  5221. resp->link_partner_adv_eee_link_speed_mask);
  5222. eee->lp_advertised =
  5223. _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
  5224. }
  5225. /* Pull initial EEE config */
  5226. if (!chng_link_state) {
  5227. if (resp->eee_config_phy_addr &
  5228. PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
  5229. eee->eee_enabled = 1;
  5230. fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
  5231. eee->advertised =
  5232. _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
  5233. if (resp->eee_config_phy_addr &
  5234. PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
  5235. __le32 tmr;
  5236. eee->tx_lpi_enabled = 1;
  5237. tmr = resp->xcvr_identifier_type_tx_lpi_timer;
  5238. eee->tx_lpi_timer = le32_to_cpu(tmr) &
  5239. PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
  5240. }
  5241. }
  5242. }
  5243. link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
  5244. if (bp->hwrm_spec_code >= 0x10504)
  5245. link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
  5246. /* TODO: need to add more logic to report VF link */
  5247. if (chng_link_state) {
  5248. if (link_info->phy_link_status == BNXT_LINK_LINK)
  5249. link_info->link_up = 1;
  5250. else
  5251. link_info->link_up = 0;
  5252. if (link_up != link_info->link_up)
  5253. bnxt_report_link(bp);
  5254. } else {
5255. /* always link down if not required to update link state */
  5256. link_info->link_up = 0;
  5257. }
  5258. mutex_unlock(&bp->hwrm_cmd_lock);
  5259. diff = link_info->support_auto_speeds ^ link_info->advertising;
  5260. if ((link_info->support_auto_speeds | diff) !=
  5261. link_info->support_auto_speeds) {
  5262. /* An advertised speed is no longer supported, so we need to
  5263. * update the advertisement settings. Caller holds RTNL
  5264. * so we can modify link settings.
  5265. */
  5266. link_info->advertising = link_info->support_auto_speeds;
  5267. if (link_info->autoneg & BNXT_AUTONEG_SPEED)
  5268. bnxt_hwrm_set_link_setting(bp, true, false);
  5269. }
  5270. return 0;
  5271. }
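/* Check the SFP+ module status reported by the last PHY query and warn
 * when the module is unqualified (TX disabled, powered down or warning).
 */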
  5272. static void bnxt_get_port_module_status(struct bnxt *bp)
  5273. {
  5274. struct bnxt_link_info *link_info = &bp->link_info;
  5275. struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
  5276. u8 module_status;
  5277. if (bnxt_update_link(bp, true))
  5278. return;
  5279. module_status = link_info->module_status;
  5280. switch (module_status) {
  5281. case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
  5282. case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
  5283. case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
  5284. netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
  5285. bp->pf.port_id);
  5286. if (bp->hwrm_spec_code >= 0x10201) {
  5287. netdev_warn(bp->dev, "Module part number %s\n",
  5288. resp->phy_vendor_partnumber);
  5289. }
  5290. if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
  5291. netdev_warn(bp->dev, "TX is disabled\n");
  5292. if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
  5293. netdev_warn(bp->dev, "SFP+ module is shutdown\n");
  5294. }
  5295. }
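/* Fill in the pause fields of a PORT_PHY_CFG request.  With autoneg
 * flow control, the requested RX/TX pause bits go into auto_pause;
 * otherwise they are forced, and on spec 0x10201+ firmware the same
 * value is mirrored into auto_pause as well.
 */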
  5296. static void
  5297. bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
  5298. {
  5299. if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
  5300. if (bp->hwrm_spec_code >= 0x10201)
  5301. req->auto_pause =
  5302. PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
  5303. if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
  5304. req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
  5305. if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
  5306. req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
  5307. req->enables |=
  5308. cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
  5309. } else {
  5310. if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
  5311. req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
  5312. if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
  5313. req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
  5314. req->enables |=
  5315. cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
  5316. if (bp->hwrm_spec_code >= 0x10201) {
  5317. req->auto_pause = req->force_pause;
  5318. req->enables |= cpu_to_le32(
  5319. PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
  5320. }
  5321. }
  5322. }
  5323. static void bnxt_hwrm_set_link_common(struct bnxt *bp,
  5324. struct hwrm_port_phy_cfg_input *req)
  5325. {
  5326. u8 autoneg = bp->link_info.autoneg;
  5327. u16 fw_link_speed = bp->link_info.req_link_speed;
  5328. u16 advertising = bp->link_info.advertising;
  5329. if (autoneg & BNXT_AUTONEG_SPEED) {
  5330. req->auto_mode |=
  5331. PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
  5332. req->enables |= cpu_to_le32(
  5333. PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
  5334. req->auto_link_speed_mask = cpu_to_le16(advertising);
  5335. req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
  5336. req->flags |=
  5337. cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
  5338. } else {
  5339. req->force_link_speed = cpu_to_le16(fw_link_speed);
  5340. req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
  5341. }
  5342. /* tell chimp that the setting takes effect immediately */
  5343. req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
  5344. }
  5345. int bnxt_hwrm_set_pause(struct bnxt *bp)
  5346. {
  5347. struct hwrm_port_phy_cfg_input req = {0};
  5348. int rc;
  5349. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
  5350. bnxt_hwrm_set_pause_common(bp, &req);
  5351. if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
  5352. bp->link_info.force_link_chng)
  5353. bnxt_hwrm_set_link_common(bp, &req);
  5354. mutex_lock(&bp->hwrm_cmd_lock);
  5355. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  5356. if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
5357. /* since changing the pause setting doesn't trigger any link
5358. * change event, the driver needs to update the current pause
5359. * result upon successful return of the phy_cfg command
  5360. */
  5361. bp->link_info.pause =
  5362. bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
  5363. bp->link_info.auto_pause_setting = 0;
  5364. if (!bp->link_info.force_link_chng)
  5365. bnxt_report_link(bp);
  5366. }
  5367. bp->link_info.force_link_chng = false;
  5368. mutex_unlock(&bp->hwrm_cmd_lock);
  5369. return rc;
  5370. }
  5371. static void bnxt_hwrm_set_eee(struct bnxt *bp,
  5372. struct hwrm_port_phy_cfg_input *req)
  5373. {
  5374. struct ethtool_eee *eee = &bp->eee;
  5375. if (eee->eee_enabled) {
  5376. u16 eee_speeds;
  5377. u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
  5378. if (eee->tx_lpi_enabled)
  5379. flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
  5380. else
  5381. flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
  5382. req->flags |= cpu_to_le32(flags);
  5383. eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
  5384. req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
  5385. req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
  5386. } else {
  5387. req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
  5388. }
  5389. }
  5390. int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
  5391. {
  5392. struct hwrm_port_phy_cfg_input req = {0};
  5393. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
  5394. if (set_pause)
  5395. bnxt_hwrm_set_pause_common(bp, &req);
  5396. bnxt_hwrm_set_link_common(bp, &req);
  5397. if (set_eee)
  5398. bnxt_hwrm_set_eee(bp, &req);
  5399. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  5400. }
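/* Force the link down on close.  Skipped unless this is a single-function
 * PF with no active VFs, presumably so other functions or VFs sharing the
 * port are not affected.
 */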
  5401. static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
  5402. {
  5403. struct hwrm_port_phy_cfg_input req = {0};
  5404. if (!BNXT_SINGLE_PF(bp))
  5405. return 0;
  5406. if (pci_num_vf(bp->pdev))
  5407. return 0;
  5408. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
  5409. req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
  5410. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  5411. }
  5412. static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
  5413. {
  5414. struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
  5415. struct hwrm_port_led_qcaps_input req = {0};
  5416. struct bnxt_pf_info *pf = &bp->pf;
  5417. int rc;
  5418. if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
  5419. return 0;
  5420. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
  5421. req.port_id = cpu_to_le16(pf->port_id);
  5422. mutex_lock(&bp->hwrm_cmd_lock);
  5423. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  5424. if (rc) {
  5425. mutex_unlock(&bp->hwrm_cmd_lock);
  5426. return rc;
  5427. }
  5428. if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
  5429. int i;
  5430. bp->num_leds = resp->num_leds;
  5431. memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
  5432. bp->num_leds);
  5433. for (i = 0; i < bp->num_leds; i++) {
  5434. struct bnxt_led_info *led = &bp->leds[i];
  5435. __le16 caps = led->led_state_caps;
  5436. if (!led->led_group_id ||
  5437. !BNXT_LED_ALT_BLINK_CAP(caps)) {
  5438. bp->num_leds = 0;
  5439. break;
  5440. }
  5441. }
  5442. }
  5443. mutex_unlock(&bp->hwrm_cmd_lock);
  5444. return 0;
  5445. }
  5446. int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
  5447. {
  5448. struct hwrm_wol_filter_alloc_input req = {0};
  5449. struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
  5450. int rc;
  5451. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
  5452. req.port_id = cpu_to_le16(bp->pf.port_id);
  5453. req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
  5454. req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
  5455. memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
  5456. mutex_lock(&bp->hwrm_cmd_lock);
  5457. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  5458. if (!rc)
  5459. bp->wol_filter_id = resp->wol_filter_id;
  5460. mutex_unlock(&bp->hwrm_cmd_lock);
  5461. return rc;
  5462. }
  5463. int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
  5464. {
  5465. struct hwrm_wol_filter_free_input req = {0};
  5466. int rc;
  5467. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
  5468. req.port_id = cpu_to_le16(bp->pf.port_id);
  5469. req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
  5470. req.wol_filter_id = bp->wol_filter_id;
  5471. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  5472. return rc;
  5473. }
  5474. static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
  5475. {
  5476. struct hwrm_wol_filter_qcfg_input req = {0};
  5477. struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
  5478. u16 next_handle = 0;
  5479. int rc;
  5480. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
  5481. req.port_id = cpu_to_le16(bp->pf.port_id);
  5482. req.handle = cpu_to_le16(handle);
  5483. mutex_lock(&bp->hwrm_cmd_lock);
  5484. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  5485. if (!rc) {
  5486. next_handle = le16_to_cpu(resp->next_handle);
  5487. if (next_handle != 0) {
  5488. if (resp->wol_type ==
  5489. WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
  5490. bp->wol = 1;
  5491. bp->wol_filter_id = resp->wol_filter_id;
  5492. }
  5493. }
  5494. }
  5495. mutex_unlock(&bp->hwrm_cmd_lock);
  5496. return next_handle;
  5497. }
  5498. static void bnxt_get_wol_settings(struct bnxt *bp)
  5499. {
  5500. u16 handle = 0;
  5501. if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
  5502. return;
  5503. do {
  5504. handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
  5505. } while (handle && handle != 0xffff);
  5506. }
  5507. static bool bnxt_eee_config_ok(struct bnxt *bp)
  5508. {
  5509. struct ethtool_eee *eee = &bp->eee;
  5510. struct bnxt_link_info *link_info = &bp->link_info;
  5511. if (!(bp->flags & BNXT_FLAG_EEE_CAP))
  5512. return true;
  5513. if (eee->eee_enabled) {
  5514. u32 advertising =
  5515. _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
  5516. if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
  5517. eee->eee_enabled = 0;
  5518. return false;
  5519. }
  5520. if (eee->advertised & ~advertising) {
  5521. eee->advertised = advertising & eee->supported;
  5522. return false;
  5523. }
  5524. }
  5525. return true;
  5526. }
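/* Compare the requested link, pause and EEE settings against what the
 * firmware reports and reprogram the PHY only when something differs
 * (or when the carrier is off after a previous close).  Anything other
 * than a single-function PF returns early after the link query.
 */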
  5527. static int bnxt_update_phy_setting(struct bnxt *bp)
  5528. {
  5529. int rc;
  5530. bool update_link = false;
  5531. bool update_pause = false;
  5532. bool update_eee = false;
  5533. struct bnxt_link_info *link_info = &bp->link_info;
  5534. rc = bnxt_update_link(bp, true);
  5535. if (rc) {
  5536. netdev_err(bp->dev, "failed to update link (rc: %x)\n",
  5537. rc);
  5538. return rc;
  5539. }
  5540. if (!BNXT_SINGLE_PF(bp))
  5541. return 0;
  5542. if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
  5543. (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
  5544. link_info->req_flow_ctrl)
  5545. update_pause = true;
  5546. if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
  5547. link_info->force_pause_setting != link_info->req_flow_ctrl)
  5548. update_pause = true;
  5549. if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
  5550. if (BNXT_AUTO_MODE(link_info->auto_mode))
  5551. update_link = true;
  5552. if (link_info->req_link_speed != link_info->force_link_speed)
  5553. update_link = true;
  5554. if (link_info->req_duplex != link_info->duplex_setting)
  5555. update_link = true;
  5556. } else {
  5557. if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
  5558. update_link = true;
  5559. if (link_info->advertising != link_info->auto_link_speeds)
  5560. update_link = true;
  5561. }
5562. /* The last close may have shut down the link, so we need to call
  5563. * PHY_CFG to bring it back up.
  5564. */
  5565. if (!netif_carrier_ok(bp->dev))
  5566. update_link = true;
  5567. if (!bnxt_eee_config_ok(bp))
  5568. update_eee = true;
  5569. if (update_link)
  5570. rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
  5571. else if (update_pause)
  5572. rc = bnxt_hwrm_set_pause(bp);
  5573. if (rc) {
  5574. netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
  5575. rc);
  5576. return rc;
  5577. }
  5578. return rc;
  5579. }
5580. /* Common routine to pre-map certain register blocks to different GRC windows.
5581. * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
5582. * in the PF and 3 windows in the VF can be customized to map in different
  5583. * register blocks.
  5584. */
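/* Note: the +12 byte offset below appears to select the fourth 32-bit
 * window register (12 = 3 * 4 bytes), i.e. GRC window #4 as described in
 * the function itself.
 */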
  5585. static void bnxt_preset_reg_win(struct bnxt *bp)
  5586. {
  5587. if (BNXT_PF(bp)) {
  5588. /* CAG registers map to GRC window #4 */
  5589. writel(BNXT_CAG_REG_BASE,
  5590. bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
  5591. }
  5592. }
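/* Bring the NIC up: optionally re-reserve rings and re-init the interrupt
 * mode, allocate ring memory, set up NAPI and IRQs, initialize the rings
 * and VNICs via firmware, optionally re-apply PHY settings, then enable
 * interrupts and TX and start the periodic timer.  On failure, partially
 * allocated resources are torn down through the error labels.
 */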
  5593. static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
  5594. {
  5595. int rc = 0;
  5596. bnxt_preset_reg_win(bp);
  5597. netif_carrier_off(bp->dev);
  5598. if (irq_re_init) {
  5599. rc = bnxt_reserve_rings(bp);
  5600. if (rc)
  5601. return rc;
  5602. rc = bnxt_setup_int_mode(bp);
  5603. if (rc) {
  5604. netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
  5605. rc);
  5606. return rc;
  5607. }
  5608. }
  5609. if ((bp->flags & BNXT_FLAG_RFS) &&
  5610. !(bp->flags & BNXT_FLAG_USING_MSIX)) {
  5611. /* disable RFS if falling back to INTA */
  5612. bp->dev->hw_features &= ~NETIF_F_NTUPLE;
  5613. bp->flags &= ~BNXT_FLAG_RFS;
  5614. }
  5615. rc = bnxt_alloc_mem(bp, irq_re_init);
  5616. if (rc) {
  5617. netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
  5618. goto open_err_free_mem;
  5619. }
  5620. if (irq_re_init) {
  5621. bnxt_init_napi(bp);
  5622. rc = bnxt_request_irq(bp);
  5623. if (rc) {
  5624. netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
  5625. goto open_err;
  5626. }
  5627. }
  5628. bnxt_enable_napi(bp);
  5629. rc = bnxt_init_nic(bp, irq_re_init);
  5630. if (rc) {
  5631. netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
  5632. goto open_err;
  5633. }
  5634. if (link_re_init) {
  5635. mutex_lock(&bp->link_lock);
  5636. rc = bnxt_update_phy_setting(bp);
  5637. mutex_unlock(&bp->link_lock);
  5638. if (rc)
  5639. netdev_warn(bp->dev, "failed to update phy settings\n");
  5640. }
  5641. if (irq_re_init)
  5642. udp_tunnel_get_rx_info(bp->dev);
  5643. set_bit(BNXT_STATE_OPEN, &bp->state);
  5644. bnxt_enable_int(bp);
  5645. /* Enable TX queues */
  5646. bnxt_tx_enable(bp);
  5647. mod_timer(&bp->timer, jiffies + bp->current_interval);
  5648. /* Poll link status and check for SFP+ module status */
  5649. bnxt_get_port_module_status(bp);
  5650. /* VF-reps may need to be re-opened after the PF is re-opened */
  5651. if (BNXT_PF(bp))
  5652. bnxt_vf_reps_open(bp);
  5653. return 0;
  5654. open_err:
  5655. bnxt_disable_napi(bp);
  5656. bnxt_del_napi(bp);
  5657. open_err_free_mem:
  5658. bnxt_free_skbs(bp);
  5659. bnxt_free_irq(bp);
  5660. bnxt_free_mem(bp, true);
  5661. return rc;
  5662. }
  5663. /* rtnl_lock held */
  5664. int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
  5665. {
  5666. int rc = 0;
  5667. rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
  5668. if (rc) {
  5669. netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
  5670. dev_close(bp->dev);
  5671. }
  5672. return rc;
  5673. }
5674. /* rtnl_lock held, open the NIC halfway by allocating all resources, but
5675. * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
5676. * self-tests.
  5677. */
  5678. int bnxt_half_open_nic(struct bnxt *bp)
  5679. {
  5680. int rc = 0;
  5681. rc = bnxt_alloc_mem(bp, false);
  5682. if (rc) {
  5683. netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
  5684. goto half_open_err;
  5685. }
  5686. rc = bnxt_init_nic(bp, false);
  5687. if (rc) {
  5688. netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
  5689. goto half_open_err;
  5690. }
  5691. return 0;
  5692. half_open_err:
  5693. bnxt_free_skbs(bp);
  5694. bnxt_free_mem(bp, false);
  5695. dev_close(bp->dev);
  5696. return rc;
  5697. }
  5698. /* rtnl_lock held, this call can only be made after a previous successful
  5699. * call to bnxt_half_open_nic().
  5700. */
  5701. void bnxt_half_close_nic(struct bnxt *bp)
  5702. {
  5703. bnxt_hwrm_resource_free(bp, false, false);
  5704. bnxt_free_skbs(bp);
  5705. bnxt_free_mem(bp, false);
  5706. }
  5707. static int bnxt_open(struct net_device *dev)
  5708. {
  5709. struct bnxt *bp = netdev_priv(dev);
  5710. return __bnxt_open_nic(bp, true, true);
  5711. }
  5712. static bool bnxt_drv_busy(struct bnxt *bp)
  5713. {
  5714. return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
  5715. test_bit(BNXT_STATE_READ_STATS, &bp->state));
  5716. }
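/* Tear the NIC down: stop TX, clear BNXT_STATE_OPEN and wait for any
 * in-flight stats reads or slow-path work to drain (bnxt_drv_busy()),
 * then free firmware resources, NAPI, the timer, SKBs, IRQs and memory.
 */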
  5717. static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
  5718. bool link_re_init)
  5719. {
  5720. /* Close the VF-reps before closing PF */
  5721. if (BNXT_PF(bp))
  5722. bnxt_vf_reps_close(bp);
5723. /* Change device state to avoid TX queue wake-ups */
  5724. bnxt_tx_disable(bp);
  5725. clear_bit(BNXT_STATE_OPEN, &bp->state);
  5726. smp_mb__after_atomic();
  5727. while (bnxt_drv_busy(bp))
  5728. msleep(20);
5729. /* Flush rings and disable interrupts */
  5730. bnxt_shutdown_nic(bp, irq_re_init);
  5731. /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
  5732. bnxt_disable_napi(bp);
  5733. del_timer_sync(&bp->timer);
  5734. bnxt_free_skbs(bp);
  5735. if (irq_re_init) {
  5736. bnxt_free_irq(bp);
  5737. bnxt_del_napi(bp);
  5738. }
  5739. bnxt_free_mem(bp, irq_re_init);
  5740. }
  5741. int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
  5742. {
  5743. int rc = 0;
  5744. #ifdef CONFIG_BNXT_SRIOV
  5745. if (bp->sriov_cfg) {
  5746. rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
  5747. !bp->sriov_cfg,
  5748. BNXT_SRIOV_CFG_WAIT_TMO);
  5749. if (rc)
  5750. netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
  5751. }
  5752. #endif
  5753. __bnxt_close_nic(bp, irq_re_init, link_re_init);
  5754. return rc;
  5755. }
  5756. static int bnxt_close(struct net_device *dev)
  5757. {
  5758. struct bnxt *bp = netdev_priv(dev);
  5759. bnxt_close_nic(bp, true, true);
  5760. bnxt_hwrm_shutdown_link(bp);
  5761. return 0;
  5762. }
  5763. /* rtnl_lock held */
  5764. static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  5765. {
  5766. switch (cmd) {
  5767. case SIOCGMIIPHY:
  5768. /* fallthru */
  5769. case SIOCGMIIREG: {
  5770. if (!netif_running(dev))
  5771. return -EAGAIN;
  5772. return 0;
  5773. }
  5774. case SIOCSMIIREG:
  5775. if (!netif_running(dev))
  5776. return -EAGAIN;
  5777. return 0;
  5778. default:
  5779. /* do nothing */
  5780. break;
  5781. }
  5782. return -EOPNOTSUPP;
  5783. }
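/* Accumulate per-completion-ring hardware counters into the standard
 * rtnl_link_stats64 fields, plus port-level error counters when port
 * stats are enabled.  BNXT_STATE_READ_STATS is set around the loop so
 * bnxt_close_nic() can wait for readers before freeing the rings.
 */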
  5784. static void
  5785. bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
  5786. {
  5787. u32 i;
  5788. struct bnxt *bp = netdev_priv(dev);
  5789. set_bit(BNXT_STATE_READ_STATS, &bp->state);
  5790. /* Make sure bnxt_close_nic() sees that we are reading stats before
  5791. * we check the BNXT_STATE_OPEN flag.
  5792. */
  5793. smp_mb__after_atomic();
  5794. if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
  5795. clear_bit(BNXT_STATE_READ_STATS, &bp->state);
  5796. return;
  5797. }
  5798. /* TODO check if we need to synchronize with bnxt_close path */
  5799. for (i = 0; i < bp->cp_nr_rings; i++) {
  5800. struct bnxt_napi *bnapi = bp->bnapi[i];
  5801. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  5802. struct ctx_hw_stats *hw_stats = cpr->hw_stats;
  5803. stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
  5804. stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
  5805. stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
  5806. stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
  5807. stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
  5808. stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
  5809. stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
  5810. stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
  5811. stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
  5812. stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
  5813. stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
  5814. stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
  5815. stats->rx_missed_errors +=
  5816. le64_to_cpu(hw_stats->rx_discard_pkts);
  5817. stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
  5818. stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
  5819. }
  5820. if (bp->flags & BNXT_FLAG_PORT_STATS) {
  5821. struct rx_port_stats *rx = bp->hw_rx_port_stats;
  5822. struct tx_port_stats *tx = bp->hw_tx_port_stats;
  5823. stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
  5824. stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
  5825. stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
  5826. le64_to_cpu(rx->rx_ovrsz_frames) +
  5827. le64_to_cpu(rx->rx_runt_frames);
  5828. stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
  5829. le64_to_cpu(rx->rx_jbr_frames);
  5830. stats->collisions = le64_to_cpu(tx->tx_total_collisions);
  5831. stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
  5832. stats->tx_errors = le64_to_cpu(tx->tx_err);
  5833. }
  5834. clear_bit(BNXT_STATE_READ_STATS, &bp->state);
  5835. }
  5836. static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
  5837. {
  5838. struct net_device *dev = bp->dev;
  5839. struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
  5840. struct netdev_hw_addr *ha;
  5841. u8 *haddr;
  5842. int mc_count = 0;
  5843. bool update = false;
  5844. int off = 0;
  5845. netdev_for_each_mc_addr(ha, dev) {
  5846. if (mc_count >= BNXT_MAX_MC_ADDRS) {
  5847. *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
  5848. vnic->mc_list_count = 0;
  5849. return false;
  5850. }
  5851. haddr = ha->addr;
  5852. if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
  5853. memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
  5854. update = true;
  5855. }
  5856. off += ETH_ALEN;
  5857. mc_count++;
  5858. }
  5859. if (mc_count)
  5860. *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
  5861. if (mc_count != vnic->mc_list_count) {
  5862. vnic->mc_list_count = mc_count;
  5863. update = true;
  5864. }
  5865. return update;
  5866. }
  5867. static bool bnxt_uc_list_updated(struct bnxt *bp)
  5868. {
  5869. struct net_device *dev = bp->dev;
  5870. struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
  5871. struct netdev_hw_addr *ha;
  5872. int off = 0;
  5873. if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
  5874. return true;
  5875. netdev_for_each_uc_addr(ha, dev) {
  5876. if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
  5877. return true;
  5878. off += ETH_ALEN;
  5879. }
  5880. return false;
  5881. }
  5882. static void bnxt_set_rx_mode(struct net_device *dev)
  5883. {
  5884. struct bnxt *bp = netdev_priv(dev);
  5885. struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
  5886. u32 mask = vnic->rx_mask;
  5887. bool mc_update = false;
  5888. bool uc_update;
  5889. if (!netif_running(dev))
  5890. return;
  5891. mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
  5892. CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
  5893. CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
  5894. if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
  5895. mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
  5896. uc_update = bnxt_uc_list_updated(bp);
  5897. if (dev->flags & IFF_ALLMULTI) {
  5898. mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
  5899. vnic->mc_list_count = 0;
  5900. } else {
  5901. mc_update = bnxt_mc_list_updated(bp, &mask);
  5902. }
  5903. if (mask != vnic->rx_mask || uc_update || mc_update) {
  5904. vnic->rx_mask = mask;
  5905. set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
  5906. bnxt_queue_sp_work(bp);
  5907. }
  5908. }
  5909. static int bnxt_cfg_rx_mode(struct bnxt *bp)
  5910. {
  5911. struct net_device *dev = bp->dev;
  5912. struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
  5913. struct netdev_hw_addr *ha;
  5914. int i, off = 0, rc;
  5915. bool uc_update;
  5916. netif_addr_lock_bh(dev);
  5917. uc_update = bnxt_uc_list_updated(bp);
  5918. netif_addr_unlock_bh(dev);
  5919. if (!uc_update)
  5920. goto skip_uc;
  5921. mutex_lock(&bp->hwrm_cmd_lock);
  5922. for (i = 1; i < vnic->uc_filter_count; i++) {
  5923. struct hwrm_cfa_l2_filter_free_input req = {0};
  5924. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
  5925. -1);
  5926. req.l2_filter_id = vnic->fw_l2_filter_id[i];
  5927. rc = _hwrm_send_message(bp, &req, sizeof(req),
  5928. HWRM_CMD_TIMEOUT);
  5929. }
  5930. mutex_unlock(&bp->hwrm_cmd_lock);
  5931. vnic->uc_filter_count = 1;
  5932. netif_addr_lock_bh(dev);
  5933. if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
  5934. vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
  5935. } else {
  5936. netdev_for_each_uc_addr(ha, dev) {
  5937. memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
  5938. off += ETH_ALEN;
  5939. vnic->uc_filter_count++;
  5940. }
  5941. }
  5942. netif_addr_unlock_bh(dev);
  5943. for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
  5944. rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
  5945. if (rc) {
  5946. netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
  5947. rc);
  5948. vnic->uc_filter_count = i;
  5949. return rc;
  5950. }
  5951. }
  5952. skip_uc:
  5953. rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
  5954. if (rc)
  5955. netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
  5956. rc);
  5957. return rc;
  5958. }
5959. /* If the chip and firmware support RFS */
  5960. static bool bnxt_rfs_supported(struct bnxt *bp)
  5961. {
  5962. if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
  5963. return true;
  5964. if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
  5965. return true;
  5966. return false;
  5967. }
  5968. /* If runtime conditions support RFS */
  5969. static bool bnxt_rfs_capable(struct bnxt *bp)
  5970. {
  5971. #ifdef CONFIG_RFS_ACCEL
  5972. int vnics, max_vnics, max_rss_ctxs;
  5973. if (!(bp->flags & BNXT_FLAG_MSIX_CAP))
  5974. return false;
  5975. vnics = 1 + bp->rx_nr_rings;
  5976. max_vnics = bnxt_get_max_func_vnics(bp);
  5977. max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
  5978. /* RSS contexts not a limiting factor */
  5979. if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
  5980. max_rss_ctxs = max_vnics;
  5981. if (vnics > max_vnics || vnics > max_rss_ctxs) {
  5982. if (bp->rx_nr_rings > 1)
  5983. netdev_warn(bp->dev,
  5984. "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
  5985. min(max_rss_ctxs - 1, max_vnics - 1));
  5986. return false;
  5987. }
  5988. if (!(bp->flags & BNXT_FLAG_NEW_RM))
  5989. return true;
  5990. if (vnics == bp->hw_resc.resv_vnics)
  5991. return true;
  5992. bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, vnics);
  5993. if (vnics <= bp->hw_resc.resv_vnics)
  5994. return true;
  5995. netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
  5996. bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 1);
  5997. return false;
  5998. #else
  5999. return false;
  6000. #endif
  6001. }
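/* Resolve feature dependencies: NTUPLE requires RFS capability, hardware
 * GRO and LRO are mutually exclusive, and CTAG/STAG RX VLAN stripping
 * must be toggled together (and is cleared on VFs with a VLAN assigned).
 */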
  6002. static netdev_features_t bnxt_fix_features(struct net_device *dev,
  6003. netdev_features_t features)
  6004. {
  6005. struct bnxt *bp = netdev_priv(dev);
  6006. if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
  6007. features &= ~NETIF_F_NTUPLE;
  6008. if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
  6009. features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
  6010. if (!(features & NETIF_F_GRO))
  6011. features &= ~NETIF_F_GRO_HW;
  6012. if (features & NETIF_F_GRO_HW)
  6013. features &= ~NETIF_F_LRO;
6014. /* Both CTAG and STAG VLAN acceleration on the RX side have to be
  6015. * turned on or off together.
  6016. */
  6017. if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
  6018. (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
  6019. if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
  6020. features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
  6021. NETIF_F_HW_VLAN_STAG_RX);
  6022. else
  6023. features |= NETIF_F_HW_VLAN_CTAG_RX |
  6024. NETIF_F_HW_VLAN_STAG_RX;
  6025. }
  6026. #ifdef CONFIG_BNXT_SRIOV
  6027. if (BNXT_VF(bp)) {
  6028. if (bp->vf.vlan) {
  6029. features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
  6030. NETIF_F_HW_VLAN_STAG_RX);
  6031. }
  6032. }
  6033. #endif
  6034. return features;
  6035. }
  6036. static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
  6037. {
  6038. struct bnxt *bp = netdev_priv(dev);
  6039. u32 flags = bp->flags;
  6040. u32 changes;
  6041. int rc = 0;
  6042. bool re_init = false;
  6043. bool update_tpa = false;
  6044. flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
  6045. if (features & NETIF_F_GRO_HW)
  6046. flags |= BNXT_FLAG_GRO;
  6047. else if (features & NETIF_F_LRO)
  6048. flags |= BNXT_FLAG_LRO;
  6049. if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
  6050. flags &= ~BNXT_FLAG_TPA;
  6051. if (features & NETIF_F_HW_VLAN_CTAG_RX)
  6052. flags |= BNXT_FLAG_STRIP_VLAN;
  6053. if (features & NETIF_F_NTUPLE)
  6054. flags |= BNXT_FLAG_RFS;
  6055. changes = flags ^ bp->flags;
  6056. if (changes & BNXT_FLAG_TPA) {
  6057. update_tpa = true;
  6058. if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
  6059. (flags & BNXT_FLAG_TPA) == 0)
  6060. re_init = true;
  6061. }
  6062. if (changes & ~BNXT_FLAG_TPA)
  6063. re_init = true;
  6064. if (flags != bp->flags) {
  6065. u32 old_flags = bp->flags;
  6066. bp->flags = flags;
  6067. if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
  6068. if (update_tpa)
  6069. bnxt_set_ring_params(bp);
  6070. return rc;
  6071. }
  6072. if (re_init) {
  6073. bnxt_close_nic(bp, false, false);
  6074. if (update_tpa)
  6075. bnxt_set_ring_params(bp);
  6076. return bnxt_open_nic(bp, false, false);
  6077. }
  6078. if (update_tpa) {
  6079. rc = bnxt_set_tpa(bp,
  6080. (flags & BNXT_FLAG_TPA) ?
  6081. true : false);
  6082. if (rc)
  6083. bp->flags = old_flags;
  6084. }
  6085. }
  6086. return rc;
  6087. }
  6088. static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
  6089. {
  6090. struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
  6091. int i = bnapi->index;
  6092. if (!txr)
  6093. return;
  6094. netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
  6095. i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
  6096. txr->tx_cons);
  6097. }
  6098. static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
  6099. {
  6100. struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
  6101. int i = bnapi->index;
  6102. if (!rxr)
  6103. return;
  6104. netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
  6105. i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
  6106. rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
  6107. rxr->rx_sw_agg_prod);
  6108. }
  6109. static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
  6110. {
  6111. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  6112. int i = bnapi->index;
  6113. netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
  6114. i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
  6115. }
  6116. static void bnxt_dbg_dump_states(struct bnxt *bp)
  6117. {
  6118. int i;
  6119. struct bnxt_napi *bnapi;
  6120. for (i = 0; i < bp->cp_nr_rings; i++) {
  6121. bnapi = bp->bnapi[i];
  6122. if (netif_msg_drv(bp)) {
  6123. bnxt_dump_tx_sw_state(bnapi);
  6124. bnxt_dump_rx_sw_state(bnapi);
  6125. bnxt_dump_cp_sw_state(bnapi);
  6126. }
  6127. }
  6128. }
  6129. static void bnxt_reset_task(struct bnxt *bp, bool silent)
  6130. {
  6131. if (!silent)
  6132. bnxt_dbg_dump_states(bp);
  6133. if (netif_running(bp->dev)) {
  6134. int rc;
  6135. if (!silent)
  6136. bnxt_ulp_stop(bp);
  6137. bnxt_close_nic(bp, false, false);
  6138. rc = bnxt_open_nic(bp, false, false);
  6139. if (!silent && !rc)
  6140. bnxt_ulp_start(bp);
  6141. }
  6142. }
  6143. static void bnxt_tx_timeout(struct net_device *dev)
  6144. {
  6145. struct bnxt *bp = netdev_priv(dev);
  6146. netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
  6147. set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
  6148. bnxt_queue_sp_work(bp);
  6149. }
  6150. #ifdef CONFIG_NET_POLL_CONTROLLER
  6151. static void bnxt_poll_controller(struct net_device *dev)
  6152. {
  6153. struct bnxt *bp = netdev_priv(dev);
  6154. int i;
  6155. /* Only process tx rings/combined rings in netpoll mode. */
  6156. for (i = 0; i < bp->tx_nr_rings; i++) {
  6157. struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
  6158. napi_schedule(&txr->bnapi->napi);
  6159. }
  6160. }
  6161. #endif
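/* Periodic timer: schedule port statistics and TC flower stats updates
 * through the slow-path workqueue, then re-arm itself.
 */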
  6162. static void bnxt_timer(struct timer_list *t)
  6163. {
  6164. struct bnxt *bp = from_timer(bp, t, timer);
  6165. struct net_device *dev = bp->dev;
  6166. if (!netif_running(dev))
  6167. return;
  6168. if (atomic_read(&bp->intr_sem) != 0)
  6169. goto bnxt_restart_timer;
  6170. if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
  6171. bp->stats_coal_ticks) {
  6172. set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
  6173. bnxt_queue_sp_work(bp);
  6174. }
  6175. if (bnxt_tc_flower_enabled(bp)) {
  6176. set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
  6177. bnxt_queue_sp_work(bp);
  6178. }
  6179. bnxt_restart_timer:
  6180. mod_timer(&bp->timer, jiffies + bp->current_interval);
  6181. }
  6182. static void bnxt_rtnl_lock_sp(struct bnxt *bp)
  6183. {
  6184. /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
  6185. * set. If the device is being closed, bnxt_close() may be holding
6186. * the rtnl lock and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
6187. * must clear BNXT_STATE_IN_SP_TASK before taking the rtnl lock.
  6188. */
  6189. clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
  6190. rtnl_lock();
  6191. }
  6192. static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
  6193. {
  6194. set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
  6195. rtnl_unlock();
  6196. }
  6197. /* Only called from bnxt_sp_task() */
  6198. static void bnxt_reset(struct bnxt *bp, bool silent)
  6199. {
  6200. bnxt_rtnl_lock_sp(bp);
  6201. if (test_bit(BNXT_STATE_OPEN, &bp->state))
  6202. bnxt_reset_task(bp, silent);
  6203. bnxt_rtnl_unlock_sp(bp);
  6204. }
  6205. static void bnxt_cfg_ntp_filters(struct bnxt *);
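/* Slow-path work function.  Handles events deferred from softirq or timer
 * context (RX mode changes, ntuple filters, tunnel port add/free, stats,
 * link changes, module status and resets) while BNXT_STATE_IN_SP_TASK is
 * set so that the close path can synchronize against it.
 */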
  6206. static void bnxt_sp_task(struct work_struct *work)
  6207. {
  6208. struct bnxt *bp = container_of(work, struct bnxt, sp_task);
  6209. set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
  6210. smp_mb__after_atomic();
  6211. if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
  6212. clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
  6213. return;
  6214. }
  6215. if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
  6216. bnxt_cfg_rx_mode(bp);
  6217. if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
  6218. bnxt_cfg_ntp_filters(bp);
  6219. if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
  6220. bnxt_hwrm_exec_fwd_req(bp);
  6221. if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
  6222. bnxt_hwrm_tunnel_dst_port_alloc(
  6223. bp, bp->vxlan_port,
  6224. TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
  6225. }
  6226. if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
  6227. bnxt_hwrm_tunnel_dst_port_free(
  6228. bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
  6229. }
  6230. if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
  6231. bnxt_hwrm_tunnel_dst_port_alloc(
  6232. bp, bp->nge_port,
  6233. TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
  6234. }
  6235. if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
  6236. bnxt_hwrm_tunnel_dst_port_free(
  6237. bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
  6238. }
  6239. if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
  6240. bnxt_hwrm_port_qstats(bp);
  6241. if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
  6242. int rc;
  6243. mutex_lock(&bp->link_lock);
  6244. if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
  6245. &bp->sp_event))
  6246. bnxt_hwrm_phy_qcaps(bp);
  6247. rc = bnxt_update_link(bp, true);
  6248. mutex_unlock(&bp->link_lock);
  6249. if (rc)
  6250. netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
  6251. rc);
  6252. }
  6253. if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
  6254. mutex_lock(&bp->link_lock);
  6255. bnxt_get_port_module_status(bp);
  6256. mutex_unlock(&bp->link_lock);
  6257. }
  6258. if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
  6259. bnxt_tc_flow_stats_work(bp);
  6260. /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
  6261. * must be the last functions to be called before exiting.
  6262. */
  6263. if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
  6264. bnxt_reset(bp, false);
  6265. if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
  6266. bnxt_reset(bp, true);
  6267. smp_mb__before_atomic();
  6268. clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
  6269. }
  6270. /* Under rtnl_lock */
  6271. int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
  6272. int tx_xdp)
  6273. {
  6274. int max_rx, max_tx, tx_sets = 1;
  6275. int tx_rings_needed;
  6276. int rx_rings = rx;
  6277. int cp, vnics, rc;
  6278. if (tcs)
  6279. tx_sets = tcs;
  6280. rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
  6281. if (rc)
  6282. return rc;
  6283. if (max_rx < rx)
  6284. return -ENOMEM;
  6285. tx_rings_needed = tx * tx_sets + tx_xdp;
  6286. if (max_tx < tx_rings_needed)
  6287. return -ENOMEM;
  6288. vnics = 1;
  6289. if (bp->flags & BNXT_FLAG_RFS)
  6290. vnics += rx_rings;
  6291. if (bp->flags & BNXT_FLAG_AGG_RINGS)
  6292. rx_rings <<= 1;
  6293. cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
  6294. return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
  6295. vnics);
  6296. }
  6297. static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
  6298. {
  6299. if (bp->bar2) {
  6300. pci_iounmap(pdev, bp->bar2);
  6301. bp->bar2 = NULL;
  6302. }
  6303. if (bp->bar1) {
  6304. pci_iounmap(pdev, bp->bar1);
  6305. bp->bar1 = NULL;
  6306. }
  6307. if (bp->bar0) {
  6308. pci_iounmap(pdev, bp->bar0);
  6309. bp->bar0 = NULL;
  6310. }
  6311. }
  6312. static void bnxt_cleanup_pci(struct bnxt *bp)
  6313. {
  6314. bnxt_unmap_bars(bp, bp->pdev);
  6315. pci_release_regions(bp->pdev);
  6316. pci_disable_device(bp->pdev);
  6317. }
  6318. static void bnxt_init_dflt_coal(struct bnxt *bp)
  6319. {
  6320. struct bnxt_coal *coal;
6321. /* Tick values in microseconds.
  6322. * 1 coal_buf x bufs_per_record = 1 completion record.
  6323. */
  6324. coal = &bp->rx_coal;
  6325. coal->coal_ticks = 14;
  6326. coal->coal_bufs = 30;
  6327. coal->coal_ticks_irq = 1;
  6328. coal->coal_bufs_irq = 2;
  6329. coal->idle_thresh = 25;
  6330. coal->bufs_per_record = 2;
  6331. coal->budget = 64; /* NAPI budget */
  6332. coal = &bp->tx_coal;
  6333. coal->coal_ticks = 28;
  6334. coal->coal_bufs = 30;
  6335. coal->coal_ticks_irq = 2;
  6336. coal->coal_bufs_irq = 2;
  6337. coal->bufs_per_record = 1;
  6338. bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
  6339. }
  6340. static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
  6341. {
  6342. int rc;
  6343. struct bnxt *bp = netdev_priv(dev);
  6344. SET_NETDEV_DEV(dev, &pdev->dev);
  6345. /* enable device (incl. PCI PM wakeup), and bus-mastering */
  6346. rc = pci_enable_device(pdev);
  6347. if (rc) {
  6348. dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
  6349. goto init_err;
  6350. }
  6351. if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
  6352. dev_err(&pdev->dev,
  6353. "Cannot find PCI device base address, aborting\n");
  6354. rc = -ENODEV;
  6355. goto init_err_disable;
  6356. }
  6357. rc = pci_request_regions(pdev, DRV_MODULE_NAME);
  6358. if (rc) {
  6359. dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
  6360. goto init_err_disable;
  6361. }
  6362. if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
  6363. dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
  6364. dev_err(&pdev->dev, "System does not support DMA, aborting\n");
  6365. goto init_err_disable;
  6366. }
  6367. pci_set_master(pdev);
  6368. bp->dev = dev;
  6369. bp->pdev = pdev;
  6370. bp->bar0 = pci_ioremap_bar(pdev, 0);
  6371. if (!bp->bar0) {
  6372. dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
  6373. rc = -ENOMEM;
  6374. goto init_err_release;
  6375. }
  6376. bp->bar1 = pci_ioremap_bar(pdev, 2);
  6377. if (!bp->bar1) {
  6378. dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
  6379. rc = -ENOMEM;
  6380. goto init_err_release;
  6381. }
  6382. bp->bar2 = pci_ioremap_bar(pdev, 4);
  6383. if (!bp->bar2) {
  6384. dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
  6385. rc = -ENOMEM;
  6386. goto init_err_release;
  6387. }
  6388. pci_enable_pcie_error_reporting(pdev);
  6389. INIT_WORK(&bp->sp_task, bnxt_sp_task);
  6390. spin_lock_init(&bp->ntp_fltr_lock);
  6391. bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
  6392. bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
  6393. bnxt_init_dflt_coal(bp);
  6394. timer_setup(&bp->timer, bnxt_timer, 0);
  6395. bp->current_interval = BNXT_TIMER_INTERVAL;
  6396. clear_bit(BNXT_STATE_OPEN, &bp->state);
  6397. return 0;
  6398. init_err_release:
  6399. bnxt_unmap_bars(bp, pdev);
  6400. pci_release_regions(pdev);
  6401. init_err_disable:
  6402. pci_disable_device(pdev);
  6403. init_err:
  6404. return rc;
  6405. }
  6406. /* rtnl_lock held */
  6407. static int bnxt_change_mac_addr(struct net_device *dev, void *p)
  6408. {
  6409. struct sockaddr *addr = p;
  6410. struct bnxt *bp = netdev_priv(dev);
  6411. int rc = 0;
  6412. if (!is_valid_ether_addr(addr->sa_data))
  6413. return -EADDRNOTAVAIL;
  6414. if (ether_addr_equal(addr->sa_data, dev->dev_addr))
  6415. return 0;
  6416. rc = bnxt_approve_mac(bp, addr->sa_data);
  6417. if (rc)
  6418. return rc;
  6419. memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
  6420. if (netif_running(dev)) {
  6421. bnxt_close_nic(bp, false, false);
  6422. rc = bnxt_open_nic(bp, false, false);
  6423. }
  6424. return rc;
  6425. }
  6426. /* rtnl_lock held */
  6427. static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
  6428. {
  6429. struct bnxt *bp = netdev_priv(dev);
  6430. if (netif_running(dev))
  6431. bnxt_close_nic(bp, false, false);
  6432. dev->mtu = new_mtu;
  6433. bnxt_set_ring_params(bp);
  6434. if (netif_running(dev))
  6435. return bnxt_open_nic(bp, false, false);
  6436. return 0;
  6437. }
  6438. int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
  6439. {
  6440. struct bnxt *bp = netdev_priv(dev);
  6441. bool sh = false;
  6442. int rc;
  6443. if (tc > bp->max_tc) {
  6444. netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
  6445. tc, bp->max_tc);
  6446. return -EINVAL;
  6447. }
  6448. if (netdev_get_num_tc(dev) == tc)
  6449. return 0;
  6450. if (bp->flags & BNXT_FLAG_SHARED_RINGS)
  6451. sh = true;
  6452. rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
  6453. sh, tc, bp->tx_nr_rings_xdp);
  6454. if (rc)
  6455. return rc;
6456. /* Need to close the device and do hw resource re-allocations */
  6457. if (netif_running(bp->dev))
  6458. bnxt_close_nic(bp, true, false);
  6459. if (tc) {
  6460. bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
  6461. netdev_set_num_tc(dev, tc);
  6462. } else {
  6463. bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
  6464. netdev_reset_tc(dev);
  6465. }
  6466. bp->tx_nr_rings += bp->tx_nr_rings_xdp;
  6467. bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
  6468. bp->tx_nr_rings + bp->rx_nr_rings;
  6469. bp->num_stat_ctxs = bp->cp_nr_rings;
  6470. if (netif_running(bp->dev))
  6471. return bnxt_open_nic(bp, true, false);
  6472. return 0;
  6473. }
  6474. static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
  6475. void *cb_priv)
  6476. {
  6477. struct bnxt *bp = cb_priv;
  6478. if (!bnxt_tc_flower_enabled(bp) ||
  6479. !tc_cls_can_offload_and_chain0(bp->dev, type_data))
  6480. return -EOPNOTSUPP;
  6481. switch (type) {
  6482. case TC_SETUP_CLSFLOWER:
  6483. return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
  6484. default:
  6485. return -EOPNOTSUPP;
  6486. }
  6487. }
  6488. static int bnxt_setup_tc_block(struct net_device *dev,
  6489. struct tc_block_offload *f)
  6490. {
  6491. struct bnxt *bp = netdev_priv(dev);
  6492. if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
  6493. return -EOPNOTSUPP;
  6494. switch (f->command) {
  6495. case TC_BLOCK_BIND:
  6496. return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb,
  6497. bp, bp);
  6498. case TC_BLOCK_UNBIND:
  6499. tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp);
  6500. return 0;
  6501. default:
  6502. return -EOPNOTSUPP;
  6503. }
  6504. }
  6505. static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
  6506. void *type_data)
  6507. {
  6508. switch (type) {
  6509. case TC_SETUP_BLOCK:
  6510. return bnxt_setup_tc_block(dev, type_data);
  6511. case TC_SETUP_QDISC_MQPRIO: {
  6512. struct tc_mqprio_qopt *mqprio = type_data;
  6513. mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
  6514. return bnxt_setup_mq_tc(dev, mqprio->num_tc);
  6515. }
  6516. default:
  6517. return -EOPNOTSUPP;
  6518. }
  6519. }
  6520. #ifdef CONFIG_RFS_ACCEL
  6521. static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
  6522. struct bnxt_ntuple_filter *f2)
  6523. {
  6524. struct flow_keys *keys1 = &f1->fkeys;
  6525. struct flow_keys *keys2 = &f2->fkeys;
  6526. if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
  6527. keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
  6528. keys1->ports.ports == keys2->ports.ports &&
  6529. keys1->basic.ip_proto == keys2->basic.ip_proto &&
  6530. keys1->basic.n_proto == keys2->basic.n_proto &&
  6531. keys1->control.flags == keys2->control.flags &&
  6532. ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
  6533. ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
  6534. return true;
  6535. return false;
  6536. }
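/* aRFS flow steering callback.  Dissects the flow keys from the skb,
 * rejects protocols the hardware cannot match, de-duplicates against the
 * ntuple hash table, then allocates a software filter ID and defers the
 * actual HWRM filter programming to bnxt_cfg_ntp_filters() via sp work.
 */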
  6537. static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
  6538. u16 rxq_index, u32 flow_id)
  6539. {
  6540. struct bnxt *bp = netdev_priv(dev);
  6541. struct bnxt_ntuple_filter *fltr, *new_fltr;
  6542. struct flow_keys *fkeys;
  6543. struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
  6544. int rc = 0, idx, bit_id, l2_idx = 0;
  6545. struct hlist_head *head;
  6546. if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
  6547. struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
  6548. int off = 0, j;
  6549. netif_addr_lock_bh(dev);
  6550. for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
  6551. if (ether_addr_equal(eth->h_dest,
  6552. vnic->uc_list + off)) {
  6553. l2_idx = j + 1;
  6554. break;
  6555. }
  6556. }
  6557. netif_addr_unlock_bh(dev);
  6558. if (!l2_idx)
  6559. return -EINVAL;
  6560. }
  6561. new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
  6562. if (!new_fltr)
  6563. return -ENOMEM;
  6564. fkeys = &new_fltr->fkeys;
  6565. if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
  6566. rc = -EPROTONOSUPPORT;
  6567. goto err_free;
  6568. }
  6569. if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
  6570. fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
  6571. ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
  6572. (fkeys->basic.ip_proto != IPPROTO_UDP))) {
  6573. rc = -EPROTONOSUPPORT;
  6574. goto err_free;
  6575. }
  6576. if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
  6577. bp->hwrm_spec_code < 0x10601) {
  6578. rc = -EPROTONOSUPPORT;
  6579. goto err_free;
  6580. }
  6581. if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
  6582. bp->hwrm_spec_code < 0x10601) {
  6583. rc = -EPROTONOSUPPORT;
  6584. goto err_free;
  6585. }
  6586. memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
  6587. memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
  6588. idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
  6589. head = &bp->ntp_fltr_hash_tbl[idx];
  6590. rcu_read_lock();
  6591. hlist_for_each_entry_rcu(fltr, head, hash) {
  6592. if (bnxt_fltr_match(fltr, new_fltr)) {
  6593. rcu_read_unlock();
  6594. rc = 0;
  6595. goto err_free;
  6596. }
  6597. }
  6598. rcu_read_unlock();
  6599. spin_lock_bh(&bp->ntp_fltr_lock);
  6600. bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
  6601. BNXT_NTP_FLTR_MAX_FLTR, 0);
  6602. if (bit_id < 0) {
  6603. spin_unlock_bh(&bp->ntp_fltr_lock);
  6604. rc = -ENOMEM;
  6605. goto err_free;
  6606. }
  6607. new_fltr->sw_id = (u16)bit_id;
  6608. new_fltr->flow_id = flow_id;
  6609. new_fltr->l2_fltr_idx = l2_idx;
  6610. new_fltr->rxq = rxq_index;
  6611. hlist_add_head_rcu(&new_fltr->hash, head);
  6612. bp->ntp_fltr_count++;
  6613. spin_unlock_bh(&bp->ntp_fltr_lock);
  6614. set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
  6615. bnxt_queue_sp_work(bp);
  6616. return new_fltr->sw_id;
  6617. err_free:
  6618. kfree(new_fltr);
  6619. return rc;
  6620. }
  6621. static void bnxt_cfg_ntp_filters(struct bnxt *bp)
  6622. {
  6623. int i;
  6624. for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
  6625. struct hlist_head *head;
  6626. struct hlist_node *tmp;
  6627. struct bnxt_ntuple_filter *fltr;
  6628. int rc;
  6629. head = &bp->ntp_fltr_hash_tbl[i];
  6630. hlist_for_each_entry_safe(fltr, tmp, head, hash) {
  6631. bool del = false;
  6632. if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
  6633. if (rps_may_expire_flow(bp->dev, fltr->rxq,
  6634. fltr->flow_id,
  6635. fltr->sw_id)) {
  6636. bnxt_hwrm_cfa_ntuple_filter_free(bp,
  6637. fltr);
  6638. del = true;
  6639. }
  6640. } else {
  6641. rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
  6642. fltr);
  6643. if (rc)
  6644. del = true;
  6645. else
  6646. set_bit(BNXT_FLTR_VALID, &fltr->state);
  6647. }
  6648. if (del) {
  6649. spin_lock_bh(&bp->ntp_fltr_lock);
  6650. hlist_del_rcu(&fltr->hash);
  6651. bp->ntp_fltr_count--;
  6652. spin_unlock_bh(&bp->ntp_fltr_lock);
  6653. synchronize_rcu();
  6654. clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
  6655. kfree(fltr);
  6656. }
  6657. }
  6658. }
  6659. if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
6660. netdev_info(bp->dev, "Received PF driver unload event!\n");
  6661. }
  6662. #else
  6663. static void bnxt_cfg_ntp_filters(struct bnxt *bp)
  6664. {
  6665. }
  6666. #endif /* CONFIG_RFS_ACCEL */
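/* UDP tunnel offload: track a single VXLAN and a single GENEVE port with
 * reference counts, and defer the firmware dst-port alloc/free calls to
 * the slow-path task.
 */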
static void bnxt_udp_tunnel_add(struct net_device *dev,
				struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(dev);

	if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
		return;

	if (!netif_running(dev))
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
			return;

		bp->vxlan_port_cnt++;
		if (bp->vxlan_port_cnt == 1) {
			bp->vxlan_port = ti->port;
			set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
			bnxt_queue_sp_work(bp);
		}
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (bp->nge_port_cnt && bp->nge_port != ti->port)
			return;

		bp->nge_port_cnt++;
		if (bp->nge_port_cnt == 1) {
			bp->nge_port = ti->port;
			set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
		}
		break;
	default:
		return;
	}

	bnxt_queue_sp_work(bp);
}

static void bnxt_udp_tunnel_del(struct net_device *dev,
				struct udp_tunnel_info *ti)
{
	struct bnxt *bp = netdev_priv(dev);

	if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
		return;

	if (!netif_running(dev))
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
			return;
		bp->vxlan_port_cnt--;

		if (bp->vxlan_port_cnt != 0)
			return;

		set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!bp->nge_port_cnt || bp->nge_port != ti->port)
			return;
		bp->nge_port_cnt--;

		if (bp->nge_port_cnt != 0)
			return;

		set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
		break;
	default:
		return;
	}

	bnxt_queue_sp_work(bp);
}

static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
			       struct net_device *dev, u32 filter_mask,
			       int nlflags)
{
	struct bnxt *bp = netdev_priv(dev);

	return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
				       nlflags, filter_mask, NULL);
}
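
/* ndo_bridge_setlink: parse the IFLA_BRIDGE_MODE attribute from the netlink
 * message and ask firmware to change the embedded bridge mode.  Only
 * supported on a single-function PF with HWRM spec 1.7.8 or newer firmware.
 */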
static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
			       u16 flags)
{
	struct bnxt *bp = netdev_priv(dev);
	struct nlattr *attr, *br_spec;
	int rem, rc = 0;

	if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
		return -EOPNOTSUPP;

	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
	if (!br_spec)
		return -EINVAL;

	nla_for_each_nested(attr, br_spec, rem) {
		u16 mode;

		if (nla_type(attr) != IFLA_BRIDGE_MODE)
			continue;

		if (nla_len(attr) < sizeof(mode))
			return -EINVAL;

		mode = nla_get_u16(attr);
		if (mode == bp->br_mode)
			break;

		rc = bnxt_hwrm_set_br_mode(bp, mode);
		if (!rc)
			bp->br_mode = mode;
		break;
	}
	return rc;
}

static int bnxt_get_phys_port_name(struct net_device *dev, char *buf,
				   size_t len)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	/* The PF and its VF-reps only support the switchdev framework */
	if (!BNXT_PF(bp))
		return -EOPNOTSUPP;

	rc = snprintf(buf, len, "p%d", bp->pf.port_id);

	if (rc >= len)
		return -EOPNOTSUPP;
	return 0;
}

int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr)
{
	if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return -EOPNOTSUPP;

	/* The PF and its VF-reps only support the switchdev framework */
	if (!BNXT_PF(bp))
		return -EOPNOTSUPP;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = sizeof(bp->switch_id);
		memcpy(attr->u.ppid.id, bp->switch_id, attr->u.ppid.id_len);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static int bnxt_swdev_port_attr_get(struct net_device *dev,
				    struct switchdev_attr *attr)
{
	return bnxt_port_attr_get(netdev_priv(dev), attr);
}

static const struct switchdev_ops bnxt_switchdev_ops = {
	.switchdev_port_attr_get	= bnxt_swdev_port_attr_get
};

static const struct net_device_ops bnxt_netdev_ops = {
	.ndo_open		= bnxt_open,
	.ndo_start_xmit		= bnxt_start_xmit,
	.ndo_stop		= bnxt_close,
	.ndo_get_stats64	= bnxt_get_stats64,
	.ndo_set_rx_mode	= bnxt_set_rx_mode,
	.ndo_do_ioctl		= bnxt_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnxt_change_mac_addr,
	.ndo_change_mtu		= bnxt_change_mtu,
	.ndo_fix_features	= bnxt_fix_features,
	.ndo_set_features	= bnxt_set_features,
	.ndo_tx_timeout		= bnxt_tx_timeout,
#ifdef CONFIG_BNXT_SRIOV
	.ndo_get_vf_config	= bnxt_get_vf_config,
	.ndo_set_vf_mac		= bnxt_set_vf_mac,
	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
	.ndo_set_vf_rate	= bnxt_set_vf_bw,
	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bnxt_poll_controller,
#endif
	.ndo_setup_tc		= bnxt_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= bnxt_rx_flow_steer,
#endif
	.ndo_udp_tunnel_add	= bnxt_udp_tunnel_add,
	.ndo_udp_tunnel_del	= bnxt_udp_tunnel_del,
	.ndo_bpf		= bnxt_xdp,
	.ndo_bridge_getlink	= bnxt_bridge_getlink,
	.ndo_bridge_setlink	= bnxt_bridge_setlink,
	.ndo_get_phys_port_name = bnxt_get_phys_port_name
};
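
/* PCI remove callback: undo what bnxt_init_one() set up -- disable SR-IOV
 * and devlink on the PF, unregister the netdev, cancel slow-path work,
 * free HWRM, ethtool and DCB resources, and release the PCI resources.
 */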
static void bnxt_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_PF(bp)) {
		bnxt_sriov_disable(bp);
		bnxt_dl_unregister(bp);
	}

	pci_disable_pcie_error_reporting(pdev);
	unregister_netdev(dev);
	bnxt_shutdown_tc(bp);
	bnxt_cancel_sp_work(bp);
	bp->sp_event = 0;

	bnxt_clear_int_mode(bp);
	bnxt_hwrm_func_drv_unrgtr(bp);
	bnxt_free_hwrm_resources(bp);
	bnxt_free_hwrm_short_cmd_req(bp);
	bnxt_ethtool_free(bp);
	bnxt_dcb_free(bp);
	kfree(bp->edev);
	bp->edev = NULL;
	bnxt_cleanup_pci(bp);
	free_netdev(dev);
}
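
/* Query PHY capabilities and the current link state from firmware at probe
 * time, then seed the driver's ethtool link settings (autoneg, speed,
 * duplex, flow control) from the values stored in NVM.
 */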
static int bnxt_probe_phy(struct bnxt *bp)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_phy_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
			   rc);
		return rc;
	}
	mutex_init(&bp->link_lock);

	rc = bnxt_update_link(bp, false);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
			   rc);
		return rc;
	}

	/* Older firmware does not have supported_auto_speeds, so assume
	 * that all supported speeds can be autonegotiated.
	 */
	if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
		link_info->support_auto_speeds = link_info->support_speeds;

	/* Initialize the ethtool setting copy with NVM settings */
	if (BNXT_AUTO_MODE(link_info->auto_mode)) {
		link_info->autoneg = BNXT_AUTONEG_SPEED;
		if (bp->hwrm_spec_code >= 0x10201) {
			if (link_info->auto_pause_setting &
			    PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
				link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		} else {
			link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		}
		link_info->advertising = link_info->auto_link_speeds;
	} else {
		link_info->req_link_speed = link_info->force_link_speed;
		link_info->req_duplex = link_info->duplex_setting;
	}
	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
		link_info->req_flow_ctrl =
			link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
	else
		link_info->req_flow_ctrl = link_info->force_pause_setting;
	return rc;
}
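
/* Read the MSI-X table size from PCI config space to determine how many
 * interrupt vectors this function can use (1 if MSI-X is not supported).
 */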
static int bnxt_get_max_irq(struct pci_dev *pdev)
{
	u16 ctrl;

	if (!pdev->msix_cap)
		return 1;

	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
}

static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
				int *max_cp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int max_ring_grps = 0;

	*max_tx = hw_resc->max_tx_rings;
	*max_rx = hw_resc->max_rx_rings;
	*max_cp = min_t(int, hw_resc->max_irqs, hw_resc->max_cp_rings);
	*max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
	max_ring_grps = hw_resc->max_hw_ring_grps;
	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
		*max_cp -= 1;
		*max_rx -= 2;
	}
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		*max_rx >>= 1;
	*max_rx = min_t(int, *max_rx, max_ring_grps);
}

int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
{
	int rx, tx, cp;

	_bnxt_get_max_rings(bp, &rx, &tx, &cp);
	if (!rx || !tx || !cp)
		return -ENOMEM;

	*max_rx = rx;
	*max_tx = tx;
	return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
}
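
/* Determine the maximum RX/TX rings available for the default configuration.
 * If the aggregation-ring layout does not fit, fall back to single RX rings
 * and drop LRO/GRO_HW, and set aside a minimum number of completion rings,
 * stat contexts and IRQs for RoCE when the device supports it.
 */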
static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
			       bool shared)
{
	int rc;

	rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
	if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
		/* Not enough rings, try disabling agg rings. */
		bp->flags &= ~BNXT_FLAG_AGG_RINGS;
		rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
		if (rc)
			return rc;
		bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
		bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
		bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
		bnxt_set_ring_params(bp);
	}

	if (bp->flags & BNXT_FLAG_ROCE_CAP) {
		int max_cp, max_stat, max_irq;

		/* Reserve minimum resources for RoCE */
		max_cp = bnxt_get_max_func_cp_rings(bp);
		max_stat = bnxt_get_max_func_stat_ctxs(bp);
		max_irq = bnxt_get_max_func_irqs(bp);
		if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
		    max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
			return 0;

		max_cp -= BNXT_MIN_ROCE_CP_RINGS;
		max_irq -= BNXT_MIN_ROCE_CP_RINGS;
		max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
		max_cp = min_t(int, max_cp, max_irq);
		max_cp = min_t(int, max_cp, max_stat);
		rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
		if (rc)
			rc = 0;
	}
	return rc;
}

/* In initial default shared ring setting, each shared ring must have an
 * RX/TX ring pair.
 */
static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
{
	bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
	bp->rx_nr_rings = bp->cp_nr_rings;
	bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
}

static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
{
	int dflt_rings, max_rx_rings, max_tx_rings, rc;

	if (sh)
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
	dflt_rings = netif_get_num_default_rss_queues();
	/* Reduce default rings to reduce memory usage on multi-port cards */
	if (bp->port_count > 1)
		dflt_rings = min_t(int, dflt_rings, 4);
	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
	if (rc)
		return rc;
	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);
	else
		bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;

	rc = __bnxt_reserve_rings(bp);
	if (rc)
		netdev_warn(bp->dev, "Unable to reserve tx rings\n");
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	if (sh)
		bnxt_trim_dflt_sh_rings(bp);

	/* Rings may have been trimmed, re-reserve the trimmed rings. */
	if (bnxt_need_reserve_rings(bp)) {
		rc = __bnxt_reserve_rings(bp);
		if (rc)
			netdev_warn(bp->dev, "2nd rings reservation failed.\n");
		bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
	}
	bp->num_stat_ctxs = bp->cp_nr_rings;
	if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		bp->rx_nr_rings++;
		bp->cp_nr_rings++;
	}
	return rc;
}
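
/* Re-query firmware resource limits and re-initialize the interrupt mode
 * after the PF's resources have changed (e.g. when SR-IOV VFs are enabled
 * or disabled).  Skipped while the RoCE ULP driver is registered.
 */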
int bnxt_restore_pf_fw_resources(struct bnxt *bp)
{
	int rc;

	ASSERT_RTNL();
	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
		return 0;

	bnxt_hwrm_func_qcaps(bp);

	if (netif_running(bp->dev))
		__bnxt_close_nic(bp, true, false);

	bnxt_clear_int_mode(bp);
	rc = bnxt_init_int_mode(bp);

	if (netif_running(bp->dev)) {
		if (rc)
			dev_close(bp->dev);
		else
			rc = bnxt_open_nic(bp, true, false);
	}

	return rc;
}
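
/* Set the initial netdev MAC address: the PF uses the permanent MAC from
 * firmware; a VF uses the PF-assigned MAC if one is set, otherwise a random
 * address that the PF is asked to approve.
 */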
static int bnxt_init_mac_addr(struct bnxt *bp)
{
	int rc = 0;

	if (BNXT_PF(bp)) {
		memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
	} else {
#ifdef CONFIG_BNXT_SRIOV
		struct bnxt_vf_info *vf = &bp->vf;

		if (is_valid_ether_addr(vf->mac_addr)) {
			/* overwrite netdev dev_addr with admin VF MAC */
			memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
		} else {
			eth_hw_addr_random(bp->dev);
			rc = bnxt_approve_mac(bp, bp->dev->dev_addr);
		}
#endif
	}
	return rc;
}

static void bnxt_parse_log_pcie_link(struct bnxt *bp)
{
	enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
	enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;

	if (pcie_get_minimum_link(pci_physfn(bp->pdev), &speed, &width) ||
	    speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
		netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
	else
		netdev_info(bp->dev, "PCIe: Speed %s Width x%d\n",
			    speed == PCIE_SPEED_2_5GT ? "2.5GT/s" :
			    speed == PCIE_SPEED_5_0GT ? "5.0GT/s" :
			    speed == PCIE_SPEED_8_0GT ? "8.0GT/s" :
			    "Unknown", width);
}
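
/* Main PCI probe routine: allocate the netdev, map the BARs, establish the
 * HWRM channel with firmware, query capabilities, pick default ring counts
 * and features, set up interrupts and TC offload, and finally register the
 * net device (and devlink on the PF).
 */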
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev;
	struct bnxt *bp;
	int rc, max_irqs;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	if (version_printed++ == 0)
		pr_info("%s", version);

	max_irqs = bnxt_get_max_irq(pdev);
	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);

	if (bnxt_vf_pciid(ent->driver_data))
		bp->flags |= BNXT_FLAG_VF;

	if (pdev->msix_cap)
		bp->flags |= BNXT_FLAG_MSIX_CAP;

	rc = bnxt_init_board(pdev, dev);
	if (rc < 0)
		goto init_err_free;

	dev->netdev_ops = &bnxt_netdev_ops;
	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
	dev->ethtool_ops = &bnxt_ethtool_ops;
	SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops);
	pci_set_drvdata(pdev, dev);

	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc)
		goto init_err_pci_clean;

	mutex_init(&bp->hwrm_cmd_lock);
	rc = bnxt_hwrm_ver_get(bp);
	if (rc)
		goto init_err_pci_clean;

	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
		rc = bnxt_alloc_hwrm_short_cmd_req(bp);
		if (rc)
			goto init_err_pci_clean;
	}

	rc = bnxt_hwrm_func_reset(bp);
	if (rc)
		goto init_err_pci_clean;

	bnxt_hwrm_fw_set_time(bp);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			   NETIF_F_GSO_IPXIP4 |
			   NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			   NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
			   NETIF_F_RXCSUM | NETIF_F_GRO;

	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
		dev->hw_features |= NETIF_F_LRO;

	dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
			NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
	dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
				    NETIF_F_GSO_GRE_CSUM;
	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
	if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
		dev->hw_features |= NETIF_F_GRO_HW;
	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
	if (dev->features & NETIF_F_GRO_HW)
		dev->features &= ~NETIF_F_LRO;
	dev->priv_flags |= IFF_UNICAST_FLT;

#ifdef CONFIG_BNXT_SRIOV
	init_waitqueue_head(&bp->sriov_cfg_wait);
	mutex_init(&bp->sriov_lock);
#endif
	bp->gro_func = bnxt_gro_func_5730x;
	if (BNXT_CHIP_P4_PLUS(bp))
		bp->gro_func = bnxt_gro_func_5731x;
	else
		bp->flags |= BNXT_FLAG_DOUBLE_DB;

	rc = bnxt_hwrm_func_drv_rgtr(bp);
	if (rc)
		goto init_err_pci_clean;

	rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
	if (rc)
		goto init_err_pci_clean;

	bp->ulp_probe = bnxt_ulp_probe;

	/* Get the MAX capabilities for this function */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
			   rc);
		rc = -1;
		goto init_err_pci_clean;
	}
	rc = bnxt_init_mac_addr(bp);
	if (rc) {
		dev_err(&pdev->dev, "Unable to initialize mac address.\n");
		rc = -EADDRNOTAVAIL;
		goto init_err_pci_clean;
	}
	rc = bnxt_hwrm_queue_qportcfg(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
			   rc);
		rc = -1;
		goto init_err_pci_clean;
	}

	bnxt_hwrm_func_qcfg(bp);
	bnxt_hwrm_port_led_qcaps(bp);
	bnxt_ethtool_init(bp);
	bnxt_dcb_init(bp);

	/* MTU range: 60 - FW defined max */
	dev->min_mtu = ETH_ZLEN;
	dev->max_mtu = bp->max_mtu;

	rc = bnxt_probe_phy(bp);
	if (rc)
		goto init_err_pci_clean;

	bnxt_set_rx_skb_mode(bp, false);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);
	bnxt_set_max_func_irqs(bp, max_irqs);
	rc = bnxt_set_dflt_rings(bp, true);
	if (rc) {
		netdev_err(bp->dev, "Not enough rings available.\n");
		rc = -ENOMEM;
		goto init_err_pci_clean;
	}

	/* Default RSS hash cfg. */
	bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
			   VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	if (BNXT_CHIP_P4_PLUS(bp) && bp->hwrm_spec_code >= 0x10501) {
		bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
		bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
				    VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	}

	bnxt_hwrm_vnic_qcaps(bp);
	if (bnxt_rfs_supported(bp)) {
		dev->hw_features |= NETIF_F_NTUPLE;
		if (bnxt_rfs_capable(bp)) {
			bp->flags |= BNXT_FLAG_RFS;
			dev->features |= NETIF_F_NTUPLE;
		}
	}

	if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

	rc = bnxt_init_int_mode(bp);
	if (rc)
		goto init_err_pci_clean;

	/* No TC has been set yet and rings may have been trimmed due to
	 * limited MSIX, so we re-initialize the TX rings per TC.
	 */
	bp->tx_nr_rings_per_tc = bp->tx_nr_rings;

	bnxt_get_wol_settings(bp);
	if (bp->flags & BNXT_FLAG_WOL_CAP)
		device_set_wakeup_enable(&pdev->dev, bp->wol);
	else
		device_set_wakeup_capable(&pdev->dev, false);

	bnxt_hwrm_set_cache_line_size(bp, cache_line_size());

	if (BNXT_PF(bp)) {
		if (!bnxt_pf_wq) {
			bnxt_pf_wq =
				create_singlethread_workqueue("bnxt_pf_wq");
			if (!bnxt_pf_wq) {
				dev_err(&pdev->dev, "Unable to create workqueue.\n");
				rc = -ENOMEM;
				goto init_err_pci_clean;
			}
		}
		bnxt_init_tc(bp);
	}

	rc = register_netdev(dev);
	if (rc)
		goto init_err_cleanup_tc;

	if (BNXT_PF(bp))
		bnxt_dl_register(bp);

	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (long)pci_resource_start(pdev, 0), dev->dev_addr);

	bnxt_parse_log_pcie_link(bp);

	return 0;

init_err_cleanup_tc:
	bnxt_shutdown_tc(bp);
	bnxt_clear_int_mode(bp);

init_err_pci_clean:
	bnxt_cleanup_pci(bp);

init_err_free:
	free_netdev(dev);
	return rc;
}
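
/* PCI shutdown callback: close the interface and, when the system is
 * powering off, arm wake-on-LAN and put the device into D3hot.
 */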
static void bnxt_shutdown(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp;

	if (!dev)
		return;

	rtnl_lock();
	bp = netdev_priv(dev);
	if (!bp)
		goto shutdown_exit;

	if (netif_running(dev))
		dev_close(dev);

	bnxt_ulp_shutdown(bp);

	if (system_state == SYSTEM_POWER_OFF) {
		bnxt_clear_int_mode(bp);
		pci_wake_from_d3(pdev, bp->wol);
		pci_set_power_state(pdev, PCI_D3hot);
	}

shutdown_exit:
	rtnl_unlock();
}
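
/* System sleep support: on suspend, close the interface and unregister the
 * driver from firmware; on resume, re-establish the HWRM channel, reset the
 * function and reopen the interface.
 */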
#ifdef CONFIG_PM_SLEEP
static int bnxt_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	if (netif_running(dev)) {
		netif_device_detach(dev);
		rc = bnxt_close(dev);
	}
	bnxt_hwrm_func_drv_unrgtr(bp);
	rtnl_unlock();
	return rc;
}

static int bnxt_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	rtnl_lock();
	if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
		rc = -ENODEV;
		goto resume_exit;
	}
	rc = bnxt_hwrm_func_reset(bp);
	if (rc) {
		rc = -EBUSY;
		goto resume_exit;
	}
	bnxt_get_wol_settings(bp);
	if (netif_running(dev)) {
		rc = bnxt_open(dev);
		if (!rc)
			netif_device_attach(dev);
	}

resume_exit:
	rtnl_unlock();
	return rc;
}

static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
#define BNXT_PM_OPS (&bnxt_pm_ops)

#else

#define BNXT_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */

/**
 * bnxt_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();
	netif_device_detach(netdev);

	bnxt_ulp_stop(bp);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(netdev))
		bnxt_close(netdev);

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnxt_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
 * followed by fixups by BIOS, and has its config space
 * set up identically to what it was at cold boot.
 */
static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(netdev);
	int err = 0;
	pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;

	netdev_info(bp->dev, "PCI Slot Reset\n");

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
	} else {
		pci_set_master(pdev);

		err = bnxt_hwrm_func_reset(bp);
		if (!err && netif_running(netdev))
			err = bnxt_open(netdev);

		if (!err) {
			result = PCI_ERS_RESULT_RECOVERED;
			bnxt_ulp_start(bp);
		}
	}

	if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
		dev_close(netdev);

	rtnl_unlock();

	err = pci_cleanup_aer_uncorrect_error_status(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
			err); /* non-fatal, continue */
	}

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnxt_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
 */
static void bnxt_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);

	rtnl_lock();

	netif_device_attach(netdev);

	rtnl_unlock();
}

static const struct pci_error_handlers bnxt_err_handler = {
	.error_detected	= bnxt_io_error_detected,
	.slot_reset	= bnxt_io_slot_reset,
	.resume		= bnxt_io_resume
};

static struct pci_driver bnxt_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnxt_pci_tbl,
	.probe		= bnxt_init_one,
	.remove		= bnxt_remove_one,
	.shutdown	= bnxt_shutdown,
	.driver.pm	= BNXT_PM_OPS,
	.err_handler	= &bnxt_err_handler,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure = bnxt_sriov_configure,
#endif
};

static int __init bnxt_init(void)
{
	return pci_register_driver(&bnxt_pci_driver);
}

static void __exit bnxt_exit(void)
{
	pci_unregister_driver(&bnxt_pci_driver);
	if (bnxt_pf_wq)
		destroy_workqueue(bnxt_pf_wq);
}

module_init(bnxt_init);
module_exit(bnxt_exit);