t4_hw.c 238 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024702570267027702870297030703170327033703470357036703770387039704070417042704370447045704670477048704970507051705270537054705570567057705870597060706170627063706470657066706770687069707070717072707370747075707670777078707970807081708270837084708570867087708870897090709170927093709470957096709770987099710071017102710371047105710671077108710971107111711271137114711571167117711871197120712171227123712471257126712771287129713071317132713371347135713671377138713971407141714271437144714571467147714871497150715171527153715471557156715771587159716071617162716371647165716671677168716971707171717271737174717571767177717871797180718171827183718471857186718771887189719071917192719371947195719671977198719972007201720272037204720572067207720872097210721172127213721472157216721772187219722072217222722372247225722672277228722972307231723272337234723572367237723872397240724172427243724472457246724772487249725072517252725372547255725672577258725972607261726272637264726572667267726872697270727172727273727472757276727
77278727972807281728272837284728572867287728872897290729172927293729472957296729772987299730073017302730373047305730673077308730973107311731273137314731573167317731873197320732173227323732473257326732773287329733073317332733373347335733673377338733973407341734273437344734573467347734873497350735173527353735473557356735773587359736073617362736373647365736673677368736973707371737273737374737573767377737873797380738173827383738473857386738773887389739073917392739373947395739673977398739974007401740274037404740574067407740874097410741174127413741474157416741774187419742074217422742374247425742674277428742974307431743274337434743574367437743874397440744174427443744474457446744774487449745074517452745374547455745674577458745974607461746274637464746574667467746874697470747174727473747474757476747774787479748074817482748374847485748674877488748974907491749274937494749574967497749874997500750175027503750475057506750775087509751075117512751375147515751675177518751975207521752275237524752575267527752875297530753175327533753475357536753775387539754075417542754375447545754675477548754975507551755275537554755575567557755875597560756175627563756475657566756775687569757075717572757375747575757675777578757975807581758275837584758575867587758875897590759175927593759475957596759775987599760076017602760376047605760676077608760976107611761276137614761576167617761876197620762176227623762476257626762776287629763076317632763376347635763676377638763976407641764276437644764576467647764876497650765176527653765476557656765776587659766076617662766376647665766676677668766976707671767276737674767576767677767876797680768176827683768476857686768776887689769076917692769376947695769676977698769977007701770277037704770577067707770877097710771177127713771477157716771777187719772077217722772377247725772677277728772977307731773277337734773577367737773877397740774177427743774477457746774777487749775077517752775377547755775677577758775977607761776277637764776577667767776877697770777177727773777477757776777
77778777977807781778277837784778577867787778877897790779177927793779477957796779777987799780078017802780378047805780678077808780978107811781278137814781578167817781878197820782178227823782478257826782778287829783078317832783378347835783678377838783978407841784278437844784578467847784878497850785178527853785478557856785778587859786078617862786378647865786678677868786978707871787278737874787578767877787878797880788178827883788478857886788778887889789078917892789378947895789678977898789979007901790279037904790579067907790879097910791179127913791479157916791779187919792079217922792379247925792679277928792979307931793279337934793579367937793879397940794179427943794479457946794779487949795079517952795379547955795679577958795979607961796279637964796579667967796879697970797179727973797479757976797779787979798079817982798379847985798679877988798979907991799279937994799579967997799879998000800180028003800480058006800780088009801080118012801380148015801680178018801980208021802280238024802580268027802880298030803180328033803480358036803780388039804080418042804380448045804680478048804980508051805280538054805580568057805880598060806180628063806480658066806780688069807080718072807380748075807680778078807980808081808280838084808580868087808880898090809180928093809480958096809780988099810081018102810381048105810681078108810981108111811281138114811581168117811881198120812181228123812481258126812781288129813081318132813381348135813681378138813981408141814281438144814581468147814881498150815181528153815481558156815781588159816081618162816381648165816681678168816981708171817281738174817581768177817881798180818181828183818481858186818781888189819081918192819381948195819681978198819982008201820282038204820582068207820882098210821182128213821482158216821782188219822082218222822382248225822682278228822982308231823282338234823582368237823882398240824182428243824482458246824782488249825082518252825382548255825682578258825982608261826282638264826582668267826882698270827182728273827482758276827
7827882798280828182828283828482858286828782888289829082918292829382948295829682978298829983008301830283038304830583068307830883098310831183128313831483158316831783188319832083218322832383248325832683278328832983308331833283338334833583368337833883398340834183428343834483458346834783488349835083518352835383548355835683578358835983608361836283638364836583668367836883698370837183728373837483758376837783788379838083818382838383848385838683878388838983908391839283938394839583968397839883998400840184028403840484058406840784088409841084118412841384148415841684178418841984208421842284238424842584268427842884298430843184328433843484358436843784388439844084418442844384448445844684478448844984508451845284538454845584568457845884598460846184628463
  1. /*
  2. * This file is part of the Chelsio T4 Ethernet driver for Linux.
  3. *
  4. * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
  5. *
  6. * This software is available to you under a choice of one of two
  7. * licenses. You may choose to be licensed under the terms of the GNU
  8. * General Public License (GPL) Version 2, available from the file
  9. * COPYING in the main directory of this source tree, or the
  10. * OpenIB.org BSD license below:
  11. *
  12. * Redistribution and use in source and binary forms, with or
  13. * without modification, are permitted provided that the following
  14. * conditions are met:
  15. *
  16. * - Redistributions of source code must retain the above
  17. * copyright notice, this list of conditions and the following
  18. * disclaimer.
  19. *
  20. * - Redistributions in binary form must reproduce the above
  21. * copyright notice, this list of conditions and the following
  22. * disclaimer in the documentation and/or other materials
  23. * provided with the distribution.
  24. *
  25. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  26. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  27. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  28. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  29. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  30. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  31. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  32. * SOFTWARE.
  33. */
  34. #include <linux/delay.h>
  35. #include "cxgb4.h"
  36. #include "t4_regs.h"
  37. #include "t4_values.h"
  38. #include "t4fw_api.h"
  39. #include "t4fw_version.h"
  40. /**
  41. * t4_wait_op_done_val - wait until an operation is completed
  42. * @adapter: the adapter performing the operation
  43. * @reg: the register to check for completion
  44. * @mask: a single-bit field within @reg that indicates completion
  45. * @polarity: the value of the field when the operation is completed
  46. * @attempts: number of check iterations
  47. * @delay: delay in usecs between iterations
  48. * @valp: where to store the value of the register at completion time
  49. *
  50. * Wait until an operation is completed by checking a bit in a register
  51. * up to @attempts times. If @valp is not NULL the value of the register
  52. * at the time it indicated completion is stored there. Returns 0 if the
  53. * operation completes and -EAGAIN otherwise.
  54. */
  55. static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
  56. int polarity, int attempts, int delay, u32 *valp)
  57. {
  58. while (1) {
  59. u32 val = t4_read_reg(adapter, reg);
  60. if (!!(val & mask) == polarity) {
  61. if (valp)
  62. *valp = val;
  63. return 0;
  64. }
  65. if (--attempts == 0)
  66. return -EAGAIN;
  67. if (delay)
  68. udelay(delay);
  69. }
  70. }
  71. static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
  72. int polarity, int attempts, int delay)
  73. {
  74. return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
  75. delay, NULL);
  76. }
  77. /**
  78. * t4_set_reg_field - set a register field to a value
  79. * @adapter: the adapter to program
  80. * @addr: the register address
  81. * @mask: specifies the portion of the register to modify
  82. * @val: the new value for the register field
  83. *
  84. * Sets a register field specified by the supplied mask to the
  85. * given value.
  86. */
  87. void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
  88. u32 val)
  89. {
  90. u32 v = t4_read_reg(adapter, addr) & ~mask;
  91. t4_write_reg(adapter, addr, v | val);
  92. (void) t4_read_reg(adapter, addr); /* flush */
  93. }
  94. /**
  95. * t4_read_indirect - read indirectly addressed registers
  96. * @adap: the adapter
  97. * @addr_reg: register holding the indirect address
  98. * @data_reg: register holding the value of the indirect register
  99. * @vals: where the read register values are stored
  100. * @nregs: how many indirect registers to read
  101. * @start_idx: index of first indirect register to read
  102. *
  103. * Reads registers that are accessed indirectly through an address/data
  104. * register pair.
  105. */
  106. void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
  107. unsigned int data_reg, u32 *vals,
  108. unsigned int nregs, unsigned int start_idx)
  109. {
  110. while (nregs--) {
  111. t4_write_reg(adap, addr_reg, start_idx);
  112. *vals++ = t4_read_reg(adap, data_reg);
  113. start_idx++;
  114. }
  115. }
  116. /**
  117. * t4_write_indirect - write indirectly addressed registers
  118. * @adap: the adapter
  119. * @addr_reg: register holding the indirect addresses
  120. * @data_reg: register holding the value for the indirect registers
  121. * @vals: values to write
  122. * @nregs: how many indirect registers to write
  123. * @start_idx: address of first indirect register to write
  124. *
  125. * Writes a sequential block of registers that are accessed indirectly
  126. * through an address/data register pair.
  127. */
  128. void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
  129. unsigned int data_reg, const u32 *vals,
  130. unsigned int nregs, unsigned int start_idx)
  131. {
  132. while (nregs--) {
  133. t4_write_reg(adap, addr_reg, start_idx++);
  134. t4_write_reg(adap, data_reg, *vals++);
  135. }
  136. }
  137. /*
  138. * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
  139. * mechanism. This guarantees that we get the real value even if we're
  140. * operating within a Virtual Machine and the Hypervisor is trapping our
  141. * Configuration Space accesses.
  142. */
  143. void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
  144. {
  145. u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
  146. if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
  147. req |= ENABLE_F;
  148. else
  149. req |= T6_ENABLE_F;
  150. if (is_t4(adap->params.chip))
  151. req |= LOCALCFG_F;
  152. t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
  153. *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);
  154. /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
  155. * Configuration Space read. (None of the other fields matter when
  156. * ENABLE is 0 so a simple register write is easier than a
  157. * read-modify-write via t4_set_reg_field().)
  158. */
  159. t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
  160. }
  161. /*
  162. * t4_report_fw_error - report firmware error
  163. * @adap: the adapter
  164. *
  165. * The adapter firmware can indicate error conditions to the host.
  166. * If the firmware has indicated an error, print out the reason for
  167. * the firmware error.
  168. */
  169. static void t4_report_fw_error(struct adapter *adap)
  170. {
  171. static const char *const reason[] = {
  172. "Crash", /* PCIE_FW_EVAL_CRASH */
  173. "During Device Preparation", /* PCIE_FW_EVAL_PREP */
  174. "During Device Configuration", /* PCIE_FW_EVAL_CONF */
  175. "During Device Initialization", /* PCIE_FW_EVAL_INIT */
  176. "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
  177. "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
  178. "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
  179. "Reserved", /* reserved */
  180. };
  181. u32 pcie_fw;
  182. pcie_fw = t4_read_reg(adap, PCIE_FW_A);
  183. if (pcie_fw & PCIE_FW_ERR_F)
  184. dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
  185. reason[PCIE_FW_EVAL_G(pcie_fw)]);
  186. }
  187. /*
  188. * Get the reply to a mailbox command and store it in @rpl in big-endian order.
  189. */
  190. static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
  191. u32 mbox_addr)
  192. {
  193. for ( ; nflit; nflit--, mbox_addr += 8)
  194. *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
  195. }
  196. /*
  197. * Handle a FW assertion reported in a mailbox.
  198. */
static void fw_asrt(struct adapter *adap, u32 mbox_addr)
{
	struct fw_debug_cmd asrt;

	/* Pull the FW_DEBUG_CMD reply (the assertion record) out of the
	 * mailbox at @mbox_addr ...
	 */
	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
	/* ... and log the firmware source location plus the two values the
	 * firmware captured at the assertion site.
	 */
	dev_alert(adap->pdev_dev,
		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
		  asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
		  be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
}
  208. /**
  209. * t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
  210. * @adapter: the adapter
  211. * @cmd: the Firmware Mailbox Command or Reply
  212. * @size: command length in bytes
  213. * @access: the time (ms) needed to access the Firmware Mailbox
  214. * @execute: the time (ms) the command spent being executed
  215. */
  216. static void t4_record_mbox(struct adapter *adapter,
  217. const __be64 *cmd, unsigned int size,
  218. int access, int execute)
  219. {
  220. struct mbox_cmd_log *log = adapter->mbox_log;
  221. struct mbox_cmd *entry;
  222. int i;
  223. entry = mbox_cmd_log_entry(log, log->cursor++);
  224. if (log->cursor == log->size)
  225. log->cursor = 0;
  226. for (i = 0; i < size / 8; i++)
  227. entry->cmd[i] = be64_to_cpu(cmd[i]);
  228. while (i < MBOX_LEN / 8)
  229. entry->cmd[i++] = 0;
  230. entry->timestamp = jiffies;
  231. entry->seqno = log->seqno++;
  232. entry->access = access;
  233. entry->execute = execute;
  234. }
  235. /**
  236. * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
  237. * @adap: the adapter
  238. * @mbox: index of the mailbox to use
  239. * @cmd: the command to write
  240. * @size: command length in bytes
  241. * @rpl: where to optionally store the reply
  242. * @sleep_ok: if true we may sleep while awaiting command completion
  243. * @timeout: time to wait for command to finish before timing out
  244. *
  245. * Sends the given command to FW through the selected mailbox and waits
  246. * for the FW to execute the command. If @rpl is not %NULL it is used to
  247. * store the FW's reply to the command. The command and its optional
  248. * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
  249. * to respond. @sleep_ok determines whether we may sleep while awaiting
  250. * the response. If sleeping is allowed we use progressive backoff
  251. * otherwise we spin.
  252. *
  253. * The return value is 0 on success or a negative errno on failure. A
  254. * failure can happen either because we are not able to execute the
  255. * command or FW executes it but signals an error. In the latter case
  256. * the return value is the error code indicated by FW (negated).
  257. */
int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
			    int size, void *rpl, bool sleep_ok, int timeout)
{
	/* Progressive backoff schedule (ms) used while polling. */
	static const int delay[] = {
		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
	};

	struct mbox_list entry;
	u16 access = 0;
	u16 execute = 0;
	u32 v;
	u64 res;
	int i, ms, delay_idx, ret;
	const __be64 *p = cmd;
	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
	__be64 cmd_rpl[MBOX_LEN / 8];
	u32 pcie_fw;

	/* Mailbox commands are built from 16-byte flits and must fit in
	 * the mailbox.
	 */
	if ((size & 15) || size > MBOX_LEN)
		return -EINVAL;

	/*
	 * If the device is off-line, as in EEH, commands will time out.
	 * Fail them early so we don't waste time waiting.
	 */
	if (adap->pdev->error_state != pci_channel_io_normal)
		return -EIO;

	/* If we have a negative timeout, that implies that we can't sleep. */
	if (timeout < 0) {
		sleep_ok = false;
		timeout = -timeout;
	}

	/* Queue ourselves onto the mailbox access list.  When our entry is at
	 * the front of the list, we have rights to access the mailbox.  So we
	 * wait [for a while] till we're at the front [or bail out with an
	 * EBUSY] ...
	 */
	spin_lock(&adap->mbox_lock);
	list_add_tail(&entry.list, &adap->mlist.list);
	spin_unlock(&adap->mbox_lock);

	delay_idx = 0;
	ms = delay[0];

	for (i = 0; ; i += ms) {
		/* If we've waited too long, return a busy indication.  This
		 * really ought to be based on our initial position in the
		 * mailbox access list but this is a start.  We very rarely
		 * contend on access to the mailbox ...  Also check for a
		 * firmware error which would force an immediate failure.
		 */
		pcie_fw = t4_read_reg(adap, PCIE_FW_A);
		if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) {
			spin_lock(&adap->mbox_lock);
			list_del(&entry.list);
			spin_unlock(&adap->mbox_lock);
			ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY;
			t4_record_mbox(adap, cmd, size, access, ret);
			return ret;
		}

		/* If we're at the head, break out and start the mailbox
		 * protocol.
		 */
		if (list_first_entry(&adap->mlist.list, struct mbox_list,
				     list) == &entry)
			break;

		/* Delay for a bit before checking again ... */
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else {
			mdelay(ms);
		}
	}

	/* Loop trying to get ownership of the mailbox.  Return an error
	 * if we can't gain ownership.
	 */
	v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
		v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
	if (v != MBOX_OWNER_DRV) {
		spin_lock(&adap->mbox_lock);
		list_del(&entry.list);
		spin_unlock(&adap->mbox_lock);
		ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
		t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
		return ret;
	}

	/* Copy in the new mailbox command and send it on its way ... */
	t4_record_mbox(adap, cmd, MBOX_LEN, access, 0);
	for (i = 0; i < size; i += 8)
		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));

	/* Hand the mailbox over to the firmware and flush the write. */
	t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
	t4_read_reg(adap, ctl_reg);          /* flush write */

	delay_idx = 0;
	ms = delay[0];

	/* Poll for the reply until @timeout ms elapse or the firmware
	 * reports an error.
	 */
	for (i = 0;
	     !((pcie_fw = t4_read_reg(adap, PCIE_FW_A)) & PCIE_FW_ERR_F) &&
	     i < timeout;
	     i += ms) {
		if (sleep_ok) {
			ms = delay[delay_idx];  /* last element may repeat */
			if (delay_idx < ARRAY_SIZE(delay) - 1)
				delay_idx++;
			msleep(ms);
		} else
			mdelay(ms);

		v = t4_read_reg(adap, ctl_reg);
		if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
			/* Ownership returned without a valid message:
			 * clear the control register and keep polling.
			 */
			if (!(v & MBMSGVALID_F)) {
				t4_write_reg(adap, ctl_reg, 0);
				continue;
			}

			get_mbox_rpl(adap, cmd_rpl, MBOX_LEN / 8, data_reg);
			res = be64_to_cpu(cmd_rpl[0]);

			/* A FW_DEBUG_CMD reply is a firmware assertion, not
			 * a normal reply to our command.
			 */
			if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
				fw_asrt(adap, data_reg);
				res = FW_CMD_RETVAL_V(EIO);
			} else if (rpl) {
				memcpy(rpl, cmd_rpl, size);
			}

			t4_write_reg(adap, ctl_reg, 0);

			execute = i + ms;
			t4_record_mbox(adap, cmd_rpl,
				       MBOX_LEN, access, execute);
			spin_lock(&adap->mbox_lock);
			list_del(&entry.list);
			spin_unlock(&adap->mbox_lock);
			/* Return the firmware's (negated) return value. */
			return -FW_CMD_RETVAL_G((int)res);
		}
	}

	/* We timed out waiting for a reply (or the firmware died).  This is
	 * fatal: log the failure, dequeue ourselves and take the adapter
	 * down.
	 */
	ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
	t4_record_mbox(adap, cmd, MBOX_LEN, access, ret);
	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
		*(const u8 *)cmd, mbox);
	t4_report_fw_error(adap);
	spin_lock(&adap->mbox_lock);
	list_del(&entry.list);
	spin_unlock(&adap->mbox_lock);
	t4_fatal_err(adap);
	return ret;
}
  397. int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
  398. void *rpl, bool sleep_ok)
  399. {
  400. return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
  401. FW_CMD_MAX_TIMEOUT);
  402. }
static int t4_edc_err_read(struct adapter *adap, int idx)
{
	u32 edc_ecc_err_addr_reg;
	u32 rdata_reg;

	/* The EDC_H register map used below does not exist on T4. */
	if (is_t4(adap->params.chip)) {
		CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
		return 0;
	}
	/* Only EDC controllers 0 and 1 exist. */
	if (idx != 0 && idx != 1) {
		CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
		return 0;
	}

	/* Per-controller ECC error address and BIST status data registers. */
	edc_ecc_err_addr_reg = EDC_T5_REG(EDC_H_ECC_ERR_ADDR_A, idx);
	rdata_reg = EDC_T5_REG(EDC_H_BIST_STATUS_RDATA_A, idx);

	/* Dump the address of the most recent ECC error ... */
	CH_WARN(adap,
		"edc%d err addr 0x%x: 0x%x.\n",
		idx, edc_ecc_err_addr_reg,
		t4_read_reg(adap, edc_ecc_err_addr_reg));
	/* ... followed by 72 bytes of BIST status read data. */
	CH_WARN(adap,
		"bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
		rdata_reg,
		(unsigned long long)t4_read_reg64(adap, rdata_reg),
		(unsigned long long)t4_read_reg64(adap, rdata_reg + 8),
		(unsigned long long)t4_read_reg64(adap, rdata_reg + 16),
		(unsigned long long)t4_read_reg64(adap, rdata_reg + 24),
		(unsigned long long)t4_read_reg64(adap, rdata_reg + 32),
		(unsigned long long)t4_read_reg64(adap, rdata_reg + 40),
		(unsigned long long)t4_read_reg64(adap, rdata_reg + 48),
		(unsigned long long)t4_read_reg64(adap, rdata_reg + 56),
		(unsigned long long)t4_read_reg64(adap, rdata_reg + 64));

	return 0;
}
  435. /**
  436. * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
  437. * @adap: the adapter
  438. * @win: PCI-E Memory Window to use
  439. * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
  440. * @addr: address within indicated memory type
  441. * @len: amount of memory to transfer
  442. * @hbuf: host memory buffer
  443. * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
  444. *
  445. * Reads/writes an [almost] arbitrary memory region in the firmware: the
  446. * firmware memory address and host buffer must be aligned on 32-bit
* boundaries; the length may be arbitrary. The memory is transferred as
  448. * a raw byte sequence from/to the firmware's memory. If this memory
  449. * contains data structures which contain multi-byte integers, it's the
  450. * caller's responsibility to perform appropriate byte order conversions.
  451. */
  452. int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
  453. u32 len, void *hbuf, int dir)
  454. {
  455. u32 pos, offset, resid, memoffset;
  456. u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
  457. u32 *buf;
  458. /* Argument sanity checks ...
  459. */
  460. if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
  461. return -EINVAL;
  462. buf = (u32 *)hbuf;
  463. /* It's convenient to be able to handle lengths which aren't a
  464. * multiple of 32-bits because we often end up transferring files to
  465. * the firmware. So we'll handle that by normalizing the length here
  466. * and then handling any residual transfer at the end.
  467. */
  468. resid = len & 0x3;
  469. len -= resid;
  470. /* Offset into the region of memory which is being accessed
  471. * MEM_EDC0 = 0
  472. * MEM_EDC1 = 1
  473. * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
  474. * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
  475. */
  476. edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
  477. if (mtype != MEM_MC1)
  478. memoffset = (mtype * (edc_size * 1024 * 1024));
  479. else {
  480. mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
  481. MA_EXT_MEMORY0_BAR_A));
  482. memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
  483. }
  484. /* Determine the PCIE_MEM_ACCESS_OFFSET */
  485. addr = addr + memoffset;
  486. /* Each PCI-E Memory Window is programmed with a window size -- or
  487. * "aperture" -- which controls the granularity of its mapping onto
  488. * adapter memory. We need to grab that aperture in order to know
  489. * how to use the specified window. The window is also programmed
  490. * with the base address of the Memory Window in BAR0's address
  491. * space. For T4 this is an absolute PCI-E Bus Address. For T5
  492. * the address is relative to BAR0.
  493. */
  494. mem_reg = t4_read_reg(adap,
  495. PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
  496. win));
  497. mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
  498. mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
  499. if (is_t4(adap->params.chip))
  500. mem_base -= adap->t4_bar0;
  501. win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
  502. /* Calculate our initial PCI-E Memory Window Position and Offset into
  503. * that Window.
  504. */
  505. pos = addr & ~(mem_aperture-1);
  506. offset = addr - pos;
  507. /* Set up initial PCI-E Memory Window to cover the start of our
  508. * transfer. (Read it back to ensure that changes propagate before we
  509. * attempt to use the new value.)
  510. */
  511. t4_write_reg(adap,
  512. PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
  513. pos | win_pf);
  514. t4_read_reg(adap,
  515. PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
  516. /* Transfer data to/from the adapter as long as there's an integral
  517. * number of 32-bit transfers to complete.
  518. *
  519. * A note on Endianness issues:
  520. *
  521. * The "register" reads and writes below from/to the PCI-E Memory
  522. * Window invoke the standard adapter Big-Endian to PCI-E Link
  523. * Little-Endian "swizzel." As a result, if we have the following
  524. * data in adapter memory:
  525. *
  526. * Memory: ... | b0 | b1 | b2 | b3 | ...
  527. * Address: i+0 i+1 i+2 i+3
  528. *
  529. * Then a read of the adapter memory via the PCI-E Memory Window
  530. * will yield:
  531. *
  532. * x = readl(i)
  533. * 31 0
  534. * [ b3 | b2 | b1 | b0 ]
  535. *
  536. * If this value is stored into local memory on a Little-Endian system
  537. * it will show up correctly in local memory as:
  538. *
  539. * ( ..., b0, b1, b2, b3, ... )
  540. *
  541. * But on a Big-Endian system, the store will show up in memory
  542. * incorrectly swizzled as:
  543. *
  544. * ( ..., b3, b2, b1, b0, ... )
  545. *
  546. * So we need to account for this in the reads and writes to the
  547. * PCI-E Memory Window below by undoing the register read/write
  548. * swizzels.
  549. */
  550. while (len > 0) {
  551. if (dir == T4_MEMORY_READ)
  552. *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
  553. mem_base + offset));
  554. else
  555. t4_write_reg(adap, mem_base + offset,
  556. (__force u32)cpu_to_le32(*buf++));
  557. offset += sizeof(__be32);
  558. len -= sizeof(__be32);
  559. /* If we've reached the end of our current window aperture,
  560. * move the PCI-E Memory Window on to the next. Note that
  561. * doing this here after "len" may be 0 allows us to set up
  562. * the PCI-E Memory Window for a possible final residual
  563. * transfer below ...
  564. */
  565. if (offset == mem_aperture) {
  566. pos += mem_aperture;
  567. offset = 0;
  568. t4_write_reg(adap,
  569. PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
  570. win), pos | win_pf);
  571. t4_read_reg(adap,
  572. PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
  573. win));
  574. }
  575. }
  576. /* If the original transfer had a length which wasn't a multiple of
  577. * 32-bits, now's where we need to finish off the transfer of the
  578. * residual amount. The PCI-E Memory Window has already been moved
  579. * above (if necessary) to cover this final transfer.
  580. */
  581. if (resid) {
  582. union {
  583. u32 word;
  584. char byte[4];
  585. } last;
  586. unsigned char *bp;
  587. int i;
  588. if (dir == T4_MEMORY_READ) {
  589. last.word = le32_to_cpu(
  590. (__force __le32)t4_read_reg(adap,
  591. mem_base + offset));
  592. for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
  593. bp[i] = last.byte[i];
  594. } else {
  595. last.word = *buf;
  596. for (i = resid; i < 4; i++)
  597. last.byte[i] = 0;
  598. t4_write_reg(adap, mem_base + offset,
  599. (__force u32)cpu_to_le32(last.word));
  600. }
  601. }
  602. return 0;
  603. }
  604. /* Return the specified PCI-E Configuration Space register from our Physical
  605. * Function. We try first via a Firmware LDST Command since we prefer to let
  606. * the firmware own all of these registers, but if that fails we go for it
  607. * directly ourselves.
  608. */
  609. u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
  610. {
  611. u32 val, ldst_addrspace;
  612. /* If fw_attach != 0, construct and send the Firmware LDST Command to
  613. * retrieve the specified PCI-E Configuration Space register.
  614. */
  615. struct fw_ldst_cmd ldst_cmd;
  616. int ret;
  617. memset(&ldst_cmd, 0, sizeof(ldst_cmd));
  618. ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
  619. ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
  620. FW_CMD_REQUEST_F |
  621. FW_CMD_READ_F |
  622. ldst_addrspace);
  623. ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
  624. ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
  625. ldst_cmd.u.pcie.ctrl_to_fn =
  626. (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
  627. ldst_cmd.u.pcie.r = reg;
  628. /* If the LDST Command succeeds, return the result, otherwise
  629. * fall through to reading it directly ourselves ...
  630. */
  631. ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
  632. &ldst_cmd);
  633. if (ret == 0)
  634. val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
  635. else
  636. /* Read the desired Configuration Space register via the PCI-E
  637. * Backdoor mechanism.
  638. */
  639. t4_hw_pci_read_cfg4(adap, reg, &val);
  640. return val;
  641. }
  642. /* Get the window based on base passed to it.
  643. * Window aperture is currently unhandled, but there is no use case for it
  644. * right now
  645. */
  646. static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
  647. u32 memwin_base)
  648. {
  649. u32 ret;
  650. if (is_t4(adap->params.chip)) {
  651. u32 bar0;
  652. /* Truncation intentional: we only read the bottom 32-bits of
  653. * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
  654. * mechanism to read BAR0 instead of using
  655. * pci_resource_start() because we could be operating from
  656. * within a Virtual Machine which is trapping our accesses to
  657. * our Configuration Space and we need to set up the PCI-E
  658. * Memory Window decoders with the actual addresses which will
  659. * be coming across the PCI-E link.
  660. */
  661. bar0 = t4_read_pcie_cfg4(adap, pci_base);
  662. bar0 &= pci_mask;
  663. adap->t4_bar0 = bar0;
  664. ret = bar0 + memwin_base;
  665. } else {
  666. /* For T5, only relative offset inside the PCIe BAR is passed */
  667. ret = memwin_base;
  668. }
  669. return ret;
  670. }
  671. /* Get the default utility window (win0) used by everyone */
  672. u32 t4_get_util_window(struct adapter *adap)
  673. {
  674. return t4_get_window(adap, PCI_BASE_ADDRESS_0,
  675. PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
  676. }
  677. /* Set up memory window for accessing adapter memory ranges. (Read
  678. * back MA register to ensure that changes propagate before we attempt
  679. * to use the new values.)
  680. */
  681. void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
  682. {
  683. t4_write_reg(adap,
  684. PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window),
  685. memwin_base | BIR_V(0) |
  686. WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));
  687. t4_read_reg(adap,
  688. PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window));
  689. }
  690. /**
  691. * t4_get_regs_len - return the size of the chips register set
  692. * @adapter: the adapter
  693. *
  694. * Returns the size of the chip's BAR0 register space.
  695. */
  696. unsigned int t4_get_regs_len(struct adapter *adapter)
  697. {
  698. unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
  699. switch (chip_version) {
  700. case CHELSIO_T4:
  701. return T4_REGMAP_SIZE;
  702. case CHELSIO_T5:
  703. case CHELSIO_T6:
  704. return T5_REGMAP_SIZE;
  705. }
  706. dev_err(adapter->pdev_dev,
  707. "Unsupported chip version %d\n", chip_version);
  708. return 0;
  709. }
  710. /**
  711. * t4_get_regs - read chip registers into provided buffer
  712. * @adap: the adapter
  713. * @buf: register buffer
  714. * @buf_size: size (in bytes) of register buffer
  715. *
  716. * If the provided register buffer isn't large enough for the chip's
  717. * full register range, the register dump will be truncated to the
  718. * register buffer's size.
  719. */
  720. void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
  721. {
  722. static const unsigned int t4_reg_ranges[] = {
  723. 0x1008, 0x1108,
  724. 0x1180, 0x1184,
  725. 0x1190, 0x1194,
  726. 0x11a0, 0x11a4,
  727. 0x11b0, 0x11b4,
  728. 0x11fc, 0x123c,
  729. 0x1300, 0x173c,
  730. 0x1800, 0x18fc,
  731. 0x3000, 0x30d8,
  732. 0x30e0, 0x30e4,
  733. 0x30ec, 0x5910,
  734. 0x5920, 0x5924,
  735. 0x5960, 0x5960,
  736. 0x5968, 0x5968,
  737. 0x5970, 0x5970,
  738. 0x5978, 0x5978,
  739. 0x5980, 0x5980,
  740. 0x5988, 0x5988,
  741. 0x5990, 0x5990,
  742. 0x5998, 0x5998,
  743. 0x59a0, 0x59d4,
  744. 0x5a00, 0x5ae0,
  745. 0x5ae8, 0x5ae8,
  746. 0x5af0, 0x5af0,
  747. 0x5af8, 0x5af8,
  748. 0x6000, 0x6098,
  749. 0x6100, 0x6150,
  750. 0x6200, 0x6208,
  751. 0x6240, 0x6248,
  752. 0x6280, 0x62b0,
  753. 0x62c0, 0x6338,
  754. 0x6370, 0x638c,
  755. 0x6400, 0x643c,
  756. 0x6500, 0x6524,
  757. 0x6a00, 0x6a04,
  758. 0x6a14, 0x6a38,
  759. 0x6a60, 0x6a70,
  760. 0x6a78, 0x6a78,
  761. 0x6b00, 0x6b0c,
  762. 0x6b1c, 0x6b84,
  763. 0x6bf0, 0x6bf8,
  764. 0x6c00, 0x6c0c,
  765. 0x6c1c, 0x6c84,
  766. 0x6cf0, 0x6cf8,
  767. 0x6d00, 0x6d0c,
  768. 0x6d1c, 0x6d84,
  769. 0x6df0, 0x6df8,
  770. 0x6e00, 0x6e0c,
  771. 0x6e1c, 0x6e84,
  772. 0x6ef0, 0x6ef8,
  773. 0x6f00, 0x6f0c,
  774. 0x6f1c, 0x6f84,
  775. 0x6ff0, 0x6ff8,
  776. 0x7000, 0x700c,
  777. 0x701c, 0x7084,
  778. 0x70f0, 0x70f8,
  779. 0x7100, 0x710c,
  780. 0x711c, 0x7184,
  781. 0x71f0, 0x71f8,
  782. 0x7200, 0x720c,
  783. 0x721c, 0x7284,
  784. 0x72f0, 0x72f8,
  785. 0x7300, 0x730c,
  786. 0x731c, 0x7384,
  787. 0x73f0, 0x73f8,
  788. 0x7400, 0x7450,
  789. 0x7500, 0x7530,
  790. 0x7600, 0x760c,
  791. 0x7614, 0x761c,
  792. 0x7680, 0x76cc,
  793. 0x7700, 0x7798,
  794. 0x77c0, 0x77fc,
  795. 0x7900, 0x79fc,
  796. 0x7b00, 0x7b58,
  797. 0x7b60, 0x7b84,
  798. 0x7b8c, 0x7c38,
  799. 0x7d00, 0x7d38,
  800. 0x7d40, 0x7d80,
  801. 0x7d8c, 0x7ddc,
  802. 0x7de4, 0x7e04,
  803. 0x7e10, 0x7e1c,
  804. 0x7e24, 0x7e38,
  805. 0x7e40, 0x7e44,
  806. 0x7e4c, 0x7e78,
  807. 0x7e80, 0x7ea4,
  808. 0x7eac, 0x7edc,
  809. 0x7ee8, 0x7efc,
  810. 0x8dc0, 0x8e04,
  811. 0x8e10, 0x8e1c,
  812. 0x8e30, 0x8e78,
  813. 0x8ea0, 0x8eb8,
  814. 0x8ec0, 0x8f6c,
  815. 0x8fc0, 0x9008,
  816. 0x9010, 0x9058,
  817. 0x9060, 0x9060,
  818. 0x9068, 0x9074,
  819. 0x90fc, 0x90fc,
  820. 0x9400, 0x9408,
  821. 0x9410, 0x9458,
  822. 0x9600, 0x9600,
  823. 0x9608, 0x9638,
  824. 0x9640, 0x96bc,
  825. 0x9800, 0x9808,
  826. 0x9820, 0x983c,
  827. 0x9850, 0x9864,
  828. 0x9c00, 0x9c6c,
  829. 0x9c80, 0x9cec,
  830. 0x9d00, 0x9d6c,
  831. 0x9d80, 0x9dec,
  832. 0x9e00, 0x9e6c,
  833. 0x9e80, 0x9eec,
  834. 0x9f00, 0x9f6c,
  835. 0x9f80, 0x9fec,
  836. 0xd004, 0xd004,
  837. 0xd010, 0xd03c,
  838. 0xdfc0, 0xdfe0,
  839. 0xe000, 0xea7c,
  840. 0xf000, 0x11190,
  841. 0x19040, 0x1906c,
  842. 0x19078, 0x19080,
  843. 0x1908c, 0x190e4,
  844. 0x190f0, 0x190f8,
  845. 0x19100, 0x19110,
  846. 0x19120, 0x19124,
  847. 0x19150, 0x19194,
  848. 0x1919c, 0x191b0,
  849. 0x191d0, 0x191e8,
  850. 0x19238, 0x1924c,
  851. 0x193f8, 0x1943c,
  852. 0x1944c, 0x19474,
  853. 0x19490, 0x194e0,
  854. 0x194f0, 0x194f8,
  855. 0x19800, 0x19c08,
  856. 0x19c10, 0x19c90,
  857. 0x19ca0, 0x19ce4,
  858. 0x19cf0, 0x19d40,
  859. 0x19d50, 0x19d94,
  860. 0x19da0, 0x19de8,
  861. 0x19df0, 0x19e40,
  862. 0x19e50, 0x19e90,
  863. 0x19ea0, 0x19f4c,
  864. 0x1a000, 0x1a004,
  865. 0x1a010, 0x1a06c,
  866. 0x1a0b0, 0x1a0e4,
  867. 0x1a0ec, 0x1a0f4,
  868. 0x1a100, 0x1a108,
  869. 0x1a114, 0x1a120,
  870. 0x1a128, 0x1a130,
  871. 0x1a138, 0x1a138,
  872. 0x1a190, 0x1a1c4,
  873. 0x1a1fc, 0x1a1fc,
  874. 0x1e040, 0x1e04c,
  875. 0x1e284, 0x1e28c,
  876. 0x1e2c0, 0x1e2c0,
  877. 0x1e2e0, 0x1e2e0,
  878. 0x1e300, 0x1e384,
  879. 0x1e3c0, 0x1e3c8,
  880. 0x1e440, 0x1e44c,
  881. 0x1e684, 0x1e68c,
  882. 0x1e6c0, 0x1e6c0,
  883. 0x1e6e0, 0x1e6e0,
  884. 0x1e700, 0x1e784,
  885. 0x1e7c0, 0x1e7c8,
  886. 0x1e840, 0x1e84c,
  887. 0x1ea84, 0x1ea8c,
  888. 0x1eac0, 0x1eac0,
  889. 0x1eae0, 0x1eae0,
  890. 0x1eb00, 0x1eb84,
  891. 0x1ebc0, 0x1ebc8,
  892. 0x1ec40, 0x1ec4c,
  893. 0x1ee84, 0x1ee8c,
  894. 0x1eec0, 0x1eec0,
  895. 0x1eee0, 0x1eee0,
  896. 0x1ef00, 0x1ef84,
  897. 0x1efc0, 0x1efc8,
  898. 0x1f040, 0x1f04c,
  899. 0x1f284, 0x1f28c,
  900. 0x1f2c0, 0x1f2c0,
  901. 0x1f2e0, 0x1f2e0,
  902. 0x1f300, 0x1f384,
  903. 0x1f3c0, 0x1f3c8,
  904. 0x1f440, 0x1f44c,
  905. 0x1f684, 0x1f68c,
  906. 0x1f6c0, 0x1f6c0,
  907. 0x1f6e0, 0x1f6e0,
  908. 0x1f700, 0x1f784,
  909. 0x1f7c0, 0x1f7c8,
  910. 0x1f840, 0x1f84c,
  911. 0x1fa84, 0x1fa8c,
  912. 0x1fac0, 0x1fac0,
  913. 0x1fae0, 0x1fae0,
  914. 0x1fb00, 0x1fb84,
  915. 0x1fbc0, 0x1fbc8,
  916. 0x1fc40, 0x1fc4c,
  917. 0x1fe84, 0x1fe8c,
  918. 0x1fec0, 0x1fec0,
  919. 0x1fee0, 0x1fee0,
  920. 0x1ff00, 0x1ff84,
  921. 0x1ffc0, 0x1ffc8,
  922. 0x20000, 0x2002c,
  923. 0x20100, 0x2013c,
  924. 0x20190, 0x201a0,
  925. 0x201a8, 0x201b8,
  926. 0x201c4, 0x201c8,
  927. 0x20200, 0x20318,
  928. 0x20400, 0x204b4,
  929. 0x204c0, 0x20528,
  930. 0x20540, 0x20614,
  931. 0x21000, 0x21040,
  932. 0x2104c, 0x21060,
  933. 0x210c0, 0x210ec,
  934. 0x21200, 0x21268,
  935. 0x21270, 0x21284,
  936. 0x212fc, 0x21388,
  937. 0x21400, 0x21404,
  938. 0x21500, 0x21500,
  939. 0x21510, 0x21518,
  940. 0x2152c, 0x21530,
  941. 0x2153c, 0x2153c,
  942. 0x21550, 0x21554,
  943. 0x21600, 0x21600,
  944. 0x21608, 0x2161c,
  945. 0x21624, 0x21628,
  946. 0x21630, 0x21634,
  947. 0x2163c, 0x2163c,
  948. 0x21700, 0x2171c,
  949. 0x21780, 0x2178c,
  950. 0x21800, 0x21818,
  951. 0x21820, 0x21828,
  952. 0x21830, 0x21848,
  953. 0x21850, 0x21854,
  954. 0x21860, 0x21868,
  955. 0x21870, 0x21870,
  956. 0x21878, 0x21898,
  957. 0x218a0, 0x218a8,
  958. 0x218b0, 0x218c8,
  959. 0x218d0, 0x218d4,
  960. 0x218e0, 0x218e8,
  961. 0x218f0, 0x218f0,
  962. 0x218f8, 0x21a18,
  963. 0x21a20, 0x21a28,
  964. 0x21a30, 0x21a48,
  965. 0x21a50, 0x21a54,
  966. 0x21a60, 0x21a68,
  967. 0x21a70, 0x21a70,
  968. 0x21a78, 0x21a98,
  969. 0x21aa0, 0x21aa8,
  970. 0x21ab0, 0x21ac8,
  971. 0x21ad0, 0x21ad4,
  972. 0x21ae0, 0x21ae8,
  973. 0x21af0, 0x21af0,
  974. 0x21af8, 0x21c18,
  975. 0x21c20, 0x21c20,
  976. 0x21c28, 0x21c30,
  977. 0x21c38, 0x21c38,
  978. 0x21c80, 0x21c98,
  979. 0x21ca0, 0x21ca8,
  980. 0x21cb0, 0x21cc8,
  981. 0x21cd0, 0x21cd4,
  982. 0x21ce0, 0x21ce8,
  983. 0x21cf0, 0x21cf0,
  984. 0x21cf8, 0x21d7c,
  985. 0x21e00, 0x21e04,
  986. 0x22000, 0x2202c,
  987. 0x22100, 0x2213c,
  988. 0x22190, 0x221a0,
  989. 0x221a8, 0x221b8,
  990. 0x221c4, 0x221c8,
  991. 0x22200, 0x22318,
  992. 0x22400, 0x224b4,
  993. 0x224c0, 0x22528,
  994. 0x22540, 0x22614,
  995. 0x23000, 0x23040,
  996. 0x2304c, 0x23060,
  997. 0x230c0, 0x230ec,
  998. 0x23200, 0x23268,
  999. 0x23270, 0x23284,
  1000. 0x232fc, 0x23388,
  1001. 0x23400, 0x23404,
  1002. 0x23500, 0x23500,
  1003. 0x23510, 0x23518,
  1004. 0x2352c, 0x23530,
  1005. 0x2353c, 0x2353c,
  1006. 0x23550, 0x23554,
  1007. 0x23600, 0x23600,
  1008. 0x23608, 0x2361c,
  1009. 0x23624, 0x23628,
  1010. 0x23630, 0x23634,
  1011. 0x2363c, 0x2363c,
  1012. 0x23700, 0x2371c,
  1013. 0x23780, 0x2378c,
  1014. 0x23800, 0x23818,
  1015. 0x23820, 0x23828,
  1016. 0x23830, 0x23848,
  1017. 0x23850, 0x23854,
  1018. 0x23860, 0x23868,
  1019. 0x23870, 0x23870,
  1020. 0x23878, 0x23898,
  1021. 0x238a0, 0x238a8,
  1022. 0x238b0, 0x238c8,
  1023. 0x238d0, 0x238d4,
  1024. 0x238e0, 0x238e8,
  1025. 0x238f0, 0x238f0,
  1026. 0x238f8, 0x23a18,
  1027. 0x23a20, 0x23a28,
  1028. 0x23a30, 0x23a48,
  1029. 0x23a50, 0x23a54,
  1030. 0x23a60, 0x23a68,
  1031. 0x23a70, 0x23a70,
  1032. 0x23a78, 0x23a98,
  1033. 0x23aa0, 0x23aa8,
  1034. 0x23ab0, 0x23ac8,
  1035. 0x23ad0, 0x23ad4,
  1036. 0x23ae0, 0x23ae8,
  1037. 0x23af0, 0x23af0,
  1038. 0x23af8, 0x23c18,
  1039. 0x23c20, 0x23c20,
  1040. 0x23c28, 0x23c30,
  1041. 0x23c38, 0x23c38,
  1042. 0x23c80, 0x23c98,
  1043. 0x23ca0, 0x23ca8,
  1044. 0x23cb0, 0x23cc8,
  1045. 0x23cd0, 0x23cd4,
  1046. 0x23ce0, 0x23ce8,
  1047. 0x23cf0, 0x23cf0,
  1048. 0x23cf8, 0x23d7c,
  1049. 0x23e00, 0x23e04,
  1050. 0x24000, 0x2402c,
  1051. 0x24100, 0x2413c,
  1052. 0x24190, 0x241a0,
  1053. 0x241a8, 0x241b8,
  1054. 0x241c4, 0x241c8,
  1055. 0x24200, 0x24318,
  1056. 0x24400, 0x244b4,
  1057. 0x244c0, 0x24528,
  1058. 0x24540, 0x24614,
  1059. 0x25000, 0x25040,
  1060. 0x2504c, 0x25060,
  1061. 0x250c0, 0x250ec,
  1062. 0x25200, 0x25268,
  1063. 0x25270, 0x25284,
  1064. 0x252fc, 0x25388,
  1065. 0x25400, 0x25404,
  1066. 0x25500, 0x25500,
  1067. 0x25510, 0x25518,
  1068. 0x2552c, 0x25530,
  1069. 0x2553c, 0x2553c,
  1070. 0x25550, 0x25554,
  1071. 0x25600, 0x25600,
  1072. 0x25608, 0x2561c,
  1073. 0x25624, 0x25628,
  1074. 0x25630, 0x25634,
  1075. 0x2563c, 0x2563c,
  1076. 0x25700, 0x2571c,
  1077. 0x25780, 0x2578c,
  1078. 0x25800, 0x25818,
  1079. 0x25820, 0x25828,
  1080. 0x25830, 0x25848,
  1081. 0x25850, 0x25854,
  1082. 0x25860, 0x25868,
  1083. 0x25870, 0x25870,
  1084. 0x25878, 0x25898,
  1085. 0x258a0, 0x258a8,
  1086. 0x258b0, 0x258c8,
  1087. 0x258d0, 0x258d4,
  1088. 0x258e0, 0x258e8,
  1089. 0x258f0, 0x258f0,
  1090. 0x258f8, 0x25a18,
  1091. 0x25a20, 0x25a28,
  1092. 0x25a30, 0x25a48,
  1093. 0x25a50, 0x25a54,
  1094. 0x25a60, 0x25a68,
  1095. 0x25a70, 0x25a70,
  1096. 0x25a78, 0x25a98,
  1097. 0x25aa0, 0x25aa8,
  1098. 0x25ab0, 0x25ac8,
  1099. 0x25ad0, 0x25ad4,
  1100. 0x25ae0, 0x25ae8,
  1101. 0x25af0, 0x25af0,
  1102. 0x25af8, 0x25c18,
  1103. 0x25c20, 0x25c20,
  1104. 0x25c28, 0x25c30,
  1105. 0x25c38, 0x25c38,
  1106. 0x25c80, 0x25c98,
  1107. 0x25ca0, 0x25ca8,
  1108. 0x25cb0, 0x25cc8,
  1109. 0x25cd0, 0x25cd4,
  1110. 0x25ce0, 0x25ce8,
  1111. 0x25cf0, 0x25cf0,
  1112. 0x25cf8, 0x25d7c,
  1113. 0x25e00, 0x25e04,
  1114. 0x26000, 0x2602c,
  1115. 0x26100, 0x2613c,
  1116. 0x26190, 0x261a0,
  1117. 0x261a8, 0x261b8,
  1118. 0x261c4, 0x261c8,
  1119. 0x26200, 0x26318,
  1120. 0x26400, 0x264b4,
  1121. 0x264c0, 0x26528,
  1122. 0x26540, 0x26614,
  1123. 0x27000, 0x27040,
  1124. 0x2704c, 0x27060,
  1125. 0x270c0, 0x270ec,
  1126. 0x27200, 0x27268,
  1127. 0x27270, 0x27284,
  1128. 0x272fc, 0x27388,
  1129. 0x27400, 0x27404,
  1130. 0x27500, 0x27500,
  1131. 0x27510, 0x27518,
  1132. 0x2752c, 0x27530,
  1133. 0x2753c, 0x2753c,
  1134. 0x27550, 0x27554,
  1135. 0x27600, 0x27600,
  1136. 0x27608, 0x2761c,
  1137. 0x27624, 0x27628,
  1138. 0x27630, 0x27634,
  1139. 0x2763c, 0x2763c,
  1140. 0x27700, 0x2771c,
  1141. 0x27780, 0x2778c,
  1142. 0x27800, 0x27818,
  1143. 0x27820, 0x27828,
  1144. 0x27830, 0x27848,
  1145. 0x27850, 0x27854,
  1146. 0x27860, 0x27868,
  1147. 0x27870, 0x27870,
  1148. 0x27878, 0x27898,
  1149. 0x278a0, 0x278a8,
  1150. 0x278b0, 0x278c8,
  1151. 0x278d0, 0x278d4,
  1152. 0x278e0, 0x278e8,
  1153. 0x278f0, 0x278f0,
  1154. 0x278f8, 0x27a18,
  1155. 0x27a20, 0x27a28,
  1156. 0x27a30, 0x27a48,
  1157. 0x27a50, 0x27a54,
  1158. 0x27a60, 0x27a68,
  1159. 0x27a70, 0x27a70,
  1160. 0x27a78, 0x27a98,
  1161. 0x27aa0, 0x27aa8,
  1162. 0x27ab0, 0x27ac8,
  1163. 0x27ad0, 0x27ad4,
  1164. 0x27ae0, 0x27ae8,
  1165. 0x27af0, 0x27af0,
  1166. 0x27af8, 0x27c18,
  1167. 0x27c20, 0x27c20,
  1168. 0x27c28, 0x27c30,
  1169. 0x27c38, 0x27c38,
  1170. 0x27c80, 0x27c98,
  1171. 0x27ca0, 0x27ca8,
  1172. 0x27cb0, 0x27cc8,
  1173. 0x27cd0, 0x27cd4,
  1174. 0x27ce0, 0x27ce8,
  1175. 0x27cf0, 0x27cf0,
  1176. 0x27cf8, 0x27d7c,
  1177. 0x27e00, 0x27e04,
  1178. };
/*
 * Register address ranges for the T5 adapter, stored as consecutive
 * {first, last} pairs of 32-bit register addresses.  Each pair denotes an
 * inclusive range; a single register appears as an equal pair (e.g.
 * 0x1100, 0x1100).  The visible entries are listed in ascending address
 * order with gaps between ranges skipping holes in the register map.
 *
 * NOTE(review): presumably consumed by the T5 branch of the register-dump
 * path (e.g. t4_get_regs()), which is outside this view — confirm against
 * the caller.  Any edit must keep the pair structure intact (an even
 * element count) and addresses that are valid to read on T5 hardware;
 * reading an unimplemented address can fault the adapter.
 */
static const unsigned int t5_reg_ranges[] = {
	0x1008, 0x10c0,
	0x10cc, 0x10f8,
	0x1100, 0x1100,
	0x110c, 0x1148,
	0x1180, 0x1184,
	0x1190, 0x1194,
	0x11a0, 0x11a4,
	0x11b0, 0x11b4,
	0x11fc, 0x123c,
	0x1280, 0x173c,
	0x1800, 0x18fc,
	0x3000, 0x3028,
	0x3060, 0x30b0,
	0x30b8, 0x30d8,
	0x30e0, 0x30fc,
	0x3140, 0x357c,
	0x35a8, 0x35cc,
	0x35ec, 0x35ec,
	0x3600, 0x5624,
	0x56cc, 0x56ec,
	0x56f4, 0x5720,
	0x5728, 0x575c,
	0x580c, 0x5814,
	0x5890, 0x589c,
	0x58a4, 0x58ac,
	0x58b8, 0x58bc,
	0x5940, 0x59c8,
	0x59d0, 0x59dc,
	0x59fc, 0x5a18,
	0x5a60, 0x5a70,
	0x5a80, 0x5a9c,
	0x5b94, 0x5bfc,
	0x6000, 0x6020,
	0x6028, 0x6040,
	0x6058, 0x609c,
	0x60a8, 0x614c,
	0x7700, 0x7798,
	0x77c0, 0x78fc,
	0x7b00, 0x7b58,
	0x7b60, 0x7b84,
	0x7b8c, 0x7c54,
	0x7d00, 0x7d38,
	0x7d40, 0x7d80,
	0x7d8c, 0x7ddc,
	0x7de4, 0x7e04,
	0x7e10, 0x7e1c,
	0x7e24, 0x7e38,
	0x7e40, 0x7e44,
	0x7e4c, 0x7e78,
	0x7e80, 0x7edc,
	0x7ee8, 0x7efc,
	0x8dc0, 0x8de0,
	0x8df8, 0x8e04,
	0x8e10, 0x8e84,
	0x8ea0, 0x8f84,
	0x8fc0, 0x9058,
	0x9060, 0x9060,
	0x9068, 0x90f8,
	0x9400, 0x9408,
	0x9410, 0x9470,
	0x9600, 0x9600,
	0x9608, 0x9638,
	0x9640, 0x96f4,
	0x9800, 0x9808,
	0x9820, 0x983c,
	0x9850, 0x9864,
	0x9c00, 0x9c6c,
	0x9c80, 0x9cec,
	0x9d00, 0x9d6c,
	0x9d80, 0x9dec,
	0x9e00, 0x9e6c,
	0x9e80, 0x9eec,
	0x9f00, 0x9f6c,
	0x9f80, 0xa020,
	0xd004, 0xd004,
	0xd010, 0xd03c,
	0xdfc0, 0xdfe0,
	0xe000, 0x1106c,
	0x11074, 0x11088,
	0x1109c, 0x1117c,
	0x11190, 0x11204,
	0x19040, 0x1906c,
	0x19078, 0x19080,
	0x1908c, 0x190e8,
	0x190f0, 0x190f8,
	0x19100, 0x19110,
	0x19120, 0x19124,
	0x19150, 0x19194,
	0x1919c, 0x191b0,
	0x191d0, 0x191e8,
	0x19238, 0x19290,
	0x193f8, 0x19428,
	0x19430, 0x19444,
	0x1944c, 0x1946c,
	0x19474, 0x19474,
	0x19490, 0x194cc,
	0x194f0, 0x194f8,
	0x19c00, 0x19c08,
	0x19c10, 0x19c60,
	0x19c94, 0x19ce4,
	0x19cf0, 0x19d40,
	0x19d50, 0x19d94,
	0x19da0, 0x19de8,
	0x19df0, 0x19e10,
	0x19e50, 0x19e90,
	0x19ea0, 0x19f24,
	0x19f34, 0x19f34,
	0x19f40, 0x19f50,
	0x19f90, 0x19fb4,
	0x19fc4, 0x19fe4,
	0x1a000, 0x1a004,
	0x1a010, 0x1a06c,
	0x1a0b0, 0x1a0e4,
	0x1a0ec, 0x1a0f8,
	0x1a100, 0x1a108,
	0x1a114, 0x1a120,
	0x1a128, 0x1a130,
	0x1a138, 0x1a138,
	0x1a190, 0x1a1c4,
	0x1a1fc, 0x1a1fc,
	0x1e008, 0x1e00c,
	0x1e040, 0x1e044,
	0x1e04c, 0x1e04c,
	0x1e284, 0x1e290,
	0x1e2c0, 0x1e2c0,
	0x1e2e0, 0x1e2e0,
	0x1e300, 0x1e384,
	0x1e3c0, 0x1e3c8,
	0x1e408, 0x1e40c,
	0x1e440, 0x1e444,
	0x1e44c, 0x1e44c,
	0x1e684, 0x1e690,
	0x1e6c0, 0x1e6c0,
	0x1e6e0, 0x1e6e0,
	0x1e700, 0x1e784,
	0x1e7c0, 0x1e7c8,
	0x1e808, 0x1e80c,
	0x1e840, 0x1e844,
	0x1e84c, 0x1e84c,
	0x1ea84, 0x1ea90,
	0x1eac0, 0x1eac0,
	0x1eae0, 0x1eae0,
	0x1eb00, 0x1eb84,
	0x1ebc0, 0x1ebc8,
	0x1ec08, 0x1ec0c,
	0x1ec40, 0x1ec44,
	0x1ec4c, 0x1ec4c,
	0x1ee84, 0x1ee90,
	0x1eec0, 0x1eec0,
	0x1eee0, 0x1eee0,
	0x1ef00, 0x1ef84,
	0x1efc0, 0x1efc8,
	0x1f008, 0x1f00c,
	0x1f040, 0x1f044,
	0x1f04c, 0x1f04c,
	0x1f284, 0x1f290,
	0x1f2c0, 0x1f2c0,
	0x1f2e0, 0x1f2e0,
	0x1f300, 0x1f384,
	0x1f3c0, 0x1f3c8,
	0x1f408, 0x1f40c,
	0x1f440, 0x1f444,
	0x1f44c, 0x1f44c,
	0x1f684, 0x1f690,
	0x1f6c0, 0x1f6c0,
	0x1f6e0, 0x1f6e0,
	0x1f700, 0x1f784,
	0x1f7c0, 0x1f7c8,
	0x1f808, 0x1f80c,
	0x1f840, 0x1f844,
	0x1f84c, 0x1f84c,
	0x1fa84, 0x1fa90,
	0x1fac0, 0x1fac0,
	0x1fae0, 0x1fae0,
	0x1fb00, 0x1fb84,
	0x1fbc0, 0x1fbc8,
	0x1fc08, 0x1fc0c,
	0x1fc40, 0x1fc44,
	0x1fc4c, 0x1fc4c,
	0x1fe84, 0x1fe90,
	0x1fec0, 0x1fec0,
	0x1fee0, 0x1fee0,
	0x1ff00, 0x1ff84,
	0x1ffc0, 0x1ffc8,
	0x30000, 0x30030,
	0x30038, 0x30038,
	0x30040, 0x30040,
	0x30100, 0x30144,
	0x30190, 0x301a0,
	0x301a8, 0x301b8,
	0x301c4, 0x301c8,
	0x301d0, 0x301d0,
	0x30200, 0x30318,
	0x30400, 0x304b4,
	0x304c0, 0x3052c,
	0x30540, 0x3061c,
	0x30800, 0x30828,
	0x30834, 0x30834,
	0x308c0, 0x30908,
	0x30910, 0x309ac,
	0x30a00, 0x30a14,
	0x30a1c, 0x30a2c,
	0x30a44, 0x30a50,
	0x30a74, 0x30a74,
	0x30a7c, 0x30afc,
	0x30b08, 0x30c24,
	0x30d00, 0x30d00,
	0x30d08, 0x30d14,
	0x30d1c, 0x30d20,
	0x30d3c, 0x30d3c,
	0x30d48, 0x30d50,
	0x31200, 0x3120c,
	0x31220, 0x31220,
	0x31240, 0x31240,
	0x31600, 0x3160c,
	0x31a00, 0x31a1c,
	0x31e00, 0x31e20,
	0x31e38, 0x31e3c,
	0x31e80, 0x31e80,
	0x31e88, 0x31ea8,
	0x31eb0, 0x31eb4,
	0x31ec8, 0x31ed4,
	0x31fb8, 0x32004,
	0x32200, 0x32200,
	0x32208, 0x32240,
	0x32248, 0x32280,
	0x32288, 0x322c0,
	0x322c8, 0x322fc,
	0x32600, 0x32630,
	0x32a00, 0x32abc,
	0x32b00, 0x32b10,
	0x32b20, 0x32b30,
	0x32b40, 0x32b50,
	0x32b60, 0x32b70,
	0x33000, 0x33028,
	0x33030, 0x33048,
	0x33060, 0x33068,
	0x33070, 0x3309c,
	0x330f0, 0x33128,
	0x33130, 0x33148,
	0x33160, 0x33168,
	0x33170, 0x3319c,
	0x331f0, 0x33238,
	0x33240, 0x33240,
	0x33248, 0x33250,
	0x3325c, 0x33264,
	0x33270, 0x332b8,
	0x332c0, 0x332e4,
	0x332f8, 0x33338,
	0x33340, 0x33340,
	0x33348, 0x33350,
	0x3335c, 0x33364,
	0x33370, 0x333b8,
	0x333c0, 0x333e4,
	0x333f8, 0x33428,
	0x33430, 0x33448,
	0x33460, 0x33468,
	0x33470, 0x3349c,
	0x334f0, 0x33528,
	0x33530, 0x33548,
	0x33560, 0x33568,
	0x33570, 0x3359c,
	0x335f0, 0x33638,
	0x33640, 0x33640,
	0x33648, 0x33650,
	0x3365c, 0x33664,
	0x33670, 0x336b8,
	0x336c0, 0x336e4,
	0x336f8, 0x33738,
	0x33740, 0x33740,
	0x33748, 0x33750,
	0x3375c, 0x33764,
	0x33770, 0x337b8,
	0x337c0, 0x337e4,
	0x337f8, 0x337fc,
	0x33814, 0x33814,
	0x3382c, 0x3382c,
	0x33880, 0x3388c,
	0x338e8, 0x338ec,
	0x33900, 0x33928,
	0x33930, 0x33948,
	0x33960, 0x33968,
	0x33970, 0x3399c,
	0x339f0, 0x33a38,
	0x33a40, 0x33a40,
	0x33a48, 0x33a50,
	0x33a5c, 0x33a64,
	0x33a70, 0x33ab8,
	0x33ac0, 0x33ae4,
	0x33af8, 0x33b10,
	0x33b28, 0x33b28,
	0x33b3c, 0x33b50,
	0x33bf0, 0x33c10,
	0x33c28, 0x33c28,
	0x33c3c, 0x33c50,
	0x33cf0, 0x33cfc,
	0x34000, 0x34030,
	0x34038, 0x34038,
	0x34040, 0x34040,
	0x34100, 0x34144,
	0x34190, 0x341a0,
	0x341a8, 0x341b8,
	0x341c4, 0x341c8,
	0x341d0, 0x341d0,
	0x34200, 0x34318,
	0x34400, 0x344b4,
	0x344c0, 0x3452c,
	0x34540, 0x3461c,
	0x34800, 0x34828,
	0x34834, 0x34834,
	0x348c0, 0x34908,
	0x34910, 0x349ac,
	0x34a00, 0x34a14,
	0x34a1c, 0x34a2c,
	0x34a44, 0x34a50,
	0x34a74, 0x34a74,
	0x34a7c, 0x34afc,
	0x34b08, 0x34c24,
	0x34d00, 0x34d00,
	0x34d08, 0x34d14,
	0x34d1c, 0x34d20,
	0x34d3c, 0x34d3c,
	0x34d48, 0x34d50,
	0x35200, 0x3520c,
	0x35220, 0x35220,
	0x35240, 0x35240,
	0x35600, 0x3560c,
	0x35a00, 0x35a1c,
	0x35e00, 0x35e20,
	0x35e38, 0x35e3c,
	0x35e80, 0x35e80,
	0x35e88, 0x35ea8,
	0x35eb0, 0x35eb4,
	0x35ec8, 0x35ed4,
	0x35fb8, 0x36004,
	0x36200, 0x36200,
	0x36208, 0x36240,
	0x36248, 0x36280,
	0x36288, 0x362c0,
	0x362c8, 0x362fc,
	0x36600, 0x36630,
	0x36a00, 0x36abc,
	0x36b00, 0x36b10,
	0x36b20, 0x36b30,
	0x36b40, 0x36b50,
	0x36b60, 0x36b70,
	0x37000, 0x37028,
	0x37030, 0x37048,
	0x37060, 0x37068,
	0x37070, 0x3709c,
	0x370f0, 0x37128,
	0x37130, 0x37148,
	0x37160, 0x37168,
	0x37170, 0x3719c,
	0x371f0, 0x37238,
	0x37240, 0x37240,
	0x37248, 0x37250,
	0x3725c, 0x37264,
	0x37270, 0x372b8,
	0x372c0, 0x372e4,
	0x372f8, 0x37338,
	0x37340, 0x37340,
	0x37348, 0x37350,
	0x3735c, 0x37364,
	0x37370, 0x373b8,
	0x373c0, 0x373e4,
	0x373f8, 0x37428,
	0x37430, 0x37448,
	0x37460, 0x37468,
	0x37470, 0x3749c,
	0x374f0, 0x37528,
	0x37530, 0x37548,
	0x37560, 0x37568,
	0x37570, 0x3759c,
	0x375f0, 0x37638,
	0x37640, 0x37640,
	0x37648, 0x37650,
	0x3765c, 0x37664,
	0x37670, 0x376b8,
	0x376c0, 0x376e4,
	0x376f8, 0x37738,
	0x37740, 0x37740,
	0x37748, 0x37750,
	0x3775c, 0x37764,
	0x37770, 0x377b8,
	0x377c0, 0x377e4,
	0x377f8, 0x377fc,
	0x37814, 0x37814,
	0x3782c, 0x3782c,
	0x37880, 0x3788c,
	0x378e8, 0x378ec,
	0x37900, 0x37928,
	0x37930, 0x37948,
	0x37960, 0x37968,
	0x37970, 0x3799c,
	0x379f0, 0x37a38,
	0x37a40, 0x37a40,
	0x37a48, 0x37a50,
	0x37a5c, 0x37a64,
	0x37a70, 0x37ab8,
	0x37ac0, 0x37ae4,
	0x37af8, 0x37b10,
	0x37b28, 0x37b28,
	0x37b3c, 0x37b50,
	0x37bf0, 0x37c10,
	0x37c28, 0x37c28,
	0x37c3c, 0x37c50,
	0x37cf0, 0x37cfc,
	0x38000, 0x38030,
	0x38038, 0x38038,
	0x38040, 0x38040,
	0x38100, 0x38144,
	0x38190, 0x381a0,
	0x381a8, 0x381b8,
	0x381c4, 0x381c8,
	0x381d0, 0x381d0,
	0x38200, 0x38318,
	0x38400, 0x384b4,
	0x384c0, 0x3852c,
	0x38540, 0x3861c,
	0x38800, 0x38828,
	0x38834, 0x38834,
	0x388c0, 0x38908,
	0x38910, 0x389ac,
	0x38a00, 0x38a14,
	0x38a1c, 0x38a2c,
	0x38a44, 0x38a50,
	0x38a74, 0x38a74,
	0x38a7c, 0x38afc,
	0x38b08, 0x38c24,
	0x38d00, 0x38d00,
	0x38d08, 0x38d14,
	0x38d1c, 0x38d20,
	0x38d3c, 0x38d3c,
	0x38d48, 0x38d50,
	0x39200, 0x3920c,
	0x39220, 0x39220,
	0x39240, 0x39240,
	0x39600, 0x3960c,
	0x39a00, 0x39a1c,
	0x39e00, 0x39e20,
	0x39e38, 0x39e3c,
	0x39e80, 0x39e80,
	0x39e88, 0x39ea8,
	0x39eb0, 0x39eb4,
	0x39ec8, 0x39ed4,
	0x39fb8, 0x3a004,
	0x3a200, 0x3a200,
	0x3a208, 0x3a240,
	0x3a248, 0x3a280,
	0x3a288, 0x3a2c0,
	0x3a2c8, 0x3a2fc,
	0x3a600, 0x3a630,
	0x3aa00, 0x3aabc,
	0x3ab00, 0x3ab10,
	0x3ab20, 0x3ab30,
	0x3ab40, 0x3ab50,
	0x3ab60, 0x3ab70,
	0x3b000, 0x3b028,
	0x3b030, 0x3b048,
	0x3b060, 0x3b068,
	0x3b070, 0x3b09c,
	0x3b0f0, 0x3b128,
	0x3b130, 0x3b148,
	0x3b160, 0x3b168,
	0x3b170, 0x3b19c,
	0x3b1f0, 0x3b238,
	0x3b240, 0x3b240,
	0x3b248, 0x3b250,
	0x3b25c, 0x3b264,
	0x3b270, 0x3b2b8,
	0x3b2c0, 0x3b2e4,
	0x3b2f8, 0x3b338,
	0x3b340, 0x3b340,
	0x3b348, 0x3b350,
	0x3b35c, 0x3b364,
	0x3b370, 0x3b3b8,
	0x3b3c0, 0x3b3e4,
	0x3b3f8, 0x3b428,
	0x3b430, 0x3b448,
	0x3b460, 0x3b468,
	0x3b470, 0x3b49c,
	0x3b4f0, 0x3b528,
	0x3b530, 0x3b548,
	0x3b560, 0x3b568,
	0x3b570, 0x3b59c,
	0x3b5f0, 0x3b638,
	0x3b640, 0x3b640,
	0x3b648, 0x3b650,
	0x3b65c, 0x3b664,
	0x3b670, 0x3b6b8,
	0x3b6c0, 0x3b6e4,
	0x3b6f8, 0x3b738,
	0x3b740, 0x3b740,
	0x3b748, 0x3b750,
	0x3b75c, 0x3b764,
	0x3b770, 0x3b7b8,
	0x3b7c0, 0x3b7e4,
	0x3b7f8, 0x3b7fc,
	0x3b814, 0x3b814,
	0x3b82c, 0x3b82c,
	0x3b880, 0x3b88c,
	0x3b8e8, 0x3b8ec,
	0x3b900, 0x3b928,
	0x3b930, 0x3b948,
	0x3b960, 0x3b968,
	0x3b970, 0x3b99c,
	0x3b9f0, 0x3ba38,
	0x3ba40, 0x3ba40,
	0x3ba48, 0x3ba50,
	0x3ba5c, 0x3ba64,
	0x3ba70, 0x3bab8,
	0x3bac0, 0x3bae4,
	0x3baf8, 0x3bb10,
	0x3bb28, 0x3bb28,
	0x3bb3c, 0x3bb50,
	0x3bbf0, 0x3bc10,
	0x3bc28, 0x3bc28,
	0x3bc3c, 0x3bc50,
	0x3bcf0, 0x3bcfc,
	0x3c000, 0x3c030,
	0x3c038, 0x3c038,
	0x3c040, 0x3c040,
	0x3c100, 0x3c144,
	0x3c190, 0x3c1a0,
	0x3c1a8, 0x3c1b8,
	0x3c1c4, 0x3c1c8,
	0x3c1d0, 0x3c1d0,
	0x3c200, 0x3c318,
	0x3c400, 0x3c4b4,
	0x3c4c0, 0x3c52c,
	0x3c540, 0x3c61c,
	0x3c800, 0x3c828,
	0x3c834, 0x3c834,
	0x3c8c0, 0x3c908,
	0x3c910, 0x3c9ac,
	0x3ca00, 0x3ca14,
	0x3ca1c, 0x3ca2c,
	0x3ca44, 0x3ca50,
	0x3ca74, 0x3ca74,
	0x3ca7c, 0x3cafc,
	0x3cb08, 0x3cc24,
	0x3cd00, 0x3cd00,
	0x3cd08, 0x3cd14,
	0x3cd1c, 0x3cd20,
	0x3cd3c, 0x3cd3c,
	0x3cd48, 0x3cd50,
	0x3d200, 0x3d20c,
	0x3d220, 0x3d220,
	0x3d240, 0x3d240,
	0x3d600, 0x3d60c,
	0x3da00, 0x3da1c,
	0x3de00, 0x3de20,
	0x3de38, 0x3de3c,
	0x3de80, 0x3de80,
	0x3de88, 0x3dea8,
	0x3deb0, 0x3deb4,
	0x3dec8, 0x3ded4,
	0x3dfb8, 0x3e004,
	0x3e200, 0x3e200,
	0x3e208, 0x3e240,
	0x3e248, 0x3e280,
	0x3e288, 0x3e2c0,
	0x3e2c8, 0x3e2fc,
	0x3e600, 0x3e630,
	0x3ea00, 0x3eabc,
	0x3eb00, 0x3eb10,
	0x3eb20, 0x3eb30,
	0x3eb40, 0x3eb50,
	0x3eb60, 0x3eb70,
	0x3f000, 0x3f028,
	0x3f030, 0x3f048,
	0x3f060, 0x3f068,
	0x3f070, 0x3f09c,
	0x3f0f0, 0x3f128,
	0x3f130, 0x3f148,
	0x3f160, 0x3f168,
	0x3f170, 0x3f19c,
	0x3f1f0, 0x3f238,
	0x3f240, 0x3f240,
	0x3f248, 0x3f250,
	0x3f25c, 0x3f264,
	0x3f270, 0x3f2b8,
	0x3f2c0, 0x3f2e4,
	0x3f2f8, 0x3f338,
	0x3f340, 0x3f340,
	0x3f348, 0x3f350,
	0x3f35c, 0x3f364,
	0x3f370, 0x3f3b8,
	0x3f3c0, 0x3f3e4,
	0x3f3f8, 0x3f428,
	0x3f430, 0x3f448,
	0x3f460, 0x3f468,
	0x3f470, 0x3f49c,
	0x3f4f0, 0x3f528,
	0x3f530, 0x3f548,
	0x3f560, 0x3f568,
	0x3f570, 0x3f59c,
	0x3f5f0, 0x3f638,
	0x3f640, 0x3f640,
	0x3f648, 0x3f650,
	0x3f65c, 0x3f664,
	0x3f670, 0x3f6b8,
	0x3f6c0, 0x3f6e4,
	0x3f6f8, 0x3f738,
	0x3f740, 0x3f740,
	0x3f748, 0x3f750,
	0x3f75c, 0x3f764,
	0x3f770, 0x3f7b8,
	0x3f7c0, 0x3f7e4,
	0x3f7f8, 0x3f7fc,
	0x3f814, 0x3f814,
	0x3f82c, 0x3f82c,
	0x3f880, 0x3f88c,
	0x3f8e8, 0x3f8ec,
	0x3f900, 0x3f928,
	0x3f930, 0x3f948,
	0x3f960, 0x3f968,
	0x3f970, 0x3f99c,
	0x3f9f0, 0x3fa38,
	0x3fa40, 0x3fa40,
	0x3fa48, 0x3fa50,
	0x3fa5c, 0x3fa64,
	0x3fa70, 0x3fab8,
	0x3fac0, 0x3fae4,
	0x3faf8, 0x3fb10,
	0x3fb28, 0x3fb28,
	0x3fb3c, 0x3fb50,
	0x3fbf0, 0x3fc10,
	0x3fc28, 0x3fc28,
	0x3fc3c, 0x3fc50,
	0x3fcf0, 0x3fcfc,
	0x40000, 0x4000c,
	0x40040, 0x40050,
	0x40060, 0x40068,
	0x4007c, 0x4008c,
	0x40094, 0x400b0,
	0x400c0, 0x40144,
	0x40180, 0x4018c,
	0x40200, 0x40254,
	0x40260, 0x40264,
	0x40270, 0x40288,
	0x40290, 0x40298,
	0x402ac, 0x402c8,
	0x402d0, 0x402e0,
	0x402f0, 0x402f0,
	0x40300, 0x4033c,
	0x403f8, 0x403fc,
	0x41304, 0x413c4,
	0x41400, 0x4140c,
	0x41414, 0x4141c,
	0x41480, 0x414d0,
	0x44000, 0x44054,
	0x4405c, 0x44078,
	0x440c0, 0x44174,
	0x44180, 0x441ac,
	0x441b4, 0x441b8,
	0x441c0, 0x44254,
	0x4425c, 0x44278,
	0x442c0, 0x44374,
	0x44380, 0x443ac,
	0x443b4, 0x443b8,
	0x443c0, 0x44454,
	0x4445c, 0x44478,
	0x444c0, 0x44574,
	0x44580, 0x445ac,
	0x445b4, 0x445b8,
	0x445c0, 0x44654,
	0x4465c, 0x44678,
	0x446c0, 0x44774,
	0x44780, 0x447ac,
	0x447b4, 0x447b8,
	0x447c0, 0x44854,
	0x4485c, 0x44878,
	0x448c0, 0x44974,
	0x44980, 0x449ac,
	0x449b4, 0x449b8,
	0x449c0, 0x449fc,
	0x45000, 0x45004,
	0x45010, 0x45030,
	0x45040, 0x45060,
	0x45068, 0x45068,
	0x45080, 0x45084,
	0x450a0, 0x450b0,
	0x45200, 0x45204,
	0x45210, 0x45230,
	0x45240, 0x45260,
	0x45268, 0x45268,
	0x45280, 0x45284,
	0x452a0, 0x452b0,
	0x460c0, 0x460e4,
	0x47000, 0x4703c,
	0x47044, 0x4708c,
	0x47200, 0x47250,
	0x47400, 0x47408,
	0x47414, 0x47420,
	0x47600, 0x47618,
	0x47800, 0x47814,
	0x48000, 0x4800c,
	0x48040, 0x48050,
	0x48060, 0x48068,
	0x4807c, 0x4808c,
	0x48094, 0x480b0,
	0x480c0, 0x48144,
	0x48180, 0x4818c,
	0x48200, 0x48254,
	0x48260, 0x48264,
	0x48270, 0x48288,
	0x48290, 0x48298,
	0x482ac, 0x482c8,
	0x482d0, 0x482e0,
	0x482f0, 0x482f0,
	0x48300, 0x4833c,
	0x483f8, 0x483fc,
	0x49304, 0x493c4,
	0x49400, 0x4940c,
	0x49414, 0x4941c,
	0x49480, 0x494d0,
	0x4c000, 0x4c054,
	0x4c05c, 0x4c078,
	0x4c0c0, 0x4c174,
	0x4c180, 0x4c1ac,
	0x4c1b4, 0x4c1b8,
	0x4c1c0, 0x4c254,
	0x4c25c, 0x4c278,
	0x4c2c0, 0x4c374,
	0x4c380, 0x4c3ac,
	0x4c3b4, 0x4c3b8,
	0x4c3c0, 0x4c454,
	0x4c45c, 0x4c478,
	0x4c4c0, 0x4c574,
	0x4c580, 0x4c5ac,
	0x4c5b4, 0x4c5b8,
	0x4c5c0, 0x4c654,
	0x4c65c, 0x4c678,
	0x4c6c0, 0x4c774,
	0x4c780, 0x4c7ac,
	0x4c7b4, 0x4c7b8,
	0x4c7c0, 0x4c854,
	0x4c85c, 0x4c878,
	0x4c8c0, 0x4c974,
	0x4c980, 0x4c9ac,
	0x4c9b4, 0x4c9b8,
	0x4c9c0, 0x4c9fc,
	0x4d000, 0x4d004,
	0x4d010, 0x4d030,
	0x4d040, 0x4d060,
	0x4d068, 0x4d068,
	0x4d080, 0x4d084,
	0x4d0a0, 0x4d0b0,
	0x4d200, 0x4d204,
	0x4d210, 0x4d230,
	0x4d240, 0x4d260,
	0x4d268, 0x4d268,
	0x4d280, 0x4d284,
	0x4d2a0, 0x4d2b0,
	0x4e0c0, 0x4e0e4,
	0x4f000, 0x4f03c,
	0x4f044, 0x4f08c,
	0x4f200, 0x4f250,
	0x4f400, 0x4f408,
	0x4f414, 0x4f420,
	0x4f600, 0x4f618,
	0x4f800, 0x4f814,
	0x50000, 0x50084,
	0x50090, 0x500cc,
	0x50400, 0x50400,
	0x50800, 0x50884,
	0x50890, 0x508cc,
	0x50c00, 0x50c00,
	0x51000, 0x5101c,
	0x51300, 0x51308,
};
  1953. static const unsigned int t6_reg_ranges[] = {
  1954. 0x1008, 0x101c,
  1955. 0x1024, 0x10a8,
  1956. 0x10b4, 0x10f8,
  1957. 0x1100, 0x1114,
  1958. 0x111c, 0x112c,
  1959. 0x1138, 0x113c,
  1960. 0x1144, 0x114c,
  1961. 0x1180, 0x1184,
  1962. 0x1190, 0x1194,
  1963. 0x11a0, 0x11a4,
  1964. 0x11b0, 0x11b4,
  1965. 0x11fc, 0x1258,
  1966. 0x1280, 0x12d4,
  1967. 0x12d9, 0x12d9,
  1968. 0x12de, 0x12de,
  1969. 0x12e3, 0x12e3,
  1970. 0x12e8, 0x133c,
  1971. 0x1800, 0x18fc,
  1972. 0x3000, 0x302c,
  1973. 0x3060, 0x30b0,
  1974. 0x30b8, 0x30d8,
  1975. 0x30e0, 0x30fc,
  1976. 0x3140, 0x357c,
  1977. 0x35a8, 0x35cc,
  1978. 0x35ec, 0x35ec,
  1979. 0x3600, 0x5624,
  1980. 0x56cc, 0x56ec,
  1981. 0x56f4, 0x5720,
  1982. 0x5728, 0x575c,
  1983. 0x580c, 0x5814,
  1984. 0x5890, 0x589c,
  1985. 0x58a4, 0x58ac,
  1986. 0x58b8, 0x58bc,
  1987. 0x5940, 0x595c,
  1988. 0x5980, 0x598c,
  1989. 0x59b0, 0x59c8,
  1990. 0x59d0, 0x59dc,
  1991. 0x59fc, 0x5a18,
  1992. 0x5a60, 0x5a6c,
  1993. 0x5a80, 0x5a8c,
  1994. 0x5a94, 0x5a9c,
  1995. 0x5b94, 0x5bfc,
  1996. 0x5c10, 0x5e48,
  1997. 0x5e50, 0x5e94,
  1998. 0x5ea0, 0x5eb0,
  1999. 0x5ec0, 0x5ec0,
  2000. 0x5ec8, 0x5ed0,
  2001. 0x6000, 0x6020,
  2002. 0x6028, 0x6040,
  2003. 0x6058, 0x609c,
  2004. 0x60a8, 0x619c,
  2005. 0x7700, 0x7798,
  2006. 0x77c0, 0x7880,
  2007. 0x78cc, 0x78fc,
  2008. 0x7b00, 0x7b58,
  2009. 0x7b60, 0x7b84,
  2010. 0x7b8c, 0x7c54,
  2011. 0x7d00, 0x7d38,
  2012. 0x7d40, 0x7d84,
  2013. 0x7d8c, 0x7ddc,
  2014. 0x7de4, 0x7e04,
  2015. 0x7e10, 0x7e1c,
  2016. 0x7e24, 0x7e38,
  2017. 0x7e40, 0x7e44,
  2018. 0x7e4c, 0x7e78,
  2019. 0x7e80, 0x7edc,
  2020. 0x7ee8, 0x7efc,
  2021. 0x8dc0, 0x8de4,
  2022. 0x8df8, 0x8e04,
  2023. 0x8e10, 0x8e84,
  2024. 0x8ea0, 0x8f88,
  2025. 0x8fb8, 0x9058,
  2026. 0x9060, 0x9060,
  2027. 0x9068, 0x90f8,
  2028. 0x9100, 0x9124,
  2029. 0x9400, 0x9470,
  2030. 0x9600, 0x9600,
  2031. 0x9608, 0x9638,
  2032. 0x9640, 0x9704,
  2033. 0x9710, 0x971c,
  2034. 0x9800, 0x9808,
  2035. 0x9820, 0x983c,
  2036. 0x9850, 0x9864,
  2037. 0x9c00, 0x9c6c,
  2038. 0x9c80, 0x9cec,
  2039. 0x9d00, 0x9d6c,
  2040. 0x9d80, 0x9dec,
  2041. 0x9e00, 0x9e6c,
  2042. 0x9e80, 0x9eec,
  2043. 0x9f00, 0x9f6c,
  2044. 0x9f80, 0xa020,
  2045. 0xd004, 0xd03c,
  2046. 0xd100, 0xd118,
  2047. 0xd200, 0xd214,
  2048. 0xd220, 0xd234,
  2049. 0xd240, 0xd254,
  2050. 0xd260, 0xd274,
  2051. 0xd280, 0xd294,
  2052. 0xd2a0, 0xd2b4,
  2053. 0xd2c0, 0xd2d4,
  2054. 0xd2e0, 0xd2f4,
  2055. 0xd300, 0xd31c,
  2056. 0xdfc0, 0xdfe0,
  2057. 0xe000, 0xf008,
  2058. 0x11000, 0x11014,
  2059. 0x11048, 0x1106c,
  2060. 0x11074, 0x11088,
  2061. 0x11098, 0x11120,
  2062. 0x1112c, 0x1117c,
  2063. 0x11190, 0x112e0,
  2064. 0x11300, 0x1130c,
  2065. 0x12000, 0x1206c,
  2066. 0x19040, 0x1906c,
  2067. 0x19078, 0x19080,
  2068. 0x1908c, 0x190e8,
  2069. 0x190f0, 0x190f8,
  2070. 0x19100, 0x19110,
  2071. 0x19120, 0x19124,
  2072. 0x19150, 0x19194,
  2073. 0x1919c, 0x191b0,
  2074. 0x191d0, 0x191e8,
  2075. 0x19238, 0x19290,
  2076. 0x192a4, 0x192b0,
  2077. 0x192bc, 0x192bc,
  2078. 0x19348, 0x1934c,
  2079. 0x193f8, 0x19418,
  2080. 0x19420, 0x19428,
  2081. 0x19430, 0x19444,
  2082. 0x1944c, 0x1946c,
  2083. 0x19474, 0x19474,
  2084. 0x19490, 0x194cc,
  2085. 0x194f0, 0x194f8,
  2086. 0x19c00, 0x19c48,
  2087. 0x19c50, 0x19c80,
  2088. 0x19c94, 0x19c98,
  2089. 0x19ca0, 0x19cbc,
  2090. 0x19ce4, 0x19ce4,
  2091. 0x19cf0, 0x19cf8,
  2092. 0x19d00, 0x19d28,
  2093. 0x19d50, 0x19d78,
  2094. 0x19d94, 0x19d98,
  2095. 0x19da0, 0x19dc8,
  2096. 0x19df0, 0x19e10,
  2097. 0x19e50, 0x19e6c,
  2098. 0x19ea0, 0x19ebc,
  2099. 0x19ec4, 0x19ef4,
  2100. 0x19f04, 0x19f2c,
  2101. 0x19f34, 0x19f34,
  2102. 0x19f40, 0x19f50,
  2103. 0x19f90, 0x19fac,
  2104. 0x19fc4, 0x19fc8,
  2105. 0x19fd0, 0x19fe4,
  2106. 0x1a000, 0x1a004,
  2107. 0x1a010, 0x1a06c,
  2108. 0x1a0b0, 0x1a0e4,
  2109. 0x1a0ec, 0x1a0f8,
  2110. 0x1a100, 0x1a108,
  2111. 0x1a114, 0x1a120,
  2112. 0x1a128, 0x1a130,
  2113. 0x1a138, 0x1a138,
  2114. 0x1a190, 0x1a1c4,
  2115. 0x1a1fc, 0x1a1fc,
  2116. 0x1e008, 0x1e00c,
  2117. 0x1e040, 0x1e044,
  2118. 0x1e04c, 0x1e04c,
  2119. 0x1e284, 0x1e290,
  2120. 0x1e2c0, 0x1e2c0,
  2121. 0x1e2e0, 0x1e2e0,
  2122. 0x1e300, 0x1e384,
  2123. 0x1e3c0, 0x1e3c8,
  2124. 0x1e408, 0x1e40c,
  2125. 0x1e440, 0x1e444,
  2126. 0x1e44c, 0x1e44c,
  2127. 0x1e684, 0x1e690,
  2128. 0x1e6c0, 0x1e6c0,
  2129. 0x1e6e0, 0x1e6e0,
  2130. 0x1e700, 0x1e784,
  2131. 0x1e7c0, 0x1e7c8,
  2132. 0x1e808, 0x1e80c,
  2133. 0x1e840, 0x1e844,
  2134. 0x1e84c, 0x1e84c,
  2135. 0x1ea84, 0x1ea90,
  2136. 0x1eac0, 0x1eac0,
  2137. 0x1eae0, 0x1eae0,
  2138. 0x1eb00, 0x1eb84,
  2139. 0x1ebc0, 0x1ebc8,
  2140. 0x1ec08, 0x1ec0c,
  2141. 0x1ec40, 0x1ec44,
  2142. 0x1ec4c, 0x1ec4c,
  2143. 0x1ee84, 0x1ee90,
  2144. 0x1eec0, 0x1eec0,
  2145. 0x1eee0, 0x1eee0,
  2146. 0x1ef00, 0x1ef84,
  2147. 0x1efc0, 0x1efc8,
  2148. 0x1f008, 0x1f00c,
  2149. 0x1f040, 0x1f044,
  2150. 0x1f04c, 0x1f04c,
  2151. 0x1f284, 0x1f290,
  2152. 0x1f2c0, 0x1f2c0,
  2153. 0x1f2e0, 0x1f2e0,
  2154. 0x1f300, 0x1f384,
  2155. 0x1f3c0, 0x1f3c8,
  2156. 0x1f408, 0x1f40c,
  2157. 0x1f440, 0x1f444,
  2158. 0x1f44c, 0x1f44c,
  2159. 0x1f684, 0x1f690,
  2160. 0x1f6c0, 0x1f6c0,
  2161. 0x1f6e0, 0x1f6e0,
  2162. 0x1f700, 0x1f784,
  2163. 0x1f7c0, 0x1f7c8,
  2164. 0x1f808, 0x1f80c,
  2165. 0x1f840, 0x1f844,
  2166. 0x1f84c, 0x1f84c,
  2167. 0x1fa84, 0x1fa90,
  2168. 0x1fac0, 0x1fac0,
  2169. 0x1fae0, 0x1fae0,
  2170. 0x1fb00, 0x1fb84,
  2171. 0x1fbc0, 0x1fbc8,
  2172. 0x1fc08, 0x1fc0c,
  2173. 0x1fc40, 0x1fc44,
  2174. 0x1fc4c, 0x1fc4c,
  2175. 0x1fe84, 0x1fe90,
  2176. 0x1fec0, 0x1fec0,
  2177. 0x1fee0, 0x1fee0,
  2178. 0x1ff00, 0x1ff84,
  2179. 0x1ffc0, 0x1ffc8,
  2180. 0x30000, 0x30030,
  2181. 0x30038, 0x30038,
  2182. 0x30040, 0x30040,
  2183. 0x30048, 0x30048,
  2184. 0x30050, 0x30050,
  2185. 0x3005c, 0x30060,
  2186. 0x30068, 0x30068,
  2187. 0x30070, 0x30070,
  2188. 0x30100, 0x30168,
  2189. 0x30190, 0x301a0,
  2190. 0x301a8, 0x301b8,
  2191. 0x301c4, 0x301c8,
  2192. 0x301d0, 0x301d0,
  2193. 0x30200, 0x30320,
  2194. 0x30400, 0x304b4,
  2195. 0x304c0, 0x3052c,
  2196. 0x30540, 0x3061c,
  2197. 0x30800, 0x308a0,
  2198. 0x308c0, 0x30908,
  2199. 0x30910, 0x309b8,
  2200. 0x30a00, 0x30a04,
  2201. 0x30a0c, 0x30a14,
  2202. 0x30a1c, 0x30a2c,
  2203. 0x30a44, 0x30a50,
  2204. 0x30a74, 0x30a74,
  2205. 0x30a7c, 0x30afc,
  2206. 0x30b08, 0x30c24,
  2207. 0x30d00, 0x30d14,
  2208. 0x30d1c, 0x30d3c,
  2209. 0x30d44, 0x30d4c,
  2210. 0x30d54, 0x30d74,
  2211. 0x30d7c, 0x30d7c,
  2212. 0x30de0, 0x30de0,
  2213. 0x30e00, 0x30ed4,
  2214. 0x30f00, 0x30fa4,
  2215. 0x30fc0, 0x30fc4,
  2216. 0x31000, 0x31004,
  2217. 0x31080, 0x310fc,
  2218. 0x31208, 0x31220,
  2219. 0x3123c, 0x31254,
  2220. 0x31300, 0x31300,
  2221. 0x31308, 0x3131c,
  2222. 0x31338, 0x3133c,
  2223. 0x31380, 0x31380,
  2224. 0x31388, 0x313a8,
  2225. 0x313b4, 0x313b4,
  2226. 0x31400, 0x31420,
  2227. 0x31438, 0x3143c,
  2228. 0x31480, 0x31480,
  2229. 0x314a8, 0x314a8,
  2230. 0x314b0, 0x314b4,
  2231. 0x314c8, 0x314d4,
  2232. 0x31a40, 0x31a4c,
  2233. 0x31af0, 0x31b20,
  2234. 0x31b38, 0x31b3c,
  2235. 0x31b80, 0x31b80,
  2236. 0x31ba8, 0x31ba8,
  2237. 0x31bb0, 0x31bb4,
  2238. 0x31bc8, 0x31bd4,
  2239. 0x32140, 0x3218c,
  2240. 0x321f0, 0x321f4,
  2241. 0x32200, 0x32200,
  2242. 0x32218, 0x32218,
  2243. 0x32400, 0x32400,
  2244. 0x32408, 0x3241c,
  2245. 0x32618, 0x32620,
  2246. 0x32664, 0x32664,
  2247. 0x326a8, 0x326a8,
  2248. 0x326ec, 0x326ec,
  2249. 0x32a00, 0x32abc,
  2250. 0x32b00, 0x32b38,
  2251. 0x32b40, 0x32b58,
  2252. 0x32b60, 0x32b78,
  2253. 0x32c00, 0x32c00,
  2254. 0x32c08, 0x32c3c,
  2255. 0x32e00, 0x32e2c,
  2256. 0x32f00, 0x32f2c,
  2257. 0x33000, 0x3302c,
  2258. 0x33034, 0x33050,
  2259. 0x33058, 0x33058,
  2260. 0x33060, 0x3308c,
  2261. 0x3309c, 0x330ac,
  2262. 0x330c0, 0x330c0,
  2263. 0x330c8, 0x330d0,
  2264. 0x330d8, 0x330e0,
  2265. 0x330ec, 0x3312c,
  2266. 0x33134, 0x33150,
  2267. 0x33158, 0x33158,
  2268. 0x33160, 0x3318c,
  2269. 0x3319c, 0x331ac,
  2270. 0x331c0, 0x331c0,
  2271. 0x331c8, 0x331d0,
  2272. 0x331d8, 0x331e0,
  2273. 0x331ec, 0x33290,
  2274. 0x33298, 0x332c4,
  2275. 0x332e4, 0x33390,
  2276. 0x33398, 0x333c4,
  2277. 0x333e4, 0x3342c,
  2278. 0x33434, 0x33450,
  2279. 0x33458, 0x33458,
  2280. 0x33460, 0x3348c,
  2281. 0x3349c, 0x334ac,
  2282. 0x334c0, 0x334c0,
  2283. 0x334c8, 0x334d0,
  2284. 0x334d8, 0x334e0,
  2285. 0x334ec, 0x3352c,
  2286. 0x33534, 0x33550,
  2287. 0x33558, 0x33558,
  2288. 0x33560, 0x3358c,
  2289. 0x3359c, 0x335ac,
  2290. 0x335c0, 0x335c0,
  2291. 0x335c8, 0x335d0,
  2292. 0x335d8, 0x335e0,
  2293. 0x335ec, 0x33690,
  2294. 0x33698, 0x336c4,
  2295. 0x336e4, 0x33790,
  2296. 0x33798, 0x337c4,
  2297. 0x337e4, 0x337fc,
  2298. 0x33814, 0x33814,
  2299. 0x33854, 0x33868,
  2300. 0x33880, 0x3388c,
  2301. 0x338c0, 0x338d0,
  2302. 0x338e8, 0x338ec,
  2303. 0x33900, 0x3392c,
  2304. 0x33934, 0x33950,
  2305. 0x33958, 0x33958,
  2306. 0x33960, 0x3398c,
  2307. 0x3399c, 0x339ac,
  2308. 0x339c0, 0x339c0,
  2309. 0x339c8, 0x339d0,
  2310. 0x339d8, 0x339e0,
  2311. 0x339ec, 0x33a90,
  2312. 0x33a98, 0x33ac4,
  2313. 0x33ae4, 0x33b10,
  2314. 0x33b24, 0x33b28,
  2315. 0x33b38, 0x33b50,
  2316. 0x33bf0, 0x33c10,
  2317. 0x33c24, 0x33c28,
  2318. 0x33c38, 0x33c50,
  2319. 0x33cf0, 0x33cfc,
  2320. 0x34000, 0x34030,
  2321. 0x34038, 0x34038,
  2322. 0x34040, 0x34040,
  2323. 0x34048, 0x34048,
  2324. 0x34050, 0x34050,
  2325. 0x3405c, 0x34060,
  2326. 0x34068, 0x34068,
  2327. 0x34070, 0x34070,
  2328. 0x34100, 0x34168,
  2329. 0x34190, 0x341a0,
  2330. 0x341a8, 0x341b8,
  2331. 0x341c4, 0x341c8,
  2332. 0x341d0, 0x341d0,
  2333. 0x34200, 0x34320,
  2334. 0x34400, 0x344b4,
  2335. 0x344c0, 0x3452c,
  2336. 0x34540, 0x3461c,
  2337. 0x34800, 0x348a0,
  2338. 0x348c0, 0x34908,
  2339. 0x34910, 0x349b8,
  2340. 0x34a00, 0x34a04,
  2341. 0x34a0c, 0x34a14,
  2342. 0x34a1c, 0x34a2c,
  2343. 0x34a44, 0x34a50,
  2344. 0x34a74, 0x34a74,
  2345. 0x34a7c, 0x34afc,
  2346. 0x34b08, 0x34c24,
  2347. 0x34d00, 0x34d14,
  2348. 0x34d1c, 0x34d3c,
  2349. 0x34d44, 0x34d4c,
  2350. 0x34d54, 0x34d74,
  2351. 0x34d7c, 0x34d7c,
  2352. 0x34de0, 0x34de0,
  2353. 0x34e00, 0x34ed4,
  2354. 0x34f00, 0x34fa4,
  2355. 0x34fc0, 0x34fc4,
  2356. 0x35000, 0x35004,
  2357. 0x35080, 0x350fc,
  2358. 0x35208, 0x35220,
  2359. 0x3523c, 0x35254,
  2360. 0x35300, 0x35300,
  2361. 0x35308, 0x3531c,
  2362. 0x35338, 0x3533c,
  2363. 0x35380, 0x35380,
  2364. 0x35388, 0x353a8,
  2365. 0x353b4, 0x353b4,
  2366. 0x35400, 0x35420,
  2367. 0x35438, 0x3543c,
  2368. 0x35480, 0x35480,
  2369. 0x354a8, 0x354a8,
  2370. 0x354b0, 0x354b4,
  2371. 0x354c8, 0x354d4,
  2372. 0x35a40, 0x35a4c,
  2373. 0x35af0, 0x35b20,
  2374. 0x35b38, 0x35b3c,
  2375. 0x35b80, 0x35b80,
  2376. 0x35ba8, 0x35ba8,
  2377. 0x35bb0, 0x35bb4,
  2378. 0x35bc8, 0x35bd4,
  2379. 0x36140, 0x3618c,
  2380. 0x361f0, 0x361f4,
  2381. 0x36200, 0x36200,
  2382. 0x36218, 0x36218,
  2383. 0x36400, 0x36400,
  2384. 0x36408, 0x3641c,
  2385. 0x36618, 0x36620,
  2386. 0x36664, 0x36664,
  2387. 0x366a8, 0x366a8,
  2388. 0x366ec, 0x366ec,
  2389. 0x36a00, 0x36abc,
  2390. 0x36b00, 0x36b38,
  2391. 0x36b40, 0x36b58,
  2392. 0x36b60, 0x36b78,
  2393. 0x36c00, 0x36c00,
  2394. 0x36c08, 0x36c3c,
  2395. 0x36e00, 0x36e2c,
  2396. 0x36f00, 0x36f2c,
  2397. 0x37000, 0x3702c,
  2398. 0x37034, 0x37050,
  2399. 0x37058, 0x37058,
  2400. 0x37060, 0x3708c,
  2401. 0x3709c, 0x370ac,
  2402. 0x370c0, 0x370c0,
  2403. 0x370c8, 0x370d0,
  2404. 0x370d8, 0x370e0,
  2405. 0x370ec, 0x3712c,
  2406. 0x37134, 0x37150,
  2407. 0x37158, 0x37158,
  2408. 0x37160, 0x3718c,
  2409. 0x3719c, 0x371ac,
  2410. 0x371c0, 0x371c0,
  2411. 0x371c8, 0x371d0,
  2412. 0x371d8, 0x371e0,
  2413. 0x371ec, 0x37290,
  2414. 0x37298, 0x372c4,
  2415. 0x372e4, 0x37390,
  2416. 0x37398, 0x373c4,
  2417. 0x373e4, 0x3742c,
  2418. 0x37434, 0x37450,
  2419. 0x37458, 0x37458,
  2420. 0x37460, 0x3748c,
  2421. 0x3749c, 0x374ac,
  2422. 0x374c0, 0x374c0,
  2423. 0x374c8, 0x374d0,
  2424. 0x374d8, 0x374e0,
  2425. 0x374ec, 0x3752c,
  2426. 0x37534, 0x37550,
  2427. 0x37558, 0x37558,
  2428. 0x37560, 0x3758c,
  2429. 0x3759c, 0x375ac,
  2430. 0x375c0, 0x375c0,
  2431. 0x375c8, 0x375d0,
  2432. 0x375d8, 0x375e0,
  2433. 0x375ec, 0x37690,
  2434. 0x37698, 0x376c4,
  2435. 0x376e4, 0x37790,
  2436. 0x37798, 0x377c4,
  2437. 0x377e4, 0x377fc,
  2438. 0x37814, 0x37814,
  2439. 0x37854, 0x37868,
  2440. 0x37880, 0x3788c,
  2441. 0x378c0, 0x378d0,
  2442. 0x378e8, 0x378ec,
  2443. 0x37900, 0x3792c,
  2444. 0x37934, 0x37950,
  2445. 0x37958, 0x37958,
  2446. 0x37960, 0x3798c,
  2447. 0x3799c, 0x379ac,
  2448. 0x379c0, 0x379c0,
  2449. 0x379c8, 0x379d0,
  2450. 0x379d8, 0x379e0,
  2451. 0x379ec, 0x37a90,
  2452. 0x37a98, 0x37ac4,
  2453. 0x37ae4, 0x37b10,
  2454. 0x37b24, 0x37b28,
  2455. 0x37b38, 0x37b50,
  2456. 0x37bf0, 0x37c10,
  2457. 0x37c24, 0x37c28,
  2458. 0x37c38, 0x37c50,
  2459. 0x37cf0, 0x37cfc,
  2460. 0x40040, 0x40040,
  2461. 0x40080, 0x40084,
  2462. 0x40100, 0x40100,
  2463. 0x40140, 0x401bc,
  2464. 0x40200, 0x40214,
  2465. 0x40228, 0x40228,
  2466. 0x40240, 0x40258,
  2467. 0x40280, 0x40280,
  2468. 0x40304, 0x40304,
  2469. 0x40330, 0x4033c,
  2470. 0x41304, 0x413b8,
  2471. 0x413c0, 0x413c8,
  2472. 0x413d0, 0x413dc,
  2473. 0x413f0, 0x413f0,
  2474. 0x41400, 0x4140c,
  2475. 0x41414, 0x4141c,
  2476. 0x41480, 0x414d0,
  2477. 0x44000, 0x4407c,
  2478. 0x440c0, 0x441ac,
  2479. 0x441b4, 0x4427c,
  2480. 0x442c0, 0x443ac,
  2481. 0x443b4, 0x4447c,
  2482. 0x444c0, 0x445ac,
  2483. 0x445b4, 0x4467c,
  2484. 0x446c0, 0x447ac,
  2485. 0x447b4, 0x4487c,
  2486. 0x448c0, 0x449ac,
  2487. 0x449b4, 0x44a7c,
  2488. 0x44ac0, 0x44bac,
  2489. 0x44bb4, 0x44c7c,
  2490. 0x44cc0, 0x44dac,
  2491. 0x44db4, 0x44e7c,
  2492. 0x44ec0, 0x44fac,
  2493. 0x44fb4, 0x4507c,
  2494. 0x450c0, 0x451ac,
  2495. 0x451b4, 0x451fc,
  2496. 0x45800, 0x45804,
  2497. 0x45810, 0x45830,
  2498. 0x45840, 0x45860,
  2499. 0x45868, 0x45868,
  2500. 0x45880, 0x45884,
  2501. 0x458a0, 0x458b0,
  2502. 0x45a00, 0x45a04,
  2503. 0x45a10, 0x45a30,
  2504. 0x45a40, 0x45a60,
  2505. 0x45a68, 0x45a68,
  2506. 0x45a80, 0x45a84,
  2507. 0x45aa0, 0x45ab0,
  2508. 0x460c0, 0x460e4,
  2509. 0x47000, 0x4703c,
  2510. 0x47044, 0x4708c,
  2511. 0x47200, 0x47250,
  2512. 0x47400, 0x47408,
  2513. 0x47414, 0x47420,
  2514. 0x47600, 0x47618,
  2515. 0x47800, 0x47814,
  2516. 0x47820, 0x4782c,
  2517. 0x50000, 0x50084,
  2518. 0x50090, 0x500cc,
  2519. 0x50300, 0x50384,
  2520. 0x50400, 0x50400,
  2521. 0x50800, 0x50884,
  2522. 0x50890, 0x508cc,
  2523. 0x50b00, 0x50b84,
  2524. 0x50c00, 0x50c00,
  2525. 0x51000, 0x51020,
  2526. 0x51028, 0x510b0,
  2527. 0x51300, 0x51324,
  2528. };
  2529. u32 *buf_end = (u32 *)((char *)buf + buf_size);
  2530. const unsigned int *reg_ranges;
  2531. int reg_ranges_size, range;
  2532. unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
  2533. /* Select the right set of register ranges to dump depending on the
  2534. * adapter chip type.
  2535. */
  2536. switch (chip_version) {
  2537. case CHELSIO_T4:
  2538. reg_ranges = t4_reg_ranges;
  2539. reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
  2540. break;
  2541. case CHELSIO_T5:
  2542. reg_ranges = t5_reg_ranges;
  2543. reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
  2544. break;
  2545. case CHELSIO_T6:
  2546. reg_ranges = t6_reg_ranges;
  2547. reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
  2548. break;
  2549. default:
  2550. dev_err(adap->pdev_dev,
  2551. "Unsupported chip version %d\n", chip_version);
  2552. return;
  2553. }
  2554. /* Clear the register buffer and insert the appropriate register
  2555. * values selected by the above register ranges.
  2556. */
  2557. memset(buf, 0, buf_size);
  2558. for (range = 0; range < reg_ranges_size; range += 2) {
  2559. unsigned int reg = reg_ranges[range];
  2560. unsigned int last_reg = reg_ranges[range + 1];
  2561. u32 *bufp = (u32 *)((char *)buf + reg);
  2562. /* Iterate across the register range filling in the register
  2563. * buffer but don't write past the end of the register buffer.
  2564. */
  2565. while (reg <= last_reg && bufp < buf_end) {
  2566. *bufp++ = t4_read_reg(adap, reg);
  2567. reg += sizeof(u32);
  2568. }
  2569. }
  2570. }
/* Serial EEPROM / VPD layout constants */
#define EEPROM_STAT_ADDR   0x7bfc	/* VPD address of the EEPROM write-protect status word */
#define VPD_SIZE           0x800	/* real VPD area size reported to the PCI core */
#define VPD_BASE           0x400	/* offset of card VPD data on current cards */
#define VPD_BASE_OLD       0		/* offset of card VPD data on early cards */
#define VPD_LEN            1024		/* number of VPD bytes read and parsed */
#define CHELSIO_VPD_UNIQUE_ID 0x82	/* first byte of a Chelsio VPD (PCI ID-string tag) */
  2577. /**
  2578. * t4_seeprom_wp - enable/disable EEPROM write protection
  2579. * @adapter: the adapter
  2580. * @enable: whether to enable or disable write protection
  2581. *
  2582. * Enables or disables write protection on the serial EEPROM.
  2583. */
  2584. int t4_seeprom_wp(struct adapter *adapter, bool enable)
  2585. {
  2586. unsigned int v = enable ? 0xc : 0;
  2587. int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
  2588. return ret < 0 ? ret : 0;
  2589. }
  2590. /**
  2591. * t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
  2592. * @adapter: adapter to read
  2593. * @p: where to store the parameters
  2594. *
  2595. * Reads card parameters stored in VPD EEPROM.
  2596. */
  2597. int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
  2598. {
  2599. int i, ret = 0, addr;
  2600. int ec, sn, pn, na;
  2601. u8 *vpd, csum;
  2602. unsigned int vpdr_len, kw_offset, id_len;
  2603. vpd = vmalloc(VPD_LEN);
  2604. if (!vpd)
  2605. return -ENOMEM;
  2606. /* We have two VPD data structures stored in the adapter VPD area.
  2607. * By default, Linux calculates the size of the VPD area by traversing
  2608. * the first VPD area at offset 0x0, so we need to tell the OS what
  2609. * our real VPD size is.
  2610. */
  2611. ret = pci_set_vpd_size(adapter->pdev, VPD_SIZE);
  2612. if (ret < 0)
  2613. goto out;
  2614. /* Card information normally starts at VPD_BASE but early cards had
  2615. * it at 0.
  2616. */
  2617. ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
  2618. if (ret < 0)
  2619. goto out;
  2620. /* The VPD shall have a unique identifier specified by the PCI SIG.
  2621. * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
  2622. * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
  2623. * is expected to automatically put this entry at the
  2624. * beginning of the VPD.
  2625. */
  2626. addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
  2627. ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
  2628. if (ret < 0)
  2629. goto out;
  2630. if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
  2631. dev_err(adapter->pdev_dev, "missing VPD ID string\n");
  2632. ret = -EINVAL;
  2633. goto out;
  2634. }
  2635. id_len = pci_vpd_lrdt_size(vpd);
  2636. if (id_len > ID_LEN)
  2637. id_len = ID_LEN;
  2638. i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
  2639. if (i < 0) {
  2640. dev_err(adapter->pdev_dev, "missing VPD-R section\n");
  2641. ret = -EINVAL;
  2642. goto out;
  2643. }
  2644. vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
  2645. kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
  2646. if (vpdr_len + kw_offset > VPD_LEN) {
  2647. dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
  2648. ret = -EINVAL;
  2649. goto out;
  2650. }
  2651. #define FIND_VPD_KW(var, name) do { \
  2652. var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
  2653. if (var < 0) { \
  2654. dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
  2655. ret = -EINVAL; \
  2656. goto out; \
  2657. } \
  2658. var += PCI_VPD_INFO_FLD_HDR_SIZE; \
  2659. } while (0)
  2660. FIND_VPD_KW(i, "RV");
  2661. for (csum = 0; i >= 0; i--)
  2662. csum += vpd[i];
  2663. if (csum) {
  2664. dev_err(adapter->pdev_dev,
  2665. "corrupted VPD EEPROM, actual csum %u\n", csum);
  2666. ret = -EINVAL;
  2667. goto out;
  2668. }
  2669. FIND_VPD_KW(ec, "EC");
  2670. FIND_VPD_KW(sn, "SN");
  2671. FIND_VPD_KW(pn, "PN");
  2672. FIND_VPD_KW(na, "NA");
  2673. #undef FIND_VPD_KW
  2674. memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
  2675. strim(p->id);
  2676. memcpy(p->ec, vpd + ec, EC_LEN);
  2677. strim(p->ec);
  2678. i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
  2679. memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
  2680. strim(p->sn);
  2681. i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
  2682. memcpy(p->pn, vpd + pn, min(i, PN_LEN));
  2683. strim(p->pn);
  2684. memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
  2685. strim((char *)p->na);
  2686. out:
  2687. vfree(vpd);
  2688. return ret < 0 ? ret : 0;
  2689. }
  2690. /**
  2691. * t4_get_vpd_params - read VPD parameters & retrieve Core Clock
  2692. * @adapter: adapter to read
  2693. * @p: where to store the parameters
  2694. *
  2695. * Reads card parameters stored in VPD EEPROM and retrieves the Core
  2696. * Clock. This can only be called after a connection to the firmware
  2697. * is established.
  2698. */
  2699. int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
  2700. {
  2701. u32 cclk_param, cclk_val;
  2702. int ret;
  2703. /* Grab the raw VPD parameters.
  2704. */
  2705. ret = t4_get_raw_vpd_params(adapter, p);
  2706. if (ret)
  2707. return ret;
  2708. /* Ask firmware for the Core Clock since it knows how to translate the
  2709. * Reference Clock ('V2') VPD field into a Core Clock value ...
  2710. */
  2711. cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
  2712. FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
  2713. ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
  2714. 1, &cclk_param, &cclk_val);
  2715. if (ret)
  2716. return ret;
  2717. p->cclk = cclk_val;
  2718. return 0;
  2719. }
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */

	/* largest firmware image the flash layout can hold */
	FW_MAX_SIZE = 16 * SF_SEC_SIZE,
};
  2733. /**
  2734. * sf1_read - read data from the serial flash
  2735. * @adapter: the adapter
  2736. * @byte_cnt: number of bytes to read
  2737. * @cont: whether another operation will be chained
  2738. * @lock: whether to lock SF for PL access only
  2739. * @valp: where to store the read data
  2740. *
  2741. * Reads up to 4 bytes of data from the serial flash. The location of
  2742. * the read needs to be specified prior to calling this by issuing the
  2743. * appropriate commands to the serial flash.
  2744. */
  2745. static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
  2746. int lock, u32 *valp)
  2747. {
  2748. int ret;
  2749. if (!byte_cnt || byte_cnt > 4)
  2750. return -EINVAL;
  2751. if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
  2752. return -EBUSY;
  2753. t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
  2754. SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
  2755. ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
  2756. if (!ret)
  2757. *valp = t4_read_reg(adapter, SF_DATA_A);
  2758. return ret;
  2759. }
  2760. /**
  2761. * sf1_write - write data to the serial flash
  2762. * @adapter: the adapter
  2763. * @byte_cnt: number of bytes to write
  2764. * @cont: whether another operation will be chained
  2765. * @lock: whether to lock SF for PL access only
  2766. * @val: value to write
  2767. *
  2768. * Writes up to 4 bytes of data to the serial flash. The location of
  2769. * the write needs to be specified prior to calling this by issuing the
  2770. * appropriate commands to the serial flash.
  2771. */
  2772. static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
  2773. int lock, u32 val)
  2774. {
  2775. if (!byte_cnt || byte_cnt > 4)
  2776. return -EINVAL;
  2777. if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
  2778. return -EBUSY;
  2779. t4_write_reg(adapter, SF_DATA_A, val);
  2780. t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
  2781. SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
  2782. return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
  2783. }
  2784. /**
  2785. * flash_wait_op - wait for a flash operation to complete
  2786. * @adapter: the adapter
  2787. * @attempts: max number of polls of the status register
  2788. * @delay: delay between polls in ms
  2789. *
  2790. * Wait for a flash operation to complete by polling the status register.
  2791. */
  2792. static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
  2793. {
  2794. int ret;
  2795. u32 status;
  2796. while (1) {
  2797. if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
  2798. (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
  2799. return ret;
  2800. if (!(status & 1))
  2801. return 0;
  2802. if (--attempts == 0)
  2803. return -EAGAIN;
  2804. if (delay)
  2805. msleep(delay);
  2806. }
  2807. }
  2808. /**
  2809. * t4_read_flash - read words from serial flash
  2810. * @adapter: the adapter
  2811. * @addr: the start address for the read
  2812. * @nwords: how many 32-bit words to read
  2813. * @data: where to store the read data
  2814. * @byte_oriented: whether to store data as bytes or as words
  2815. *
  2816. * Read the specified number of 32-bit words from the serial flash.
  2817. * If @byte_oriented is set the read data is stored as a byte array
  2818. * (i.e., big-endian), otherwise as 32-bit words in the platform's
  2819. * natural endianness.
  2820. */
  2821. int t4_read_flash(struct adapter *adapter, unsigned int addr,
  2822. unsigned int nwords, u32 *data, int byte_oriented)
  2823. {
  2824. int ret;
  2825. if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
  2826. return -EINVAL;
  2827. addr = swab32(addr) | SF_RD_DATA_FAST;
  2828. if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
  2829. (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
  2830. return ret;
  2831. for ( ; nwords; nwords--, data++) {
  2832. ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
  2833. if (nwords == 1)
  2834. t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
  2835. if (ret)
  2836. return ret;
  2837. if (byte_oriented)
  2838. *data = (__force __u32)(cpu_to_be32(*data));
  2839. }
  2840. return 0;
  2841. }
/**
 *	t4_write_flash - write up to a page of data to the serial flash
 *	@adapter: the adapter
 *	@addr: the start address to write
 *	@n: length of data to write in bytes
 *	@data: the data to write
 *
 *	Writes up to a page of data (256 bytes) to the serial flash starting
 *	at the given address.  All the data must be written to the same page.
 *	After programming, the whole page is read back and compared against
 *	@data; -EIO is returned on a mismatch.
 */
static int t4_write_flash(struct adapter *adapter, unsigned int addr,
			  unsigned int n, const u8 *data)
{
	int ret;
	u32 buf[64];	/* one full SF page (256 bytes) for the verify read */
	unsigned int i, c, left, val, offset = addr & 0xff;

	/* The write must lie inside the flash and not cross a page boundary */
	if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
		return -EINVAL;

	/* Command byte goes out first: byte-swapped address + PROGRAM PAGE */
	val = swab32(addr) | SF_PROG_PAGE;

	/* WRITE ENABLE, then open the program-page command sequence */
	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
		goto unlock;

	/* Stream the payload out big-endian, up to 4 bytes per SF write;
	 * "c != left" keeps the transfer chained until the final chunk.
	 */
	for (left = n; left; left -= c) {
		c = min(left, 4U);
		for (val = 0, i = 0; i < c; ++i)
			val = (val << 8) + *data++;

		ret = sf1_write(adapter, c, c != left, 1, val);
		if (ret)
			goto unlock;
	}
	ret = flash_wait_op(adapter, 8, 1);
	if (ret)
		goto unlock;

	t4_write_reg(adapter, SF_OP_A, 0);	/* unlock SF */

	/* Read the page to verify the write succeeded */
	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
	if (ret)
		return ret;

	/* @data was advanced by n in the loop above, so data - n is the
	 * caller's original buffer; compare it with the read-back page.
	 */
	if (memcmp(data - n, (u8 *)buf + offset, n)) {
		dev_err(adapter->pdev_dev,
			"failed to correctly write the flash page at %#x\n",
			addr);
		return -EIO;
	}
	return 0;

unlock:
	t4_write_reg(adapter, SF_OP_A, 0);	/* unlock SF */
	return ret;
}
  2891. /**
  2892. * t4_get_fw_version - read the firmware version
  2893. * @adapter: the adapter
  2894. * @vers: where to place the version
  2895. *
  2896. * Reads the FW version from flash.
  2897. */
  2898. int t4_get_fw_version(struct adapter *adapter, u32 *vers)
  2899. {
  2900. return t4_read_flash(adapter, FLASH_FW_START +
  2901. offsetof(struct fw_hdr, fw_ver), 1,
  2902. vers, 0);
  2903. }
  2904. /**
  2905. * t4_get_bs_version - read the firmware bootstrap version
  2906. * @adapter: the adapter
  2907. * @vers: where to place the version
  2908. *
  2909. * Reads the FW Bootstrap version from flash.
  2910. */
  2911. int t4_get_bs_version(struct adapter *adapter, u32 *vers)
  2912. {
  2913. return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
  2914. offsetof(struct fw_hdr, fw_ver), 1,
  2915. vers, 0);
  2916. }
  2917. /**
  2918. * t4_get_tp_version - read the TP microcode version
  2919. * @adapter: the adapter
  2920. * @vers: where to place the version
  2921. *
  2922. * Reads the TP microcode version from flash.
  2923. */
  2924. int t4_get_tp_version(struct adapter *adapter, u32 *vers)
  2925. {
  2926. return t4_read_flash(adapter, FLASH_FW_START +
  2927. offsetof(struct fw_hdr, tp_microcode_ver),
  2928. 1, vers, 0);
  2929. }
  2930. /**
  2931. * t4_get_exprom_version - return the Expansion ROM version (if any)
  2932. * @adapter: the adapter
  2933. * @vers: where to place the version
  2934. *
  2935. * Reads the Expansion ROM header from FLASH and returns the version
  2936. * number (if present) through the @vers return value pointer. We return
  2937. * this in the Firmware Version Format since it's convenient. Return
  2938. * 0 on success, -ENOENT if no Expansion ROM is present.
  2939. */
  2940. int t4_get_exprom_version(struct adapter *adap, u32 *vers)
  2941. {
  2942. struct exprom_header {
  2943. unsigned char hdr_arr[16]; /* must start with 0x55aa */
  2944. unsigned char hdr_ver[4]; /* Expansion ROM version */
  2945. } *hdr;
  2946. u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
  2947. sizeof(u32))];
  2948. int ret;
  2949. ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
  2950. ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
  2951. 0);
  2952. if (ret)
  2953. return ret;
  2954. hdr = (struct exprom_header *)exprom_header_buf;
  2955. if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
  2956. return -ENOENT;
  2957. *vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
  2958. FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
  2959. FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
  2960. FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
  2961. return 0;
  2962. }
  2963. /**
  2964. * t4_check_fw_version - check if the FW is supported with this driver
  2965. * @adap: the adapter
  2966. *
  2967. * Checks if an adapter's FW is compatible with the driver. Returns 0
  2968. * if there's exact match, a negative error if the version could not be
  2969. * read or there's a major version mismatch
  2970. */
  2971. int t4_check_fw_version(struct adapter *adap)
  2972. {
  2973. int i, ret, major, minor, micro;
  2974. int exp_major, exp_minor, exp_micro;
  2975. unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
  2976. ret = t4_get_fw_version(adap, &adap->params.fw_vers);
  2977. /* Try multiple times before returning error */
  2978. for (i = 0; (ret == -EBUSY || ret == -EAGAIN) && i < 3; i++)
  2979. ret = t4_get_fw_version(adap, &adap->params.fw_vers);
  2980. if (ret)
  2981. return ret;
  2982. major = FW_HDR_FW_VER_MAJOR_G(adap->params.fw_vers);
  2983. minor = FW_HDR_FW_VER_MINOR_G(adap->params.fw_vers);
  2984. micro = FW_HDR_FW_VER_MICRO_G(adap->params.fw_vers);
  2985. switch (chip_version) {
  2986. case CHELSIO_T4:
  2987. exp_major = T4FW_MIN_VERSION_MAJOR;
  2988. exp_minor = T4FW_MIN_VERSION_MINOR;
  2989. exp_micro = T4FW_MIN_VERSION_MICRO;
  2990. break;
  2991. case CHELSIO_T5:
  2992. exp_major = T5FW_MIN_VERSION_MAJOR;
  2993. exp_minor = T5FW_MIN_VERSION_MINOR;
  2994. exp_micro = T5FW_MIN_VERSION_MICRO;
  2995. break;
  2996. case CHELSIO_T6:
  2997. exp_major = T6FW_MIN_VERSION_MAJOR;
  2998. exp_minor = T6FW_MIN_VERSION_MINOR;
  2999. exp_micro = T6FW_MIN_VERSION_MICRO;
  3000. break;
  3001. default:
  3002. dev_err(adap->pdev_dev, "Unsupported chip type, %x\n",
  3003. adap->chip);
  3004. return -EINVAL;
  3005. }
  3006. if (major < exp_major || (major == exp_major && minor < exp_minor) ||
  3007. (major == exp_major && minor == exp_minor && micro < exp_micro)) {
  3008. dev_err(adap->pdev_dev,
  3009. "Card has firmware version %u.%u.%u, minimum "
  3010. "supported firmware is %u.%u.%u.\n", major, minor,
  3011. micro, exp_major, exp_minor, exp_micro);
  3012. return -EFAULT;
  3013. }
  3014. return 0;
  3015. }
  3016. /* Is the given firmware API compatible with the one the driver was compiled
  3017. * with?
  3018. */
  3019. static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
  3020. {
  3021. /* short circuit if it's the exact same firmware version */
  3022. if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
  3023. return 1;
  3024. #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
  3025. if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
  3026. SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
  3027. return 1;
  3028. #undef SAME_INTF
  3029. return 0;
  3030. }
  3031. /* The firmware in the filesystem is usable, but should it be installed?
  3032. * This routine explains itself in detail if it indicates the filesystem
  3033. * firmware should be installed.
  3034. */
  3035. static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
  3036. int k, int c)
  3037. {
  3038. const char *reason;
  3039. if (!card_fw_usable) {
  3040. reason = "incompatible or unusable";
  3041. goto install;
  3042. }
  3043. if (k > c) {
  3044. reason = "older than the version supported with this driver";
  3045. goto install;
  3046. }
  3047. return 0;
  3048. install:
  3049. dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
  3050. "installing firmware %u.%u.%u.%u on card.\n",
  3051. FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
  3052. FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
  3053. FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
  3054. FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
  3055. return 1;
  3056. }
  3057. int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
  3058. const u8 *fw_data, unsigned int fw_size,
  3059. struct fw_hdr *card_fw, enum dev_state state,
  3060. int *reset)
  3061. {
  3062. int ret, card_fw_usable, fs_fw_usable;
  3063. const struct fw_hdr *fs_fw;
  3064. const struct fw_hdr *drv_fw;
  3065. drv_fw = &fw_info->fw_hdr;
  3066. /* Read the header of the firmware on the card */
  3067. ret = -t4_read_flash(adap, FLASH_FW_START,
  3068. sizeof(*card_fw) / sizeof(uint32_t),
  3069. (uint32_t *)card_fw, 1);
  3070. if (ret == 0) {
  3071. card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
  3072. } else {
  3073. dev_err(adap->pdev_dev,
  3074. "Unable to read card's firmware header: %d\n", ret);
  3075. card_fw_usable = 0;
  3076. }
  3077. if (fw_data != NULL) {
  3078. fs_fw = (const void *)fw_data;
  3079. fs_fw_usable = fw_compatible(drv_fw, fs_fw);
  3080. } else {
  3081. fs_fw = NULL;
  3082. fs_fw_usable = 0;
  3083. }
  3084. if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
  3085. (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
  3086. /* Common case: the firmware on the card is an exact match and
  3087. * the filesystem one is an exact match too, or the filesystem
  3088. * one is absent/incompatible.
  3089. */
  3090. } else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
  3091. should_install_fs_fw(adap, card_fw_usable,
  3092. be32_to_cpu(fs_fw->fw_ver),
  3093. be32_to_cpu(card_fw->fw_ver))) {
  3094. ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
  3095. fw_size, 0);
  3096. if (ret != 0) {
  3097. dev_err(adap->pdev_dev,
  3098. "failed to install firmware: %d\n", ret);
  3099. goto bye;
  3100. }
  3101. /* Installed successfully, update the cached header too. */
  3102. *card_fw = *fs_fw;
  3103. card_fw_usable = 1;
  3104. *reset = 0; /* already reset as part of load_fw */
  3105. }
  3106. if (!card_fw_usable) {
  3107. uint32_t d, c, k;
  3108. d = be32_to_cpu(drv_fw->fw_ver);
  3109. c = be32_to_cpu(card_fw->fw_ver);
  3110. k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
  3111. dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
  3112. "chip state %d, "
  3113. "driver compiled with %d.%d.%d.%d, "
  3114. "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
  3115. state,
  3116. FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
  3117. FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
  3118. FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
  3119. FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
  3120. FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
  3121. FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
  3122. ret = EINVAL;
  3123. goto bye;
  3124. }
  3125. /* We're using whatever's on the card and it's known to be good. */
  3126. adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
  3127. adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
  3128. bye:
  3129. return ret;
  3130. }
  3131. /**
  3132. * t4_flash_erase_sectors - erase a range of flash sectors
  3133. * @adapter: the adapter
  3134. * @start: the first sector to erase
  3135. * @end: the last sector to erase
  3136. *
  3137. * Erases the sectors in the given inclusive range.
  3138. */
  3139. static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
  3140. {
  3141. int ret = 0;
  3142. if (end >= adapter->params.sf_nsec)
  3143. return -EINVAL;
  3144. while (start <= end) {
  3145. if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
  3146. (ret = sf1_write(adapter, 4, 0, 1,
  3147. SF_ERASE_SECTOR | (start << 8))) != 0 ||
  3148. (ret = flash_wait_op(adapter, 14, 500)) != 0) {
  3149. dev_err(adapter->pdev_dev,
  3150. "erase of flash sector %d failed, error %d\n",
  3151. start, ret);
  3152. break;
  3153. }
  3154. start++;
  3155. }
  3156. t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
  3157. return ret;
  3158. }
  3159. /**
  3160. * t4_flash_cfg_addr - return the address of the flash configuration file
  3161. * @adapter: the adapter
  3162. *
  3163. * Return the address within the flash where the Firmware Configuration
  3164. * File is stored.
  3165. */
  3166. unsigned int t4_flash_cfg_addr(struct adapter *adapter)
  3167. {
  3168. if (adapter->params.sf_size == 0x100000)
  3169. return FLASH_FPGA_CFG_START;
  3170. else
  3171. return FLASH_CFG_START;
  3172. }
  3173. /* Return TRUE if the specified firmware matches the adapter. I.e. T4
  3174. * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
  3175. * and emit an error message for mismatched firmware to save our caller the
  3176. * effort ...
  3177. */
  3178. static bool t4_fw_matches_chip(const struct adapter *adap,
  3179. const struct fw_hdr *hdr)
  3180. {
  3181. /* The expression below will return FALSE for any unsupported adapter
  3182. * which will keep us "honest" in the future ...
  3183. */
  3184. if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
  3185. (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
  3186. (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
  3187. return true;
  3188. dev_err(adap->pdev_dev,
  3189. "FW image (%d) is not suitable for this adapter (%d)\n",
  3190. hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
  3191. return false;
  3192. }
  3193. /**
  3194. * t4_load_fw - download firmware
  3195. * @adap: the adapter
  3196. * @fw_data: the firmware image to write
  3197. * @size: image size
  3198. *
  3199. * Write the supplied firmware image to the card's serial flash.
  3200. */
  3201. int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
  3202. {
  3203. u32 csum;
  3204. int ret, addr;
  3205. unsigned int i;
  3206. u8 first_page[SF_PAGE_SIZE];
  3207. const __be32 *p = (const __be32 *)fw_data;
  3208. const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
  3209. unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
  3210. unsigned int fw_img_start = adap->params.sf_fw_start;
  3211. unsigned int fw_start_sec = fw_img_start / sf_sec_size;
  3212. if (!size) {
  3213. dev_err(adap->pdev_dev, "FW image has no data\n");
  3214. return -EINVAL;
  3215. }
  3216. if (size & 511) {
  3217. dev_err(adap->pdev_dev,
  3218. "FW image size not multiple of 512 bytes\n");
  3219. return -EINVAL;
  3220. }
  3221. if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
  3222. dev_err(adap->pdev_dev,
  3223. "FW image size differs from size in FW header\n");
  3224. return -EINVAL;
  3225. }
  3226. if (size > FW_MAX_SIZE) {
  3227. dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
  3228. FW_MAX_SIZE);
  3229. return -EFBIG;
  3230. }
  3231. if (!t4_fw_matches_chip(adap, hdr))
  3232. return -EINVAL;
  3233. for (csum = 0, i = 0; i < size / sizeof(csum); i++)
  3234. csum += be32_to_cpu(p[i]);
  3235. if (csum != 0xffffffff) {
  3236. dev_err(adap->pdev_dev,
  3237. "corrupted firmware image, checksum %#x\n", csum);
  3238. return -EINVAL;
  3239. }
  3240. i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
  3241. ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
  3242. if (ret)
  3243. goto out;
  3244. /*
  3245. * We write the correct version at the end so the driver can see a bad
  3246. * version if the FW write fails. Start by writing a copy of the
  3247. * first page with a bad version.
  3248. */
  3249. memcpy(first_page, fw_data, SF_PAGE_SIZE);
  3250. ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
  3251. ret = t4_write_flash(adap, fw_img_start, SF_PAGE_SIZE, first_page);
  3252. if (ret)
  3253. goto out;
  3254. addr = fw_img_start;
  3255. for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
  3256. addr += SF_PAGE_SIZE;
  3257. fw_data += SF_PAGE_SIZE;
  3258. ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
  3259. if (ret)
  3260. goto out;
  3261. }
  3262. ret = t4_write_flash(adap,
  3263. fw_img_start + offsetof(struct fw_hdr, fw_ver),
  3264. sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
  3265. out:
  3266. if (ret)
  3267. dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
  3268. ret);
  3269. else
  3270. ret = t4_get_fw_version(adap, &adap->params.fw_vers);
  3271. return ret;
  3272. }
  3273. /**
  3274. * t4_phy_fw_ver - return current PHY firmware version
  3275. * @adap: the adapter
  3276. * @phy_fw_ver: return value buffer for PHY firmware version
  3277. *
  3278. * Returns the current version of external PHY firmware on the
  3279. * adapter.
  3280. */
  3281. int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
  3282. {
  3283. u32 param, val;
  3284. int ret;
  3285. param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
  3286. FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
  3287. FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
  3288. FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
  3289. ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
  3290. &param, &val);
  3291. if (ret < 0)
  3292. return ret;
  3293. *phy_fw_ver = val;
  3294. return 0;
  3295. }
  3296. /**
  3297. * t4_load_phy_fw - download port PHY firmware
  3298. * @adap: the adapter
  3299. * @win: the PCI-E Memory Window index to use for t4_memory_rw()
  3300. * @win_lock: the lock to use to guard the memory copy
  3301. * @phy_fw_version: function to check PHY firmware versions
  3302. * @phy_fw_data: the PHY firmware image to write
  3303. * @phy_fw_size: image size
  3304. *
  3305. * Transfer the specified PHY firmware to the adapter. If a non-NULL
  3306. * @phy_fw_version is supplied, then it will be used to determine if
  3307. * it's necessary to perform the transfer by comparing the version
  3308. * of any existing adapter PHY firmware with that of the passed in
  3309. * PHY firmware image. If @win_lock is non-NULL then it will be used
  3310. * around the call to t4_memory_rw() which transfers the PHY firmware
  3311. * to the adapter.
  3312. *
  3313. * A negative error number will be returned if an error occurs. If
  3314. * version number support is available and there's no need to upgrade
  3315. * the firmware, 0 will be returned. If firmware is successfully
  3316. * transferred to the adapter, 1 will be retured.
  3317. *
  3318. * NOTE: some adapters only have local RAM to store the PHY firmware. As
  3319. * a result, a RESET of the adapter would cause that RAM to lose its
  3320. * contents. Thus, loading PHY firmware on such adapters must happen
  3321. * after any FW_RESET_CMDs ...
  3322. */
  3323. int t4_load_phy_fw(struct adapter *adap,
  3324. int win, spinlock_t *win_lock,
  3325. int (*phy_fw_version)(const u8 *, size_t),
  3326. const u8 *phy_fw_data, size_t phy_fw_size)
  3327. {
  3328. unsigned long mtype = 0, maddr = 0;
  3329. u32 param, val;
  3330. int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
  3331. int ret;
  3332. /* If we have version number support, then check to see if the adapter
  3333. * already has up-to-date PHY firmware loaded.
  3334. */
  3335. if (phy_fw_version) {
  3336. new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
  3337. ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
  3338. if (ret < 0)
  3339. return ret;
  3340. if (cur_phy_fw_ver >= new_phy_fw_vers) {
  3341. CH_WARN(adap, "PHY Firmware already up-to-date, "
  3342. "version %#x\n", cur_phy_fw_ver);
  3343. return 0;
  3344. }
  3345. }
  3346. /* Ask the firmware where it wants us to copy the PHY firmware image.
  3347. * The size of the file requires a special version of the READ coommand
  3348. * which will pass the file size via the values field in PARAMS_CMD and
  3349. * retrieve the return value from firmware and place it in the same
  3350. * buffer values
  3351. */
  3352. param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
  3353. FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
  3354. FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
  3355. FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
  3356. val = phy_fw_size;
  3357. ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
  3358. &param, &val, 1);
  3359. if (ret < 0)
  3360. return ret;
  3361. mtype = val >> 8;
  3362. maddr = (val & 0xff) << 16;
  3363. /* Copy the supplied PHY Firmware image to the adapter memory location
  3364. * allocated by the adapter firmware.
  3365. */
  3366. if (win_lock)
  3367. spin_lock_bh(win_lock);
  3368. ret = t4_memory_rw(adap, win, mtype, maddr,
  3369. phy_fw_size, (__be32 *)phy_fw_data,
  3370. T4_MEMORY_WRITE);
  3371. if (win_lock)
  3372. spin_unlock_bh(win_lock);
  3373. if (ret)
  3374. return ret;
  3375. /* Tell the firmware that the PHY firmware image has been written to
  3376. * RAM and it can now start copying it over to the PHYs. The chip
  3377. * firmware will RESET the affected PHYs as part of this operation
  3378. * leaving them running the new PHY firmware image.
  3379. */
  3380. param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
  3381. FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
  3382. FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
  3383. FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
  3384. ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
  3385. &param, &val, 30000);
  3386. /* If we have version number support, then check to see that the new
  3387. * firmware got loaded properly.
  3388. */
  3389. if (phy_fw_version) {
  3390. ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
  3391. if (ret < 0)
  3392. return ret;
  3393. if (cur_phy_fw_ver != new_phy_fw_vers) {
  3394. CH_WARN(adap, "PHY Firmware did not update: "
  3395. "version on adapter %#x, "
  3396. "version flashed %#x\n",
  3397. cur_phy_fw_ver, new_phy_fw_vers);
  3398. return -ENXIO;
  3399. }
  3400. }
  3401. return 1;
  3402. }
  3403. /**
  3404. * t4_fwcache - firmware cache operation
  3405. * @adap: the adapter
  3406. * @op : the operation (flush or flush and invalidate)
  3407. */
  3408. int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
  3409. {
  3410. struct fw_params_cmd c;
  3411. memset(&c, 0, sizeof(c));
  3412. c.op_to_vfn =
  3413. cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
  3414. FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
  3415. FW_PARAMS_CMD_PFN_V(adap->pf) |
  3416. FW_PARAMS_CMD_VFN_V(0));
  3417. c.retval_len16 = cpu_to_be32(FW_LEN16(c));
  3418. c.param[0].mnem =
  3419. cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
  3420. FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
  3421. c.param[0].val = (__force __be32)op;
  3422. return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
  3423. }
/* Read the CIM PIF logic-analyzer buffers.  @pif_req and @pif_rsp receive
 * the request and response LA data respectively; @pif_req_wrptr and
 * @pif_rsp_wrptr, if non-NULL, receive the current write pointers.
 */
void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
			unsigned int *pif_req_wrptr,
			unsigned int *pif_rsp_wrptr)
{
	int i, j;
	u32 cfg, val, req, rsp;

	/* Temporarily disable the LA (by toggling the enable bit off) while
	 * we read it out; the saved cfg is restored at the end.
	 */
	cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
	if (cfg & LADBGEN_F)
		t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);

	val = t4_read_reg(adap, CIM_DEBUGSTS_A);
	req = POLADBGWRPTR_G(val);
	rsp = PILADBGWRPTR_G(val);
	if (pif_req_wrptr)
		*pif_req_wrptr = req;
	if (pif_rsp_wrptr)
		*pif_rsp_wrptr = rsp;

	/* Each LA entry occupies 6 consecutive read-pointer slots; the
	 * pointers are then advanced by 2 (mod the pointer mask) to reach
	 * the next entry.
	 */
	for (i = 0; i < CIM_PIFLA_SIZE; i++) {
		for (j = 0; j < 6; j++) {
			t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(req) |
				     PILADBGRDPTR_V(rsp));
			*pif_req++ = t4_read_reg(adap, CIM_PO_LA_DEBUGDATA_A);
			*pif_rsp++ = t4_read_reg(adap, CIM_PI_LA_DEBUGDATA_A);
			req++;
			rsp++;
		}
		req = (req + 2) & POLADBGRDPTR_M;
		rsp = (rsp + 2) & PILADBGRDPTR_M;
	}
	/* Restore the original debug configuration (re-enables the LA). */
	t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
}
  3454. void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
  3455. {
  3456. u32 cfg;
  3457. int i, j, idx;
  3458. cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
  3459. if (cfg & LADBGEN_F)
  3460. t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
  3461. for (i = 0; i < CIM_MALA_SIZE; i++) {
  3462. for (j = 0; j < 5; j++) {
  3463. idx = 8 * i + j;
  3464. t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(idx) |
  3465. PILADBGRDPTR_V(idx));
  3466. *ma_req++ = t4_read_reg(adap, CIM_PO_LA_MADEBUGDATA_A);
  3467. *ma_rsp++ = t4_read_reg(adap, CIM_PI_LA_MADEBUGDATA_A);
  3468. }
  3469. }
  3470. t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
  3471. }
  3472. void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
  3473. {
  3474. unsigned int i, j;
  3475. for (i = 0; i < 8; i++) {
  3476. u32 *p = la_buf + i;
  3477. t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
  3478. j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
  3479. t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
  3480. for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
  3481. *p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
  3482. }
  3483. }
  3484. #define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
  3485. FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \
  3486. FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \
  3487. FW_PORT_CAP_ANEG)
  3488. /**
  3489. * t4_link_l1cfg - apply link configuration to MAC/PHY
  3490. * @phy: the PHY to setup
  3491. * @mac: the MAC to setup
  3492. * @lc: the requested link configuration
  3493. *
  3494. * Set up a port's MAC and PHY according to a desired link configuration.
  3495. * - If the PHY can auto-negotiate first decide what to advertise, then
  3496. * enable/disable auto-negotiation as desired, and reset.
  3497. * - If the PHY does not auto-negotiate just reset it.
  3498. * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
  3499. * otherwise do it later based on the outcome of auto-negotiation.
  3500. */
  3501. int t4_link_l1cfg(struct adapter *adap, unsigned int mbox, unsigned int port,
  3502. struct link_config *lc)
  3503. {
  3504. struct fw_port_cmd c;
  3505. unsigned int fc = 0, mdi = FW_PORT_CAP_MDI_V(FW_PORT_CAP_MDI_AUTO);
  3506. lc->link_ok = 0;
  3507. if (lc->requested_fc & PAUSE_RX)
  3508. fc |= FW_PORT_CAP_FC_RX;
  3509. if (lc->requested_fc & PAUSE_TX)
  3510. fc |= FW_PORT_CAP_FC_TX;
  3511. memset(&c, 0, sizeof(c));
  3512. c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
  3513. FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
  3514. FW_PORT_CMD_PORTID_V(port));
  3515. c.action_to_len16 =
  3516. cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
  3517. FW_LEN16(c));
  3518. if (!(lc->supported & FW_PORT_CAP_ANEG)) {
  3519. c.u.l1cfg.rcap = cpu_to_be32((lc->supported & ADVERT_MASK) |
  3520. fc);
  3521. lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
  3522. } else if (lc->autoneg == AUTONEG_DISABLE) {
  3523. c.u.l1cfg.rcap = cpu_to_be32(lc->requested_speed | fc | mdi);
  3524. lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
  3525. } else
  3526. c.u.l1cfg.rcap = cpu_to_be32(lc->advertising | fc | mdi);
  3527. return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
  3528. }
  3529. /**
  3530. * t4_restart_aneg - restart autonegotiation
  3531. * @adap: the adapter
  3532. * @mbox: mbox to use for the FW command
  3533. * @port: the port id
  3534. *
  3535. * Restarts autonegotiation for the selected port.
  3536. */
  3537. int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
  3538. {
  3539. struct fw_port_cmd c;
  3540. memset(&c, 0, sizeof(c));
  3541. c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
  3542. FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
  3543. FW_PORT_CMD_PORTID_V(port));
  3544. c.action_to_len16 =
  3545. cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
  3546. FW_LEN16(c));
  3547. c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP_ANEG);
  3548. return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
  3549. }
/* Signature of a module-specific interrupt handler invoked from the
 * table-driven dispatcher below.
 */
typedef void (*int_handler_t)(struct adapter *adap);

/* One entry in an interrupt-cause table consumed by
 * t4_handle_intr_status(); a table is terminated by an entry with mask 0.
 */
struct intr_info {
	unsigned int mask;       /* bits to check in interrupt status */
	const char *msg;         /* message to print or NULL */
	short stat_idx;          /* stat counter to increment or -1 */
	unsigned short fatal;    /* whether the condition reported is fatal */
	int_handler_t int_handler; /* platform-specific int handler */
};
  3558. /**
  3559. * t4_handle_intr_status - table driven interrupt handler
  3560. * @adapter: the adapter that generated the interrupt
  3561. * @reg: the interrupt status register to process
  3562. * @acts: table of interrupt actions
  3563. *
  3564. * A table driven interrupt handler that applies a set of masks to an
  3565. * interrupt status word and performs the corresponding actions if the
  3566. * interrupts described by the mask have occurred. The actions include
  3567. * optionally emitting a warning or alert message. The table is terminated
  3568. * by an entry specifying mask 0. Returns the number of fatal interrupt
  3569. * conditions.
  3570. */
  3571. static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
  3572. const struct intr_info *acts)
  3573. {
  3574. int fatal = 0;
  3575. unsigned int mask = 0;
  3576. unsigned int status = t4_read_reg(adapter, reg);
  3577. for ( ; acts->mask; ++acts) {
  3578. if (!(status & acts->mask))
  3579. continue;
  3580. if (acts->fatal) {
  3581. fatal++;
  3582. dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
  3583. status & acts->mask);
  3584. } else if (acts->msg && printk_ratelimit())
  3585. dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
  3586. status & acts->mask);
  3587. if (acts->int_handler)
  3588. acts->int_handler(adapter);
  3589. mask |= acts->mask;
  3590. }
  3591. status &= mask;
  3592. if (status) /* clear processed interrupts */
  3593. t4_write_reg(adapter, reg, status);
  3594. return fatal;
  3595. }
  3596. /*
  3597. * Interrupt handler for the PCIE module.
  3598. */
  3599. static void pcie_intr_handler(struct adapter *adapter)
  3600. {
  3601. static const struct intr_info sysbus_intr_info[] = {
  3602. { RNPP_F, "RXNP array parity error", -1, 1 },
  3603. { RPCP_F, "RXPC array parity error", -1, 1 },
  3604. { RCIP_F, "RXCIF array parity error", -1, 1 },
  3605. { RCCP_F, "Rx completions control array parity error", -1, 1 },
  3606. { RFTP_F, "RXFT array parity error", -1, 1 },
  3607. { 0 }
  3608. };
  3609. static const struct intr_info pcie_port_intr_info[] = {
  3610. { TPCP_F, "TXPC array parity error", -1, 1 },
  3611. { TNPP_F, "TXNP array parity error", -1, 1 },
  3612. { TFTP_F, "TXFT array parity error", -1, 1 },
  3613. { TCAP_F, "TXCA array parity error", -1, 1 },
  3614. { TCIP_F, "TXCIF array parity error", -1, 1 },
  3615. { RCAP_F, "RXCA array parity error", -1, 1 },
  3616. { OTDD_F, "outbound request TLP discarded", -1, 1 },
  3617. { RDPE_F, "Rx data parity error", -1, 1 },
  3618. { TDUE_F, "Tx uncorrectable data error", -1, 1 },
  3619. { 0 }
  3620. };
  3621. static const struct intr_info pcie_intr_info[] = {
  3622. { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
  3623. { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
  3624. { MSIDATAPERR_F, "MSI data parity error", -1, 1 },
  3625. { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
  3626. { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
  3627. { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
  3628. { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
  3629. { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
  3630. { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
  3631. { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
  3632. { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
  3633. { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
  3634. { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
  3635. { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
  3636. { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
  3637. { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
  3638. { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
  3639. { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
  3640. { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
  3641. { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
  3642. { FIDPERR_F, "PCI FID parity error", -1, 1 },
  3643. { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
  3644. { MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
  3645. { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
  3646. { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
  3647. { RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
  3648. { RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
  3649. { PCIESINT_F, "PCI core secondary fault", -1, 1 },
  3650. { PCIEPINT_F, "PCI core primary fault", -1, 1 },
  3651. { UNXSPLCPLERR_F, "PCI unexpected split completion error",
  3652. -1, 0 },
  3653. { 0 }
  3654. };
  3655. static struct intr_info t5_pcie_intr_info[] = {
  3656. { MSTGRPPERR_F, "Master Response Read Queue parity error",
  3657. -1, 1 },
  3658. { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
  3659. { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
  3660. { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
  3661. { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
  3662. { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
  3663. { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
  3664. { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
  3665. -1, 1 },
  3666. { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
  3667. -1, 1 },
  3668. { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
  3669. { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
  3670. { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
  3671. { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
  3672. { DREQWRPERR_F, "PCI DMA channel write request parity error",
  3673. -1, 1 },
  3674. { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
  3675. { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
  3676. { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
  3677. { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
  3678. { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
  3679. { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
  3680. { FIDPERR_F, "PCI FID parity error", -1, 1 },
  3681. { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
  3682. { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
  3683. { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
  3684. { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
  3685. -1, 1 },
  3686. { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
  3687. -1, 1 },
  3688. { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
  3689. { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
  3690. { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
  3691. { READRSPERR_F, "Outbound read error", -1, 0 },
  3692. { 0 }
  3693. };
  3694. int fat;
  3695. if (is_t4(adapter->params.chip))
  3696. fat = t4_handle_intr_status(adapter,
  3697. PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
  3698. sysbus_intr_info) +
  3699. t4_handle_intr_status(adapter,
  3700. PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
  3701. pcie_port_intr_info) +
  3702. t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
  3703. pcie_intr_info);
  3704. else
  3705. fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
  3706. t5_pcie_intr_info);
  3707. if (fat)
  3708. t4_fatal_err(adapter);
  3709. }
  3710. /*
  3711. * TP interrupt handler.
  3712. */
  3713. static void tp_intr_handler(struct adapter *adapter)
  3714. {
  3715. static const struct intr_info tp_intr_info[] = {
  3716. { 0x3fffffff, "TP parity error", -1, 1 },
  3717. { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
  3718. { 0 }
  3719. };
  3720. if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
  3721. t4_fatal_err(adapter);
  3722. }
  3723. /*
  3724. * SGE interrupt handler.
  3725. */
  3726. static void sge_intr_handler(struct adapter *adapter)
  3727. {
  3728. u64 v;
  3729. u32 err;
  3730. static const struct intr_info sge_intr_info[] = {
  3731. { ERR_CPL_EXCEED_IQE_SIZE_F,
  3732. "SGE received CPL exceeding IQE size", -1, 1 },
  3733. { ERR_INVALID_CIDX_INC_F,
  3734. "SGE GTS CIDX increment too large", -1, 0 },
  3735. { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
  3736. { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
  3737. { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
  3738. "SGE IQID > 1023 received CPL for FL", -1, 0 },
  3739. { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
  3740. 0 },
  3741. { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
  3742. 0 },
  3743. { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
  3744. 0 },
  3745. { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
  3746. 0 },
  3747. { ERR_ING_CTXT_PRIO_F,
  3748. "SGE too many priority ingress contexts", -1, 0 },
  3749. { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
  3750. { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
  3751. { 0 }
  3752. };
  3753. static struct intr_info t4t5_sge_intr_info[] = {
  3754. { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
  3755. { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
  3756. { ERR_EGR_CTXT_PRIO_F,
  3757. "SGE too many priority egress contexts", -1, 0 },
  3758. { 0 }
  3759. };
  3760. v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
  3761. ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
  3762. if (v) {
  3763. dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
  3764. (unsigned long long)v);
  3765. t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
  3766. t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
  3767. }
  3768. v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
  3769. if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
  3770. v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
  3771. t4t5_sge_intr_info);
  3772. err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
  3773. if (err & ERROR_QID_VALID_F) {
  3774. dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
  3775. ERROR_QID_G(err));
  3776. if (err & UNCAPTURED_ERROR_F)
  3777. dev_err(adapter->pdev_dev,
  3778. "SGE UNCAPTURED_ERROR set (clearing)\n");
  3779. t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
  3780. UNCAPTURED_ERROR_F);
  3781. }
  3782. if (v != 0)
  3783. t4_fatal_err(adapter);
  3784. }
  3785. #define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
  3786. OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
  3787. #define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
  3788. IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
  3789. /*
  3790. * CIM interrupt handler.
  3791. */
  3792. static void cim_intr_handler(struct adapter *adapter)
  3793. {
  3794. static const struct intr_info cim_intr_info[] = {
  3795. { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
  3796. { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
  3797. { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
  3798. { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
  3799. { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
  3800. { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
  3801. { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
  3802. { 0 }
  3803. };
  3804. static const struct intr_info cim_upintr_info[] = {
  3805. { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
  3806. { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
  3807. { ILLWRINT_F, "CIM illegal write", -1, 1 },
  3808. { ILLRDINT_F, "CIM illegal read", -1, 1 },
  3809. { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
  3810. { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
  3811. { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
  3812. { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
  3813. { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
  3814. { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
  3815. { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
  3816. { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
  3817. { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
  3818. { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
  3819. { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
  3820. { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
  3821. { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
  3822. { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
  3823. { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
  3824. { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
  3825. { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
  3826. { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
  3827. { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
  3828. { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
  3829. { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
  3830. { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
  3831. { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
  3832. { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
  3833. { 0 }
  3834. };
  3835. int fat;
  3836. if (t4_read_reg(adapter, PCIE_FW_A) & PCIE_FW_ERR_F)
  3837. t4_report_fw_error(adapter);
  3838. fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
  3839. cim_intr_info) +
  3840. t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
  3841. cim_upintr_info);
  3842. if (fat)
  3843. t4_fatal_err(adapter);
  3844. }
  3845. /*
  3846. * ULP RX interrupt handler.
  3847. */
  3848. static void ulprx_intr_handler(struct adapter *adapter)
  3849. {
  3850. static const struct intr_info ulprx_intr_info[] = {
  3851. { 0x1800000, "ULPRX context error", -1, 1 },
  3852. { 0x7fffff, "ULPRX parity error", -1, 1 },
  3853. { 0 }
  3854. };
  3855. if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
  3856. t4_fatal_err(adapter);
  3857. }
  3858. /*
  3859. * ULP TX interrupt handler.
  3860. */
  3861. static void ulptx_intr_handler(struct adapter *adapter)
  3862. {
  3863. static const struct intr_info ulptx_intr_info[] = {
  3864. { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
  3865. 0 },
  3866. { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
  3867. 0 },
  3868. { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
  3869. 0 },
  3870. { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
  3871. 0 },
  3872. { 0xfffffff, "ULPTX parity error", -1, 1 },
  3873. { 0 }
  3874. };
  3875. if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
  3876. t4_fatal_err(adapter);
  3877. }
  3878. /*
  3879. * PM TX interrupt handler.
  3880. */
  3881. static void pmtx_intr_handler(struct adapter *adapter)
  3882. {
  3883. static const struct intr_info pmtx_intr_info[] = {
  3884. { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
  3885. { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
  3886. { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
  3887. { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
  3888. { PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
  3889. { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
  3890. { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
  3891. -1, 1 },
  3892. { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
  3893. { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
  3894. { 0 }
  3895. };
  3896. if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
  3897. t4_fatal_err(adapter);
  3898. }
  3899. /*
  3900. * PM RX interrupt handler.
  3901. */
  3902. static void pmrx_intr_handler(struct adapter *adapter)
  3903. {
  3904. static const struct intr_info pmrx_intr_info[] = {
  3905. { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
  3906. { PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
  3907. { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
  3908. { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
  3909. -1, 1 },
  3910. { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
  3911. { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
  3912. { 0 }
  3913. };
  3914. if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
  3915. t4_fatal_err(adapter);
  3916. }
  3917. /*
  3918. * CPL switch interrupt handler.
  3919. */
  3920. static void cplsw_intr_handler(struct adapter *adapter)
  3921. {
  3922. static const struct intr_info cplsw_intr_info[] = {
  3923. { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
  3924. { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
  3925. { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
  3926. { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
  3927. { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
  3928. { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
  3929. { 0 }
  3930. };
  3931. if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
  3932. t4_fatal_err(adapter);
  3933. }
  3934. /*
  3935. * LE interrupt handler.
  3936. */
  3937. static void le_intr_handler(struct adapter *adap)
  3938. {
  3939. enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
  3940. static const struct intr_info le_intr_info[] = {
  3941. { LIPMISS_F, "LE LIP miss", -1, 0 },
  3942. { LIP0_F, "LE 0 LIP error", -1, 0 },
  3943. { PARITYERR_F, "LE parity error", -1, 1 },
  3944. { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
  3945. { REQQPARERR_F, "LE request queue parity error", -1, 1 },
  3946. { 0 }
  3947. };
  3948. static struct intr_info t6_le_intr_info[] = {
  3949. { T6_LIPMISS_F, "LE LIP miss", -1, 0 },
  3950. { T6_LIP0_F, "LE 0 LIP error", -1, 0 },
  3951. { TCAMINTPERR_F, "LE parity error", -1, 1 },
  3952. { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
  3953. { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
  3954. { 0 }
  3955. };
  3956. if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
  3957. (chip <= CHELSIO_T5) ?
  3958. le_intr_info : t6_le_intr_info))
  3959. t4_fatal_err(adap);
  3960. }
/*
 * MPS interrupt handler.
 *
 * Decodes all seven MPS sub-module interrupt cause registers, then
 * writes 0 to the top-level MPS_INT_CAUSE and reads it back to flush
 * the write before deciding whether the event was fatal.
 */
static void mps_intr_handler(struct adapter *adapter)
{
	/* Per-register decode tables: { mask, message, stat idx, fatal } */
	static const struct intr_info mps_rx_intr_info[] = {
		{ 0xffffff, "MPS Rx parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_tx_intr_info[] = {
		{ TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
		{ NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
		{ TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
		  -1, 1 },
		{ TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
		  -1, 1 },
		{ BUBBLE_F, "MPS Tx underflow", -1, 1 },
		{ SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
		{ FRMERR_F, "MPS Tx framing error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_trc_intr_info[] = {
		{ FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
		{ PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
		  -1, 1 },
		{ MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_sram_intr_info[] = {
		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_tx_intr_info[] = {
		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_stat_rx_intr_info[] = {
		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
		{ 0 }
	};
	static const struct intr_info mps_cls_intr_info[] = {
		{ MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
		{ MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
		{ HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
		{ 0 }
	};
	int fat;

	/* Accumulate the fatal counts from every MPS cause register. */
	fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
				    mps_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
				    mps_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
				    mps_trc_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
				    mps_stat_sram_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
				    mps_stat_tx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
				    mps_stat_rx_intr_info) +
	      t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
				    mps_cls_intr_info);

	t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
	t4_read_reg(adapter, MPS_INT_CAUSE_A); /* flush */
	if (fat)
		t4_fatal_err(adapter);
}
#define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
		      ECC_UE_INT_CAUSE_F)

/*
 * EDC/MC interrupt handler.
 *
 * @idx selects the memory controller (MEM_EDC0, MEM_EDC1, MEM_MC/MC0,
 * MEM_MC1).  Correctable ECC errors are counted and rate-limited to the
 * log; parity and uncorrectable ECC errors are fatal.
 */
static void mem_intr_handler(struct adapter *adapter, int idx)
{
	static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };

	unsigned int addr, cnt_addr, v;

	/* Pick the interrupt-cause / ECC-status register pair for @idx. */
	if (idx <= MEM_EDC1) {
		addr = EDC_REG(EDC_INT_CAUSE_A, idx);
		cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
	} else if (idx == MEM_MC) {
		if (is_t4(adapter->params.chip)) {
			addr = MC_INT_CAUSE_A;
			cnt_addr = MC_ECC_STATUS_A;
		} else {
			/* T5+ use the per-controller MC_P register block. */
			addr = MC_P_INT_CAUSE_A;
			cnt_addr = MC_P_ECC_STATUS_A;
		}
	} else {
		/* Second memory controller (MC1). */
		addr = MC_REG(MC_P_INT_CAUSE_A, 1);
		cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
	}

	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
	if (v & PERR_INT_CAUSE_F)
		dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
			  name[idx]);
	if (v & ECC_CE_INT_CAUSE_F) {
		u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));

		t4_edc_err_read(adapter, idx);
		/* NOTE(review): writing the all-ones count field presumably
		 * resets the CE counter -- confirm against the datasheet.
		 */
		t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
		if (printk_ratelimit())
			dev_warn(adapter->pdev_dev,
				 "%u %s correctable ECC data error%s\n",
				 cnt, name[idx], cnt > 1 ? "s" : "");
	}
	if (v & ECC_UE_INT_CAUSE_F)
		dev_alert(adapter->pdev_dev,
			  "%s uncorrectable ECC data error\n", name[idx]);

	/* Ack the handled cause bits, then escalate the fatal ones. */
	t4_write_reg(adapter, addr, v);
	if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
		t4_fatal_err(adapter);
}
/*
 * MA interrupt handler.
 *
 * Any MA interrupt is treated as fatal: the cause bits are logged,
 * acked, and t4_fatal_err() is called unconditionally.
 */
static void ma_intr_handler(struct adapter *adap)
{
	u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);

	if (status & MEM_PERR_INT_CAUSE_F) {
		dev_alert(adap->pdev_dev,
			  "MA parity error, parity status %#x\n",
			  t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
		/* T5 has a second parity status register to report. */
		if (is_t5(adap->params.chip))
			dev_alert(adap->pdev_dev,
				  "MA parity error, parity status %#x\n",
				  t4_read_reg(adap,
					      MA_PARITY_ERROR_STATUS2_A));
	}
	if (status & MEM_WRAP_INT_CAUSE_F) {
		v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
		/* Wrap status encodes the client id and address >> 4. */
		dev_alert(adap->pdev_dev, "MA address wrap-around error by "
			  "client %u to address %#x\n",
			  MEM_WRAP_CLIENT_NUM_G(v),
			  MEM_WRAP_ADDRESS_G(v) << 4);
	}
	t4_write_reg(adap, MA_INT_CAUSE_A, status);	/* ack */
	t4_fatal_err(adap);
}
  4097. /*
  4098. * SMB interrupt handler.
  4099. */
  4100. static void smb_intr_handler(struct adapter *adap)
  4101. {
  4102. static const struct intr_info smb_intr_info[] = {
  4103. { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
  4104. { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
  4105. { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
  4106. { 0 }
  4107. };
  4108. if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
  4109. t4_fatal_err(adap);
  4110. }
  4111. /*
  4112. * NC-SI interrupt handler.
  4113. */
  4114. static void ncsi_intr_handler(struct adapter *adap)
  4115. {
  4116. static const struct intr_info ncsi_intr_info[] = {
  4117. { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
  4118. { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
  4119. { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
  4120. { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
  4121. { 0 }
  4122. };
  4123. if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
  4124. t4_fatal_err(adap);
  4125. }
  4126. /*
  4127. * XGMAC interrupt handler.
  4128. */
  4129. static void xgmac_intr_handler(struct adapter *adap, int port)
  4130. {
  4131. u32 v, int_cause_reg;
  4132. if (is_t4(adap->params.chip))
  4133. int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
  4134. else
  4135. int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
  4136. v = t4_read_reg(adap, int_cause_reg);
  4137. v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
  4138. if (!v)
  4139. return;
  4140. if (v & TXFIFO_PRTY_ERR_F)
  4141. dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
  4142. port);
  4143. if (v & RXFIFO_PRTY_ERR_F)
  4144. dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
  4145. port);
  4146. t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
  4147. t4_fatal_err(adap);
  4148. }
  4149. /*
  4150. * PL interrupt handler.
  4151. */
  4152. static void pl_intr_handler(struct adapter *adap)
  4153. {
  4154. static const struct intr_info pl_intr_info[] = {
  4155. { FATALPERR_F, "T4 fatal parity error", -1, 1 },
  4156. { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
  4157. { 0 }
  4158. };
  4159. if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
  4160. t4_fatal_err(adap);
  4161. }
#define PF_INTR_MASK (PFSW_F)
#define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
		EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
		CPL_SWITCH_F | SGE_F | ULP_TX_F)

/**
 * t4_slow_intr_handler - control path interrupt handler
 * @adapter: the adapter
 *
 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
 * The designation 'slow' is because it involves register reads, while
 * data interrupts typically don't involve any MMIOs.
 *
 * Returns 0 when none of the interrupt bits this function owns
 * (GLBL_INTR_MASK) are pending, 1 after dispatching and acking them.
 */
int t4_slow_intr_handler(struct adapter *adapter)
{
	u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A);

	if (!(cause & GLBL_INTR_MASK))
		return 0;
	/* Dispatch each pending source to its per-module handler. */
	if (cause & CIM_F)
		cim_intr_handler(adapter);
	if (cause & MPS_F)
		mps_intr_handler(adapter);
	if (cause & NCSI_F)
		ncsi_intr_handler(adapter);
	if (cause & PL_F)
		pl_intr_handler(adapter);
	if (cause & SMB_F)
		smb_intr_handler(adapter);
	if (cause & XGMAC0_F)
		xgmac_intr_handler(adapter, 0);
	if (cause & XGMAC1_F)
		xgmac_intr_handler(adapter, 1);
	if (cause & XGMAC_KR0_F)
		xgmac_intr_handler(adapter, 2);
	if (cause & XGMAC_KR1_F)
		xgmac_intr_handler(adapter, 3);
	if (cause & PCIE_F)
		pcie_intr_handler(adapter);
	if (cause & MC_F)
		mem_intr_handler(adapter, MEM_MC);
	/* Only T5 has a second memory controller interrupt bit. */
	if (is_t5(adapter->params.chip) && (cause & MC1_F))
		mem_intr_handler(adapter, MEM_MC1);
	if (cause & EDC0_F)
		mem_intr_handler(adapter, MEM_EDC0);
	if (cause & EDC1_F)
		mem_intr_handler(adapter, MEM_EDC1);
	if (cause & LE_F)
		le_intr_handler(adapter);
	if (cause & TP_F)
		tp_intr_handler(adapter);
	if (cause & MA_F)
		ma_intr_handler(adapter);
	if (cause & PM_TX_F)
		pmtx_intr_handler(adapter);
	if (cause & PM_RX_F)
		pmrx_intr_handler(adapter);
	if (cause & ULP_RX_F)
		ulprx_intr_handler(adapter);
	if (cause & CPL_SWITCH_F)
		cplsw_intr_handler(adapter);
	if (cause & SGE_F)
		sge_intr_handler(adapter);
	if (cause & ULP_TX_F)
		ulptx_intr_handler(adapter);

	/* Clear the interrupts just processed for which we are the master. */
	t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK);
	(void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
	return 1;
}
/**
 * t4_intr_enable - enable interrupts
 * @adapter: the adapter whose interrupts should be enabled
 *
 * Enable PF-specific interrupts for the calling function and the top-level
 * interrupt concentrator for global interrupts. Interrupts are already
 * enabled at each module, here we just enable the roots of the interrupt
 * hierarchies.
 *
 * Note: this function should be called only when the driver manages
 * non PF-specific interrupts from the various HW modules. Only one PCI
 * function at a time should be doing this.
 */
void t4_intr_enable(struct adapter *adapter)
{
	u32 val = 0;
	u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
	/* The PF field of PL_WHOAMI moved on T6. */
	u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
			SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);

	/* These SGE error sources only exist on T4/T5. */
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
		val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
	t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
		     ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
		     ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
		     ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
		     ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
		     ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
		     DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
	/* Route global interrupts to this PF in the top-level map. */
	t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
}
  4261. /**
  4262. * t4_intr_disable - disable interrupts
  4263. * @adapter: the adapter whose interrupts should be disabled
  4264. *
  4265. * Disable interrupts. We only disable the top-level interrupt
  4266. * concentrators. The caller must be a PCI function managing global
  4267. * interrupts.
  4268. */
  4269. void t4_intr_disable(struct adapter *adapter)
  4270. {
  4271. u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
  4272. u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
  4273. SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
  4274. t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
  4275. t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
  4276. }
/**
 * t4_config_rss_range - configure a portion of the RSS mapping table
 * @adapter: the adapter
 * @mbox: mbox to use for the FW command
 * @viid: virtual interface whose RSS subtable is to be written
 * @start: start entry in the table to write
 * @n: how many table entries to write
 * @rspq: values for the response queue lookup table
 * @nrspq: number of values in @rspq
 *
 * Programs the selected part of the VI's RSS mapping table with the
 * provided values. If @nrspq < @n the supplied values are used repeatedly
 * until the full table range is populated.
 *
 * The caller must ensure the values in @rspq are in the range allowed for
 * @viid.
 *
 * Returns 0 on success or the t4_wr_mbox() error for the first command
 * that fails.
 */
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
			int start, int n, const u16 *rspq, unsigned int nrspq)
{
	int ret;
	/* rsp walks @rspq and wraps at rsp_end so the values repeat. */
	const u16 *rsp = rspq;
	const u16 *rsp_end = rspq + nrspq;
	struct fw_rss_ind_tbl_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
			       FW_RSS_IND_TBL_CMD_VIID_V(viid));
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
	while (n > 0) {
		int nq = min(n, 32);
		__be32 *qp = &cmd.iq0_to_iq2;

		cmd.niqid = cpu_to_be16(nq);
		cmd.startidx = cpu_to_be16(start);

		start += nq;
		n -= nq;

		/* Pack three 10-bit queue ids per 32-bit command word.
		 * nq may go negative here when it is not a multiple of 3;
		 * the extra slots are filled with wrapped @rspq values and
		 * ignored by the firmware because niqid bounds the count.
		 */
		while (nq > 0) {
			unsigned int v;

			v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;
			v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;
			v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
			if (++rsp >= rsp_end)
				rsp = rspq;

			*qp++ = cpu_to_be32(v);
			nq -= 3;
		}

		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
		if (ret)
			return ret;
	}
	return 0;
}
  4334. /**
  4335. * t4_config_glbl_rss - configure the global RSS mode
  4336. * @adapter: the adapter
  4337. * @mbox: mbox to use for the FW command
  4338. * @mode: global RSS mode
  4339. * @flags: mode-specific flags
  4340. *
  4341. * Sets the global RSS mode.
  4342. */
  4343. int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
  4344. unsigned int flags)
  4345. {
  4346. struct fw_rss_glb_config_cmd c;
  4347. memset(&c, 0, sizeof(c));
  4348. c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
  4349. FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
  4350. c.retval_len16 = cpu_to_be32(FW_LEN16(c));
  4351. if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
  4352. c.u.manual.mode_pkd =
  4353. cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
  4354. } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
  4355. c.u.basicvirtual.mode_pkd =
  4356. cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
  4357. c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
  4358. } else
  4359. return -EINVAL;
  4360. return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
  4361. }
  4362. /**
  4363. * t4_config_vi_rss - configure per VI RSS settings
  4364. * @adapter: the adapter
  4365. * @mbox: mbox to use for the FW command
  4366. * @viid: the VI id
  4367. * @flags: RSS flags
  4368. * @defq: id of the default RSS queue for the VI.
  4369. *
  4370. * Configures VI-specific RSS properties.
  4371. */
  4372. int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
  4373. unsigned int flags, unsigned int defq)
  4374. {
  4375. struct fw_rss_vi_config_cmd c;
  4376. memset(&c, 0, sizeof(c));
  4377. c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
  4378. FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
  4379. FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
  4380. c.retval_len16 = cpu_to_be32(FW_LEN16(c));
  4381. c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
  4382. FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
  4383. return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
  4384. }
  4385. /* Read an RSS table row */
  4386. static int rd_rss_row(struct adapter *adap, int row, u32 *val)
  4387. {
  4388. t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
  4389. return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
  4390. 5, 0, val);
  4391. }
  4392. /**
  4393. * t4_read_rss - read the contents of the RSS mapping table
  4394. * @adapter: the adapter
  4395. * @map: holds the contents of the RSS mapping table
  4396. *
  4397. * Reads the contents of the RSS hash->queue mapping table.
  4398. */
  4399. int t4_read_rss(struct adapter *adapter, u16 *map)
  4400. {
  4401. u32 val;
  4402. int i, ret;
  4403. for (i = 0; i < RSS_NENTRIES / 2; ++i) {
  4404. ret = rd_rss_row(adapter, i, &val);
  4405. if (ret)
  4406. return ret;
  4407. *map++ = LKPTBLQUEUE0_G(val);
  4408. *map++ = LKPTBLQUEUE1_G(val);
  4409. }
  4410. return 0;
  4411. }
  4412. static unsigned int t4_use_ldst(struct adapter *adap)
  4413. {
  4414. return (adap->flags & FW_OK) || !adap->use_bd;
  4415. }
  4416. /**
  4417. * t4_fw_tp_pio_rw - Access TP PIO through LDST
  4418. * @adap: the adapter
  4419. * @vals: where the indirect register values are stored/written
  4420. * @nregs: how many indirect registers to read/write
  4421. * @start_idx: index of first indirect register to read/write
  4422. * @rw: Read (1) or Write (0)
  4423. *
  4424. * Access TP PIO registers through LDST
  4425. */
  4426. static void t4_fw_tp_pio_rw(struct adapter *adap, u32 *vals, unsigned int nregs,
  4427. unsigned int start_index, unsigned int rw)
  4428. {
  4429. int ret, i;
  4430. int cmd = FW_LDST_ADDRSPC_TP_PIO;
  4431. struct fw_ldst_cmd c;
  4432. for (i = 0 ; i < nregs; i++) {
  4433. memset(&c, 0, sizeof(c));
  4434. c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
  4435. FW_CMD_REQUEST_F |
  4436. (rw ? FW_CMD_READ_F :
  4437. FW_CMD_WRITE_F) |
  4438. FW_LDST_CMD_ADDRSPACE_V(cmd));
  4439. c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
  4440. c.u.addrval.addr = cpu_to_be32(start_index + i);
  4441. c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
  4442. ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
  4443. if (!ret && rw)
  4444. vals[i] = be32_to_cpu(c.u.addrval.val);
  4445. }
  4446. }
  4447. /**
  4448. * t4_read_rss_key - read the global RSS key
  4449. * @adap: the adapter
  4450. * @key: 10-entry array holding the 320-bit RSS key
  4451. *
  4452. * Reads the global 320-bit RSS key.
  4453. */
  4454. void t4_read_rss_key(struct adapter *adap, u32 *key)
  4455. {
  4456. if (t4_use_ldst(adap))
  4457. t4_fw_tp_pio_rw(adap, key, 10, TP_RSS_SECRET_KEY0_A, 1);
  4458. else
  4459. t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
  4460. TP_RSS_SECRET_KEY0_A);
  4461. }
/**
 * t4_write_rss_key - program one of the RSS keys
 * @adap: the adapter
 * @key: 10-entry array holding the 320-bit RSS key
 * @idx: which RSS key to write
 *
 * Writes one of the RSS keys with the given 320-bit value. If @idx is
 * 0..15 the corresponding entry in the RSS key table is written,
 * otherwise the global RSS key is written.
 */
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
{
	u8 rss_key_addr_cnt = 16;
	u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);

	/* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
	 * allows access to key addresses 16-63 by using KeyWrAddrX
	 * as index[5:4](upper 2) into key table
	 */
	if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
	    (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
		rss_key_addr_cnt = 32;

	/* Write the key material itself ... */
	if (t4_use_ldst(adap))
		t4_fw_tp_pio_rw(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, 0);
	else
		t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
				  TP_RSS_SECRET_KEY0_A);

	/* ... then latch it into key-table slot @idx when one was named. */
	if (idx >= 0 && idx < rss_key_addr_cnt) {
		if (rss_key_addr_cnt > 16)
			/* Extended table: split @idx across KeyWrAddrX
			 * (upper bits) and the T6 VF write address field.
			 */
			t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
				     KEYWRADDRX_V(idx >> 4) |
				     T6_VFWRADDR_V(idx) | KEYWREN_F);
		else
			t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
				     KEYWRADDR_V(idx) | KEYWREN_F);
	}
}
  4498. /**
  4499. * t4_read_rss_pf_config - read PF RSS Configuration Table
  4500. * @adapter: the adapter
  4501. * @index: the entry in the PF RSS table to read
  4502. * @valp: where to store the returned value
  4503. *
  4504. * Reads the PF RSS Configuration Table at the specified index and returns
  4505. * the value found there.
  4506. */
  4507. void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
  4508. u32 *valp)
  4509. {
  4510. if (t4_use_ldst(adapter))
  4511. t4_fw_tp_pio_rw(adapter, valp, 1,
  4512. TP_RSS_PF0_CONFIG_A + index, 1);
  4513. else
  4514. t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
  4515. valp, 1, TP_RSS_PF0_CONFIG_A + index);
  4516. }
/**
 * t4_read_rss_vf_config - read VF RSS Configuration Table
 * @adapter: the adapter
 * @index: the entry in the VF RSS table to read
 * @vfl: where to store the returned VFL
 * @vfh: where to store the returned VFH
 *
 * Reads the VF RSS Configuration Table at the specified index and returns
 * the (VFL, VFH) values found there.
 */
void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
			   u32 *vfl, u32 *vfh)
{
	u32 vrt, mask, data;

	/* The VF write-address field moved/widened on T6. */
	if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
		mask = VFWRADDR_V(VFWRADDR_M);
		data = VFWRADDR_V(index);
	} else {
		mask = T6_VFWRADDR_V(T6_VFWRADDR_M);
		data = T6_VFWRADDR_V(index);
	}

	/* Request that the index'th VF Table values be read into VFL/VFH.
	 */
	vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
	vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
	vrt |= data | VFRDEN_F;
	t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);

	/* Grab the VFL/VFH values ...
	 */
	if (t4_use_ldst(adapter)) {
		t4_fw_tp_pio_rw(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, 1);
		t4_fw_tp_pio_rw(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, 1);
	} else {
		t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
				 vfl, 1, TP_RSS_VFL_CONFIG_A);
		t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
				 vfh, 1, TP_RSS_VFH_CONFIG_A);
	}
}
  4556. /**
  4557. * t4_read_rss_pf_map - read PF RSS Map
  4558. * @adapter: the adapter
  4559. *
  4560. * Reads the PF RSS Map register and returns its value.
  4561. */
  4562. u32 t4_read_rss_pf_map(struct adapter *adapter)
  4563. {
  4564. u32 pfmap;
  4565. if (t4_use_ldst(adapter))
  4566. t4_fw_tp_pio_rw(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, 1);
  4567. else
  4568. t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
  4569. &pfmap, 1, TP_RSS_PF_MAP_A);
  4570. return pfmap;
  4571. }
  4572. /**
  4573. * t4_read_rss_pf_mask - read PF RSS Mask
  4574. * @adapter: the adapter
  4575. *
  4576. * Reads the PF RSS Mask register and returns its value.
  4577. */
  4578. u32 t4_read_rss_pf_mask(struct adapter *adapter)
  4579. {
  4580. u32 pfmask;
  4581. if (t4_use_ldst(adapter))
  4582. t4_fw_tp_pio_rw(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, 1);
  4583. else
  4584. t4_read_indirect(adapter, TP_PIO_ADDR_A, TP_PIO_DATA_A,
  4585. &pfmask, 1, TP_RSS_PF_MSK_A);
  4586. return pfmask;
  4587. }
/**
 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
 * @adap: the adapter
 * @v4: holds the TCP/IP counter values
 * @v6: holds the TCP/IPv6 counter values
 *
 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
 */
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	/* Buffer spanning the contiguous MIB range OUT_RST..RXT_SEG_LO. */
	u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];

	/* Map a MIB register name to its offset within val[] (relative to
	 * the base register the indirect read started at).
	 */
#define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
#define STAT(x)     val[STAT_IDX(x)]
	/* Combine the HI/LO register pair into one 64-bit counter. */
#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))

	if (v4) {
		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
				 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A);
		v4->tcp_out_rsts = STAT(OUT_RST);
		v4->tcp_in_segs  = STAT64(IN_SEG);
		v4->tcp_out_segs = STAT64(OUT_SEG);
		v4->tcp_retrans_segs = STAT64(RXT_SEG);
	}
	if (v6) {
		/* The IPv6 block has the same layout starting at V6OUT_RST,
		 * so the same STAT()/STAT64() indices apply.
		 */
		t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
				 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A);
		v6->tcp_out_rsts = STAT(OUT_RST);
		v6->tcp_in_segs  = STAT64(IN_SEG);
		v6->tcp_out_segs = STAT64(OUT_SEG);
		v6->tcp_retrans_segs = STAT64(RXT_SEG);
	}
#undef STAT64
#undef STAT
#undef STAT_IDX
}
  4624. /**
  4625. * t4_tp_get_err_stats - read TP's error MIB counters
  4626. * @adap: the adapter
  4627. * @st: holds the counter values
  4628. *
  4629. * Returns the values of TP's error counters.
  4630. */
  4631. void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
  4632. {
  4633. int nchan = adap->params.arch.nchan;
  4634. t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
  4635. st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A);
  4636. t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
  4637. st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A);
  4638. t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
  4639. st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A);
  4640. t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
  4641. st->tnl_cong_drops, nchan, TP_MIB_TNL_CNG_DROP_0_A);
  4642. t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
  4643. st->ofld_chan_drops, nchan, TP_MIB_OFD_CHN_DROP_0_A);
  4644. t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
  4645. st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A);
  4646. t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
  4647. st->ofld_vlan_drops, nchan, TP_MIB_OFD_VLN_DROP_0_A);
  4648. t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
  4649. st->tcp6_in_errs, nchan, TP_MIB_TCP_V6IN_ERR_0_A);
  4650. t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
  4651. &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A);
  4652. }
  4653. /**
  4654. * t4_tp_get_cpl_stats - read TP's CPL MIB counters
  4655. * @adap: the adapter
  4656. * @st: holds the counter values
  4657. *
  4658. * Returns the values of TP's CPL counters.
  4659. */
  4660. void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
  4661. {
  4662. int nchan = adap->params.arch.nchan;
  4663. t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
  4664. nchan, TP_MIB_CPL_IN_REQ_0_A);
  4665. t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
  4666. nchan, TP_MIB_CPL_OUT_RSP_0_A);
  4667. }
  4668. /**
  4669. * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
  4670. * @adap: the adapter
  4671. * @st: holds the counter values
  4672. *
  4673. * Returns the values of TP's RDMA counters.
  4674. */
  4675. void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
  4676. {
  4677. t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->rqe_dfr_pkt,
  4678. 2, TP_MIB_RQE_DFR_PKT_A);
  4679. }
  4680. /**
  4681. * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
  4682. * @adap: the adapter
  4683. * @idx: the port index
  4684. * @st: holds the counter values
  4685. *
  4686. * Returns the values of TP's FCoE counters for the selected port.
  4687. */
  4688. void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
  4689. struct tp_fcoe_stats *st)
  4690. {
  4691. u32 val[2];
  4692. t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_ddp,
  4693. 1, TP_MIB_FCOE_DDP_0_A + idx);
  4694. t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_drop,
  4695. 1, TP_MIB_FCOE_DROP_0_A + idx);
  4696. t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
  4697. 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx);
  4698. st->octets_ddp = ((u64)val[0] << 32) | val[1];
  4699. }
  4700. /**
  4701. * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
  4702. * @adap: the adapter
  4703. * @st: holds the counter values
  4704. *
  4705. * Returns the values of TP's counters for non-TCP directly-placed packets.
  4706. */
  4707. void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
  4708. {
  4709. u32 val[4];
  4710. t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, 4,
  4711. TP_MIB_USM_PKTS_A);
  4712. st->frames = val[0];
  4713. st->drops = val[1];
  4714. st->octets = ((u64)val[2] << 32) | val[3];
  4715. }
  4716. /**
  4717. * t4_read_mtu_tbl - returns the values in the HW path MTU table
  4718. * @adap: the adapter
  4719. * @mtus: where to store the MTU values
  4720. * @mtu_log: where to store the MTU base-2 log (may be %NULL)
  4721. *
  4722. * Reads the HW path MTU table.
  4723. */
  4724. void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
  4725. {
  4726. u32 v;
  4727. int i;
  4728. for (i = 0; i < NMTUS; ++i) {
  4729. t4_write_reg(adap, TP_MTU_TABLE_A,
  4730. MTUINDEX_V(0xff) | MTUVALUE_V(i));
  4731. v = t4_read_reg(adap, TP_MTU_TABLE_A);
  4732. mtus[i] = MTUVALUE_G(v);
  4733. if (mtu_log)
  4734. mtu_log[i] = MTUWIDTH_G(v);
  4735. }
  4736. }
  4737. /**
  4738. * t4_read_cong_tbl - reads the congestion control table
  4739. * @adap: the adapter
  4740. * @incr: where to store the alpha values
  4741. *
  4742. * Reads the additive increments programmed into the HW congestion
  4743. * control table.
  4744. */
  4745. void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
  4746. {
  4747. unsigned int mtu, w;
  4748. for (mtu = 0; mtu < NMTUS; ++mtu)
  4749. for (w = 0; w < NCCTRL_WIN; ++w) {
  4750. t4_write_reg(adap, TP_CCTRL_TABLE_A,
  4751. ROWINDEX_V(0xffff) | (mtu << 5) | w);
  4752. incr[mtu][w] = (u16)t4_read_reg(adap,
  4753. TP_CCTRL_TABLE_A) & 0x1fff;
  4754. }
  4755. }
  4756. /**
  4757. * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
  4758. * @adap: the adapter
  4759. * @addr: the indirect TP register address
  4760. * @mask: specifies the field within the register to modify
  4761. * @val: new value for the field
  4762. *
  4763. * Sets a field of an indirect TP register to the given value.
  4764. */
  4765. void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
  4766. unsigned int mask, unsigned int val)
  4767. {
  4768. t4_write_reg(adap, TP_PIO_ADDR_A, addr);
  4769. val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
  4770. t4_write_reg(adap, TP_PIO_DATA_A, val);
  4771. }
  4772. /**
  4773. * init_cong_ctrl - initialize congestion control parameters
  4774. * @a: the alpha values for congestion control
  4775. * @b: the beta values for congestion control
  4776. *
  4777. * Initialize the congestion control parameters.
  4778. */
  4779. static void init_cong_ctrl(unsigned short *a, unsigned short *b)
  4780. {
  4781. a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
  4782. a[9] = 2;
  4783. a[10] = 3;
  4784. a[11] = 4;
  4785. a[12] = 5;
  4786. a[13] = 6;
  4787. a[14] = 7;
  4788. a[15] = 8;
  4789. a[16] = 9;
  4790. a[17] = 10;
  4791. a[18] = 14;
  4792. a[19] = 17;
  4793. a[20] = 21;
  4794. a[21] = 25;
  4795. a[22] = 30;
  4796. a[23] = 35;
  4797. a[24] = 45;
  4798. a[25] = 60;
  4799. a[26] = 80;
  4800. a[27] = 100;
  4801. a[28] = 200;
  4802. a[29] = 300;
  4803. a[30] = 400;
  4804. a[31] = 500;
  4805. b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
  4806. b[9] = b[10] = 1;
  4807. b[11] = b[12] = 2;
  4808. b[13] = b[14] = b[15] = b[16] = 3;
  4809. b[17] = b[18] = b[19] = b[20] = b[21] = 4;
  4810. b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
  4811. b[28] = b[29] = 6;
  4812. b[30] = b[31] = 7;
  4813. }
/* The minimum additive increment value for the congestion control table */
#define CC_MIN_INCR 2U

/**
 *	t4_load_mtus - write the MTU and congestion control HW tables
 *	@adap: the adapter
 *	@mtus: the values for the MTU table
 *	@alpha: the values for the congestion control alpha parameter
 *	@beta: the values for the congestion control beta parameter
 *
 *	Write the HW MTU table with the supplied MTUs and the high-speed
 *	congestion control table with the supplied alpha, beta, and MTUs.
 *	We write the two tables together because the additive increments
 *	depend on the MTUs.
 */
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
		  const unsigned short *alpha, const unsigned short *beta)
{
	/* Per-window scaling divisors used to derive the additive
	 * increment from (mtu - 40) * alpha below.
	 */
	static const unsigned int avg_pkts[NCCTRL_WIN] = {
		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
		28672, 40960, 57344, 81920, 114688, 163840, 229376
	};

	unsigned int i, w;

	for (i = 0; i < NMTUS; ++i) {
		unsigned int mtu = mtus[i];
		/* fls() yields the 1-based MSB position of mtu */
		unsigned int log2 = fls(mtu);

		/* Round log2 to nearest: keep it only if the bit two below
		 * the MSB is set (i.e. mtu >= 1.5 * 2^(log2-1)), otherwise
		 * step down one.
		 */
		if (!(mtu & ((1 << log2) >> 2)))     /* round */
			log2--;
		t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
			     MTUWIDTH_V(log2) | MTUVALUE_V(mtu));

		for (w = 0; w < NCCTRL_WIN; ++w) {
			unsigned int inc;

			/* (mtu - 40) presumably approximates the MSS
			 * (40 bytes of TCP/IP headers); clamp the result
			 * at the minimum allowed increment.
			 */
			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
				  CC_MIN_INCR);

			/* Pack mtu index, window, beta and increment into
			 * one congestion control table entry.
			 */
			t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
				     (w << 16) | (beta[w] << 13) | inc);
		}
	}
}
  4853. /* Calculates a rate in bytes/s given the number of 256-byte units per 4K core
  4854. * clocks. The formula is
  4855. *
  4856. * bytes/s = bytes256 * 256 * ClkFreq / 4096
  4857. *
  4858. * which is equivalent to
  4859. *
  4860. * bytes/s = 62.5 * bytes256 * ClkFreq_ms
  4861. */
  4862. static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
  4863. {
  4864. u64 v = bytes256 * adap->params.vpd.cclk;
  4865. return v * 62 + v / 2;
  4866. }
  4867. /**
  4868. * t4_get_chan_txrate - get the current per channel Tx rates
  4869. * @adap: the adapter
  4870. * @nic_rate: rates for NIC traffic
  4871. * @ofld_rate: rates for offloaded traffic
  4872. *
  4873. * Return the current Tx rates in bytes/s for NIC and offloaded traffic
  4874. * for each channel.
  4875. */
  4876. void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
  4877. {
  4878. u32 v;
  4879. v = t4_read_reg(adap, TP_TX_TRATE_A);
  4880. nic_rate[0] = chan_rate(adap, TNLRATE0_G(v));
  4881. nic_rate[1] = chan_rate(adap, TNLRATE1_G(v));
  4882. if (adap->params.arch.nchan == NCHAN) {
  4883. nic_rate[2] = chan_rate(adap, TNLRATE2_G(v));
  4884. nic_rate[3] = chan_rate(adap, TNLRATE3_G(v));
  4885. }
  4886. v = t4_read_reg(adap, TP_TX_ORATE_A);
  4887. ofld_rate[0] = chan_rate(adap, OFDRATE0_G(v));
  4888. ofld_rate[1] = chan_rate(adap, OFDRATE1_G(v));
  4889. if (adap->params.arch.nchan == NCHAN) {
  4890. ofld_rate[2] = chan_rate(adap, OFDRATE2_G(v));
  4891. ofld_rate[3] = chan_rate(adap, OFDRATE3_G(v));
  4892. }
  4893. }
  4894. /**
  4895. * t4_set_trace_filter - configure one of the tracing filters
  4896. * @adap: the adapter
  4897. * @tp: the desired trace filter parameters
  4898. * @idx: which filter to configure
  4899. * @enable: whether to enable or disable the filter
  4900. *
  4901. * Configures one of the tracing filters available in HW. If @enable is
  4902. * %0 @tp is not examined and may be %NULL. The user is responsible to
  4903. * set the single/multiple trace mode by writing to MPS_TRC_CFG_A register
  4904. */
  4905. int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
  4906. int idx, int enable)
  4907. {
  4908. int i, ofst = idx * 4;
  4909. u32 data_reg, mask_reg, cfg;
  4910. u32 multitrc = TRCMULTIFILTER_F;
  4911. if (!enable) {
  4912. t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
  4913. return 0;
  4914. }
  4915. cfg = t4_read_reg(adap, MPS_TRC_CFG_A);
  4916. if (cfg & TRCMULTIFILTER_F) {
  4917. /* If multiple tracers are enabled, then maximum
  4918. * capture size is 2.5KB (FIFO size of a single channel)
  4919. * minus 2 flits for CPL_TRACE_PKT header.
  4920. */
  4921. if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
  4922. return -EINVAL;
  4923. } else {
  4924. /* If multiple tracers are disabled, to avoid deadlocks
  4925. * maximum packet capture size of 9600 bytes is recommended.
  4926. * Also in this mode, only trace0 can be enabled and running.
  4927. */
  4928. multitrc = 0;
  4929. if (tp->snap_len > 9600 || idx)
  4930. return -EINVAL;
  4931. }
  4932. if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
  4933. tp->skip_len > TFLENGTH_M || tp->skip_ofst > TFOFFSET_M ||
  4934. tp->min_len > TFMINPKTSIZE_M)
  4935. return -EINVAL;
  4936. /* stop the tracer we'll be changing */
  4937. t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
  4938. idx *= (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A);
  4939. data_reg = MPS_TRC_FILTER0_MATCH_A + idx;
  4940. mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + idx;
  4941. for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
  4942. t4_write_reg(adap, data_reg, tp->data[i]);
  4943. t4_write_reg(adap, mask_reg, ~tp->mask[i]);
  4944. }
  4945. t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst,
  4946. TFCAPTUREMAX_V(tp->snap_len) |
  4947. TFMINPKTSIZE_V(tp->min_len));
  4948. t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst,
  4949. TFOFFSET_V(tp->skip_ofst) | TFLENGTH_V(tp->skip_len) |
  4950. (is_t4(adap->params.chip) ?
  4951. TFPORT_V(tp->port) | TFEN_F | TFINVERTMATCH_V(tp->invert) :
  4952. T5_TFPORT_V(tp->port) | T5_TFEN_F |
  4953. T5_TFINVERTMATCH_V(tp->invert)));
  4954. return 0;
  4955. }
  4956. /**
  4957. * t4_get_trace_filter - query one of the tracing filters
  4958. * @adap: the adapter
  4959. * @tp: the current trace filter parameters
  4960. * @idx: which trace filter to query
  4961. * @enabled: non-zero if the filter is enabled
  4962. *
  4963. * Returns the current settings of one of the HW tracing filters.
  4964. */
  4965. void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
  4966. int *enabled)
  4967. {
  4968. u32 ctla, ctlb;
  4969. int i, ofst = idx * 4;
  4970. u32 data_reg, mask_reg;
  4971. ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst);
  4972. ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst);
  4973. if (is_t4(adap->params.chip)) {
  4974. *enabled = !!(ctla & TFEN_F);
  4975. tp->port = TFPORT_G(ctla);
  4976. tp->invert = !!(ctla & TFINVERTMATCH_F);
  4977. } else {
  4978. *enabled = !!(ctla & T5_TFEN_F);
  4979. tp->port = T5_TFPORT_G(ctla);
  4980. tp->invert = !!(ctla & T5_TFINVERTMATCH_F);
  4981. }
  4982. tp->snap_len = TFCAPTUREMAX_G(ctlb);
  4983. tp->min_len = TFMINPKTSIZE_G(ctlb);
  4984. tp->skip_ofst = TFOFFSET_G(ctla);
  4985. tp->skip_len = TFLENGTH_G(ctla);
  4986. ofst = (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A) * idx;
  4987. data_reg = MPS_TRC_FILTER0_MATCH_A + ofst;
  4988. mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + ofst;
  4989. for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
  4990. tp->mask[i] = ~t4_read_reg(adap, mask_reg);
  4991. tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
  4992. }
  4993. }
  4994. /**
  4995. * t4_pmtx_get_stats - returns the HW stats from PMTX
  4996. * @adap: the adapter
  4997. * @cnt: where to store the count statistics
  4998. * @cycles: where to store the cycle statistics
  4999. *
  5000. * Returns performance statistics from PMTX.
  5001. */
  5002. void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
  5003. {
  5004. int i;
  5005. u32 data[2];
  5006. for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
  5007. t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
  5008. cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
  5009. if (is_t4(adap->params.chip)) {
  5010. cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
  5011. } else {
  5012. t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
  5013. PM_TX_DBG_DATA_A, data, 2,
  5014. PM_TX_DBG_STAT_MSB_A);
  5015. cycles[i] = (((u64)data[0] << 32) | data[1]);
  5016. }
  5017. }
  5018. }
  5019. /**
  5020. * t4_pmrx_get_stats - returns the HW stats from PMRX
  5021. * @adap: the adapter
  5022. * @cnt: where to store the count statistics
  5023. * @cycles: where to store the cycle statistics
  5024. *
  5025. * Returns performance statistics from PMRX.
  5026. */
  5027. void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
  5028. {
  5029. int i;
  5030. u32 data[2];
  5031. for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
  5032. t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
  5033. cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
  5034. if (is_t4(adap->params.chip)) {
  5035. cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
  5036. } else {
  5037. t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
  5038. PM_RX_DBG_DATA_A, data, 2,
  5039. PM_RX_DBG_STAT_MSB_A);
  5040. cycles[i] = (((u64)data[0] << 32) | data[1]);
  5041. }
  5042. }
  5043. }
  5044. /**
  5045. * t4_get_mps_bg_map - return the buffer groups associated with a port
  5046. * @adap: the adapter
  5047. * @idx: the port index
  5048. *
  5049. * Returns a bitmap indicating which MPS buffer groups are associated
  5050. * with the given port. Bit i is set if buffer group i is used by the
  5051. * port.
  5052. */
  5053. unsigned int t4_get_mps_bg_map(struct adapter *adap, int idx)
  5054. {
  5055. u32 n = NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
  5056. if (n == 0)
  5057. return idx == 0 ? 0xf : 0;
  5058. /* In T6 (which is a 2 port card),
  5059. * port 0 is mapped to channel 0 and port 1 is mapped to channel 1.
  5060. * For 2 port T4/T5 adapter,
  5061. * port 0 is mapped to channel 0 and 1,
  5062. * port 1 is mapped to channel 2 and 3.
  5063. */
  5064. if ((n == 1) &&
  5065. (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
  5066. return idx < 2 ? (3 << (2 * idx)) : 0;
  5067. return 1 << idx;
  5068. }
  5069. /**
  5070. * t4_get_port_type_description - return Port Type string description
  5071. * @port_type: firmware Port Type enumeration
  5072. */
  5073. const char *t4_get_port_type_description(enum fw_port_type port_type)
  5074. {
  5075. static const char *const port_type_description[] = {
  5076. "Fiber_XFI",
  5077. "Fiber_XAUI",
  5078. "BT_SGMII",
  5079. "BT_XFI",
  5080. "BT_XAUI",
  5081. "KX4",
  5082. "CX4",
  5083. "KX",
  5084. "KR",
  5085. "SFP",
  5086. "BP_AP",
  5087. "BP4_AP",
  5088. "QSFP_10G",
  5089. "QSA",
  5090. "QSFP",
  5091. "BP40_BA",
  5092. "KR4_100G",
  5093. "CR4_QSFP",
  5094. "CR_QSFP",
  5095. "CR2_QSFP",
  5096. "SFP28",
  5097. "KR_SFP28",
  5098. };
  5099. if (port_type < ARRAY_SIZE(port_type_description))
  5100. return port_type_description[port_type];
  5101. return "UNKNOWN";
  5102. }
  5103. /**
  5104. * t4_get_port_stats_offset - collect port stats relative to a previous
  5105. * snapshot
  5106. * @adap: The adapter
  5107. * @idx: The port
  5108. * @stats: Current stats to fill
  5109. * @offset: Previous stats snapshot
  5110. */
  5111. void t4_get_port_stats_offset(struct adapter *adap, int idx,
  5112. struct port_stats *stats,
  5113. struct port_stats *offset)
  5114. {
  5115. u64 *s, *o;
  5116. int i;
  5117. t4_get_port_stats(adap, idx, stats);
  5118. for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
  5119. i < (sizeof(struct port_stats) / sizeof(u64));
  5120. i++, s++, o++)
  5121. *s -= *o;
  5122. }
  5123. /**
  5124. * t4_get_port_stats - collect port statistics
  5125. * @adap: the adapter
  5126. * @idx: the port index
  5127. * @p: the stats structure to fill
  5128. *
  5129. * Collect statistics related to the given port from HW.
  5130. */
  5131. void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
  5132. {
  5133. u32 bgmap = t4_get_mps_bg_map(adap, idx);
  5134. u32 stat_ctl = t4_read_reg(adap, MPS_STAT_CTL_A);
  5135. #define GET_STAT(name) \
  5136. t4_read_reg64(adap, \
  5137. (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
  5138. T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
  5139. #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
  5140. p->tx_octets = GET_STAT(TX_PORT_BYTES);
  5141. p->tx_frames = GET_STAT(TX_PORT_FRAMES);
  5142. p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
  5143. p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
  5144. p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
  5145. p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
  5146. p->tx_frames_64 = GET_STAT(TX_PORT_64B);
  5147. p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
  5148. p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
  5149. p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
  5150. p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
  5151. p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
  5152. p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
  5153. p->tx_drop = GET_STAT(TX_PORT_DROP);
  5154. p->tx_pause = GET_STAT(TX_PORT_PAUSE);
  5155. p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
  5156. p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
  5157. p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
  5158. p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
  5159. p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
  5160. p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
  5161. p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
  5162. p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
  5163. if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
  5164. if (stat_ctl & COUNTPAUSESTATTX_F) {
  5165. p->tx_frames -= p->tx_pause;
  5166. p->tx_octets -= p->tx_pause * 64;
  5167. }
  5168. if (stat_ctl & COUNTPAUSEMCTX_F)
  5169. p->tx_mcast_frames -= p->tx_pause;
  5170. }
  5171. p->rx_octets = GET_STAT(RX_PORT_BYTES);
  5172. p->rx_frames = GET_STAT(RX_PORT_FRAMES);
  5173. p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
  5174. p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
  5175. p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
  5176. p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
  5177. p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
  5178. p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
  5179. p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
  5180. p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
  5181. p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
  5182. p->rx_frames_64 = GET_STAT(RX_PORT_64B);
  5183. p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
  5184. p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
  5185. p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
  5186. p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
  5187. p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
  5188. p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
  5189. p->rx_pause = GET_STAT(RX_PORT_PAUSE);
  5190. p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
  5191. p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
  5192. p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
  5193. p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
  5194. p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
  5195. p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
  5196. p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
  5197. p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
  5198. if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
  5199. if (stat_ctl & COUNTPAUSESTATRX_F) {
  5200. p->rx_frames -= p->rx_pause;
  5201. p->rx_octets -= p->rx_pause * 64;
  5202. }
  5203. if (stat_ctl & COUNTPAUSEMCRX_F)
  5204. p->rx_mcast_frames -= p->rx_pause;
  5205. }
  5206. p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
  5207. p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
  5208. p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
  5209. p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
  5210. p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
  5211. p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
  5212. p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
  5213. p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
  5214. #undef GET_STAT
  5215. #undef GET_STAT_COM
  5216. }
  5217. /**
  5218. * t4_get_lb_stats - collect loopback port statistics
  5219. * @adap: the adapter
  5220. * @idx: the loopback port index
  5221. * @p: the stats structure to fill
  5222. *
  5223. * Return HW statistics for the given loopback port.
  5224. */
  5225. void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
  5226. {
  5227. u32 bgmap = t4_get_mps_bg_map(adap, idx);
  5228. #define GET_STAT(name) \
  5229. t4_read_reg64(adap, \
  5230. (is_t4(adap->params.chip) ? \
  5231. PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
  5232. T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
  5233. #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
  5234. p->octets = GET_STAT(BYTES);
  5235. p->frames = GET_STAT(FRAMES);
  5236. p->bcast_frames = GET_STAT(BCAST);
  5237. p->mcast_frames = GET_STAT(MCAST);
  5238. p->ucast_frames = GET_STAT(UCAST);
  5239. p->error_frames = GET_STAT(ERROR);
  5240. p->frames_64 = GET_STAT(64B);
  5241. p->frames_65_127 = GET_STAT(65B_127B);
  5242. p->frames_128_255 = GET_STAT(128B_255B);
  5243. p->frames_256_511 = GET_STAT(256B_511B);
  5244. p->frames_512_1023 = GET_STAT(512B_1023B);
  5245. p->frames_1024_1518 = GET_STAT(1024B_1518B);
  5246. p->frames_1519_max = GET_STAT(1519B_MAX);
  5247. p->drop = GET_STAT(DROP_FRAMES);
  5248. p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
  5249. p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
  5250. p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
  5251. p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
  5252. p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
  5253. p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
  5254. p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
  5255. p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
  5256. #undef GET_STAT
  5257. #undef GET_STAT_COM
  5258. }
/* t4_mk_filtdelwr - create a delete filter WR
 * @ftid: the filter ID
 * @wr: the filter work request to populate
 * @qid: ingress queue to receive the delete notification
 *
 * Creates a filter work request to delete the supplied filter.  If @qid is
 * negative the delete notification is suppressed.
 */
void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
{
	/* Start from an all-zero work request and set only what's needed */
	memset(wr, 0, sizeof(*wr));
	wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
	wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
	/* NOREPLY is set when qid < 0, suppressing the notification */
	wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
				    FW_FILTER_WR_NOREPLY_V(qid < 0));
	wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
	if (qid >= 0)
		wr->rx_chan_rx_rpl_iq =
			cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
}
/* Initialize the common header of firmware command @var: set the opcode for
 * FW_<cmd>_CMD, mark it as a request with the given READ/WRITE flag, and
 * fill in the command length in 16-byte units.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
					FW_CMD_REQUEST_F | \
					FW_CMD_##rd_wr##_F); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
} while (0)
/**
 *	t4_fwaddrspace_write - write to a firmware address-space location
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@addr: the address to write
 *	@val: the value to write
 *
 *	Issues a FW_LDST command through the given mailbox to write @val to
 *	@addr in the FW_LDST_ADDRSPC_FIRMWARE address space.
 */
int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
			 u32 addr, u32 val)
{
	u32 ldst_addrspace;
	struct fw_ldst_cmd c;

	memset(&c, 0, sizeof(c));
	ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE);
	c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
					FW_CMD_REQUEST_F |
					FW_CMD_WRITE_F |
					ldst_addrspace);
	c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
	c.u.addrval.addr = cpu_to_be32(addr);
	c.u.addrval.val = cpu_to_be32(val);

	/* No response payload is expected, hence the NULL reply buffer */
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
  5301. /**
  5302. * t4_mdio_rd - read a PHY register through MDIO
  5303. * @adap: the adapter
  5304. * @mbox: mailbox to use for the FW command
  5305. * @phy_addr: the PHY address
  5306. * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
  5307. * @reg: the register to read
  5308. * @valp: where to store the value
  5309. *
  5310. * Issues a FW command through the given mailbox to read a PHY register.
  5311. */
  5312. int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
  5313. unsigned int mmd, unsigned int reg, u16 *valp)
  5314. {
  5315. int ret;
  5316. u32 ldst_addrspace;
  5317. struct fw_ldst_cmd c;
  5318. memset(&c, 0, sizeof(c));
  5319. ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
  5320. c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
  5321. FW_CMD_REQUEST_F | FW_CMD_READ_F |
  5322. ldst_addrspace);
  5323. c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
  5324. c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
  5325. FW_LDST_CMD_MMD_V(mmd));
  5326. c.u.mdio.raddr = cpu_to_be16(reg);
  5327. ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
  5328. if (ret == 0)
  5329. *valp = be16_to_cpu(c.u.mdio.rval);
  5330. return ret;
  5331. }
  5332. /**
  5333. * t4_mdio_wr - write a PHY register through MDIO
  5334. * @adap: the adapter
  5335. * @mbox: mailbox to use for the FW command
  5336. * @phy_addr: the PHY address
  5337. * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
  5338. * @reg: the register to write
  5339. * @valp: value to write
  5340. *
  5341. * Issues a FW command through the given mailbox to write a PHY register.
  5342. */
  5343. int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
  5344. unsigned int mmd, unsigned int reg, u16 val)
  5345. {
  5346. u32 ldst_addrspace;
  5347. struct fw_ldst_cmd c;
  5348. memset(&c, 0, sizeof(c));
  5349. ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
  5350. c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
  5351. FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
  5352. ldst_addrspace);
  5353. c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
  5354. c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
  5355. FW_LDST_CMD_MMD_V(mmd));
  5356. c.u.mdio.raddr = cpu_to_be16(reg);
  5357. c.u.mdio.rval = cpu_to_be16(val);
  5358. return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
  5359. }
  5360. /**
  5361. * t4_sge_decode_idma_state - decode the idma state
  5362. * @adap: the adapter
  5363. * @state: the state idma is stuck in
  5364. */
  5365. void t4_sge_decode_idma_state(struct adapter *adapter, int state)
  5366. {
  5367. static const char * const t4_decode[] = {
  5368. "IDMA_IDLE",
  5369. "IDMA_PUSH_MORE_CPL_FIFO",
  5370. "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
  5371. "Not used",
  5372. "IDMA_PHYSADDR_SEND_PCIEHDR",
  5373. "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
  5374. "IDMA_PHYSADDR_SEND_PAYLOAD",
  5375. "IDMA_SEND_FIFO_TO_IMSG",
  5376. "IDMA_FL_REQ_DATA_FL_PREP",
  5377. "IDMA_FL_REQ_DATA_FL",
  5378. "IDMA_FL_DROP",
  5379. "IDMA_FL_H_REQ_HEADER_FL",
  5380. "IDMA_FL_H_SEND_PCIEHDR",
  5381. "IDMA_FL_H_PUSH_CPL_FIFO",
  5382. "IDMA_FL_H_SEND_CPL",
  5383. "IDMA_FL_H_SEND_IP_HDR_FIRST",
  5384. "IDMA_FL_H_SEND_IP_HDR",
  5385. "IDMA_FL_H_REQ_NEXT_HEADER_FL",
  5386. "IDMA_FL_H_SEND_NEXT_PCIEHDR",
  5387. "IDMA_FL_H_SEND_IP_HDR_PADDING",
  5388. "IDMA_FL_D_SEND_PCIEHDR",
  5389. "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
  5390. "IDMA_FL_D_REQ_NEXT_DATA_FL",
  5391. "IDMA_FL_SEND_PCIEHDR",
  5392. "IDMA_FL_PUSH_CPL_FIFO",
  5393. "IDMA_FL_SEND_CPL",
  5394. "IDMA_FL_SEND_PAYLOAD_FIRST",
  5395. "IDMA_FL_SEND_PAYLOAD",
  5396. "IDMA_FL_REQ_NEXT_DATA_FL",
  5397. "IDMA_FL_SEND_NEXT_PCIEHDR",
  5398. "IDMA_FL_SEND_PADDING",
  5399. "IDMA_FL_SEND_COMPLETION_TO_IMSG",
  5400. "IDMA_FL_SEND_FIFO_TO_IMSG",
  5401. "IDMA_FL_REQ_DATAFL_DONE",
  5402. "IDMA_FL_REQ_HEADERFL_DONE",
  5403. };
  5404. static const char * const t5_decode[] = {
  5405. "IDMA_IDLE",
  5406. "IDMA_ALMOST_IDLE",
  5407. "IDMA_PUSH_MORE_CPL_FIFO",
  5408. "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
  5409. "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
  5410. "IDMA_PHYSADDR_SEND_PCIEHDR",
  5411. "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
  5412. "IDMA_PHYSADDR_SEND_PAYLOAD",
  5413. "IDMA_SEND_FIFO_TO_IMSG",
  5414. "IDMA_FL_REQ_DATA_FL",
  5415. "IDMA_FL_DROP",
  5416. "IDMA_FL_DROP_SEND_INC",
  5417. "IDMA_FL_H_REQ_HEADER_FL",
  5418. "IDMA_FL_H_SEND_PCIEHDR",
  5419. "IDMA_FL_H_PUSH_CPL_FIFO",
  5420. "IDMA_FL_H_SEND_CPL",
  5421. "IDMA_FL_H_SEND_IP_HDR_FIRST",
  5422. "IDMA_FL_H_SEND_IP_HDR",
  5423. "IDMA_FL_H_REQ_NEXT_HEADER_FL",
  5424. "IDMA_FL_H_SEND_NEXT_PCIEHDR",
  5425. "IDMA_FL_H_SEND_IP_HDR_PADDING",
  5426. "IDMA_FL_D_SEND_PCIEHDR",
  5427. "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
  5428. "IDMA_FL_D_REQ_NEXT_DATA_FL",
  5429. "IDMA_FL_SEND_PCIEHDR",
  5430. "IDMA_FL_PUSH_CPL_FIFO",
  5431. "IDMA_FL_SEND_CPL",
  5432. "IDMA_FL_SEND_PAYLOAD_FIRST",
  5433. "IDMA_FL_SEND_PAYLOAD",
  5434. "IDMA_FL_REQ_NEXT_DATA_FL",
  5435. "IDMA_FL_SEND_NEXT_PCIEHDR",
  5436. "IDMA_FL_SEND_PADDING",
  5437. "IDMA_FL_SEND_COMPLETION_TO_IMSG",
  5438. };
  5439. static const char * const t6_decode[] = {
  5440. "IDMA_IDLE",
  5441. "IDMA_PUSH_MORE_CPL_FIFO",
  5442. "IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
  5443. "IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
  5444. "IDMA_PHYSADDR_SEND_PCIEHDR",
  5445. "IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
  5446. "IDMA_PHYSADDR_SEND_PAYLOAD",
  5447. "IDMA_FL_REQ_DATA_FL",
  5448. "IDMA_FL_DROP",
  5449. "IDMA_FL_DROP_SEND_INC",
  5450. "IDMA_FL_H_REQ_HEADER_FL",
  5451. "IDMA_FL_H_SEND_PCIEHDR",
  5452. "IDMA_FL_H_PUSH_CPL_FIFO",
  5453. "IDMA_FL_H_SEND_CPL",
  5454. "IDMA_FL_H_SEND_IP_HDR_FIRST",
  5455. "IDMA_FL_H_SEND_IP_HDR",
  5456. "IDMA_FL_H_REQ_NEXT_HEADER_FL",
  5457. "IDMA_FL_H_SEND_NEXT_PCIEHDR",
  5458. "IDMA_FL_H_SEND_IP_HDR_PADDING",
  5459. "IDMA_FL_D_SEND_PCIEHDR",
  5460. "IDMA_FL_D_SEND_CPL_AND_IP_HDR",
  5461. "IDMA_FL_D_REQ_NEXT_DATA_FL",
  5462. "IDMA_FL_SEND_PCIEHDR",
  5463. "IDMA_FL_PUSH_CPL_FIFO",
  5464. "IDMA_FL_SEND_CPL",
  5465. "IDMA_FL_SEND_PAYLOAD_FIRST",
  5466. "IDMA_FL_SEND_PAYLOAD",
  5467. "IDMA_FL_REQ_NEXT_DATA_FL",
  5468. "IDMA_FL_SEND_NEXT_PCIEHDR",
  5469. "IDMA_FL_SEND_PADDING",
  5470. "IDMA_FL_SEND_COMPLETION_TO_IMSG",
  5471. };
  5472. static const u32 sge_regs[] = {
  5473. SGE_DEBUG_DATA_LOW_INDEX_2_A,
  5474. SGE_DEBUG_DATA_LOW_INDEX_3_A,
  5475. SGE_DEBUG_DATA_HIGH_INDEX_10_A,
  5476. };
  5477. const char **sge_idma_decode;
  5478. int sge_idma_decode_nstates;
  5479. int i;
  5480. unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
  5481. /* Select the right set of decode strings to dump depending on the
  5482. * adapter chip type.
  5483. */
  5484. switch (chip_version) {
  5485. case CHELSIO_T4:
  5486. sge_idma_decode = (const char **)t4_decode;
  5487. sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
  5488. break;
  5489. case CHELSIO_T5:
  5490. sge_idma_decode = (const char **)t5_decode;
  5491. sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
  5492. break;
  5493. case CHELSIO_T6:
  5494. sge_idma_decode = (const char **)t6_decode;
  5495. sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
  5496. break;
  5497. default:
  5498. dev_err(adapter->pdev_dev,
  5499. "Unsupported chip version %d\n", chip_version);
  5500. return;
  5501. }
  5502. if (is_t4(adapter->params.chip)) {
  5503. sge_idma_decode = (const char **)t4_decode;
  5504. sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
  5505. } else {
  5506. sge_idma_decode = (const char **)t5_decode;
  5507. sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
  5508. }
  5509. if (state < sge_idma_decode_nstates)
  5510. CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
  5511. else
  5512. CH_WARN(adapter, "idma state %d unknown\n", state);
  5513. for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
  5514. CH_WARN(adapter, "SGE register %#x value %#x\n",
  5515. sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
  5516. }
  5517. /**
  5518. * t4_sge_ctxt_flush - flush the SGE context cache
  5519. * @adap: the adapter
  5520. * @mbox: mailbox to use for the FW command
  5521. *
  5522. * Issues a FW command through the given mailbox to flush the
  5523. * SGE context cache.
  5524. */
  5525. int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox)
  5526. {
  5527. int ret;
  5528. u32 ldst_addrspace;
  5529. struct fw_ldst_cmd c;
  5530. memset(&c, 0, sizeof(c));
  5531. ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_SGE_EGRC);
  5532. c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
  5533. FW_CMD_REQUEST_F | FW_CMD_READ_F |
  5534. ldst_addrspace);
  5535. c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
  5536. c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
  5537. ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
  5538. return ret;
  5539. }
/**
 *	t4_fw_hello - establish communication with FW
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@evt_mbox: mailbox to receive async FW events
 *	@master: specifies the caller's willingness to be the device master
 *	@state: returns the current device state (if non-NULL)
 *
 *	Issues a command to establish communication with FW.  Returns either
 *	an error (negative integer) or the mailbox of the Master PF.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
{
	int ret;
	struct fw_hello_cmd c;
	u32 v;
	unsigned int master_mbox;
	int retries = FW_CMD_HELLO_RETRIES;

retry:
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	c.err_to_clearinit = cpu_to_be32(
		FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
		FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
		FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
					mbox : FW_HELLO_CMD_MBMASTER_M) |
		FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
		FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
		FW_HELLO_CMD_CLEARINIT_F);

	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so.
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret < 0) {
		if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
			goto retry;
		if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
			t4_report_fw_error(adap);
		return ret;
	}

	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
	if (state) {
		/* Errors take precedence over the Initialized indication. */
		if (v & FW_HELLO_CMD_ERR_F)
			*state = DEV_STATE_ERR;
		else if (v & FW_HELLO_CMD_INIT_F)
			*state = DEV_STATE_INIT;
		else
			*state = DEV_STATE_UNINIT;
	}

	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * PCIE_FW_MASTER_M so the test below will work ...
	 */
	if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;

		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
		for (;;) {
			u32 pcie_fw;

			msleep(50);
			waiting -= 50;

			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, PCIE_FW_A);
			if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
				if (waiting <= 0) {
					if (retries-- > 0)
						goto retry;

					return -ETIMEDOUT;
				}
				continue;
			}

			/*
			 * We either have an Error or Initialized condition
			 * report errors preferentially.
			 */
			if (state) {
				if (pcie_fw & PCIE_FW_ERR_F)
					*state = DEV_STATE_ERR;
				else if (pcie_fw & PCIE_FW_INIT_F)
					*state = DEV_STATE_INIT;
			}

			/*
			 * If we arrived before a Master PF was selected and
			 * there's not a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == PCIE_FW_MASTER_M &&
			    (pcie_fw & PCIE_FW_MASTER_VLD_F))
				master_mbox = PCIE_FW_MASTER_G(pcie_fw);
			break;
		}
	}

	return master_mbox;
}
  5658. /**
  5659. * t4_fw_bye - end communication with FW
  5660. * @adap: the adapter
  5661. * @mbox: mailbox to use for the FW command
  5662. *
  5663. * Issues a command to terminate communication with FW.
  5664. */
  5665. int t4_fw_bye(struct adapter *adap, unsigned int mbox)
  5666. {
  5667. struct fw_bye_cmd c;
  5668. memset(&c, 0, sizeof(c));
  5669. INIT_CMD(c, BYE, WRITE);
  5670. return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
  5671. }
/**
 *	t4_early_init - ask FW to initialize the device
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Issues a command to FW to partially initialize the device.  This
 *	performs initialization that generally doesn't depend on user input.
 */
int t4_early_init(struct adapter *adap, unsigned int mbox)
{
	struct fw_initialize_cmd c;

	memset(&c, 0, sizeof(c));
	INIT_CMD(c, INITIALIZE, WRITE);
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
  5687. /**
  5688. * t4_fw_reset - issue a reset to FW
  5689. * @adap: the adapter
  5690. * @mbox: mailbox to use for the FW command
  5691. * @reset: specifies the type of reset to perform
  5692. *
  5693. * Issues a reset command of the specified type to FW.
  5694. */
  5695. int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
  5696. {
  5697. struct fw_reset_cmd c;
  5698. memset(&c, 0, sizeof(c));
  5699. INIT_CMD(c, RESET, WRITE);
  5700. c.val = cpu_to_be32(reset);
  5701. return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
  5702. }
/**
 *	t4_fw_halt - issue a reset/halt to FW and put uP into RESET
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if desired)
 *	@force: force uP into RESET even if FW RESET command fails
 *
 *	Issues a RESET command to firmware (if desired) with a HALT indication
 *	and then puts the microprocessor into RESET state.  The RESET command
 *	will only be issued if a legitimate mailbox is provided (mbox <=
 *	PCIE_FW_MASTER_M).
 *
 *	This is generally used in order for the host to safely manipulate the
 *	adapter without fear of conflicting with whatever the firmware might
 *	be doing.  The only way out of this state is to RESTART the firmware
 *	...
 */
static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
{
	int ret = 0;

	/*
	 * If a legitimate mailbox is provided, issue a RESET command
	 * with a HALT indication.
	 */
	if (mbox <= PCIE_FW_MASTER_M) {
		struct fw_reset_cmd c;

		memset(&c, 0, sizeof(c));
		INIT_CMD(c, RESET, WRITE);
		c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
		c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
	}

	/*
	 * Normally we won't complete the operation if the firmware RESET
	 * command fails but if our caller insists we'll go ahead and put the
	 * uP into RESET.  This can be useful if the firmware is hung or even
	 * missing ...  We'll have to take the risk of putting the uP into
	 * RESET without the cooperation of firmware in that case.
	 *
	 * We also force the firmware's HALT flag to be on in case we bypassed
	 * the firmware RESET command above or we're dealing with old firmware
	 * which doesn't have the HALT capability.  This will serve as a flag
	 * for the incoming firmware to know that it's coming out of a HALT
	 * rather than a RESET ... if it's new enough to understand that ...
	 */
	if (ret == 0 || force) {
		t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
		t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
				 PCIE_FW_HALT_F);
	}

	/*
	 * And we always return the result of the firmware RESET command
	 * even when we force the uP into RESET ...
	 */
	return ret;
}
/**
 *	t4_fw_restart - restart the firmware by taking the uP out of RESET
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW RESET command (if a valid one is
 *	       provided, i.e. mbox <= PCIE_FW_MASTER_M)
 *	@reset: if we want to do a RESET to restart things
 *
 *	Restart firmware previously halted by t4_fw_halt().  On successful
 *	return the previous PF Master remains as the new PF Master and there
 *	is no need to issue a new HELLO command, etc.
 *
 *	We do this in two ways:
 *
 *	1. If we're dealing with newer firmware we'll simply want to take
 *	   the chip's microprocessor out of RESET.  This will cause the
 *	   firmware to start up from its start vector.  And then we'll loop
 *	   until the firmware indicates it's started again (PCIE_FW.HALT
 *	   reset to 0) or we timeout.
 *
 *	2. If we're dealing with older firmware then we'll need to RESET
 *	   the chip since older firmware won't recognize the PCIE_FW.HALT
 *	   flag and automatically RESET itself on startup.
 */
static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
{
	if (reset) {
		/*
		 * Since we're directing the RESET instead of the firmware
		 * doing it automatically, we need to clear the PCIE_FW.HALT
		 * bit.
		 */
		t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);

		/*
		 * If we've been given a valid mailbox, first try to get the
		 * firmware to do the RESET.  If that works, great and we can
		 * return success.  Otherwise, if we haven't been given a
		 * valid mailbox or the RESET command failed, fall back to
		 * hitting the chip with a hammer.
		 */
		if (mbox <= PCIE_FW_MASTER_M) {
			t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
			msleep(100);
			if (t4_fw_reset(adap, mbox,
					PIORST_F | PIORSTMODE_F) == 0)
				return 0;
		}

		/* Last resort: direct chip reset via PL_RST. */
		t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
		msleep(2000);
	} else {
		int ms;

		/* Take the uP out of RESET and poll for PCIE_FW.HALT to
		 * clear, indicating the firmware has restarted.
		 */
		t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
		for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
			if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
				return 0;
			msleep(100);
			ms += 100;
		}
		return -ETIMEDOUT;
	}
	return 0;
}
  5817. /**
  5818. * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
  5819. * @adap: the adapter
  5820. * @mbox: mailbox to use for the FW RESET command (if desired)
  5821. * @fw_data: the firmware image to write
  5822. * @size: image size
  5823. * @force: force upgrade even if firmware doesn't cooperate
  5824. *
  5825. * Perform all of the steps necessary for upgrading an adapter's
  5826. * firmware image. Normally this requires the cooperation of the
  5827. * existing firmware in order to halt all existing activities
  5828. * but if an invalid mailbox token is passed in we skip that step
  5829. * (though we'll still put the adapter microprocessor into RESET in
  5830. * that case).
  5831. *
  5832. * On successful return the new firmware will have been loaded and
  5833. * the adapter will have been fully RESET losing all previous setup
  5834. * state. On unsuccessful return the adapter may be completely hosed ...
  5835. * positive errno indicates that the adapter is ~probably~ intact, a
  5836. * negative errno indicates that things are looking bad ...
  5837. */
  5838. int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
  5839. const u8 *fw_data, unsigned int size, int force)
  5840. {
  5841. const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
  5842. int reset, ret;
  5843. if (!t4_fw_matches_chip(adap, fw_hdr))
  5844. return -EINVAL;
  5845. ret = t4_fw_halt(adap, mbox, force);
  5846. if (ret < 0 && !force)
  5847. return ret;
  5848. ret = t4_load_fw(adap, fw_data, size);
  5849. if (ret < 0)
  5850. return ret;
  5851. /*
  5852. * Older versions of the firmware don't understand the new
  5853. * PCIE_FW.HALT flag and so won't know to perform a RESET when they
  5854. * restart. So for newly loaded older firmware we'll have to do the
  5855. * RESET for it so it starts up on a clean slate. We can tell if
  5856. * the newly loaded firmware will handle this right by checking
  5857. * its header flags to see if it advertises the capability.
  5858. */
  5859. reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
  5860. return t4_fw_restart(adap, mbox, reset);
  5861. }
  5862. /**
  5863. * t4_fl_pkt_align - return the fl packet alignment
  5864. * @adap: the adapter
  5865. *
  5866. * T4 has a single field to specify the packing and padding boundary.
  5867. * T5 onwards has separate fields for this and hence the alignment for
  5868. * next packet offset is maximum of these two.
  5869. *
  5870. */
  5871. int t4_fl_pkt_align(struct adapter *adap)
  5872. {
  5873. u32 sge_control, sge_control2;
  5874. unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
  5875. sge_control = t4_read_reg(adap, SGE_CONTROL_A);
  5876. /* T4 uses a single control field to specify both the PCIe Padding and
  5877. * Packing Boundary. T5 introduced the ability to specify these
  5878. * separately. The actual Ingress Packet Data alignment boundary
  5879. * within Packed Buffer Mode is the maximum of these two
  5880. * specifications. (Note that it makes no real practical sense to
  5881. * have the Pading Boudary be larger than the Packing Boundary but you
  5882. * could set the chip up that way and, in fact, legacy T4 code would
  5883. * end doing this because it would initialize the Padding Boundary and
  5884. * leave the Packing Boundary initialized to 0 (16 bytes).)
  5885. * Padding Boundary values in T6 starts from 8B,
  5886. * where as it is 32B for T4 and T5.
  5887. */
  5888. if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
  5889. ingpad_shift = INGPADBOUNDARY_SHIFT_X;
  5890. else
  5891. ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
  5892. ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);
  5893. fl_align = ingpadboundary;
  5894. if (!is_t4(adap->params.chip)) {
  5895. /* T5 has a weird interpretation of one of the PCIe Packing
  5896. * Boundary values. No idea why ...
  5897. */
  5898. sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
  5899. ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
  5900. if (ingpackboundary == INGPACKBOUNDARY_16B_X)
  5901. ingpackboundary = 16;
  5902. else
  5903. ingpackboundary = 1 << (ingpackboundary +
  5904. INGPACKBOUNDARY_SHIFT_X);
  5905. fl_align = max(ingpadboundary, ingpackboundary);
  5906. }
  5907. return fl_align;
  5908. }
/**
 *	t4_fixup_host_params - fix up host-dependent parameters
 *	@adap: the adapter
 *	@page_size: the host's Base Page Size
 *	@cache_line_size: the host's Cache Line Size
 *
 *	Various registers in T4 contain values which are dependent on the
 *	host's Base Page and Cache Line Sizes.  This function will fix all of
 *	those registers with the appropriate values as passed in ...
 */
int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
			 unsigned int cache_line_size)
{
	/* page_shift = log2(page_size); sge_hps encodes it relative to 1KB */
	unsigned int page_shift = fls(page_size) - 1;
	unsigned int sge_hps = page_shift - 10;
	unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
	/* Free List alignment: at least 32 bytes, otherwise the cache line */
	unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
	unsigned int fl_align_log = fls(fl_align) - 1;
	unsigned int ingpad;

	/* Program the same Host Page Size for all eight PFs. */
	t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
		     HOSTPAGESIZEPF0_V(sge_hps) |
		     HOSTPAGESIZEPF1_V(sge_hps) |
		     HOSTPAGESIZEPF2_V(sge_hps) |
		     HOSTPAGESIZEPF3_V(sge_hps) |
		     HOSTPAGESIZEPF4_V(sge_hps) |
		     HOSTPAGESIZEPF5_V(sge_hps) |
		     HOSTPAGESIZEPF6_V(sge_hps) |
		     HOSTPAGESIZEPF7_V(sge_hps));

	if (is_t4(adap->params.chip)) {
		/* T4: a single boundary controls both padding and packing. */
		t4_set_reg_field(adap, SGE_CONTROL_A,
				 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
				 EGRSTATUSPAGESIZE_F,
				 INGPADBOUNDARY_V(fl_align_log -
						  INGPADBOUNDARY_SHIFT_X) |
				 EGRSTATUSPAGESIZE_V(stat_len != 64));
	} else {
		/* T5 introduced the separation of the Free List Padding and
		 * Packing Boundaries.  Thus, we can select a smaller Padding
		 * Boundary to avoid uselessly chewing up PCIe Link and Memory
		 * Bandwidth, and use a Packing Boundary which is large enough
		 * to avoid false sharing between CPUs, etc.
		 *
		 * For the PCI Link, the smaller the Padding Boundary the
		 * better.  For the Memory Controller, a smaller Padding
		 * Boundary is better until we cross under the Memory Line
		 * Size (the minimum unit of transfer to/from Memory).  If we
		 * have a Padding Boundary which is smaller than the Memory
		 * Line Size, that'll involve a Read-Modify-Write cycle on the
		 * Memory Controller which is never good.  For T5 the smallest
		 * Padding Boundary which we can select is 32 bytes which is
		 * larger than any known Memory Controller Line Size so we'll
		 * use that.
		 *
		 * T5 has a different interpretation of the "0" value for the
		 * Packing Boundary.  This corresponds to 16 bytes instead of
		 * the expected 32 bytes.  We never have a Packing Boundary
		 * less than 32 bytes so we can't use that special value but
		 * on the other hand, if we wanted 32 bytes, the best we can
		 * really do is 64 bytes.
		 */
		if (fl_align <= 32) {
			fl_align = 64;
			fl_align_log = 6;
		}

		if (is_t5(adap->params.chip))
			ingpad = INGPCIEBOUNDARY_32B_X;
		else
			ingpad = T6_INGPADBOUNDARY_32B_X;

		t4_set_reg_field(adap, SGE_CONTROL_A,
				 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
				 EGRSTATUSPAGESIZE_F,
				 INGPADBOUNDARY_V(ingpad) |
				 EGRSTATUSPAGESIZE_V(stat_len != 64));
		t4_set_reg_field(adap, SGE_CONTROL2_A,
				 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
				 INGPACKBOUNDARY_V(fl_align_log -
						   INGPACKBOUNDARY_SHIFT_X));
	}

	/*
	 * Adjust various SGE Free List Host Buffer Sizes.
	 *
	 * This is something of a crock since we're using fixed indices into
	 * the array which are also known by the sge.c code and the T4
	 * Firmware Configuration File.  We need to come up with a much better
	 * approach to managing this array.  For now, the first four entries
	 * are:
	 *
	 *   0: Host Page Size
	 *   1: 64KB
	 *   2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
	 *   3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
	 *
	 * For the single-MTU buffers in unpacked mode we need to include
	 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
	 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
	 * Padding boundary.  All of these are accommodated in the Factory
	 * Default Firmware Configuration File but we need to adjust it for
	 * this host's cache line size.
	 */
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
	/* Round the existing SIZE2/SIZE3 values up to fl_align. */
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
		     & ~(fl_align-1));
	t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
		     (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
		     & ~(fl_align-1));

	/* TDDP page size is encoded relative to 4KB. */
	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));

	return 0;
}
  6018. /**
  6019. * t4_fw_initialize - ask FW to initialize the device
  6020. * @adap: the adapter
  6021. * @mbox: mailbox to use for the FW command
  6022. *
  6023. * Issues a command to FW to partially initialize the device. This
  6024. * performs initialization that generally doesn't depend on user input.
  6025. */
  6026. int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
  6027. {
  6028. struct fw_initialize_cmd c;
  6029. memset(&c, 0, sizeof(c));
  6030. INIT_CMD(c, INITIALIZE, WRITE);
  6031. return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
  6032. }
  6033. /**
  6034. * t4_query_params_rw - query FW or device parameters
  6035. * @adap: the adapter
  6036. * @mbox: mailbox to use for the FW command
  6037. * @pf: the PF
  6038. * @vf: the VF
  6039. * @nparams: the number of parameters
  6040. * @params: the parameter names
  6041. * @val: the parameter values
  6042. * @rw: Write and read flag
  6043. *
  6044. * Reads the value of FW or device parameters. Up to 7 parameters can be
  6045. * queried at once.
  6046. */
  6047. int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
  6048. unsigned int vf, unsigned int nparams, const u32 *params,
  6049. u32 *val, int rw)
  6050. {
  6051. int i, ret;
  6052. struct fw_params_cmd c;
  6053. __be32 *p = &c.param[0].mnem;
  6054. if (nparams > 7)
  6055. return -EINVAL;
  6056. memset(&c, 0, sizeof(c));
  6057. c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
  6058. FW_CMD_REQUEST_F | FW_CMD_READ_F |
  6059. FW_PARAMS_CMD_PFN_V(pf) |
  6060. FW_PARAMS_CMD_VFN_V(vf));
  6061. c.retval_len16 = cpu_to_be32(FW_LEN16(c));
  6062. for (i = 0; i < nparams; i++) {
  6063. *p++ = cpu_to_be32(*params++);
  6064. if (rw)
  6065. *p = cpu_to_be32(*(val + i));
  6066. p++;
  6067. }
  6068. ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
  6069. if (ret == 0)
  6070. for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
  6071. *val++ = be32_to_cpu(*p);
  6072. return ret;
  6073. }
/* Convenience wrapper around t4_query_params_rw() for read-only queries
 * (rw == 0): only parameter names are sent, values are returned in @val.
 */
int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int nparams, const u32 *params,
		    u32 *val)
{
	return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0);
}
  6080. /**
  6081. * t4_set_params_timeout - sets FW or device parameters
  6082. * @adap: the adapter
  6083. * @mbox: mailbox to use for the FW command
  6084. * @pf: the PF
  6085. * @vf: the VF
  6086. * @nparams: the number of parameters
  6087. * @params: the parameter names
  6088. * @val: the parameter values
  6089. * @timeout: the timeout time
  6090. *
  6091. * Sets the value of FW or device parameters. Up to 7 parameters can be
  6092. * specified at once.
  6093. */
  6094. int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
  6095. unsigned int pf, unsigned int vf,
  6096. unsigned int nparams, const u32 *params,
  6097. const u32 *val, int timeout)
  6098. {
  6099. struct fw_params_cmd c;
  6100. __be32 *p = &c.param[0].mnem;
  6101. if (nparams > 7)
  6102. return -EINVAL;
  6103. memset(&c, 0, sizeof(c));
  6104. c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
  6105. FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
  6106. FW_PARAMS_CMD_PFN_V(pf) |
  6107. FW_PARAMS_CMD_VFN_V(vf));
  6108. c.retval_len16 = cpu_to_be32(FW_LEN16(c));
  6109. while (nparams--) {
  6110. *p++ = cpu_to_be32(*params++);
  6111. *p++ = cpu_to_be32(*val++);
  6112. }
  6113. return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
  6114. }
/**
 *	t4_set_params - sets FW or device parameters
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF
 *	@vf: the VF
 *	@nparams: the number of parameters
 *	@params: the parameter names
 *	@val: the parameter values
 *
 *	Sets the value of FW or device parameters.  Up to 7 parameters can be
 *	specified at once.  Thin wrapper around t4_set_params_timeout() using
 *	the default FW_CMD_MAX_TIMEOUT mailbox timeout.
 */
int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
		  unsigned int vf, unsigned int nparams, const u32 *params,
		  const u32 *val)
{
	return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
				     FW_CMD_MAX_TIMEOUT);
}
  6135. /**
  6136. * t4_cfg_pfvf - configure PF/VF resource limits
  6137. * @adap: the adapter
  6138. * @mbox: mailbox to use for the FW command
  6139. * @pf: the PF being configured
  6140. * @vf: the VF being configured
  6141. * @txq: the max number of egress queues
  6142. * @txq_eth_ctrl: the max number of egress Ethernet or control queues
  6143. * @rxqi: the max number of interrupt-capable ingress queues
  6144. * @rxq: the max number of interruptless ingress queues
  6145. * @tc: the PCI traffic class
  6146. * @vi: the max number of virtual interfaces
  6147. * @cmask: the channel access rights mask for the PF/VF
  6148. * @pmask: the port access rights mask for the PF/VF
  6149. * @nexact: the maximum number of exact MPS filters
  6150. * @rcaps: read capabilities
  6151. * @wxcaps: write/execute capabilities
  6152. *
  6153. * Configures resource limits and capabilities for a physical or virtual
  6154. * function.
  6155. */
  6156. int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
  6157. unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
  6158. unsigned int rxqi, unsigned int rxq, unsigned int tc,
  6159. unsigned int vi, unsigned int cmask, unsigned int pmask,
  6160. unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
  6161. {
  6162. struct fw_pfvf_cmd c;
  6163. memset(&c, 0, sizeof(c));
  6164. c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
  6165. FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
  6166. FW_PFVF_CMD_VFN_V(vf));
  6167. c.retval_len16 = cpu_to_be32(FW_LEN16(c));
  6168. c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
  6169. FW_PFVF_CMD_NIQ_V(rxq));
  6170. c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
  6171. FW_PFVF_CMD_PMASK_V(pmask) |
  6172. FW_PFVF_CMD_NEQ_V(txq));
  6173. c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
  6174. FW_PFVF_CMD_NVI_V(vi) |
  6175. FW_PFVF_CMD_NEXACTF_V(nexact));
  6176. c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
  6177. FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
  6178. FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
  6179. return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
  6180. }
/**
 *	t4_alloc_vi - allocate a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@nmac: number of MAC addresses needed (1 to 5)
 *	@mac: the MAC addresses of the VI
 *	@rss_size: size of RSS table slice associated with this VI
 *
 *	Allocates a virtual interface for the given physical port.  If @mac is
 *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
 *	@mac should be large enough to hold @nmac Ethernet addresses, they are
 *	stored consecutively so the space needed is @nmac * 6 bytes.
 *	Returns a negative error number or the non-negative VI id.
 */
int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
		unsigned int *rss_size)
{
	int ret;
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
				  FW_CMD_WRITE_F | FW_CMD_EXEC_F |
				  FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
	c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
	c.portid_pkd = FW_VI_CMD_PORTID_V(port);
	/* Firmware encodes the MAC count as (nmac - 1). */
	c.nmac = nmac - 1;

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	if (mac) {
		memcpy(mac, c.mac, sizeof(c.mac));
		/* Copy the additional MAC addresses; each case deliberately
		 * cascades into the next so that requesting N addresses
		 * copies slots N-1 down through 1.
		 */
		switch (nmac) {
		case 5:
			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
			/* fall through */
		case 4:
			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
			/* fall through */
		case 3:
			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
			/* fall through */
		case 2:
			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
		}
	}
	if (rss_size)
		*rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
	return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
}
/**
 *	t4_free_vi - free a virtual interface
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@viid: virtual interface identifier
 *
 *	Free a previously allocated virtual interface.
 */
int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
	       unsigned int vf, unsigned int viid)
{
	struct fw_vi_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
				  FW_CMD_REQUEST_F |
				  FW_CMD_EXEC_F |
				  FW_VI_CMD_PFN_V(pf) |
				  FW_VI_CMD_VFN_V(vf));
	/* the FREE flag selects deallocation of the VI named by @viid */
	c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
	c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
}
  6255. /**
  6256. * t4_set_rxmode - set Rx properties of a virtual interface
  6257. * @adap: the adapter
  6258. * @mbox: mailbox to use for the FW command
  6259. * @viid: the VI id
  6260. * @mtu: the new MTU or -1
  6261. * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
  6262. * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
  6263. * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
  6264. * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
  6265. * @sleep_ok: if true we may sleep while awaiting command completion
  6266. *
  6267. * Sets Rx properties of a virtual interface.
  6268. */
  6269. int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
  6270. int mtu, int promisc, int all_multi, int bcast, int vlanex,
  6271. bool sleep_ok)
  6272. {
  6273. struct fw_vi_rxmode_cmd c;
  6274. /* convert to FW values */
  6275. if (mtu < 0)
  6276. mtu = FW_RXMODE_MTU_NO_CHG;
  6277. if (promisc < 0)
  6278. promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
  6279. if (all_multi < 0)
  6280. all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
  6281. if (bcast < 0)
  6282. bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
  6283. if (vlanex < 0)
  6284. vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
  6285. memset(&c, 0, sizeof(c));
  6286. c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
  6287. FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
  6288. FW_VI_RXMODE_CMD_VIID_V(viid));
  6289. c.retval_len16 = cpu_to_be32(FW_LEN16(c));
  6290. c.mtu_to_vlanexen =
  6291. cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
  6292. FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
  6293. FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
  6294. FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
  6295. FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
  6296. return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
  6297. }
/**
 *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@free: if true any existing filters for this VI id are first removed
 *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
 *	@addr: the MAC address(es)
 *	@idx: where to store the index of each allocated filter
 *	@hash: pointer to hash address filter bitmap
 *	@sleep_ok: call is allowed to sleep
 *
 *	Allocates an exact-match filter for each of the supplied addresses and
 *	sets it to the corresponding address.  If @idx is not %NULL it should
 *	have at least @naddr entries, each of which will be set to the index of
 *	the filter allocated for the corresponding MAC address.  If a filter
 *	could not be allocated for an address its index is set to 0xffff.
 *	If @hash is not %NULL addresses that fail to allocate an exact filter
 *	are hashed and update the hash filter bitmap pointed at by @hash.
 *
 *	Returns a negative error number or the number of filters allocated.
 */
int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
		      unsigned int viid, bool free, unsigned int naddr,
		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
	int offset, ret = 0;
	struct fw_vi_mac_cmd c;
	unsigned int nfilters = 0;
	unsigned int max_naddr = adap->params.arch.mps_tcam_size;
	unsigned int rem = naddr;

	if (naddr > max_naddr)
		return -EINVAL;

	/* Submit the addresses in batches of at most ARRAY_SIZE(c.u.exact)
	 * entries per mailbox command.
	 */
	for (offset = 0; offset < naddr ; /**/) {
		unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
					 rem : ARRAY_SIZE(c.u.exact));
		size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
						     u.exact[fw_naddr]), 16);
		struct fw_vi_mac_exact *p;
		int i;

		memset(&c, 0, sizeof(c));
		c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
					   FW_CMD_REQUEST_F |
					   FW_CMD_WRITE_F |
					   FW_CMD_EXEC_V(free) |
					   FW_VI_MAC_CMD_VIID_V(viid));
		c.freemacs_to_len16 =
			cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
				    FW_CMD_LEN16_V(len16));

		/* Fill each exact-match entry with a "VALID + add" marker
		 * and the address to be installed.
		 */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			p->valid_to_idx =
				cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
					    FW_VI_MAC_CMD_IDX_V(
						    FW_VI_MAC_ADD_MAC));
			memcpy(p->macaddr, addr[offset + i],
			       sizeof(p->macaddr));
		}

		/* It's okay if we run out of space in our MAC address arena.
		 * Some of the addresses we submit may get stored so we need
		 * to run through the reply to see what the results were ...
		 */
		ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
		if (ret && ret != -FW_ENOMEM)
			break;

		/* The firmware echoes back the index assigned to each entry;
		 * an index >= the TCAM size means no exact filter was
		 * available for that address.
		 */
		for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
			u16 index = FW_VI_MAC_CMD_IDX_G(
					be16_to_cpu(p->valid_to_idx));

			if (idx)
				idx[offset + i] = (index >= max_naddr ?
						   0xffff : index);
			if (index < max_naddr)
				nfilters++;
			else if (hash)
				*hash |= (1ULL <<
					  hash_mac_addr(addr[offset + i]));
		}

		/* only free any pre-existing filters on the first batch */
		free = false;
		offset += fw_naddr;
		rem -= fw_naddr;
	}

	/* If we merely ran out of filter space, report how many filters we
	 * did manage to allocate instead of the error.
	 */
	if (ret == 0 || ret == -FW_ENOMEM)
		ret = nfilters;
	return ret;
}
  6382. /**
  6383. * t4_free_mac_filt - frees exact-match filters of given MAC addresses
  6384. * @adap: the adapter
  6385. * @mbox: mailbox to use for the FW command
  6386. * @viid: the VI id
  6387. * @naddr: the number of MAC addresses to allocate filters for (up to 7)
  6388. * @addr: the MAC address(es)
  6389. * @sleep_ok: call is allowed to sleep
  6390. *
  6391. * Frees the exact-match filter for each of the supplied addresses
  6392. *
  6393. * Returns a negative error number or the number of filters freed.
  6394. */
  6395. int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
  6396. unsigned int viid, unsigned int naddr,
  6397. const u8 **addr, bool sleep_ok)
  6398. {
  6399. int offset, ret = 0;
  6400. struct fw_vi_mac_cmd c;
  6401. unsigned int nfilters = 0;
  6402. unsigned int max_naddr = is_t4(adap->params.chip) ?
  6403. NUM_MPS_CLS_SRAM_L_INSTANCES :
  6404. NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
  6405. unsigned int rem = naddr;
  6406. if (naddr > max_naddr)
  6407. return -EINVAL;
  6408. for (offset = 0; offset < (int)naddr ; /**/) {
  6409. unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
  6410. ? rem
  6411. : ARRAY_SIZE(c.u.exact));
  6412. size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
  6413. u.exact[fw_naddr]), 16);
  6414. struct fw_vi_mac_exact *p;
  6415. int i;
  6416. memset(&c, 0, sizeof(c));
  6417. c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
  6418. FW_CMD_REQUEST_F |
  6419. FW_CMD_WRITE_F |
  6420. FW_CMD_EXEC_V(0) |
  6421. FW_VI_MAC_CMD_VIID_V(viid));
  6422. c.freemacs_to_len16 =
  6423. cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
  6424. FW_CMD_LEN16_V(len16));
  6425. for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
  6426. p->valid_to_idx = cpu_to_be16(
  6427. FW_VI_MAC_CMD_VALID_F |
  6428. FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
  6429. memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
  6430. }
  6431. ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
  6432. if (ret)
  6433. break;
  6434. for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
  6435. u16 index = FW_VI_MAC_CMD_IDX_G(
  6436. be16_to_cpu(p->valid_to_idx));
  6437. if (index < max_naddr)
  6438. nfilters++;
  6439. }
  6440. offset += fw_naddr;
  6441. rem -= fw_naddr;
  6442. }
  6443. if (ret == 0)
  6444. ret = nfilters;
  6445. return ret;
  6446. }
/**
 *	t4_change_mac - modifies the exact-match filter for a MAC address
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@viid: the VI id
 *	@idx: index of existing filter for old value of MAC address, or -1
 *	@addr: the new MAC address value
 *	@persist: whether a new MAC allocation should be persistent
 *	@add_smt: if true also add the address to the HW SMT
 *
 *	Modifies an exact-match filter and sets it to the new MAC address.
 *	Note that in general it is not possible to modify the value of a given
 *	filter so the generic way to modify an address filter is to free the one
 *	being used by the old address value and allocate a new filter for the
 *	new address value.  @idx can be -1 if the address is a new addition.
 *
 *	Returns a negative error number or the index of the filter with the new
 *	MAC value.
 */
int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
		  int idx, const u8 *addr, bool persist, bool add_smt)
{
	int ret, mode;
	struct fw_vi_mac_cmd c;
	/* single-entry command: point at the first exact-match slot */
	struct fw_vi_mac_exact *p = c.u.exact;
	unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;

	if (idx < 0)					/* new allocation */
		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;

	memset(&c, 0, sizeof(c));
	c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
				   FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
				   FW_VI_MAC_CMD_VIID_V(viid));
	c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
	p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
				      FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
				      FW_VI_MAC_CMD_IDX_V(idx));
	memcpy(p->macaddr, addr, sizeof(p->macaddr));

	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if (ret == 0) {
		/* The firmware returns the index of the filter actually used;
		 * an index beyond the TCAM size means no filter was free.
		 */
		ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
		if (ret >= max_mac_addr)
			ret = -ENOMEM;
	}
	return ret;
}
  6493. /**
  6494. * t4_set_addr_hash - program the MAC inexact-match hash filter
  6495. * @adap: the adapter
  6496. * @mbox: mailbox to use for the FW command
  6497. * @viid: the VI id
  6498. * @ucast: whether the hash filter should also match unicast addresses
  6499. * @vec: the value to be written to the hash filter
  6500. * @sleep_ok: call is allowed to sleep
  6501. *
  6502. * Sets the 64-bit inexact-match hash filter for a virtual interface.
  6503. */
  6504. int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
  6505. bool ucast, u64 vec, bool sleep_ok)
  6506. {
  6507. struct fw_vi_mac_cmd c;
  6508. memset(&c, 0, sizeof(c));
  6509. c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
  6510. FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
  6511. FW_VI_ENABLE_CMD_VIID_V(viid));
  6512. c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
  6513. FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
  6514. FW_CMD_LEN16_V(1));
  6515. c.u.hash.hashvec = cpu_to_be64(vec);
  6516. return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
  6517. }
  6518. /**
  6519. * t4_enable_vi_params - enable/disable a virtual interface
  6520. * @adap: the adapter
  6521. * @mbox: mailbox to use for the FW command
  6522. * @viid: the VI id
  6523. * @rx_en: 1=enable Rx, 0=disable Rx
  6524. * @tx_en: 1=enable Tx, 0=disable Tx
  6525. * @dcb_en: 1=enable delivery of Data Center Bridging messages.
  6526. *
  6527. * Enables/disables a virtual interface. Note that setting DCB Enable
  6528. * only makes sense when enabling a Virtual Interface ...
  6529. */
  6530. int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
  6531. unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
  6532. {
  6533. struct fw_vi_enable_cmd c;
  6534. memset(&c, 0, sizeof(c));
  6535. c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
  6536. FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
  6537. FW_VI_ENABLE_CMD_VIID_V(viid));
  6538. c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
  6539. FW_VI_ENABLE_CMD_EEN_V(tx_en) |
  6540. FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
  6541. FW_LEN16(c));
  6542. return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
  6543. }
  6544. /**
  6545. * t4_enable_vi - enable/disable a virtual interface
  6546. * @adap: the adapter
  6547. * @mbox: mailbox to use for the FW command
  6548. * @viid: the VI id
  6549. * @rx_en: 1=enable Rx, 0=disable Rx
  6550. * @tx_en: 1=enable Tx, 0=disable Tx
  6551. *
  6552. * Enables/disables a virtual interface.
  6553. */
  6554. int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
  6555. bool rx_en, bool tx_en)
  6556. {
  6557. return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
  6558. }
  6559. /**
  6560. * t4_identify_port - identify a VI's port by blinking its LED
  6561. * @adap: the adapter
  6562. * @mbox: mailbox to use for the FW command
  6563. * @viid: the VI id
  6564. * @nblinks: how many times to blink LED at 2.5 Hz
  6565. *
  6566. * Identifies a VI's port by blinking its LED.
  6567. */
  6568. int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
  6569. unsigned int nblinks)
  6570. {
  6571. struct fw_vi_enable_cmd c;
  6572. memset(&c, 0, sizeof(c));
  6573. c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
  6574. FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
  6575. FW_VI_ENABLE_CMD_VIID_V(viid));
  6576. c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
  6577. c.blinkdur = cpu_to_be16(nblinks);
  6578. return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
  6579. }
  6580. /**
  6581. * t4_iq_stop - stop an ingress queue and its FLs
  6582. * @adap: the adapter
  6583. * @mbox: mailbox to use for the FW command
  6584. * @pf: the PF owning the queues
  6585. * @vf: the VF owning the queues
  6586. * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
  6587. * @iqid: ingress queue id
  6588. * @fl0id: FL0 queue id or 0xffff if no attached FL0
  6589. * @fl1id: FL1 queue id or 0xffff if no attached FL1
  6590. *
  6591. * Stops an ingress queue and its associated FLs, if any. This causes
  6592. * any current or future data/messages destined for these queues to be
  6593. * tossed.
  6594. */
  6595. int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
  6596. unsigned int vf, unsigned int iqtype, unsigned int iqid,
  6597. unsigned int fl0id, unsigned int fl1id)
  6598. {
  6599. struct fw_iq_cmd c;
  6600. memset(&c, 0, sizeof(c));
  6601. c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
  6602. FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
  6603. FW_IQ_CMD_VFN_V(vf));
  6604. c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
  6605. c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
  6606. c.iqid = cpu_to_be16(iqid);
  6607. c.fl0id = cpu_to_be16(fl0id);
  6608. c.fl1id = cpu_to_be16(fl1id);
  6609. return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
  6610. }
  6611. /**
  6612. * t4_iq_free - free an ingress queue and its FLs
  6613. * @adap: the adapter
  6614. * @mbox: mailbox to use for the FW command
  6615. * @pf: the PF owning the queues
  6616. * @vf: the VF owning the queues
  6617. * @iqtype: the ingress queue type
  6618. * @iqid: ingress queue id
  6619. * @fl0id: FL0 queue id or 0xffff if no attached FL0
  6620. * @fl1id: FL1 queue id or 0xffff if no attached FL1
  6621. *
  6622. * Frees an ingress queue and its associated FLs, if any.
  6623. */
  6624. int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
  6625. unsigned int vf, unsigned int iqtype, unsigned int iqid,
  6626. unsigned int fl0id, unsigned int fl1id)
  6627. {
  6628. struct fw_iq_cmd c;
  6629. memset(&c, 0, sizeof(c));
  6630. c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
  6631. FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
  6632. FW_IQ_CMD_VFN_V(vf));
  6633. c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
  6634. c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
  6635. c.iqid = cpu_to_be16(iqid);
  6636. c.fl0id = cpu_to_be16(fl0id);
  6637. c.fl1id = cpu_to_be16(fl1id);
  6638. return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
  6639. }
  6640. /**
  6641. * t4_eth_eq_free - free an Ethernet egress queue
  6642. * @adap: the adapter
  6643. * @mbox: mailbox to use for the FW command
  6644. * @pf: the PF owning the queue
  6645. * @vf: the VF owning the queue
  6646. * @eqid: egress queue id
  6647. *
  6648. * Frees an Ethernet egress queue.
  6649. */
  6650. int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
  6651. unsigned int vf, unsigned int eqid)
  6652. {
  6653. struct fw_eq_eth_cmd c;
  6654. memset(&c, 0, sizeof(c));
  6655. c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
  6656. FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
  6657. FW_EQ_ETH_CMD_PFN_V(pf) |
  6658. FW_EQ_ETH_CMD_VFN_V(vf));
  6659. c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
  6660. c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
  6661. return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
  6662. }
  6663. /**
  6664. * t4_ctrl_eq_free - free a control egress queue
  6665. * @adap: the adapter
  6666. * @mbox: mailbox to use for the FW command
  6667. * @pf: the PF owning the queue
  6668. * @vf: the VF owning the queue
  6669. * @eqid: egress queue id
  6670. *
  6671. * Frees a control egress queue.
  6672. */
  6673. int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
  6674. unsigned int vf, unsigned int eqid)
  6675. {
  6676. struct fw_eq_ctrl_cmd c;
  6677. memset(&c, 0, sizeof(c));
  6678. c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
  6679. FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
  6680. FW_EQ_CTRL_CMD_PFN_V(pf) |
  6681. FW_EQ_CTRL_CMD_VFN_V(vf));
  6682. c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
  6683. c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
  6684. return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
  6685. }
/**
 *	t4_ofld_eq_free - free an offload egress queue
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *	@pf: the PF owning the queue
 *	@vf: the VF owning the queue
 *	@eqid: egress queue id
 *
 *	Frees an offload egress queue.
 */
int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
		    unsigned int vf, unsigned int eqid)
{
	struct fw_eq_ofld_cmd c;

	memset(&c, 0, sizeof(c));
	c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
				  FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
				  FW_EQ_OFLD_CMD_PFN_V(pf) |
				  FW_EQ_OFLD_CMD_VFN_V(vf));
	c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
	c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
}
/**
 *	t4_link_down_rc_str - return a string for a Link Down Reason Code
 *	@link_down_rc: Link Down Reason Code
 *
 *	Returns a string representation of the Link Down Reason Code.
 */
static const char *t4_link_down_rc_str(unsigned char link_down_rc)
{
	/* Table indexed directly by the firmware's reason code */
	static const char * const reason[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved",
	};

	if (link_down_rc >= ARRAY_SIZE(reason))
		return "Bad Reason Code";

	return reason[link_down_rc];
}
/**
 *	t4_handle_get_port_info - process a FW reply message
 *	@pi: the port info
 *	@rpl: start of the FW message
 *
 *	Processes a GET_PORT_INFO FW reply message: decodes link status,
 *	speed, pause settings and module type, updates the cached link
 *	configuration and notifies the OS layer of any changes.
 */
void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
{
	const struct fw_port_cmd *p = (const void *)rpl;
	struct adapter *adap = pi->adapter;

	/* link/module state change message */
	int speed = 0, fc = 0;
	struct link_config *lc;
	u32 stat = be32_to_cpu(p->u.info.lstatus_to_modtype);
	int link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
	u32 mod = FW_PORT_CMD_MODTYPE_G(stat);

	/* decode pause settings from the status word */
	if (stat & FW_PORT_CMD_RXPAUSE_F)
		fc |= PAUSE_RX;
	if (stat & FW_PORT_CMD_TXPAUSE_F)
		fc |= PAUSE_TX;
	/* decode the negotiated link speed (in Mb/s) */
	if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
		speed = 100;
	else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
		speed = 1000;
	else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
		speed = 10000;
	else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
		speed = 25000;
	else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
		speed = 40000;
	else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
		speed = 100000;

	lc = &pi->link_cfg;

	/* notify the OS layer if the transceiver module type changed */
	if (mod != pi->mod_type) {
		pi->mod_type = mod;
		t4_os_portmod_changed(adap, pi->port_id);
	}
	if (link_ok != lc->link_ok || speed != lc->speed ||
	    fc != lc->fc) {	/* something changed */
		if (!link_ok && lc->link_ok) {
			/* link just went down: record and log the reason */
			unsigned char rc = FW_PORT_CMD_LINKDNRC_G(stat);

			lc->link_down_rc = rc;
			dev_warn(adap->pdev_dev,
				 "Port %d link down, reason: %s\n",
				 pi->port_id, t4_link_down_rc_str(rc));
		}
		lc->link_ok = link_ok;
		lc->speed = speed;
		lc->fc = fc;
		lc->supported = be16_to_cpu(p->u.info.pcap);
		lc->lp_advertising = be16_to_cpu(p->u.info.lpacap);
		t4_os_link_changed(adap, pi->port_id, link_ok);
	}
}
/**
 *	t4_handle_fw_rpl - process a FW reply message
 *	@adap: the adapter
 *	@rpl: start of the FW message
 *
 *	Processes a FW message, such as link state change messages.
 *	Returns 0 on success or -EINVAL for an unrecognized message.
 */
int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
{
	u8 opcode = *(const u8 *)rpl;

	/* This might be a port command ... this simplifies the following
	 * conditionals ...  We can get away with pre-dereferencing
	 * action_to_len16 because it's in the first 16 bytes and all messages
	 * will be at least that long.
	 */
	const struct fw_port_cmd *p = (const void *)rpl;
	unsigned int action =
		FW_PORT_CMD_ACTION_G(be32_to_cpu(p->action_to_len16));

	if (opcode == FW_PORT_CMD && action == FW_PORT_ACTION_GET_PORT_INFO) {
		int i;
		int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
		struct port_info *pi = NULL;

		/* Find the port whose Tx channel matches the message.
		 * NOTE(review): if no port matches, pi is left pointing at
		 * the last port (or NULL on a zero-port adapter) -- confirm
		 * the firmware never reports an unknown channel here.
		 */
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->tx_chan == chan)
				break;
		}

		t4_handle_get_port_info(pi, rpl);
	} else {
		dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n", opcode);
		return -EINVAL;
	}
	return 0;
}
/* Read the negotiated PCIe link speed and width into @p.  Leaves @p
 * untouched if the device is not PCI Express.
 */
static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
{
	u16 val;

	if (pci_is_pcie(adapter->pdev)) {
		pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
		p->speed = val & PCI_EXP_LNKSTA_CLS;
		/* negotiated link width field (PCI_EXP_LNKSTA_NLW) starts
		 * at bit 4 of the Link Status register
		 */
		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
	}
}
  6830. /**
  6831. * init_link_config - initialize a link's SW state
  6832. * @lc: structure holding the link state
  6833. * @caps: link capabilities
  6834. *
  6835. * Initializes the SW state maintained for each link, including the link's
  6836. * capabilities and default speed/flow-control/autonegotiation settings.
  6837. */
  6838. static void init_link_config(struct link_config *lc, unsigned int caps)
  6839. {
  6840. lc->supported = caps;
  6841. lc->lp_advertising = 0;
  6842. lc->requested_speed = 0;
  6843. lc->speed = 0;
  6844. lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
  6845. if (lc->supported & FW_PORT_CAP_ANEG) {
  6846. lc->advertising = lc->supported & ADVERT_MASK;
  6847. lc->autoneg = AUTONEG_ENABLE;
  6848. lc->requested_fc |= PAUSE_AUTONEG;
  6849. } else {
  6850. lc->advertising = 0;
  6851. lc->autoneg = AUTONEG_DISABLE;
  6852. }
  6853. }
  6854. #define CIM_PF_NOACCESS 0xeeeeeeee
  6855. int t4_wait_dev_ready(void __iomem *regs)
  6856. {
  6857. u32 whoami;
  6858. whoami = readl(regs + PL_WHOAMI_A);
  6859. if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
  6860. return 0;
  6861. msleep(500);
  6862. whoami = readl(regs + PL_WHOAMI_A);
  6863. return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
  6864. }
/* Flash part descriptor: JEDEC manufacturer/device ID and part size. */
struct flash_desc {
	u32 vendor_and_model_id;
	u32 size_mb;
};

/* Identify the serial flash part via its JEDEC ID and record its size and
 * sector count in the adapter parameters.  Returns 0 on success, a negative
 * errno on an unrecognized part or SPI access failure.
 */
static int get_flash_params(struct adapter *adap)
{
	/* Table for non-Numonix supported flash parts.  Numonix parts are left
	 * to the preexisting code.  All flash parts have 64KB sectors.
	 */
	static struct flash_desc supported_flash[] = {
		{ 0x150201, 4 << 20 },       /* Spansion 4MB S25FL032P */
	};

	int ret;
	u32 info;

	/* Issue a Read-ID command and fetch the 3-byte JEDEC ID */
	ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
	if (!ret)
		ret = sf1_read(adap, 3, 0, 1, &info);
	t4_write_reg(adap, SF_OP_A, 0);                    /* unlock SF */
	if (ret)
		return ret;

	/* check the explicit table of known parts first (ret reused as index) */
	for (ret = 0; ret < ARRAY_SIZE(supported_flash); ++ret)
		if (supported_flash[ret].vendor_and_model_id == info) {
			adap->params.sf_size = supported_flash[ret].size_mb;
			adap->params.sf_nsec =
				adap->params.sf_size / SF_SEC_SIZE;
			return 0;
		}

	if ((info & 0xff) != 0x20)             /* not a Numonix flash */
		return -EINVAL;
	info >>= 16;                           /* log2 of size */
	if (info >= 0x14 && info < 0x18)       /* 1MB - 8MB parts */
		adap->params.sf_nsec = 1 << (info - 16);
	else if (info == 0x18)                 /* 16MB part: 64 sectors */
		adap->params.sf_nsec = 64;
	else
		return -EINVAL;
	adap->params.sf_size = 1 << info;

	/* where the firmware image starts in flash */
	adap->params.sf_fw_start =
		t4_read_reg(adap, CIM_BOOT_CFG_A) & BOOTADDR_M;

	if (adap->params.sf_size < FLASH_MIN_SIZE)
		dev_warn(adap->pdev_dev, "WARNING!!! FLASH size %#x < %#x!!!\n",
			 adap->params.sf_size, FLASH_MIN_SIZE);
	return 0;
}
  6909. static void set_pcie_completion_timeout(struct adapter *adapter, u8 range)
  6910. {
  6911. u16 val;
  6912. u32 pcie_cap;
  6913. pcie_cap = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
  6914. if (pcie_cap) {
  6915. pci_read_config_word(adapter->pdev,
  6916. pcie_cap + PCI_EXP_DEVCTL2, &val);
  6917. val &= ~PCI_EXP_DEVCTL2_COMP_TIMEOUT;
  6918. val |= range;
  6919. pci_write_config_word(adapter->pdev,
  6920. pcie_cap + PCI_EXP_DEVCTL2, val);
  6921. }
  6922. }
/**
 *	t4_prep_adapter - prepare SW and HW for operation
 *	@adapter: the adapter
 *
 *	Initialize adapter SW state for the various HW modules, set initial
 *	values for some adapter tunables, take PHYs out of reset, and
 *	initialize the MDIO interface.
 */
int t4_prep_adapter(struct adapter *adapter)
{
	int ret, ver;
	uint16_t device_id;
	u32 pl_rev;

	get_pci_mode(adapter, &adapter->params.pci);
	pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));

	ret = get_flash_params(adapter);
	if (ret < 0) {
		dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
		return ret;
	}

	/* Retrieve adapter's device ID
	 */
	pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
	/* the top nibble of the PCI device ID encodes the chip generation */
	ver = device_id >> 12;
	adapter->params.chip = 0;
	/* record per-generation architectural constants (TCAM sizes,
	 * channel counts, VF counts, ...) used throughout the driver
	 */
	switch (ver) {
	case CHELSIO_T4:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
		adapter->params.arch.sge_fl_db = DBPRIO_F;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 128;
		adapter->params.arch.nchan = NCHAN;
		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
		adapter->params.arch.vfcount = 128;
		/* Congestion map is for 4 channels so that
		 * MPS can have 4 priority per port.
		 */
		adapter->params.arch.cng_ch_bits_log = 2;
		break;
	case CHELSIO_T5:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
		adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 128;
		adapter->params.arch.nchan = NCHAN;
		adapter->params.arch.pm_stats_cnt = PM_NSTATS;
		adapter->params.arch.vfcount = 128;
		adapter->params.arch.cng_ch_bits_log = 2;
		break;
	case CHELSIO_T6:
		adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
		adapter->params.arch.sge_fl_db = 0;
		adapter->params.arch.mps_tcam_size =
				 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
		adapter->params.arch.mps_rplc_size = 256;
		adapter->params.arch.nchan = 2;
		adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS;
		adapter->params.arch.vfcount = 256;
		/* Congestion map will be for 2 channels so that
		 * MPS can have 8 priority per port.
		 */
		adapter->params.arch.cng_ch_bits_log = 3;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			device_id);
		return -EINVAL;
	}

	adapter->params.cim_la_size = CIMLA_SIZE;
	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);

	/*
	 * Default port for debugging in case we can't reach FW.
	 */
	adapter->params.nports = 1;
	adapter->params.portvec = 1;
	adapter->params.vpd.cclk = 50000;

	/* Set pci completion timeout value to 4 seconds. */
	set_pcie_completion_timeout(adapter, 0xd);
	return 0;
}
/**
 *	t4_shutdown_adapter - shut down adapter, host & wire
 *	@adapter: the adapter
 *
 *	Perform an emergency shutdown of the adapter and stop it from
 *	continuing any further communication on the ports or DMA to the
 *	host.  This is typically used when the adapter and/or firmware
 *	have crashed and we want to prevent any further accidental
 *	communication with the rest of the world.  This will also force
 *	the port Link Status to go down -- if register writes work --
 *	which should help our peers figure out that we're down.
 */
int t4_shutdown_adapter(struct adapter *adapter)
{
	int port;

	/* Stop all interrupt delivery first so nothing races the teardown. */
	t4_intr_disable(adapter);

	/* Turn off the debug GPIOs. */
	t4_write_reg(adapter, DBG_GPIO_EN_A, 0);

	/* Clear the per-port Signal Detect bit; this forces the link down
	 * on each port so our peers notice we're gone.  T4 uses the XGMAC
	 * port register block, later chips use the MAC block.
	 */
	for_each_port(adapter, port) {
		u32 a_port_cfg = PORT_REG(port,
					  is_t4(adapter->params.chip)
					  ? XGMAC_PORT_CFG_A
					  : MAC_PORT_CFG_A);

		t4_write_reg(adapter, a_port_cfg,
			     t4_read_reg(adapter, a_port_cfg)
			     & ~SIGNAL_DET_V(1));
	}

	/* Finally, globally disable the SGE so no further DMA to the host
	 * can occur.
	 */
	t4_set_reg_field(adapter, SGE_CONTROL_A, GLOBALENABLE_F, 0);
	return 0;
}
/**
 *	t4_bar2_sge_qregs - return BAR2 SGE Queue register information
 *	@adapter: the adapter
 *	@qid: the Queue ID
 *	@qtype: the Ingress or Egress type for @qid
 *	@user: true if this request is for a user mode queue
 *	@pbar2_qoffset: BAR2 Queue Offset
 *	@pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
 *
 *	Returns the BAR2 SGE Queue Registers information associated with the
 *	indicated Absolute Queue ID.  These are passed back in return value
 *	pointers.  @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
 *	and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
 *
 *	This may return an error which indicates that BAR2 SGE Queue
 *	registers aren't available.  If an error is not returned, then the
 *	following values are returned:
 *
 *	  *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
 *	  *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
 *
 *	If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
 *	require the "Inferred Queue ID" ability may be used.  E.g. the
 *	Write Combining Doorbell Buffer.  If the BAR2 Queue ID is not 0,
 *	then these "Inferred Queue ID" register may not be used.
 */
int t4_bar2_sge_qregs(struct adapter *adapter,
		      unsigned int qid,
		      enum t4_bar2_qtype qtype,
		      int user,
		      u64 *pbar2_qoffset,
		      unsigned int *pbar2_qid)
{
	unsigned int page_shift, page_size, qpp_shift, qpp_mask;
	u64 bar2_page_offset, bar2_qoffset;
	unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;

	/* T4 doesn't support BAR2 SGE Queue registers for kernel mode queues */
	if (!user && is_t4(adapter->params.chip))
		return -EINVAL;

	/* Get our SGE Page Size parameters.  The cached hps value encodes
	 * log2(page size) - 10, so the smallest encodable page is 1KB.
	 */
	page_shift = adapter->params.sge.hps + 10;
	page_size = 1 << page_shift;

	/* Get the right Queues per Page parameters for our Queue.
	 */
	qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
		     ? adapter->params.sge.eq_qpp
		     : adapter->params.sge.iq_qpp);
	qpp_mask = (1 << qpp_shift) - 1;

	/* Calculate the basics of the BAR2 SGE Queue register area:
	 *  o The BAR2 page the Queue registers will be in.
	 *  o The BAR2 Queue ID.
	 *  o The BAR2 Queue ID Offset into the BAR2 page.
	 */
	bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
	bar2_qid = qid & qpp_mask;
	bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;

	/* If the BAR2 Queue ID Offset is less than the Page Size, then the
	 * hardware will infer the Absolute Queue ID simply from the writes to
	 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
	 * BAR2 Queue ID of 0 for those writes).  Otherwise, we'll simply
	 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
	 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
	 * from the BAR2 Page and BAR2 Queue ID.
	 *
	 * One important consequence of this is that some BAR2 SGE registers
	 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
	 * there.  But other registers synthesize the SGE Queue ID purely
	 * from the writes to the registers -- the Write Combined Doorbell
	 * Buffer is a good example.  These BAR2 SGE Registers are only
	 * available for those BAR2 SGE Register areas where the SGE Absolute
	 * Queue ID can be inferred from simple writes.
	 */
	bar2_qoffset = bar2_page_offset;
	bar2_qinferred = (bar2_qid_offset < page_size);
	if (bar2_qinferred) {
		bar2_qoffset += bar2_qid_offset;
		bar2_qid = 0;
	}

	*pbar2_qoffset = bar2_qoffset;
	*pbar2_qid = bar2_qid;
	return 0;
}
/**
 *	t4_init_devlog_params - initialize adapter->params.devlog
 *	@adap: the adapter
 *
 *	Initialize various fields of the adapter's Firmware Device Log
 *	Parameters structure.
 */
int t4_init_devlog_params(struct adapter *adap)
{
	struct devlog_params *dparams = &adap->params.devlog;
	u32 pf_dparams;
	unsigned int devlog_meminfo;
	struct fw_devlog_cmd devlog_cmd;
	int ret;

	/* If we're dealing with newer firmware, the Device Log Parameters
	 * are stored in a designated register which allows us to access the
	 * Device Log even if we can't talk to the firmware.
	 */
	pf_dparams =
		t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
	if (pf_dparams) {
		unsigned int nentries, nentries128;

		dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
		/* address is stored in 16-byte units, hence the << 4 */
		dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;

		/* entry count is stored in units of 128 entries */
		nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
		nentries = (nentries128 + 1) * 128;
		dparams->size = nentries * sizeof(struct fw_devlog_e);

		return 0;
	}

	/* Otherwise, ask the firmware for its Device Log Parameters.
	 */
	memset(&devlog_cmd, 0, sizeof(devlog_cmd));
	devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
					     FW_CMD_REQUEST_F | FW_CMD_READ_F);
	devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
			 &devlog_cmd);
	if (ret)
		return ret;

	devlog_meminfo =
		be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
	dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
	dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
	dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);

	return 0;
}
  7164. /**
  7165. * t4_init_sge_params - initialize adap->params.sge
  7166. * @adapter: the adapter
  7167. *
  7168. * Initialize various fields of the adapter's SGE Parameters structure.
  7169. */
  7170. int t4_init_sge_params(struct adapter *adapter)
  7171. {
  7172. struct sge_params *sge_params = &adapter->params.sge;
  7173. u32 hps, qpp;
  7174. unsigned int s_hps, s_qpp;
  7175. /* Extract the SGE Page Size for our PF.
  7176. */
  7177. hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
  7178. s_hps = (HOSTPAGESIZEPF0_S +
  7179. (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
  7180. sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
  7181. /* Extract the SGE Egress and Ingess Queues Per Page for our PF.
  7182. */
  7183. s_qpp = (QUEUESPERPAGEPF0_S +
  7184. (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
  7185. qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
  7186. sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
  7187. qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
  7188. sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
  7189. return 0;
  7190. }
/**
 *	t4_init_tp_params - initialize adap->params.tp
 *	@adap: the adapter
 *
 *	Initialize various fields of the adapter's TP Parameters structure.
 */
int t4_init_tp_params(struct adapter *adap)
{
	int chan;
	u32 v;

	/* Cache the TP timer and delayed-ACK resolutions. */
	v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
	adap->params.tp.tre = TIMERRESOLUTION_G(v);
	adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);

	/* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
	for (chan = 0; chan < NCHAN; chan++)
		adap->params.tp.tx_modq[chan] = chan;

	/* Cache the adapter's Compressed Filter Mode and global Ingress
	 * Configuration.  Prefer the firmware LDST path when available so
	 * we don't race other agents on the indirect TP_PIO registers.
	 */
	if (t4_use_ldst(adap)) {
		t4_fw_tp_pio_rw(adap, &adap->params.tp.vlan_pri_map, 1,
				TP_VLAN_PRI_MAP_A, 1);
		t4_fw_tp_pio_rw(adap, &adap->params.tp.ingress_config, 1,
				TP_INGRESS_CONFIG_A, 1);
	} else {
		t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
				 &adap->params.tp.vlan_pri_map, 1,
				 TP_VLAN_PRI_MAP_A);
		t4_read_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
				 &adap->params.tp.ingress_config, 1,
				 TP_INGRESS_CONFIG_A);
	}

	/* For T6, cache the adapter's compressed error vector
	 * and passing outer header info for encapsulated packets.
	 */
	if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
		v = t4_read_reg(adap, TP_OUT_CONFIG_A);
		adap->params.tp.rx_pkt_encap = (v & CRXPKTENC_F) ? 1 : 0;
	}

	/* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
	 * shift positions of several elements of the Compressed Filter Tuple
	 * for this adapter which we need frequently ...
	 */
	adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
	adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
	adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
	adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
							       PROTOCOL_F);

	/* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
	 * represents the presence of an Outer VLAN instead of a VNIC ID.
	 */
	if ((adap->params.tp.ingress_config & VNIC_F) == 0)
		adap->params.tp.vnic_shift = -1;

	return 0;
}
  7246. /**
  7247. * t4_filter_field_shift - calculate filter field shift
  7248. * @adap: the adapter
  7249. * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
  7250. *
  7251. * Return the shift position of a filter field within the Compressed
  7252. * Filter Tuple. The filter field is specified via its selection bit
  7253. * within TP_VLAN_PRI_MAL (filter mode). E.g. F_VLAN.
  7254. */
  7255. int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
  7256. {
  7257. unsigned int filter_mode = adap->params.tp.vlan_pri_map;
  7258. unsigned int sel;
  7259. int field_shift;
  7260. if ((filter_mode & filter_sel) == 0)
  7261. return -1;
  7262. for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
  7263. switch (filter_mode & sel) {
  7264. case FCOE_F:
  7265. field_shift += FT_FCOE_W;
  7266. break;
  7267. case PORT_F:
  7268. field_shift += FT_PORT_W;
  7269. break;
  7270. case VNIC_ID_F:
  7271. field_shift += FT_VNIC_ID_W;
  7272. break;
  7273. case VLAN_F:
  7274. field_shift += FT_VLAN_W;
  7275. break;
  7276. case TOS_F:
  7277. field_shift += FT_TOS_W;
  7278. break;
  7279. case PROTOCOL_F:
  7280. field_shift += FT_PROTOCOL_W;
  7281. break;
  7282. case ETHERTYPE_F:
  7283. field_shift += FT_ETHERTYPE_W;
  7284. break;
  7285. case MACMATCH_F:
  7286. field_shift += FT_MACMATCH_W;
  7287. break;
  7288. case MPSHITTYPE_F:
  7289. field_shift += FT_MPSHITTYPE_W;
  7290. break;
  7291. case FRAGMENTATION_F:
  7292. field_shift += FT_FRAGMENTATION_W;
  7293. break;
  7294. }
  7295. }
  7296. return field_shift;
  7297. }
/**
 *	t4_init_rss_mode - initialize the RSS mode of all ports
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW command
 *
 *	Reads the Virtual Interface RSS configuration from the firmware for
 *	each port and caches it in the port's rss_mode field.  Returns 0 on
 *	success or a negative error from the mailbox exchange.
 */
int t4_init_rss_mode(struct adapter *adap, int mbox)
{
	int i, ret;
	struct fw_rss_vi_config_cmd rvc;

	memset(&rvc, 0, sizeof(rvc));

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		rvc.op_to_viid =
			cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
				    FW_CMD_REQUEST_F | FW_CMD_READ_F |
				    FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
		rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
		ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
		if (ret)
			return ret;
		p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
	}
	return 0;
}
/**
 *	t4_init_portinfo - allocate a virtual interface and initialize port_info
 *	@pi: the port_info
 *	@mbox: mailbox to use for the FW command
 *	@port: physical port associated with the VI
 *	@pf: the PF owning the VI
 *	@vf: the VF owning the VI
 *	@mac: the MAC address of the VI
 *
 *	Allocates a virtual interface for the given physical port.  If @mac is
 *	not %NULL it contains the MAC address of the VI as assigned by FW.
 *	@mac should be large enough to hold an Ethernet address.
 *	Returns < 0 on error.
 */
int t4_init_portinfo(struct port_info *pi, int mbox,
		     int port, int pf, int vf, u8 mac[])
{
	int ret;
	struct fw_port_cmd c;
	unsigned int rss_size;

	/* Query the firmware for the port's link/module information. */
	memset(&c, 0, sizeof(c));
	c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
				     FW_CMD_REQUEST_F | FW_CMD_READ_F |
				     FW_PORT_CMD_PORTID_V(port));
	c.action_to_len16 = cpu_to_be32(
		FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_GET_PORT_INFO) |
		FW_LEN16(c));
	ret = t4_wr_mbox(pi->adapter, mbox, &c, sizeof(c), &c);
	if (ret)
		return ret;

	/* Allocate the VI; on success the return value is the VI ID. */
	ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size);
	if (ret < 0)
		return ret;

	pi->viid = ret;
	pi->tx_chan = port;
	pi->lport = port;
	pi->rss_size = rss_size;

	/* Decode the port's capabilities from the GET_PORT_INFO reply. */
	ret = be32_to_cpu(c.u.info.lstatus_to_modtype);
	pi->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP_F) ?
		FW_PORT_CMD_MDIOADDR_G(ret) : -1;
	pi->port_type = FW_PORT_CMD_PTYPE_G(ret);
	pi->mod_type = FW_PORT_MOD_TYPE_NA;

	init_link_config(&pi->link_cfg, be16_to_cpu(c.u.info.pcap));
	return 0;
}
/**
 *	t4_port_init - initialize all of the adapter's ports
 *	@adap: the adapter
 *	@mbox: mailbox to use for the FW commands
 *	@pf: the PF owning the VIs
 *	@vf: the VF owning the VIs
 *
 *	Walks the adapter's port vector, allocating a VI for each physical
 *	port present and copying the FW-assigned MAC address into the
 *	corresponding net device.  Returns 0 on success or a negative error.
 */
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
	u8 addr[6];
	int ret, i, j = 0;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		/* Skip over port-vector bits for ports that aren't present. */
		while ((adap->params.portvec & (1 << j)) == 0)
			j++;

		ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
		if (ret)
			return ret;
		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
		j++;
	}
	return 0;
}
/**
 *	t4_read_cimq_cfg - read CIM queue configuration
 *	@adap: the adapter
 *	@base: holds the queue base addresses in bytes
 *	@size: holds the queue sizes in bytes
 *	@thres: holds the queue full thresholds in bytes
 *
 *	Returns the current configuration of the CIM queues, starting with
 *	the IBQs, then the OBQs.
 */
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
{
	unsigned int i, v;
	int cim_num_obq = is_t4(adap->params.chip) ?
				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	for (i = 0; i < CIM_NUM_IBQ; i++) {
		/* Select IBQ i, then read its config from the CTRL register. */
		t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
			     QUENUMSELECT_V(i));
		v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
		/* value is in 256-byte units */
		*base++ = CIMQBASE_G(v) * 256;
		*size++ = CIMQSIZE_G(v) * 256;
		*thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
	}
	for (i = 0; i < cim_num_obq; i++) {
		/* OBQs have no full threshold, only base and size. */
		t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
			     QUENUMSELECT_V(i));
		v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
		/* value is in 256-byte units */
		*base++ = CIMQBASE_G(v) * 256;
		*size++ = CIMQSIZE_G(v) * 256;
	}
}
/**
 *	t4_read_cim_ibq - read the contents of a CIM inbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err, attempts;
	unsigned int addr;
	const unsigned int nwords = CIM_IBQ_SIZE * 4;

	/* Only IBQs 0-5 exist. */
	if (qid > 5 || (n & 3))
		return -EINVAL;

	addr = qid * nwords;
	if (n > nwords)
		n = nwords;

	/* It might take 3-10ms before the IBQ debug read access is allowed.
	 * Wait for 1 Sec with a delay of 1 usec.
	 */
	attempts = 1000000;

	for (i = 0; i < n; i++, addr++) {
		/* Point the debug interface at the next word and wait for
		 * the BUSY bit to clear before reading the data register.
		 */
		t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
			     IBQDBGEN_F);
		err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
				      attempts, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
	}
	/* Disable the debug interface again. */
	t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
	return i;
}
/**
 *	t4_read_cim_obq - read the contents of a CIM outbound queue
 *	@adap: the adapter
 *	@qid: the queue index
 *	@data: where to store the queue contents
 *	@n: capacity of @data in 32-bit words
 *
 *	Reads the contents of the selected CIM queue starting at address 0 up
 *	to the capacity of @data.  @n must be a multiple of 4.  Returns < 0 on
 *	error and the number of 32-bit words actually read on success.
 */
int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
{
	int i, err;
	unsigned int addr, v, nwords;
	int cim_num_obq = is_t4(adap->params.chip) ?
				CIM_NUM_OBQ : CIM_NUM_OBQ_T5;

	if ((qid > (cim_num_obq - 1)) || (n & 3))
		return -EINVAL;

	/* Look up the selected OBQ's base and size from the hardware. */
	t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
		     QUENUMSELECT_V(qid));
	v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);

	addr = CIMQBASE_G(v) * 64;    /* multiple of 256 -> multiple of 4 */
	nwords = CIMQSIZE_G(v) * 64;  /* same */
	if (n > nwords)
		n = nwords;

	for (i = 0; i < n; i++, addr++) {
		/* Select the next word via the debug interface and wait
		 * briefly for the BUSY bit to clear before reading it.
		 */
		t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
			     OBQDBGEN_F);
		err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
				      2, 1);
		if (err)
			return err;
		*data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
	}
	/* Disable the debug interface again. */
	t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
	return i;
}
/**
 *	t4_cim_read - read a block from CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to read
 *	@valp: where to store the result
 *
 *	Reads a block of 4-byte words from the CIM internal address space.
 *	Returns 0 on success, -EBUSY if the host access interface is already
 *	busy, or the error from waiting for an individual access to finish.
 */
int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
		unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		/* Kick off the read, wait for HOSTBUSY to clear, then fetch
		 * the data word.
		 */
		t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
		ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
				      0, 5, 2);
		if (!ret)
			*valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
	}
	return ret;
}
/**
 *	t4_cim_write - write a block into CIM internal address space
 *	@adap: the adapter
 *	@addr: the start address within the CIM address space
 *	@n: number of words to write
 *	@valp: set of values to write
 *
 *	Writes a block of 4-byte words into the CIM internal address space.
 *	Returns 0 on success, -EBUSY if the host access interface is already
 *	busy, or the error from waiting for an individual access to finish.
 */
int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
		 const unsigned int *valp)
{
	int ret = 0;

	if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
		return -EBUSY;

	for ( ; !ret && n--; addr += 4) {
		/* Load the data register first, then trigger the write and
		 * wait for HOSTBUSY to clear.
		 */
		t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
		t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
		ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
				      0, 5, 2);
	}
	return ret;
}
  7533. static int t4_cim_write1(struct adapter *adap, unsigned int addr,
  7534. unsigned int val)
  7535. {
  7536. return t4_cim_write(adap, addr, 1, &val);
  7537. }
/**
 *	t4_cim_read_la - read CIM LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the CIM LA buffer with the most recent entry at
 *	the end	of the returned data and with the entry at @wrptr first.
 *	We try to leave the LA in the running state we find it in.
 */
int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
{
	int i, ret;
	unsigned int cfg, val, idx;

	ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
	if (ret)
		return ret;

	if (cfg & UPDBGLAEN_F) {	/* LA is running, freeze it */
		ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
		if (ret)
			return ret;
	}

	/* Start reading at the hardware write pointer so the oldest entry
	 * comes out first.
	 */
	ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
	if (ret)
		goto restart;

	idx = UPDBGLAWRPTR_G(val);
	if (wrptr)
		*wrptr = idx;

	for (i = 0; i < adap->params.cim_la_size; i++) {
		/* Request entry @idx, then poll for the read-enable bit to
		 * self-clear before fetching the data word.
		 */
		ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
				    UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
		if (ret)
			break;
		ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
		if (ret)
			break;
		if (val & UPDBGLARDEN_F) {
			ret = -ETIMEDOUT;
			break;
		}
		ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
		if (ret)
			break;
		idx = (idx + 1) & UPDBGLARDPTR_M;
	}
restart:
	/* If the LA was running when we arrived, restart it; keep the first
	 * error seen if the restart itself also fails.
	 */
	if (cfg & UPDBGLAEN_F) {
		int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
				      cfg & ~UPDBGLARDEN_F);
		if (!ret)
			ret = r;
	}
	return ret;
}
/**
 *	t4_tp_read_la - read TP LA capture buffer
 *	@adap: the adapter
 *	@la_buf: where to store the LA data
 *	@wrptr: the HW write pointer within the capture buffer
 *
 *	Reads the contents of the TP LA buffer with the most recent entry at
 *	the end	of the returned data and with the entry at @wrptr first.
 *	We leave the LA in the running state we find it in.
 */
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
{
	bool last_incomplete;
	unsigned int i, cfg, val, idx;

	cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
	if (cfg & DBGLAENABLE_F)			/* freeze LA */
		t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
			     adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F));

	val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
	idx = DBGLAWPTR_G(val);

	/* In the wider capture modes the last entry may only be half
	 * written; if so, step past it and wipe it below.
	 */
	last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
	if (last_incomplete)
		idx = (idx + 1) & DBGLARPTR_M;
	if (wrptr)
		*wrptr = idx;

	/* Build the config word we'll reuse for every read: clear the read
	 * pointer field but keep the mode bits and the LA mask.
	 */
	val &= 0xffff;
	val &= ~DBGLARPTR_V(DBGLARPTR_M);
	val |= adap->params.tp.la_mask;

	for (i = 0; i < TPLA_SIZE; i++) {
		t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
		la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
		idx = (idx + 1) & DBGLARPTR_M;
	}

	/* Wipe out last entry if it isn't valid */
	if (last_incomplete)
		la_buf[TPLA_SIZE - 1] = ~0ULL;

	if (cfg & DBGLAENABLE_F)		/* restore running state */
		t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
			     cfg | adap->params.tp.la_mask);
}
/* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
 * seconds).  If we find one of the SGE Ingress DMA State Machines in the same
 * state for more than the Warning Threshold then we'll issue a warning about
 * a potential hang.  We'll repeat the warning as the SGE Ingress DMA Channel
 * appears to be hung every Warning Repeat second till the situation clears.
 * If the situation clears, we'll note that as well.
 */
#define SGE_IDMA_WARN_THRESH 1		/* seconds in same state before warn */
#define SGE_IDMA_WARN_REPEAT 300	/* seconds between repeated warnings */
/**
 *	t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
 *	@adapter: the adapter
 *	@idma: the adapter IDMA Monitor state
 *
 *	Initialize the state of an SGE Ingress DMA Monitor.
 */
void t4_idma_monitor_init(struct adapter *adapter,
			  struct sge_idma_monitor_state *idma)
{
	/* Initialize the state variables for detecting an SGE Ingress DMA
	 * hang.  The SGE has internal counters which count up on each clock
	 * tick whenever the SGE finds its Ingress DMA State Engines in the
	 * same state they were on the previous clock tick.  The clock used is
	 * the Core Clock so we have a limit on the maximum "time" they can
	 * record; typically a very small number of seconds.  For instance,
	 * with a 600MHz Core Clock, we can only count up to a bit more than
	 * 7s.  So we'll synthesize a larger counter in order to not run the
	 * risk of having the "timers" overflow and give us the flexibility to
	 * maintain a Hung SGE State Machine of our own which operates across
	 * a longer time frame.
	 */
	idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
	idma->idma_stalled[0] = 0;
	idma->idma_stalled[1] = 0;
}
/**
 *	t4_idma_monitor - monitor SGE Ingress DMA state
 *	@adapter: the adapter
 *	@idma: the adapter IDMA Monitor state
 *	@hz: number of ticks/second
 *	@ticks: number of ticks since the last IDMA Monitor call
 *
 *	Checks both SGE Ingress DMA channels for apparent stalls, emits a
 *	warning (rate-limited to once per SGE_IDMA_WARN_REPEAT seconds) for
 *	each channel that appears hung, and notes when a previously-warned
 *	channel resumes forward progress.
 */
void t4_idma_monitor(struct adapter *adapter,
		     struct sge_idma_monitor_state *idma,
		     int hz, int ticks)
{
	int i, idma_same_state_cnt[2];

	/* Read the SGE Debug Ingress DMA Same State Count registers.  These
	 * are counters inside the SGE which count up on each clock when the
	 * SGE finds its Ingress DMA State Engines in the same states they
	 * were in the previous clock.  The counters will peg out at
	 * 0xffffffff without wrapping around so once they pass the 1s
	 * threshold they'll stay above that till the IDMA state changes.
	 */
	t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
	idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
	idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);

	for (i = 0; i < 2; i++) {
		u32 debug0, debug11;

		/* If the Ingress DMA Same State Counter ("timer") is less
		 * than 1s, then we can reset our synthesized Stall Timer and
		 * continue.  If we have previously emitted warnings about a
		 * potential stalled Ingress Queue, issue a note indicating
		 * that the Ingress Queue has resumed forward progress.
		 */
		if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
			if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
				dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
					 "resumed after %d seconds\n",
					 i, idma->idma_qid[i],
					 idma->idma_stalled[i] / hz);
			idma->idma_stalled[i] = 0;
			continue;
		}

		/* Synthesize an SGE Ingress DMA Same State Timer in the Hz
		 * domain.  The first time we get here it'll be because we
		 * passed the 1s Threshold; each additional time it'll be
		 * because the RX Timer Callback is being fired on its regular
		 * schedule.
		 *
		 * If the stall is below our Potential Hung Ingress Queue
		 * Warning Threshold, continue.
		 */
		if (idma->idma_stalled[i] == 0) {
			idma->idma_stalled[i] = hz;
			idma->idma_warn[i] = 0;
		} else {
			idma->idma_stalled[i] += ticks;
			idma->idma_warn[i] -= ticks;
		}

		if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
			continue;

		/* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
		 */
		if (idma->idma_warn[i] > 0)
			continue;
		idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;

		/* Read and save the SGE IDMA State and Queue ID information.
		 * We do this every time in case it changes across time ...
		 * can't be too careful ...
		 */
		t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
		debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
		idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;

		t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
		debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
		idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;

		dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
			 "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
			 i, idma->idma_qid[i], idma->idma_state[i],
			 idma->idma_stalled[i] / hz,
			 debug0, debug11);
		t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
	}
}
/**
 *	t4_set_vf_mac_acl - Set MAC address for the specified VF
 *	@adapter: The adapter
 *	@vf: one of the VFs instantiated by the specified PF
 *	@naddr: the number of MAC addresses
 *	@addr: the MAC address(es) to be set to the specified VF
 *
 *	Issues an FW_ACL_MAC_CMD without enabling the ACL; the per-PF MAC
 *	slot used is selected by the adapter's own PF number.  Returns the
 *	mailbox result.
 */
int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
		      unsigned int naddr, u8 *addr)
{
	struct fw_acl_mac_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) |
				    FW_CMD_REQUEST_F |
				    FW_CMD_WRITE_F |
				    FW_ACL_MAC_CMD_PFN_V(adapter->pf) |
				    FW_ACL_MAC_CMD_VFN_V(vf));

	/* Note: Do not enable the ACL */
	cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
	cmd.nmac = naddr;

	/* Each PF writes into its own macaddrN slot of the command. */
	switch (adapter->pf) {
	case 3:
		memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
		break;
	case 2:
		memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
		break;
	case 1:
		memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
		break;
	case 0:
		memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
		break;
	}

	return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
}
/**
 *	t4_sched_params - sets Tx scheduler parameters via FW_SCHED_CMD
 *	@adapter: the adapter
 *	@type: scheduler type
 *	@level: scheduler hierarchy level
 *	@mode: scheduling mode
 *	@rateunit: unit the rates are expressed in
 *	@ratemode: relative or absolute rate mode
 *	@channel: scheduler channel
 *	@class: scheduler class
 *	@minrate: minimum rate
 *	@maxrate: maximum rate
 *	@weight: scheduling weight
 *	@pktsize: packet size
 *
 *	Packs the given scheduling parameters into an FW_SCHED_CMD and
 *	issues it over the adapter's mailbox.  Returns the mailbox result.
 */
int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
		    int rateunit, int ratemode, int channel, int class,
		    int minrate, int maxrate, int weight, int pktsize)
{
	struct fw_sched_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_SCHED_CMD) |
				      FW_CMD_REQUEST_F |
				      FW_CMD_WRITE_F);
	cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));

	cmd.u.params.sc = FW_SCHED_SC_PARAMS;
	cmd.u.params.type = type;
	cmd.u.params.level = level;
	cmd.u.params.mode = mode;
	cmd.u.params.ch = channel;
	cmd.u.params.cl = class;
	cmd.u.params.unit = rateunit;
	cmd.u.params.rate = ratemode;
	cmd.u.params.min = cpu_to_be32(minrate);
	cmd.u.params.max = cpu_to_be32(maxrate);
	cmd.u.params.weight = cpu_to_be16(weight);
	cmd.u.params.pktsize = cpu_to_be16(pktsize);

	return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
			       NULL, 1);
}