cxgb4_main.c

/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/crc32.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/rtnetlink.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/sockios.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/addrconf.h>
#include <net/bonding.h>
#include <linux/uaccess.h>
#include <linux/crash_dump.h>
#include <net/udp_tunnel.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
#include "t4fw_api.h"
#include "t4fw_version.h"
#include "cxgb4_dcb.h"
#include "srq.h"
#include "cxgb4_debugfs.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "smt.h"
#include "sched.h"
#include "cxgb4_tc_u32.h"
#include "cxgb4_tc_flower.h"
#include "cxgb4_ptp.h"
#include "cxgb4_cudbg.h"
char cxgb4_driver_name[] = KBUILD_MODNAME;

#ifdef DRV_VERSION
#undef DRV_VERSION
#endif
#define DRV_VERSION "2.0.0-ko"
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/* Macros needed to support the PCI Device ID Table ...
 */
#define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
	static const struct pci_device_id cxgb4_pci_tbl[] = {
#define CXGB4_UNIFIED_PF 0x4

#define CH_PCI_DEVICE_ID_FUNCTION CXGB4_UNIFIED_PF

/* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
 * called for both.
 */
#define CH_PCI_DEVICE_ID_FUNCTION2 0x0

#define CH_PCI_ID_TABLE_ENTRY(devid) \
	{PCI_VDEVICE(CHELSIO, (devid)), CXGB4_UNIFIED_PF}

#define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
		{ 0, } \
	}

#include "t4_pci_id_tbl.h"
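
/* Illustrative sketch of the table machinery above (0x4400 is a hypothetical
 * device ID, not taken from t4_pci_id_tbl.h): an entry written there as
 * CH_PCI_ID_TABLE_ENTRY(0x4400) expands to
 *
 *	{PCI_VDEVICE(CHELSIO, (0x4400)), CXGB4_UNIFIED_PF},
 *
 * so the included header emits the whole cxgb4_pci_tbl[] array between the
 * DEFINE_BEGIN and DEFINE_END markers.
 */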

#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
#define PHY_BCM84834_DEVICEID 0x4486

MODULE_DESCRIPTION(DRV_DESC);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
MODULE_FIRMWARE(FW6_FNAME);

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and INTx interrupts
 * msi = 0: force INTx interrupts
 */
static int msi = 2;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
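
/* Usage sketch (not part of the driver): restricting the driver to MSI or
 * INTx is typically done on the module command line, e.g.
 *
 *	modprobe cxgb4 msi=1
 *
 * The 0644 permissions above also expose the value read/write under
 * /sys/module/cxgb4/parameters/msi, though the driver only consults it when
 * it sets up its interrupt scheme.
 */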

/*
 * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
 * offset by 2 bytes in order to have the IP headers line up on 4-byte
 * boundaries.  This is a requirement for many architectures which will throw
 * a machine check fault if an attempt is made to access one of the 4-byte IP
 * header fields on a non-4-byte boundary.  And it's a major performance issue
 * even on some architectures which allow it like some implementations of the
 * x86 ISA.  However, some architectures don't mind this and for some very
 * edge-case performance sensitive applications (like forwarding large volumes
 * of small packets), setting this DMA offset to 0 will decrease the number of
 * PCI-E Bus transfers enough to measurably affect performance.
 */
static int rx_dma_offset = 2;
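
/* Worked example of the alignment described above: with the default offset
 * of 2, the 14-byte Ethernet header occupies buffer bytes 2-15, so the IP
 * header that follows begins at byte 16, a 4-byte boundary.  With an offset
 * of 0 it would begin at byte 14 and every 4-byte IP header field would
 * straddle a word boundary.
 */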

/* TX Queue select used to determine what algorithm to use for selecting TX
 * queue.  Select between the kernel provided function (select_queue=0) or the
 * driver's cxgb_select_queue() function (select_queue=1)
 *
 * Default: select_queue=0
 */
static int select_queue;
module_param(select_queue, int, 0644);
MODULE_PARM_DESC(select_queue,
		 "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");

static struct dentry *cxgb4_debugfs_root;

LIST_HEAD(adapter_list);
DEFINE_MUTEX(uld_mutex);

static void link_report(struct net_device *dev)
{
	if (!netif_carrier_ok(dev))
		netdev_info(dev, "link down\n");
	else {
		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };

		const char *s;
		const struct port_info *p = netdev_priv(dev);

		switch (p->link_cfg.speed) {
		case 100:
			s = "100Mbps";
			break;
		case 1000:
			s = "1Gbps";
			break;
		case 10000:
			s = "10Gbps";
			break;
		case 25000:
			s = "25Gbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		case 50000:
			s = "50Gbps";
			break;
		case 100000:
			s = "100Gbps";
			break;
		default:
			pr_info("%s: unsupported speed: %d\n",
				dev->name, p->link_cfg.speed);
			return;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
			    fc[p->link_cfg.fc]);
	}
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
	int i;

	/* We use a simple mapping of Port TX Queue Index to DCB
	 * Priority when we're enabling DCB.
	 */
	for (i = 0; i < pi->nqsets; i++, txq++) {
		u32 name, value;
		int err;

		name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			FW_PARAMS_PARAM_X_V(
				FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
			FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
		value = enable ? i : 0xffffffff;

		/* Since we can be called while atomic (from "interrupt
		 * level") we need to issue the Set Parameters Command
		 * without sleeping (timeout < 0).
		 */
		err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
					    &name, &value,
					    -FW_CMD_MAX_TIMEOUT);
		if (err)
			dev_err(adap->pdev_dev,
				"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
				enable ? "set" : "unset", pi->port_id, i, -err);
		else
			txq->dcb_prio = value;
	}
}

static int cxgb4_dcb_enabled(const struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);

	if (!pi->dcb.enabled)
		return 0;

	return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
		(pi->dcb.state == CXGB4_DCB_STATE_HOST));
}
#endif /* CONFIG_CHELSIO_T4_DCB */

void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
{
	struct net_device *dev = adapter->port[port_id];

	/* Skip changes from disabled ports. */
	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
		if (link_stat)
			netif_carrier_on(dev);
		else {
#ifdef CONFIG_CHELSIO_T4_DCB
			if (cxgb4_dcb_enabled(dev)) {
				cxgb4_dcb_reset(dev);
				dcb_tx_queue_prio_enable(dev, false);
			}
#endif /* CONFIG_CHELSIO_T4_DCB */
			netif_carrier_off(dev);
		}

		link_report(dev);
	}
}

void t4_os_portmod_changed(struct adapter *adap, int port_id)
{
	static const char *mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};

	struct net_device *dev = adap->port[port_id];
	struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		netdev_info(dev, "port module unplugged\n");
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		netdev_info(dev, "%s: unsupported port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		netdev_info(dev, "%s: unknown port module inserted\n",
			    dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		netdev_info(dev, "%s: transceiver module error\n", dev->name);
	else
		netdev_info(dev, "%s: unknown module type %d inserted\n",
			    dev->name, pi->mod_type);

	/* If the interface is running, then we'll need any "sticky" Link
	 * Parameters redone with a new Transceiver Module.
	 */
	pi->link_cfg.redo_l1cfg = netif_running(dev);
}

int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
module_param(dbfifo_int_thresh, int, 0644);
MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");

/*
 * usecs to sleep while draining the dbfifo
 */
static int dbfifo_drain_delay = 1000;
module_param(dbfifo_drain_delay, int, 0644);
MODULE_PARM_DESC(dbfifo_drain_delay,
		 "usecs to sleep while draining the dbfifo");

static inline int cxgb4_set_addr_hash(struct port_info *pi)
{
	struct adapter *adap = pi->adapter;
	u64 vec = 0;
	bool ucast = false;
	struct hash_mac_addr *entry;

	/* Calculate the hash vector for the updated list and program it */
	list_for_each_entry(entry, &adap->mac_hlist, list) {
		ucast |= is_unicast_ether_addr(entry->addr);
		vec |= (1ULL << hash_mac_addr(entry->addr));
	}
	return t4_set_addr_hash(adap, adap->mbox, pi->viid, ucast,
				vec, false);
}
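
/* Note on the vector computed above (descriptive, inferred from the 64-bit
 * shift): vec is a 64-bit bitmap with one bit per hash bucket, so
 * hash_mac_addr() is assumed to fold a 48-bit MAC address down to a 6-bit
 * bucket index.  Each address on adap->mac_hlist sets exactly one bit, and
 * the whole vector is then programmed in a single t4_set_addr_hash() call.
 */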

static int cxgb4_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = false;
	bool ucast = is_unicast_ether_addr(mac_addr);
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *new_entry;

	ret = t4_alloc_mac_filt(adap, adap->mbox, pi->viid, free, 1, maclist,
				NULL, ucast ? &uhash : &mhash, false);
	if (ret < 0)
		goto out;
	/* If the address was hashed (non-zero uhash/mhash) rather than given
	 * an exact-match filter, add it to the hash address list so that we
	 * can recalculate the hash vector for the whole list and program it.
	 */
	if (uhash || mhash) {
		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, mac_addr);
		list_add_tail(&new_entry->list, &adap->mac_hlist);
		ret = cxgb4_set_addr_hash(pi);
	}
out:
	return ret < 0 ? ret : 0;
}

static int cxgb4_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adap = pi->adapter;
	int ret;
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *entry, *tmp;

	/* If the MAC address to be removed is in the hash addr
	 * list, delete it from the list and update hash vector
	 */
	list_for_each_entry_safe(entry, tmp, &adap->mac_hlist, list) {
		if (ether_addr_equal(entry->addr, mac_addr)) {
			list_del(&entry->list);
			kfree(entry);
			return cxgb4_set_addr_hash(pi);
		}
	}

	ret = t4_free_mac_filt(adap, adap->mbox, pi->viid, 1, maclist, false);
	return ret < 0 ? -EINVAL : 0;
}

/*
 * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	__dev_uc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);
	__dev_mc_sync(dev, cxgb4_mac_sync, cxgb4_mac_unsync);

	return t4_set_rxmode(adapter, adapter->mbox, pi->viid, mtu,
			     (dev->flags & IFF_PROMISC) ? 1 : 0,
			     (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
			     sleep_ok);
}

/**
 * link_start - enable a port
 * @dev: the port to enable
 *
 * Performs the MAC and PHY actions needed to enable a port.
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);
	unsigned int mb = pi->adapter->pf;

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly.
	 */
	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
			    !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (ret == 0) {
		ret = t4_change_mac(pi->adapter, mb, pi->viid,
				    pi->xact_addr_filt, dev->dev_addr, true,
				    true);
		if (ret >= 0) {
			pi->xact_addr_filt = ret;
			ret = 0;
		}
	}
	if (ret == 0)
		ret = t4_link_l1cfg(pi->adapter, mb, pi->tx_chan,
				    &pi->link_cfg);
	if (ret == 0) {
		local_bh_disable();
		ret = t4_enable_pi_params(pi->adapter, mb, pi, true,
					  true, CXGB4_DCB_ENABLED);
		local_bh_enable();
	}

	return ret;
}

#ifdef CONFIG_CHELSIO_T4_DCB
/* Handle a Data Center Bridging update message from the firmware. */
static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
{
	int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
	struct net_device *dev = adap->port[adap->chan_map[port]];
	int old_dcb_enabled = cxgb4_dcb_enabled(dev);
	int new_dcb_enabled;

	cxgb4_dcb_handle_fw_update(adap, pcmd);
	new_dcb_enabled = cxgb4_dcb_enabled(dev);

	/* If the DCB has become enabled or disabled on the port then we're
	 * going to need to set up/tear down DCB Priority parameters for the
	 * TX Queues associated with the port.
	 */
	if (new_dcb_enabled != old_dcb_enabled)
		dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
}
#endif /* CONFIG_CHELSIO_T4_DCB */

/* Response queue handler for the FW event queue.
 */
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	u8 opcode = ((const struct rss_header *)rsp)->opcode;

	rsp++;                                          /* skip RSS header */

	/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
	 */
	if (unlikely(opcode == CPL_FW4_MSG &&
		     ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
		rsp++;
		opcode = ((const struct rss_header *)rsp)->opcode;
		rsp++;
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n",
				opcode);
			goto out;
		}
	}

	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
		const struct cpl_sge_egr_update *p = (void *)rsp;
		unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
		struct sge_txq *txq;

		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
		txq->restarts++;
		if (txq->q_type == CXGB4_TXQ_ETH) {
			struct sge_eth_txq *eq;

			eq = container_of(txq, struct sge_eth_txq, q);
			netif_tx_wake_queue(eq->txq);
		} else {
			struct sge_uld_txq *oq;

			oq = container_of(txq, struct sge_uld_txq, q);
			tasklet_schedule(&oq->qresume_tsk);
		}
	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
		const struct cpl_fw6_msg *p = (void *)rsp;

#ifdef CONFIG_CHELSIO_T4_DCB
		const struct fw_port_cmd *pcmd = (const void *)p->data;
		unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
		unsigned int action =
			FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));

		if (cmd == FW_PORT_CMD &&
		    (action == FW_PORT_ACTION_GET_PORT_INFO ||
		     action == FW_PORT_ACTION_GET_PORT_INFO32)) {
			int port = FW_PORT_CMD_PORTID_G(
					be32_to_cpu(pcmd->op_to_portid));
			struct net_device *dev;
			int dcbxdis, state_input;

			dev = q->adap->port[q->adap->chan_map[port]];
			dcbxdis = (action == FW_PORT_ACTION_GET_PORT_INFO
				   ? !!(pcmd->u.info.dcbxdis_pkd &
					FW_PORT_CMD_DCBXDIS_F)
				   : !!(pcmd->u.info32.lstatus32_to_cbllen32 &
					FW_PORT_CMD_DCBXDIS32_F));
			state_input = (dcbxdis
				       ? CXGB4_DCB_INPUT_FW_DISABLED
				       : CXGB4_DCB_INPUT_FW_ENABLED);

			cxgb4_dcb_state_fsm(dev, state_input);
		}

		if (cmd == FW_PORT_CMD &&
		    action == FW_PORT_ACTION_L2_DCB_CFG)
			dcb_rpl(q->adap, pcmd);
		else
#endif
			if (p->type == 0)
				t4_handle_fw_rpl(q->adap, p->data);
	} else if (opcode == CPL_L2T_WRITE_RPL) {
		const struct cpl_l2t_write_rpl *p = (void *)rsp;

		do_l2t_write_rpl(q->adap, p);
	} else if (opcode == CPL_SMT_WRITE_RPL) {
		const struct cpl_smt_write_rpl *p = (void *)rsp;

		do_smt_write_rpl(q->adap, p);
	} else if (opcode == CPL_SET_TCB_RPL) {
		const struct cpl_set_tcb_rpl *p = (void *)rsp;

		filter_rpl(q->adap, p);
	} else if (opcode == CPL_ACT_OPEN_RPL) {
		const struct cpl_act_open_rpl *p = (void *)rsp;

		hash_filter_rpl(q->adap, p);
	} else if (opcode == CPL_ABORT_RPL_RSS) {
		const struct cpl_abort_rpl_rss *p = (void *)rsp;

		hash_del_filter_rpl(q->adap, p);
	} else if (opcode == CPL_SRQ_TABLE_RPL) {
		const struct cpl_srq_table_rpl *p = (void *)rsp;

		do_srq_table_rpl(q->adap, p);
	} else
		dev_err(q->adap->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
out:
	return 0;
}

static void disable_msi(struct adapter *adapter)
{
	if (adapter->flags & USING_MSIX) {
		pci_disable_msix(adapter->pdev);
		adapter->flags &= ~USING_MSIX;
	} else if (adapter->flags & USING_MSI) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~USING_MSI;
	}
}

/*
 * Interrupt handler for non-data events used with MSI-X.
 */
static irqreturn_t t4_nondata_intr(int irq, void *cookie)
{
	struct adapter *adap = cookie;
	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));

	if (v & PFSW_F) {
		adap->swintr = 1;
		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
	}
	if (adap->flags & MASTER_PF)
		t4_slow_intr_handler(adap);
	return IRQ_HANDLED;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adap)
{
	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);

	/* non-data interrupts */
	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);

	/* FW events */
	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
		 adap->port[0]->name);

	/* Ethernet queues */
	for_each_port(adap, j) {
		struct net_device *d = adap->port[j];
		const struct port_info *pi = netdev_priv(d);

		for (i = 0; i < pi->nqsets; i++, msi_idx++)
			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
				 d->name, i);
	}
}

static int request_msix_queue_irqs(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err, ethqidx;
	int msi_index = 2;

	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
			  adap->msix_info[1].desc, &s->fw_evtq);
	if (err)
		return err;

	for_each_ethrxq(s, ethqidx) {
		err = request_irq(adap->msix_info[msi_index].vec,
				  t4_sge_intr_msix, 0,
				  adap->msix_info[msi_index].desc,
				  &s->ethrxq[ethqidx].rspq);
		if (err)
			goto unwind;
		msi_index++;
	}
	return 0;

unwind:
	while (--ethqidx >= 0)
		free_irq(adap->msix_info[--msi_index].vec,
			 &s->ethrxq[ethqidx].rspq);
	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	return err;
}

static void free_msix_queue_irqs(struct adapter *adap)
{
	int i, msi_index = 2;
	struct sge *s = &adap->sge;

	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
	for_each_ethrxq(s, i)
		free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
}

/**
 * cxgb4_write_rss - write the RSS table for a given port
 * @pi: the port
 * @queues: array of queue indices for RSS
 *
 * Sets up the portion of the HW RSS table for the port's VI to distribute
 * packets to the Rx queues in @queues.
 * Should never be called before setting up sge eth rx queues.
 */
int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
{
	u16 *rss;
	int i, err;
	struct adapter *adapter = pi->adapter;
	const struct sge_eth_rxq *rxq;

	rxq = &adapter->sge.ethrxq[pi->first_qset];
	rss = kmalloc_array(pi->rss_size, sizeof(u16), GFP_KERNEL);
	if (!rss)
		return -ENOMEM;

	/* map the queue indices to queue ids */
	for (i = 0; i < pi->rss_size; i++, queues++)
		rss[i] = rxq[*queues].rspq.abs_id;

	err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
				  pi->rss_size, rss, pi->rss_size);
	/* If Tunnel All Lookup isn't specified in the global RSS
	 * Configuration, then we need to specify a default Ingress
	 * Queue for any ingress packets which aren't hashed.  We'll
	 * use our first ingress queue ...
	 */
	if (!err)
		err = t4_config_vi_rss(adapter, adapter->mbox, pi->viid,
				       FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN_F |
				       FW_RSS_VI_CONFIG_CMD_UDPEN_F,
				       rss[0]);
	kfree(rss);
	return err;
}

/**
 * setup_rss - configure RSS
 * @adap: the adapter
 *
 * Sets up RSS for each port.
 */
static int setup_rss(struct adapter *adap)
{
	int i, j, err;

	for_each_port(adap, i) {
		const struct port_info *pi = adap2pinfo(adap, i);

		/* Fill default values with equal distribution */
		for (j = 0; j < pi->rss_size; j++)
			pi->rss[j] = j % pi->nqsets;

		err = cxgb4_write_rss(pi, pi->rss);
		if (err)
			return err;
	}
	return 0;
}

/*
 * Return the channel of the ingress queue with the given qid.
 */
static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
{
	qid -= p->ingr_start;
	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (q && q->handler)
			napi_disable(&q->napi);
	}
}

/* Disable interrupt and napi handler */
static void disable_interrupts(struct adapter *adap)
{
	if (adap->flags & FULL_INIT_DONE) {
		t4_intr_disable(adap);
		if (adap->flags & USING_MSIX) {
			free_msix_queue_irqs(adap);
			free_irq(adap->msix_info[0].vec, adap);
		} else {
			free_irq(adap->pdev->irq, adap);
		}
		quiesce_rx(adap);
	}
}

/*
 * Enable NAPI scheduling and interrupt generation for all Rx queues.
 */
static void enable_rx(struct adapter *adap)
{
	int i;

	for (i = 0; i < adap->sge.ingr_sz; i++) {
		struct sge_rspq *q = adap->sge.ingr_map[i];

		if (!q)
			continue;
		if (q->handler)
			napi_enable(&q->napi);

		/* 0-increment GTS to start the timer and enable interrupts */
		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
			     SEINTARM_V(q->intr_params) |
			     INGRESSQID_V(q->cntxt_id));
	}
}

static int setup_fw_sge_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int err = 0;

	bitmap_zero(s->starving_fl, s->egr_sz);
	bitmap_zero(s->txq_maperr, s->egr_sz);

	if (adap->flags & USING_MSIX)
		adap->msi_idx = 1;	/* vector 0 is for non-queue interrupts */
	else {
		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
				       NULL, NULL, NULL, -1);
		if (err)
			return err;
		adap->msi_idx = -((int)s->intrq.abs_id + 1);
	}

	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
			       adap->msi_idx, NULL, fwevtq_handler, NULL, -1);
	return err;
}

/**
 * setup_sge_queues - configure SGE Tx/Rx/response queues
 * @adap: the adapter
 *
 * Determines how many sets of SGE queues to use and initializes them.
 * We support multiple queue sets per port if we have MSI-X, otherwise
 * just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adap)
{
	int err, i, j;
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info = NULL;
	unsigned int cmplqid = 0;

	if (is_uld(adap))
		rxq_info = s->uld_rxq_info[CXGB4_ULD_RDMA];

	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];

		for (j = 0; j < pi->nqsets; j++, q++) {
			if (adap->msi_idx > 0)
				adap->msi_idx++;
			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
					       adap->msi_idx, &q->fl,
					       t4_ethrx_handler,
					       NULL,
					       t4_get_tp_ch_map(adap,
								pi->tx_chan));
			if (err)
				goto freeout;
			q->rspq.idx = j;
			memset(&q->stats, 0, sizeof(q->stats));
		}
		for (j = 0; j < pi->nqsets; j++, t++) {
			err = t4_sge_alloc_eth_txq(adap, t, dev,
					netdev_get_tx_queue(dev, j),
					s->fw_evtq.cntxt_id);
			if (err)
				goto freeout;
		}
	}

	for_each_port(adap, i) {
		/* Note that cmplqid below is 0 if we don't
		 * have RDMA queues, and that's the right value.
		 */
		if (rxq_info)
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;

		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
					    s->fw_evtq.cntxt_id, cmplqid);
		if (err)
			goto freeout;
	}

	if (!is_t4(adap->params.chip)) {
		err = t4_sge_alloc_eth_txq(adap, &s->ptptxq, adap->port[0],
					   netdev_get_tx_queue(adap->port[0], 0),
					   s->fw_evtq.cntxt_id);
		if (err)
			goto freeout;
	}

	t4_write_reg(adap, is_t4(adap->params.chip) ?
				MPS_TRC_RSS_CONTROL_A :
				MPS_T5_TRC_RSS_CONTROL_A,
		     RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
		     QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
	return 0;
freeout:
	t4_free_sge_resources(adap);
	return err;
}

static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
			     void *accel_priv, select_queue_fallback_t fallback)
{
	int txq;

#ifdef CONFIG_CHELSIO_T4_DCB
	/* If Data Center Bridging has been successfully negotiated on this
	 * link then we'll use the skb's priority to map it to a TX Queue.
	 * The skb's priority is determined via the VLAN Tag Priority Code
	 * Point field.
	 */
	if (cxgb4_dcb_enabled(dev) && !is_kdump_kernel()) {
		u16 vlan_tci;
		int err;

		err = vlan_get_tag(skb, &vlan_tci);
		if (unlikely(err)) {
			if (net_ratelimit())
				netdev_warn(dev,
					    "TX Packet without VLAN Tag on DCB Link\n");
			txq = 0;
		} else {
			txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
#ifdef CONFIG_CHELSIO_T4_FCOE
			if (skb->protocol == htons(ETH_P_FCOE))
				txq = skb->priority & 0x7;
#endif /* CONFIG_CHELSIO_T4_FCOE */
		}
		return txq;
	}
#endif /* CONFIG_CHELSIO_T4_DCB */

	if (select_queue) {
		txq = (skb_rx_queue_recorded(skb)
			? skb_get_rx_queue(skb)
			: smp_processor_id());

		while (unlikely(txq >= dev->real_num_tx_queues))
			txq -= dev->real_num_tx_queues;

		return txq;
	}

	return fallback(dev, skb) % dev->real_num_tx_queues;
}
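
/* Worked example of the select_queue=1 path above: with
 * real_num_tx_queues == 8, a packet whose recorded RX queue is 10 is reduced
 * by the while loop to TX queue 2, i.e. the loop computes
 * "queue mod number-of-TX-queues" by repeated subtraction.
 */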

static int closest_timer(const struct sge *s, int time)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		delta = time - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, match = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			match = i;
		}
	}
	return match;
}

/**
 * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
 * @q: the Rx queue
 * @us: the hold-off time in us, or 0 to disable timer
 * @cnt: the hold-off packet count, or 0 to disable counter
 *
 * Sets an Rx queue's interrupt hold-off time and packet count.  At least
 * one of the two needs to be enabled for the queue to generate interrupts.
 */
int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
			       unsigned int us, unsigned int cnt)
{
	struct adapter *adap = q->adap;

	if ((us | cnt) == 0)
		cnt = 1;

	if (cnt) {
		int err;
		u32 v, new_idx;

		new_idx = closest_thres(&adap->sge, cnt);
		if (q->desc && q->pktcnt_idx != new_idx) {
			/* the queue has already been created, update it */
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
			err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
					    &v, &new_idx);
			if (err)
				return err;
		}
		q->pktcnt_idx = new_idx;
	}

	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
	q->intr_params = QINTR_TIMER_IDX_V(us) | QINTR_CNT_EN_V(cnt > 0);
	return 0;
}
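
/* Usage sketch (illustrative; a caller such as the driver's ethtool
 * coalescing path would typically land here): a request equivalent to
 *
 *	ethtool -C ethX rx-usecs 10 rx-frames 8
 *
 * would arrive as us=10, cnt=8; closest_timer()/closest_thres() above then
 * round both values to the nearest settings the hardware actually supports.
 */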

static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
{
	const struct port_info *pi = netdev_priv(dev);
	netdev_features_t changed = dev->features ^ features;
	int err;

	if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
		return 0;

	err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
			    -1, -1, -1,
			    !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
	if (unlikely(err))
		dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
	return err;
}

static int setup_debugfs(struct adapter *adap)
{
	if (IS_ERR_OR_NULL(adap->debugfs_root))
		return -1;

#ifdef CONFIG_DEBUG_FS
	t4_setup_debugfs(adap);
#endif
	return 0;
}
  937. /*
  938. * upper-layer driver support
  939. */
  940. /*
  941. * Allocate an active-open TID and set it to the supplied value.
  942. */
  943. int cxgb4_alloc_atid(struct tid_info *t, void *data)
  944. {
  945. int atid = -1;
  946. spin_lock_bh(&t->atid_lock);
  947. if (t->afree) {
  948. union aopen_entry *p = t->afree;
  949. atid = (p - t->atid_tab) + t->atid_base;
  950. t->afree = p->next;
  951. p->data = data;
  952. t->atids_in_use++;
  953. }
  954. spin_unlock_bh(&t->atid_lock);
  955. return atid;
  956. }
  957. EXPORT_SYMBOL(cxgb4_alloc_atid);
  958. /*
  959. * Release an active-open TID.
  960. */
  961. void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
  962. {
  963. union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
  964. spin_lock_bh(&t->atid_lock);
  965. p->next = t->afree;
  966. t->afree = p;
  967. t->atids_in_use--;
  968. spin_unlock_bh(&t->atid_lock);
  969. }
  970. EXPORT_SYMBOL(cxgb4_free_atid);
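
/* Illustrative sketch (editorial, not part of the original driver): the
 * expected alloc/free pairing for active-open TIDs.  "my_ctx" is a
 * hypothetical per-connection cookie; cxgb4_alloc_atid() returns -1 when
 * the free list is exhausted.
 */
static inline int __maybe_unused
example_atid_cycle(struct tid_info *t, void *my_ctx)
{
	int atid = cxgb4_alloc_atid(t, my_ctx);

	if (atid < 0)
		return -ENOMEM;	/* no active-open TIDs left */
	/* ... atid would normally be used in an active-open CPL here ... */
	cxgb4_free_atid(t, atid);
	return 0;
}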
/*
 * Allocate a server TID and set it to the supplied value.
 */
int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
		if (stid < t->nstids)
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 1);
		if (stid < 0)
			stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid += t->stid_base;
		/* IPv6 requires max of 520 bits or 16 cells in TCAM
		 * This is equivalent to 4 TIDs. With CLIP enabled it
		 * needs 2 TIDs.
		 */
		if (family == PF_INET6) {
			t->stids_in_use += 2;
			t->v6_stids_in_use += 2;
		} else {
			t->stids_in_use++;
		}
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_stid);

/* Allocate a server filter TID and set it to the supplied value.
 */
int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
{
	int stid;

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET) {
		stid = find_next_zero_bit(t->stid_bmap,
					  t->nstids + t->nsftids, t->nstids);
		if (stid < (t->nstids + t->nsftids))
			__set_bit(stid, t->stid_bmap);
		else
			stid = -1;
	} else {
		stid = -1;
	}
	if (stid >= 0) {
		t->stid_tab[stid].data = data;
		stid -= t->nstids;
		stid += t->sftid_base;
		t->sftids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}
EXPORT_SYMBOL(cxgb4_alloc_sftid);

/* Release a server TID.
 */
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
	/* Is it a server filter TID? */
	if (t->nsftids && (stid >= t->sftid_base)) {
		stid -= t->sftid_base;
		stid += t->nstids;
	} else {
		stid -= t->stid_base;
	}

	spin_lock_bh(&t->stid_lock);
	if (family == PF_INET)
		__clear_bit(stid, t->stid_bmap);
	else
		bitmap_release_region(t->stid_bmap, stid, 1);
	t->stid_tab[stid].data = NULL;
	if (stid < t->nstids) {
		if (family == PF_INET6) {
			t->stids_in_use -= 2;
			t->v6_stids_in_use -= 2;
		} else {
			t->stids_in_use--;
		}
	} else {
		t->sftids_in_use--;
	}
	spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);

/*
 * Populate a TID_RELEASE WR.  Caller must properly size the skb.
 */
static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
			   unsigned int tid)
{
	struct cpl_tid_release *req;

	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, tid);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

/*
 * Queue a TID release request and if necessary schedule a work queue to
 * process it.
 */
static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
				    unsigned int tid)
{
	void **p = &t->tid_tab[tid];
	struct adapter *adap = container_of(t, struct adapter, tids);

	spin_lock_bh(&adap->tid_release_lock);
	*p = adap->tid_release_head;
	/* Low 2 bits encode the Tx channel number */
	adap->tid_release_head = (void **)((uintptr_t)p | chan);
	if (!adap->tid_release_task_busy) {
		adap->tid_release_task_busy = true;
		queue_work(adap->workq, &adap->tid_release_task);
	}
	spin_unlock_bh(&adap->tid_release_lock);
}

/*
 * Process the list of pending TID release requests.
 */
static void process_tid_release_list(struct work_struct *work)
{
	struct sk_buff *skb;
	struct adapter *adap;

	adap = container_of(work, struct adapter, tid_release_task);

	spin_lock_bh(&adap->tid_release_lock);
	while (adap->tid_release_head) {
		void **p = adap->tid_release_head;
		unsigned int chan = (uintptr_t)p & 3;

		p = (void *)p - chan;
		adap->tid_release_head = *p;
		*p = NULL;
		spin_unlock_bh(&adap->tid_release_lock);

		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
					 GFP_KERNEL)))
			schedule_timeout_uninterruptible(1);

		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
		t4_ofld_send(adap, skb);
		spin_lock_bh(&adap->tid_release_lock);
	}
	adap->tid_release_task_busy = false;
	spin_unlock_bh(&adap->tid_release_lock);
}
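
/* Illustrative sketch (editorial, not part of the original driver): the
 * deferred-release list above threads its chain through the tid_tab slots
 * themselves and tags each slot pointer with the Tx channel in its two low
 * bits, which is safe because the slots are pointer-aligned.  Hypothetical
 * helpers showing the round trip:
 */
static inline void __maybe_unused *
example_tag_tid_slot(void **slot, unsigned int chan)
{
	/* Stash the channel (0..3) in the low bits of the slot address. */
	return (void *)((uintptr_t)slot | (chan & 3));
}

static inline unsigned int __maybe_unused
example_untag_tid_chan(void *tagged)
{
	return (uintptr_t)tagged & 3;	/* recover the channel */
}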
/*
 * Release a TID and inform HW.  If we are unable to allocate the release
 * message we defer to a work queue.
 */
void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid,
		      unsigned short family)
{
	struct sk_buff *skb;
	struct adapter *adap = container_of(t, struct adapter, tids);

	WARN_ON(tid >= t->ntids);

	if (t->tid_tab[tid]) {
		t->tid_tab[tid] = NULL;
		atomic_dec(&t->conns_in_use);
		if (t->hash_base && (tid >= t->hash_base)) {
			if (family == AF_INET6)
				atomic_sub(2, &t->hash_tids_in_use);
			else
				atomic_dec(&t->hash_tids_in_use);
		} else {
			if (family == AF_INET6)
				atomic_sub(2, &t->tids_in_use);
			else
				atomic_dec(&t->tids_in_use);
		}
	}

	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
	if (likely(skb)) {
		mk_tid_release(skb, chan, tid);
		t4_ofld_send(adap, skb);
	} else
		cxgb4_queue_tid_release(t, chan, tid);
}
EXPORT_SYMBOL(cxgb4_remove_tid);

/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 */
static int tid_init(struct tid_info *t)
{
	struct adapter *adap = container_of(t, struct adapter, tids);
	unsigned int max_ftids = t->nftids + t->nsftids;
	unsigned int natids = t->natids;
	unsigned int stid_bmap_size;
	unsigned int ftid_bmap_size;
	size_t size;

	stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
	ftid_bmap_size = BITS_TO_LONGS(t->nftids);
	size = t->ntids * sizeof(*t->tid_tab) +
	       natids * sizeof(*t->atid_tab) +
	       t->nstids * sizeof(*t->stid_tab) +
	       t->nsftids * sizeof(*t->stid_tab) +
	       stid_bmap_size * sizeof(long) +
	       max_ftids * sizeof(*t->ftid_tab) +
	       ftid_bmap_size * sizeof(long);

	t->tid_tab = kvzalloc(size, GFP_KERNEL);
	if (!t->tid_tab)
		return -ENOMEM;

	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
	t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
	t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);
	spin_lock_init(&t->ftid_lock);

	t->stids_in_use = 0;
	t->v6_stids_in_use = 0;
	t->sftids_in_use = 0;
	t->afree = NULL;
	t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	atomic_set(&t->conns_in_use, 0);
	atomic_set(&t->hash_tids_in_use, 0);

	/* Setup the free list for atid_tab and clear the stid bitmap. */
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}

	if (is_offload(adap)) {
		bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
		/* Reserve stid 0 for T4/T5 adapters */
		if (!t->stid_base &&
		    CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
			__set_bit(0, t->stid_bmap);
	}

	bitmap_zero(t->ftid_bmap, t->nftids);
	return 0;
}
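
/* Illustrative note (editorial, not from the original source): tid_init()
 * carves a single kvzalloc() block into consecutive tables, so each table
 * pointer is derived from the end of the previous one.  With hypothetical
 * sizes ntids=1024, natids=128, nstids=256, nsftids=0, nftids=496 the
 * layout is:
 *
 *   tid_tab[1024] | atid_tab[128] | stid_tab[256] | stid_bmap
 *   | ftid_tab[496] | ftid_bmap
 */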
/**
 * cxgb4_create_server - create an IP server
 * @dev: the device
 * @stid: the server TID
 * @sip: local IP address to bind server to
 * @sport: the server's TCP port
 * @queue: queue to direct messages from this server to
 *
 * Create an IP server for the given port and address.
 * Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
			__be32 sip, __be16 sport, __be16 vlan,
			unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip = sip;
	req->peer_ip = htonl(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server);

/* cxgb4_create_server6 - create an IPv6 server
 * @dev: the device
 * @stid: the server TID
 * @sip: local IPv6 address to bind server to
 * @sport: the server's TCP port
 * @queue: queue to direct messages from this server to
 *
 * Create an IPv6 server for the given port and address.
 * Returns <0 on error and one of the %NET_XMIT_* values on success.
 */
int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
			 const struct in6_addr *sip, __be16 sport,
			 unsigned int queue)
{
	unsigned int chan;
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_pass_open_req6 *req;
	int ret;

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	adap = netdev2adap(dev);
	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
	req->local_port = sport;
	req->peer_port = htons(0);
	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
	req->peer_ip_hi = cpu_to_be64(0);
	req->peer_ip_lo = cpu_to_be64(0);
	chan = rxq_to_chan(&adap->sge, queue);
	req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
	req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
				SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_create_server6);

int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
			unsigned int queue, bool ipv6)
{
	struct sk_buff *skb;
	struct adapter *adap;
	struct cpl_close_listsvr_req *req;
	int ret;

	adap = netdev2adap(dev);

	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	req = __skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, 0);
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
	req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
				LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
	ret = t4_mgmt_tx(adap, skb);
	return net_xmit_eval(ret);
}
EXPORT_SYMBOL(cxgb4_remove_server);

/**
 * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
 * @mtus: the HW MTU table
 * @mtu: the target MTU
 * @idx: index of selected entry in the MTU table
 *
 * Returns the index and the value in the HW MTU table that is closest to
 * but does not exceed @mtu, unless @mtu is smaller than any value in the
 * table, in which case that smallest available value is selected.
 */
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
			    unsigned int *idx)
{
	unsigned int i = 0;

	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
		++i;
	if (idx)
		*idx = i;
	return mtus[i];
}
EXPORT_SYMBOL(cxgb4_best_mtu);
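
/* Illustrative example (editorial, not from the original source): with a
 * hypothetical MTU table {1500, 2048, 4096, 9000}, cxgb4_best_mtu() with
 * mtu == 3000 returns 2048 (the largest entry that does not exceed 3000),
 * while mtu == 1200 returns 1500, the smallest available entry.
 */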
/**
 * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
 * @mtus: the HW MTU table
 * @header_size: Header Size
 * @data_size_max: maximum Data Segment Size
 * @data_size_align: desired Data Segment Size Alignment (2^N)
 * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
 *
 * Similar to cxgb4_best_mtu() but instead of searching the Hardware
 * MTU Table based solely on a Maximum MTU parameter, we break that
 * parameter up into a Header Size and Maximum Data Segment Size, and
 * provide a desired Data Segment Size Alignment.  If we find an MTU in
 * the Hardware MTU Table which will result in a Data Segment Size with
 * the requested alignment _and_ that MTU isn't "too far" from the
 * closest MTU, then we'll return that rather than the closest MTU.
 */
unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
				    unsigned short header_size,
				    unsigned short data_size_max,
				    unsigned short data_size_align,
				    unsigned int *mtu_idxp)
{
	unsigned short max_mtu = header_size + data_size_max;
	unsigned short data_size_align_mask = data_size_align - 1;
	int mtu_idx, aligned_mtu_idx;

	/* Scan the MTU Table till we find an MTU which is larger than our
	 * Maximum MTU or we reach the end of the table.  Along the way,
	 * record the last MTU found, if any, which will result in a Data
	 * Segment Length matching the requested alignment.
	 */
	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
		unsigned short data_size = mtus[mtu_idx] - header_size;

		/* If this MTU minus the Header Size would result in a
		 * Data Segment Size of the desired alignment, remember it.
		 */
		if ((data_size & data_size_align_mask) == 0)
			aligned_mtu_idx = mtu_idx;

		/* If we're not at the end of the Hardware MTU Table and the
		 * next element is larger than our Maximum MTU, drop out of
		 * the loop.
		 */
		if (mtu_idx + 1 < NMTUS && mtus[mtu_idx + 1] > max_mtu)
			break;
	}

	/* If we fell out of the loop because we ran to the end of the table,
	 * then we just have to use the last [largest] entry.
	 */
	if (mtu_idx == NMTUS)
		mtu_idx--;

	/* If we found an MTU which resulted in the requested Data Segment
	 * Length alignment and that's "not far" from the largest MTU which is
	 * less than or equal to the maximum MTU, then use that.
	 */
	if (aligned_mtu_idx >= 0 &&
	    mtu_idx - aligned_mtu_idx <= 1)
		mtu_idx = aligned_mtu_idx;

	/* If the caller has passed in an MTU Index pointer, pass the
	 * MTU Index back.  Return the MTU value.
	 */
	if (mtu_idxp)
		*mtu_idxp = mtu_idx;
	return mtus[mtu_idx];
}
EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
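
/* Illustrative example (editorial, not from the original source): with the
 * same hypothetical table {1500, 2048, 4096, 9000}, header_size = 28,
 * data_size_max = 2100 and data_size_align = 16, the scan stops at index 1
 * (the next entry, 4096, exceeds max_mtu = 2128).  2048 - 28 = 2020 is not
 * 16-byte aligned, but 1500 - 28 = 1472 is, and index 0 is only one step
 * below the closest MTU, so 1500 is returned instead of 2048.
 */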
/**
 * cxgb4_tp_smt_idx - Get the Source Mac Table index for this VI
 * @chip: chip type
 * @viid: VI id of the given port
 *
 * Return the SMT index for this VI.
 */
unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid)
{
	/* In T4/T5, SMT contains 256 SMAC entries organized in
	 * 128 rows of 2 entries each.
	 * In T6, SMT contains 256 SMAC entries in 256 rows.
	 * TODO: The below code needs to be updated when we add support
	 * for 256 VFs.
	 */
	if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
		return ((viid & 0x7f) << 1);
	else
		return (viid & 0x7f);
}
EXPORT_SYMBOL(cxgb4_tp_smt_idx);

/**
 * cxgb4_port_chan - get the HW channel of a port
 * @dev: the net device for the port
 *
 * Return the HW Tx channel of the given port.
 */
unsigned int cxgb4_port_chan(const struct net_device *dev)
{
	return netdev2pinfo(dev)->tx_chan;
}
EXPORT_SYMBOL(cxgb4_port_chan);

unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
{
	struct adapter *adap = netdev2adap(dev);
	u32 v1, v2, lp_count, hp_count;

	v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
	v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
	if (is_t4(adap->params.chip)) {
		lp_count = LP_COUNT_G(v1);
		hp_count = HP_COUNT_G(v1);
	} else {
		lp_count = LP_COUNT_T5_G(v1);
		hp_count = HP_COUNT_T5_G(v2);
	}
	return lpfifo ? lp_count : hp_count;
}
EXPORT_SYMBOL(cxgb4_dbfifo_count);

/**
 * cxgb4_port_viid - get the VI id of a port
 * @dev: the net device for the port
 *
 * Return the VI id of the given port.
 */
unsigned int cxgb4_port_viid(const struct net_device *dev)
{
	return netdev2pinfo(dev)->viid;
}
EXPORT_SYMBOL(cxgb4_port_viid);

/**
 * cxgb4_port_idx - get the index of a port
 * @dev: the net device for the port
 *
 * Return the index of the given port.
 */
unsigned int cxgb4_port_idx(const struct net_device *dev)
{
	return netdev2pinfo(dev)->port_id;
}
EXPORT_SYMBOL(cxgb4_port_idx);

void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
			 struct tp_tcp_stats *v6)
{
	struct adapter *adap = pci_get_drvdata(pdev);

	spin_lock(&adap->stats_lock);
	t4_tp_get_tcp_stats(adap, v4, v6, false);
	spin_unlock(&adap->stats_lock);
}
EXPORT_SYMBOL(cxgb4_get_tcp_stats);

void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
		      const unsigned int *pgsz_order)
{
	struct adapter *adap = netdev2adap(dev);

	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
	t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
		     HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
		     HPZ3_V(pgsz_order[3]));
}
EXPORT_SYMBOL(cxgb4_iscsi_init);

int cxgb4_flush_eq_cache(struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);

	return t4_sge_ctxt_flush(adap, adap->mbox, CTXT_EGRESS);
}
EXPORT_SYMBOL(cxgb4_flush_eq_cache);

static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
{
	u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
	__be64 indices;
	int ret;

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
			   sizeof(indices), (__be32 *)&indices,
			   T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	if (!ret) {
		*cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
		*pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
	}
	return ret;
}

int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
			u16 size)
{
	struct adapter *adap = netdev2adap(dev);
	u16 hw_pidx, hw_cidx;
	int ret;

	ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;

	if (pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (pidx >= hw_pidx)
			delta = pidx - hw_pidx;
		else
			delta = size - hw_pidx + pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(qid) | val);
	}
out:
	return ret;
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
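
/* Illustrative example (editorial, not from the original source): the
 * delta computation above handles producer-index wrap.  With a
 * hypothetical queue of size 1024, a hardware pidx of 1020 and a software
 * pidx of 4, the doorbell increment is 1024 - 1020 + 4 = 8 descriptors.
 */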
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
	u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
	u32 edc0_end, edc1_end, mc0_end, mc1_end;
	u32 offset, memtype, memaddr;
	struct adapter *adap;
	u32 hma_size = 0;
	int ret;

	adap = netdev2adap(dev);
	offset = ((stag >> 8) * 32) + adap->vres.stag.start;

	/* Figure out where the offset lands in the Memory Type/Address scheme.
	 * This code assumes that the memory is laid out starting at offset 0
	 * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
	 * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
	 * MC0, and some have both MC0 and MC1.
	 */
	size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
	edc0_size = EDRAM0_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
	edc1_size = EDRAM1_SIZE_G(size) << 20;
	size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
	mc0_size = EXT_MEM0_SIZE_G(size) << 20;

	if (t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A) & HMA_MUX_F) {
		size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
		hma_size = EXT_MEM1_SIZE_G(size) << 20;
	}
	edc0_end = edc0_size;
	edc1_end = edc0_end + edc1_size;
	mc0_end = edc1_end + mc0_size;

	if (offset < edc0_end) {
		memtype = MEM_EDC0;
		memaddr = offset;
	} else if (offset < edc1_end) {
		memtype = MEM_EDC1;
		memaddr = offset - edc0_end;
	} else {
		if (hma_size && (offset < (edc1_end + hma_size))) {
			memtype = MEM_HMA;
			memaddr = offset - edc1_end;
		} else if (offset < mc0_end) {
			memtype = MEM_MC0;
			memaddr = offset - edc1_end;
		} else if (is_t5(adap->params.chip)) {
			size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			mc1_size = EXT_MEM1_SIZE_G(size) << 20;
			mc1_end = mc0_end + mc1_size;
			if (offset < mc1_end) {
				memtype = MEM_MC1;
				memaddr = offset - mc0_end;
			} else {
				/* offset beyond the end of any memory */
				goto err;
			}
		} else {
			/* T4/T6 only has a single memory channel */
			goto err;
		}
	}

	spin_lock(&adap->win0_lock);
	ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
	spin_unlock(&adap->win0_lock);
	return ret;

err:
	dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
		stag, offset);
	return -EINVAL;
}
EXPORT_SYMBOL(cxgb4_read_tpte);
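
/* Illustrative example (editorial, not from the original source): for a
 * hypothetical card with EDC0 = EDC1 = 256 MB, MC0 present and no HMA, an
 * offset of 600 MB lies past edc1_end (512 MB), so the lookup selects
 * MEM_MC0 with memaddr = 600 MB - 512 MB = 88 MB into MC0.
 */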
u64 cxgb4_read_sge_timestamp(struct net_device *dev)
{
	u32 hi, lo;
	struct adapter *adap;

	adap = netdev2adap(dev);
	lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
	hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));

	return ((u64)hi << 32) | (u64)lo;
}
EXPORT_SYMBOL(cxgb4_read_sge_timestamp);

int cxgb4_bar2_sge_qregs(struct net_device *dev,
			 unsigned int qid,
			 enum cxgb4_bar2_qtype qtype,
			 int user,
			 u64 *pbar2_qoffset,
			 unsigned int *pbar2_qid)
{
	return t4_bar2_sge_qregs(netdev2adap(dev),
				 qid,
				 (qtype == CXGB4_BAR2_QTYPE_EGRESS
				  ? T4_BAR2_QTYPE_EGRESS
				  : T4_BAR2_QTYPE_INGRESS),
				 user,
				 pbar2_qoffset,
				 pbar2_qid);
}
EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);

static struct pci_driver cxgb4_driver;

static void check_neigh_update(struct neighbour *neigh)
{
	const struct device *parent;
	const struct net_device *netdev = neigh->dev;

	if (is_vlan_dev(netdev))
		netdev = vlan_dev_real_dev(netdev);
	parent = netdev->dev.parent;
	if (parent && parent->driver == &cxgb4_driver.driver)
		t4_l2t_update(dev_get_drvdata(parent), neigh);
}

static int netevent_cb(struct notifier_block *nb, unsigned long event,
		       void *data)
{
	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		check_neigh_update(data);
		break;
	case NETEVENT_REDIRECT:
	default:
		break;
	}
	return 0;
}

static bool netevent_registered;
static struct notifier_block cxgb4_netevent_nb = {
	.notifier_call = netevent_cb
};
static void drain_db_fifo(struct adapter *adap, int usecs)
{
	u32 v1, v2, lp_count, hp_count;

	do {
		v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
		v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
		if (is_t4(adap->params.chip)) {
			lp_count = LP_COUNT_G(v1);
			hp_count = HP_COUNT_G(v1);
		} else {
			lp_count = LP_COUNT_T5_G(v1);
			hp_count = HP_COUNT_T5_G(v2);
		}

		if (lp_count == 0 && hp_count == 0)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(usecs_to_jiffies(usecs));
	} while (1);
}

static void disable_txq_db(struct sge_txq *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->db_lock, flags);
	q->db_disabled = 1;
	spin_unlock_irqrestore(&q->db_lock, flags);
}

static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
{
	spin_lock_irq(&q->db_lock);
	if (q->db_pidx_inc) {
		/* Make sure that all writes to the TX descriptors
		 * are committed before we tell HW about them.
		 */
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
		q->db_pidx_inc = 0;
	}
	q->db_disabled = 0;
	spin_unlock_irq(&q->db_lock);
}

static void disable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		disable_txq_db(&adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				disable_txq_db(&txq->q);
			}
		}
	}
	for_each_port(adap, i)
		disable_txq_db(&adap->sge.ctrlq[i].q);
}

static void enable_dbs(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		enable_txq_db(adap, &adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				enable_txq_db(adap, &txq->q);
			}
		}
	}
	for_each_port(adap, i)
		enable_txq_db(adap, &adap->sge.ctrlq[i].q);
}
static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
{
	enum cxgb4_uld type = CXGB4_ULD_RDMA;

	if (adap->uld && adap->uld[type].handle)
		adap->uld[type].control(adap->uld[type].handle, cmd);
}

static void process_db_full(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_full_task);

	drain_db_fifo(adap, dbfifo_drain_delay);
	enable_dbs(adap);
	notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
	else
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
}

static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
{
	u16 hw_pidx, hw_cidx;
	int ret;

	spin_lock_irq(&q->db_lock);
	ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
	if (ret)
		goto out;
	if (q->db_pidx != hw_pidx) {
		u16 delta;
		u32 val;

		if (q->db_pidx >= hw_pidx)
			delta = q->db_pidx - hw_pidx;
		else
			delta = q->size - hw_pidx + q->db_pidx;

		if (is_t4(adap->params.chip))
			val = PIDX_V(delta);
		else
			val = PIDX_T5_V(delta);
		wmb();
		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
			     QID_V(q->cntxt_id) | val);
	}
out:
	q->db_disabled = 0;
	q->db_pidx_inc = 0;
	spin_unlock_irq(&q->db_lock);
	if (ret)
		CH_WARN(adap, "DB drop recovery failed.\n");
}

static void recover_all_queues(struct adapter *adap)
{
	int i;

	for_each_ethrxq(&adap->sge, i)
		sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
	if (is_offload(adap)) {
		struct sge_uld_txq_info *txq_info =
			adap->sge.uld_txq_info[CXGB4_TX_OFLD];

		if (txq_info) {
			for_each_ofldtxq(&adap->sge, i) {
				struct sge_uld_txq *txq = &txq_info->uldtxq[i];

				sync_txq_pidx(adap, &txq->q);
			}
		}
	}
	for_each_port(adap, i)
		sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
}

static void process_db_drop(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, db_drop_task);

	if (is_t4(adap->params.chip)) {
		drain_db_fifo(adap, dbfifo_drain_delay);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
		drain_db_fifo(adap, dbfifo_drain_delay);
		recover_all_queues(adap);
		drain_db_fifo(adap, dbfifo_drain_delay);
		enable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
	} else if (is_t5(adap->params.chip)) {
		u32 dropped_db = t4_read_reg(adap, 0x010ac);
		u16 qid = (dropped_db >> 15) & 0x1ffff;
		u16 pidx_inc = dropped_db & 0x1fff;
		u64 bar2_qoffset;
		unsigned int bar2_qid;
		int ret;

		ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
					0, &bar2_qoffset, &bar2_qid);
		if (ret)
			dev_err(adap->pdev_dev, "doorbell drop recovery: "
				"qid=%d, pidx_inc=%d\n", qid, pidx_inc);
		else
			writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
			       adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);

		/* Re-enable BAR2 WC */
		t4_set_reg_field(adap, 0x10b0, 1 << 15, 1 << 15);
	}

	if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
		t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}
void t4_db_full(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
		t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
				 DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
		queue_work(adap->workq, &adap->db_full_task);
	}
}

void t4_db_dropped(struct adapter *adap)
{
	if (is_t4(adap->params.chip)) {
		disable_dbs(adap);
		notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
	}
	queue_work(adap->workq, &adap->db_drop_task);
}

void t4_register_netevent_notifier(void)
{
	if (!netevent_registered) {
		register_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = true;
	}
}

static void detach_ulds(struct adapter *adap)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	list_del(&adap->list_node);

	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld && adap->uld[i].handle)
			adap->uld[i].state_change(adap->uld[i].handle,
						  CXGB4_STATE_DETACH);

	if (netevent_registered && list_empty(&adapter_list)) {
		unregister_netevent_notifier(&cxgb4_netevent_nb);
		netevent_registered = false;
	}
	mutex_unlock(&uld_mutex);
}

static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
{
	unsigned int i;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++)
		if (adap->uld && adap->uld[i].handle)
			adap->uld[i].state_change(adap->uld[i].handle,
						  new_state);
	mutex_unlock(&uld_mutex);
}
#if IS_ENABLED(CONFIG_IPV6)
static int cxgb4_inet6addr_handler(struct notifier_block *this,
				   unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *event_dev = ifa->idev->dev;
	const struct device *parent = NULL;
#if IS_ENABLED(CONFIG_BONDING)
	struct adapter *adap;
#endif
	if (is_vlan_dev(event_dev))
		event_dev = vlan_dev_real_dev(event_dev);
#if IS_ENABLED(CONFIG_BONDING)
	if (event_dev->flags & IFF_MASTER) {
		list_for_each_entry(adap, &adapter_list, list_node) {
			switch (event) {
			case NETDEV_UP:
				cxgb4_clip_get(adap->port[0],
					       (const u32 *)ifa, 1);
				break;
			case NETDEV_DOWN:
				cxgb4_clip_release(adap->port[0],
						   (const u32 *)ifa, 1);
				break;
			default:
				break;
			}
		}
		return NOTIFY_OK;
	}
#endif

	if (event_dev)
		parent = event_dev->dev.parent;

	if (parent && parent->driver == &cxgb4_driver.driver) {
		switch (event) {
		case NETDEV_UP:
			cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
			break;
		case NETDEV_DOWN:
			cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
			break;
		default:
			break;
		}
	}
	return NOTIFY_OK;
}

static bool inet6addr_registered;
static struct notifier_block cxgb4_inet6addr_notifier = {
	.notifier_call = cxgb4_inet6addr_handler
};

static void update_clip(const struct adapter *adap)
{
	int i;
	struct net_device *dev;
	int ret;

	rcu_read_lock();

	for (i = 0; i < MAX_NPORTS; i++) {
		dev = adap->port[i];
		ret = 0;

		if (dev)
			ret = cxgb4_update_root_dev_clip(dev);

		if (ret < 0)
			break;
	}
	rcu_read_unlock();
}
#endif /* IS_ENABLED(CONFIG_IPV6) */
/**
 * cxgb_up - enable the adapter
 * @adap: adapter being enabled
 *
 * Called when the first port is enabled, this function performs the
 * actions necessary to make an adapter operational, such as completing
 * the initialization of HW modules, and enabling interrupts.
 *
 * Must be called with the rtnl lock held.
 */
static int cxgb_up(struct adapter *adap)
{
	int err;

	mutex_lock(&uld_mutex);
	err = setup_sge_queues(adap);
	if (err)
		goto rel_lock;
	err = setup_rss(adap);
	if (err)
		goto freeq;

	if (adap->flags & USING_MSIX) {
		name_msix_vecs(adap);
		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
				  adap->msix_info[0].desc, adap);
		if (err)
			goto irq_err;
		err = request_msix_queue_irqs(adap);
		if (err) {
			free_irq(adap->msix_info[0].vec, adap);
			goto irq_err;
		}
	} else {
		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
				  adap->port[0]->name, adap);
		if (err)
			goto irq_err;
	}

	enable_rx(adap);
	t4_sge_start(adap);
	t4_intr_enable(adap);
	adap->flags |= FULL_INIT_DONE;
	mutex_unlock(&uld_mutex);

	notify_ulds(adap, CXGB4_STATE_UP);
#if IS_ENABLED(CONFIG_IPV6)
	update_clip(adap);
#endif
	/* Initialize hash mac addr list */
	INIT_LIST_HEAD(&adap->mac_hlist);
	return err;

 irq_err:
	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
 freeq:
	t4_free_sge_resources(adap);
 rel_lock:
	mutex_unlock(&uld_mutex);
	return err;
}

static void cxgb_down(struct adapter *adapter)
{
	cancel_work_sync(&adapter->tid_release_task);
	cancel_work_sync(&adapter->db_full_task);
	cancel_work_sync(&adapter->db_drop_task);
	adapter->tid_release_task_busy = false;
	adapter->tid_release_head = NULL;

	t4_sge_stop(adapter);
	t4_free_sge_resources(adapter);
	adapter->flags &= ~FULL_INIT_DONE;
}
/*
 * net_device operations
 */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_carrier_off(dev);

	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = cxgb_up(adapter);
		if (err < 0)
			return err;
	}

	/* It's possible that the basic port information could have
	 * changed since we first read it.
	 */
	err = t4_update_port_info(pi);
	if (err < 0)
		return err;

	err = link_start(dev);
	if (!err)
		netif_tx_start_all_queues(dev);
	return err;
}

static int cxgb_close(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	int ret;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	ret = t4_enable_pi_params(adapter, adapter->pf, pi,
				  false, false, false);
#ifdef CONFIG_CHELSIO_T4_DCB
	cxgb4_dcb_reset(dev);
	dcb_tx_queue_prio_enable(dev, false);
#endif
	return ret;
}
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
			       __be32 sip, __be16 sport, __be16 vlan,
			       unsigned int queue, unsigned char port,
			       unsigned char mask)
{
	int ret;
	struct filter_entry *f;
	struct adapter *adap;
	int i;
	u8 *val;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	/* Check to make sure the filter requested is writable ... */
	f = &adap->tids.ftid_tab[stid];
	ret = writable_filter(f);
	if (ret)
		return ret;

	/* Clear out any old resources being used by the filter before
	 * we start constructing the new filter.
	 */
	if (f->valid)
		clear_filter(adap, f);

	/* Clear out filter specifications */
	memset(&f->fs, 0, sizeof(struct ch_filter_specification));
	f->fs.val.lport = cpu_to_be16(sport);
	f->fs.mask.lport = ~0;
	val = (u8 *)&sip;
	if ((val[0] | val[1] | val[2] | val[3]) != 0) {
		for (i = 0; i < 4; i++) {
			f->fs.val.lip[i] = val[i];
			f->fs.mask.lip[i] = ~0;
		}
		if (adap->params.tp.vlan_pri_map & PORT_F) {
			f->fs.val.iport = port;
			f->fs.mask.iport = mask;
		}
	}

	if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
		f->fs.val.proto = IPPROTO_TCP;
		f->fs.mask.proto = ~0;
	}

	f->fs.dirsteer = 1;
	f->fs.iq = queue;
	/* Mark filter as locked */
	f->locked = 1;
	f->fs.rpttid = 1;

	/* Save the actual tid. We need this to get the corresponding
	 * filter entry structure in filter_rpl.
	 */
	f->tid = stid + adap->tids.ftid_base;
	ret = set_filter_wr(adap, stid);
	if (ret) {
		clear_filter(adap, f);
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(cxgb4_create_server_filter);

int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
			       unsigned int queue, bool ipv6)
{
	struct filter_entry *f;
	struct adapter *adap;

	adap = netdev2adap(dev);

	/* Adjust stid to correct filter index */
	stid -= adap->tids.sftid_base;
	stid += adap->tids.nftids;

	f = &adap->tids.ftid_tab[stid];
	/* Unlock the filter */
	f->locked = 0;

	return delete_filter(adap, stid);
}
EXPORT_SYMBOL(cxgb4_remove_server_filter);
static void cxgb_get_stats(struct net_device *dev,
			   struct rtnl_link_stats64 *ns)
{
	struct port_stats stats;
	struct port_info *p = netdev_priv(dev);
	struct adapter *adapter = p->adapter;

	/* Block retrieving statistics during EEH error
	 * recovery. Otherwise, the recovery might fail
	 * and the PCI device will be removed permanently.
	 */
	spin_lock(&adapter->stats_lock);
	if (!netif_device_present(dev)) {
		spin_unlock(&adapter->stats_lock);
		return;
	}
	t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
				 &p->stats_base);
	spin_unlock(&adapter->stats_lock);

	ns->tx_bytes   = stats.tx_octets;
	ns->tx_packets = stats.tx_frames;
	ns->rx_bytes   = stats.rx_octets;
	ns->rx_packets = stats.rx_frames;
	ns->multicast  = stats.rx_mcast_frames;

	/* detailed rx_errors */
	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
			       stats.rx_runt;
	ns->rx_over_errors   = 0;
	ns->rx_crc_errors    = stats.rx_fcs_err;
	ns->rx_frame_errors  = stats.rx_symbol_err;
	ns->rx_dropped	     = stats.rx_ovflow0 + stats.rx_ovflow1 +
			       stats.rx_ovflow2 + stats.rx_ovflow3 +
			       stats.rx_trunc0 + stats.rx_trunc1 +
			       stats.rx_trunc2 + stats.rx_trunc3;
	ns->rx_missed_errors = 0;

	/* detailed tx_errors */
	ns->tx_aborted_errors   = 0;
	ns->tx_carrier_errors   = 0;
	ns->tx_fifo_errors      = 0;
	ns->tx_heartbeat_errors = 0;
	ns->tx_window_errors    = 0;

	ns->tx_errors = stats.tx_error_frames;
	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
}
static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	unsigned int mbox;
	int ret = 0, prtad, devad;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;
	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;

	switch (cmd) {
	case SIOCGMIIPHY:
		if (pi->mdio_addr < 0)
			return -EOPNOTSUPP;
		data->phy_id = pi->mdio_addr;
		break;
	case SIOCGMIIREG:
	case SIOCSMIIREG:
		if (mdio_phy_id_is_c45(data->phy_id)) {
			prtad = mdio_phy_id_prtad(data->phy_id);
			devad = mdio_phy_id_devad(data->phy_id);
		} else if (data->phy_id < 32) {
			prtad = data->phy_id;
			devad = 0;
			data->reg_num &= 0x1f;
		} else
			return -EINVAL;

		mbox = pi->adapter->pf;
		if (cmd == SIOCGMIIREG)
			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
					 data->reg_num, &data->val_out);
		else
			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
					 data->reg_num, data->val_in);
		break;
	case SIOCGHWTSTAMP:
		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	case SIOCSHWTSTAMP:
		if (copy_from_user(&pi->tstamp_config, req->ifr_data,
				   sizeof(pi->tstamp_config)))
			return -EFAULT;

		if (!is_t4(adapter->params.chip)) {
			switch (pi->tstamp_config.tx_type) {
			case HWTSTAMP_TX_OFF:
			case HWTSTAMP_TX_ON:
				break;
			default:
				return -ERANGE;
			}

			switch (pi->tstamp_config.rx_filter) {
			case HWTSTAMP_FILTER_NONE:
				pi->rxtstamp = false;
				break;
			case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
				cxgb4_ptprx_timestamping(pi, pi->port_id,
							 PTP_TS_L4);
				break;
			case HWTSTAMP_FILTER_PTP_V2_EVENT:
				cxgb4_ptprx_timestamping(pi, pi->port_id,
							 PTP_TS_L2_L4);
				break;
			case HWTSTAMP_FILTER_ALL:
			case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
				pi->rxtstamp = true;
				break;
			default:
				pi->tstamp_config.rx_filter =
					HWTSTAMP_FILTER_NONE;
				return -ERANGE;
			}

			if ((pi->tstamp_config.tx_type == HWTSTAMP_TX_OFF) &&
			    (pi->tstamp_config.rx_filter ==
			     HWTSTAMP_FILTER_NONE)) {
				if (cxgb4_ptp_txtype(adapter, pi->port_id) >= 0)
					pi->ptp_enable = false;
			}

			if (pi->tstamp_config.rx_filter !=
			    HWTSTAMP_FILTER_NONE) {
				if (cxgb4_ptp_redirect_rx_packet(adapter,
								 pi) >= 0)
					pi->ptp_enable = true;
			}
		} else {
			/* For T4 Adapters */
			switch (pi->tstamp_config.rx_filter) {
			case HWTSTAMP_FILTER_NONE:
				pi->rxtstamp = false;
				break;
			case HWTSTAMP_FILTER_ALL:
				pi->rxtstamp = true;
				break;
			default:
				pi->tstamp_config.rx_filter =
					HWTSTAMP_FILTER_NONE;
				return -ERANGE;
			}
		}
		return copy_to_user(req->ifr_data, &pi->tstamp_config,
				    sizeof(pi->tstamp_config)) ?
			-EFAULT : 0;
	default:
		return -EOPNOTSUPP;
	}
	return ret;
}
static void cxgb_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}

static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
			    -1, -1, -1, true);
	if (!ret)
		dev->mtu = new_mtu;
	return ret;
}

#ifdef CONFIG_PCI_IOV
static int cxgb4_mgmt_open(struct net_device *dev)
{
	/* Turn carrier off since we don't have to transmit anything on this
	 * interface.
	 */
	netif_carrier_off(dev);
	return 0;
}
/* Fill MAC address that will be assigned by the FW */
static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap)
{
	u8 hw_addr[ETH_ALEN], macaddr[ETH_ALEN];
	unsigned int i, vf, nvfs;
	u16 a, b;
	int err;
	u8 *na;

	adap->params.pci.vpd_cap_addr = pci_find_capability(adap->pdev,
							    PCI_CAP_ID_VPD);
	err = t4_get_raw_vpd_params(adap, &adap->params.vpd);
	if (err)
		return;

	na = adap->params.vpd.na;
	for (i = 0; i < ETH_ALEN; i++)
		hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
			      hex2val(na[2 * i + 1]));

	a = (hw_addr[0] << 8) | hw_addr[1];
	b = (hw_addr[1] << 8) | hw_addr[2];
	a ^= b;
	a |= 0x0200;	/* locally assigned Ethernet MAC address */
	a &= ~0x0100;	/* not a multicast Ethernet MAC address */
	macaddr[0] = a >> 8;
	macaddr[1] = a & 0xff;

	for (i = 2; i < 5; i++)
		macaddr[i] = hw_addr[i + 1];

	for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev);
	     vf < nvfs; vf++) {
		macaddr[5] = adap->pf * 16 + vf;
		ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr);
	}
}
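
/* Illustrative example (editorial, not from the original source): with a
 * hypothetical VPD network address of 00:07:43:12:34:56, a = 0x0007 and
 * b = 0x0743, so a ^= b gives 0x0744; setting the locally-administered
 * bit and clearing the multicast bit yields 0x0644.  For pf = 4, vf = 2
 * the derived station MAC is therefore 06:44:12:34:56:42.
 */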
static int cxgb4_mgmt_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int ret;

	/* verify MAC addr is valid */
	if (!is_valid_ether_addr(mac)) {
		dev_err(pi->adapter->pdev_dev,
			"Invalid Ethernet address %pM for VF %d\n",
			mac, vf);
		return -EINVAL;
	}

	dev_info(pi->adapter->pdev_dev,
		 "Setting MAC %pM on VF %d\n", mac, vf);
	ret = t4_set_vf_mac_acl(adap, vf + 1, 1, mac);
	if (!ret)
		ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, mac);
	return ret;
}

static int cxgb4_mgmt_get_vf_config(struct net_device *dev,
				    int vf, struct ifla_vf_info *ivi)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct vf_info *vfinfo;

	if (vf >= adap->num_vfs)
		return -EINVAL;
	vfinfo = &adap->vfinfo[vf];

	ivi->vf = vf;
	ivi->max_tx_rate = vfinfo->tx_rate;
	ivi->min_tx_rate = 0;
	ether_addr_copy(ivi->mac, vfinfo->vf_mac_addr);
	ivi->vlan = vfinfo->vlan;
	return 0;
}

static int cxgb4_mgmt_get_phys_port_id(struct net_device *dev,
				       struct netdev_phys_item_id *ppid)
{
	struct port_info *pi = netdev_priv(dev);
	unsigned int phy_port_id;

	phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
	ppid->id_len = sizeof(phy_port_id);
	memcpy(ppid->id, &phy_port_id, ppid->id_len);
	return 0;
}

static int cxgb4_mgmt_set_vf_rate(struct net_device *dev, int vf,
				  int min_tx_rate, int max_tx_rate)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	unsigned int link_ok, speed, mtu;
	u32 fw_pfvf, fw_class;
	int class_id = vf;
	int ret;
	u16 pktsize;

	if (vf >= adap->num_vfs)
		return -EINVAL;

	if (min_tx_rate) {
		dev_err(adap->pdev_dev,
			"Min tx rate (%d) (> 0) for VF %d is Invalid.\n",
			min_tx_rate, vf);
		return -EINVAL;
	}

	ret = t4_get_link_params(pi, &link_ok, &speed, &mtu);
	if (ret != FW_SUCCESS) {
		dev_err(adap->pdev_dev,
			"Failed to get link information for VF %d\n", vf);
		return -EINVAL;
	}

	if (!link_ok) {
		dev_err(adap->pdev_dev, "Link down for VF %d\n", vf);
		return -EINVAL;
	}

	if (max_tx_rate > speed) {
		dev_err(adap->pdev_dev,
			"Max tx rate %d for VF %d can't be > link-speed %u",
			max_tx_rate, vf, speed);
		return -EINVAL;
	}

	pktsize = mtu;
	/* subtract ethhdr size and 4 bytes crc since f/w appends them */
	pktsize = pktsize - sizeof(struct ethhdr) - 4;
	/* subtract ipv4 hdr size, tcp hdr size to get typical IPv4 MSS size */
	pktsize = pktsize - sizeof(struct iphdr) - sizeof(struct tcphdr);
	/* configure Traffic Class for rate-limiting */
	ret = t4_sched_params(adap, SCHED_CLASS_TYPE_PACKET,
			      SCHED_CLASS_LEVEL_CL_RL,
			      SCHED_CLASS_MODE_CLASS,
			      SCHED_CLASS_RATEUNIT_BITS,
			      SCHED_CLASS_RATEMODE_ABS,
			      pi->tx_chan, class_id, 0,
			      max_tx_rate * 1000, 0, pktsize);
	if (ret) {
		dev_err(adap->pdev_dev, "Err %d for Traffic Class config\n",
			ret);
		return -EINVAL;
	}
	dev_info(adap->pdev_dev,
		 "Class %d with MSS %u configured with rate %u\n",
		 class_id, pktsize, max_tx_rate);

	/* bind VF to configured Traffic Class */
	fw_pfvf = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
		   FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH));
	fw_class = class_id;
	ret = t4_set_params(adap, adap->mbox, adap->pf, vf + 1, 1, &fw_pfvf,
			    &fw_class);
	if (ret) {
		dev_err(adap->pdev_dev,
			"Err %d in binding VF %d to Traffic Class %d\n",
			ret, vf, class_id);
		return -EINVAL;
	}
	dev_info(adap->pdev_dev, "PF %d VF %d is bound to Class %d\n",
		 adap->pf, vf, class_id);
	adap->vfinfo[vf].tx_rate = max_tx_rate;
	return 0;
}
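
/* Illustrative arithmetic (editorial, not from the original source): for
 * a hypothetical link MTU of 1500, the Traffic Class packet size used
 * above is 1500 - 14 (ethhdr) - 4 (CRC) - 20 (iphdr) - 20 (tcphdr)
 * = 1442 bytes, i.e. the typical IPv4 MSS.
 */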
static int cxgb4_mgmt_set_vf_vlan(struct net_device *dev, int vf,
				  u16 vlan, u8 qos, __be16 vlan_proto)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	int ret;

	if (vf >= adap->num_vfs || vlan > 4095 || qos > 7)
		return -EINVAL;

	if (vlan_proto != htons(ETH_P_8021Q) || qos != 0)
		return -EPROTONOSUPPORT;

	ret = t4_set_vlan_acl(adap, adap->mbox, vf + 1, vlan);
	if (!ret) {
		adap->vfinfo[vf].vlan = vlan;
		return 0;
	}

	dev_err(adap->pdev_dev, "Err %d %s VLAN ACL for PF/VF %d/%d\n",
		ret, (vlan ? "setting" : "clearing"), adap->pf, vf);
	return ret;
}
#endif /* CONFIG_PCI_IOV */

static int cxgb_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;
	struct sockaddr *addr = p;
	struct port_info *pi = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
			    pi->xact_addr_filt, addr->sa_data, true, true);
	if (ret < 0)
		return ret;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	pi->xact_addr_filt = ret;
	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void cxgb_netpoll(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;

	if (adap->flags & USING_MSIX) {
		int i;
		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];

		for (i = pi->nqsets; i; i--, rx++)
			t4_sge_intr_msix(0, &rx->rspq);
	} else
		t4_intr_handler(adap)(0, adap);
}
#endif

static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adap = pi->adapter;
	struct sched_class *e;
	struct ch_sched_params p;
	struct ch_sched_queue qe;
	u32 req_rate;
	int err = 0;

	if (!can_sched(dev))
		return -ENOTSUPP;

	if (index < 0 || index > pi->nqsets - 1)
		return -EINVAL;

	if (!(adap->flags & FULL_INIT_DONE)) {
		dev_err(adap->pdev_dev,
			"Failed to rate limit on queue %d. Link Down?\n",
			index);
		return -EINVAL;
	}

	/* Convert from Mbps to Kbps */
	req_rate = rate * 1000;

	/* Max rate is 100 Gbps */
	if (req_rate > SCHED_MAX_RATE_KBPS) {
		dev_err(adap->pdev_dev,
			"Invalid rate %u Mbps, Max rate is %u Mbps\n",
			rate, SCHED_MAX_RATE_KBPS / 1000);
		return -ERANGE;
	}

	/* First unbind the queue from any existing class */
	memset(&qe, 0, sizeof(qe));
	qe.queue = index;
	qe.class = SCHED_CLS_NONE;

	err = cxgb4_sched_class_unbind(dev, (void *)(&qe), SCHED_QUEUE);
	if (err) {
		dev_err(adap->pdev_dev,
			"Unbinding Queue %d on port %d fail. Err: %d\n",
			index, pi->port_id, err);
		return err;
	}

	/* Queue already unbound */
	if (!req_rate)
		return 0;

	/* Fetch any available unused or matching scheduling class */
	memset(&p, 0, sizeof(p));
	p.type = SCHED_CLASS_TYPE_PACKET;
	p.u.params.level    = SCHED_CLASS_LEVEL_CL_RL;
	p.u.params.mode     = SCHED_CLASS_MODE_CLASS;
	p.u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS;
	p.u.params.ratemode = SCHED_CLASS_RATEMODE_ABS;
	p.u.params.channel  = pi->tx_chan;
	p.u.params.class    = SCHED_CLS_NONE;
	p.u.params.minrate  = 0;
	p.u.params.maxrate  = req_rate;
	p.u.params.weight   = 0;
	p.u.params.pktsize  = dev->mtu;

	e = cxgb4_sched_class_alloc(dev, &p);
	if (!e)
		return -ENOMEM;

	/* Bind the queue to a scheduling class */
	memset(&qe, 0, sizeof(qe));
	qe.queue = index;
	qe.class = e->idx;

	err = cxgb4_sched_class_bind(dev, (void *)(&qe), SCHED_QUEUE);
	if (err)
		dev_err(adap->pdev_dev,
			"Queue rate limiting failed. Err: %d\n", err);
	return err;
}
static int cxgb_setup_tc_flower(struct net_device *dev,
				struct tc_cls_flower_offload *cls_flower)
{
	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return cxgb4_tc_flower_replace(dev, cls_flower);
	case TC_CLSFLOWER_DESTROY:
		return cxgb4_tc_flower_destroy(dev, cls_flower);
	case TC_CLSFLOWER_STATS:
		return cxgb4_tc_flower_stats(dev, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int cxgb_setup_tc_cls_u32(struct net_device *dev,
				 struct tc_cls_u32_offload *cls_u32)
{
	switch (cls_u32->command) {
	case TC_CLSU32_NEW_KNODE:
	case TC_CLSU32_REPLACE_KNODE:
		return cxgb4_config_knode(dev, cls_u32);
	case TC_CLSU32_DELETE_KNODE:
		return cxgb4_delete_knode(dev, cls_u32);
	default:
		return -EOPNOTSUPP;
	}
}

static int cxgb_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				  void *cb_priv)
{
	struct net_device *dev = cb_priv;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adap = netdev2adap(dev);

	if (!(adap->flags & FULL_INIT_DONE)) {
		dev_err(adap->pdev_dev,
			"Failed to setup tc on port %d. Link Down?\n",
			pi->port_id);
		return -EINVAL;
	}

	if (!tc_cls_can_offload_and_chain0(dev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSU32:
		return cxgb_setup_tc_cls_u32(dev, type_data);
	case TC_SETUP_CLSFLOWER:
		return cxgb_setup_tc_flower(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int cxgb_setup_tc_block(struct net_device *dev,
			       struct tc_block_offload *f)
{
	struct port_info *pi = netdev2pinfo(dev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, cxgb_setup_tc_block_cb,
					     pi, dev);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, cxgb_setup_tc_block_cb, pi);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int cxgb_setup_tc(struct net_device *dev, enum tc_setup_type type,
			 void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return cxgb_setup_tc_block(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
static void cxgb_del_udp_tunnel(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;
	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
	u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
	int ret = 0, i;

	if (chip_ver < CHELSIO_T6)
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		if (!adapter->vxlan_port_cnt ||
		    adapter->vxlan_port != ti->port)
			return; /* Invalid VxLAN destination port */

		adapter->vxlan_port_cnt--;
		if (adapter->vxlan_port_cnt)
			return;

		adapter->vxlan_port = 0;
		t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A, 0);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (!adapter->geneve_port_cnt ||
		    adapter->geneve_port != ti->port)
			return; /* Invalid GENEVE destination port */

		adapter->geneve_port_cnt--;
		if (adapter->geneve_port_cnt)
			return;

		adapter->geneve_port = 0;
		t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A, 0);
		break;
	default:
		return;
	}

	/* Matchall mac entries can be deleted only after all tunnel ports
	 * are brought down or removed.
	 */
	if (!adapter->rawf_cnt)
		return;
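	/* The raw MAC region holds one match-all entry per port, so a
	 * port's filter index is rawf_start + port_id.
	 */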
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		ret = t4_free_raw_mac_filt(adapter, pi->viid,
					   match_all_mac, match_all_mac,
					   adapter->rawf_start +
					   pi->port_id,
					   1, pi->port_id, false);
		if (ret < 0) {
			netdev_info(netdev, "Failed to free mac filter entry, for port %d\n",
				    i);
			return;
		}
		atomic_dec(&adapter->mps_encap[adapter->rawf_start +
			   pi->port_id].refcnt);
	}
}
static void cxgb_add_udp_tunnel(struct net_device *netdev,
				struct udp_tunnel_info *ti)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;
	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
	u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
	int i, ret;

	if (chip_ver < CHELSIO_T6 || !adapter->rawf_cnt)
		return;

	switch (ti->type) {
	case UDP_TUNNEL_TYPE_VXLAN:
		/* Callback for adding vxlan port can be called with the same
		 * port for both IPv4 and IPv6. We should not disable the
		 * offloading when the same port for both protocols is added
		 * and later one of them is removed.
		 */
		if (adapter->vxlan_port_cnt &&
		    adapter->vxlan_port == ti->port) {
			adapter->vxlan_port_cnt++;
			return;
		}

		/* We will support only one VxLAN port */
		if (adapter->vxlan_port_cnt) {
			netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
				    be16_to_cpu(adapter->vxlan_port),
				    be16_to_cpu(ti->port));
			return;
		}

		adapter->vxlan_port = ti->port;
		adapter->vxlan_port_cnt = 1;

		t4_write_reg(adapter, MPS_RX_VXLAN_TYPE_A,
			     VXLAN_V(be16_to_cpu(ti->port)) | VXLAN_EN_F);
		break;
	case UDP_TUNNEL_TYPE_GENEVE:
		if (adapter->geneve_port_cnt &&
		    adapter->geneve_port == ti->port) {
			adapter->geneve_port_cnt++;
			return;
		}

		/* We will support only one GENEVE port */
		if (adapter->geneve_port_cnt) {
			netdev_info(netdev, "UDP port %d already offloaded, not adding port %d\n",
				    be16_to_cpu(adapter->geneve_port),
				    be16_to_cpu(ti->port));
			return;
		}

		adapter->geneve_port = ti->port;
		adapter->geneve_port_cnt = 1;

		t4_write_reg(adapter, MPS_RX_GENEVE_TYPE_A,
			     GENEVE_V(be16_to_cpu(ti->port)) | GENEVE_EN_F);
		break;
	default:
		return;
	}

	/* Create a 'match all' mac filter entry for inner mac,
	 * if raw mac interface is supported. Once the linux kernel provides
	 * driver entry points for adding/deleting the inner mac addresses,
	 * we will remove this 'match all' entry and fallback to adding
	 * exact match filters.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);

		ret = t4_alloc_raw_mac_filt(adapter, pi->viid,
					    match_all_mac,
					    match_all_mac,
					    adapter->rawf_start +
					    pi->port_id,
					    1, pi->port_id, false);
		if (ret < 0) {
			netdev_info(netdev, "Failed to allocate a mac filter entry, not adding port %d\n",
				    be16_to_cpu(ti->port));
			cxgb_del_udp_tunnel(netdev, ti);
			return;
		}
		atomic_inc(&adapter->mps_encap[ret].refcnt);
	}
}
static netdev_features_t cxgb_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	if (CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
		return features;

	/* Check if hw supports offload for this packet */
	if (!skb->encapsulation || cxgb_encap_offload_supported(skb))
		return features;

	/* Offload is not supported for this encapsulated packet */
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}

static netdev_features_t cxgb_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	/* Disable GRO, if RX_CSUM is disabled */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_GRO;

	return features;
}

static const struct net_device_ops cxgb4_netdev_ops = {
	.ndo_open             = cxgb_open,
	.ndo_stop             = cxgb_close,
	.ndo_start_xmit       = t4_eth_xmit,
	.ndo_select_queue     = cxgb_select_queue,
	.ndo_get_stats64      = cxgb_get_stats,
	.ndo_set_rx_mode      = cxgb_set_rxmode,
	.ndo_set_mac_address  = cxgb_set_mac_addr,
	.ndo_set_features     = cxgb_set_features,
	.ndo_validate_addr    = eth_validate_addr,
	.ndo_do_ioctl         = cxgb_ioctl,
	.ndo_change_mtu       = cxgb_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller  = cxgb_netpoll,
#endif
#ifdef CONFIG_CHELSIO_T4_FCOE
	.ndo_fcoe_enable      = cxgb_fcoe_enable,
	.ndo_fcoe_disable     = cxgb_fcoe_disable,
#endif /* CONFIG_CHELSIO_T4_FCOE */
	.ndo_set_tx_maxrate   = cxgb_set_tx_maxrate,
	.ndo_setup_tc         = cxgb_setup_tc,
	.ndo_udp_tunnel_add   = cxgb_add_udp_tunnel,
	.ndo_udp_tunnel_del   = cxgb_del_udp_tunnel,
	.ndo_features_check   = cxgb_features_check,
	.ndo_fix_features     = cxgb_fix_features,
};
#ifdef CONFIG_PCI_IOV
static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
	.ndo_open             = cxgb4_mgmt_open,
	.ndo_set_vf_mac       = cxgb4_mgmt_set_vf_mac,
	.ndo_get_vf_config    = cxgb4_mgmt_get_vf_config,
	.ndo_set_vf_rate      = cxgb4_mgmt_set_vf_rate,
	.ndo_get_phys_port_id = cxgb4_mgmt_get_phys_port_id,
	.ndo_set_vf_vlan      = cxgb4_mgmt_set_vf_vlan,
};
#endif

static void cxgb4_mgmt_get_drvinfo(struct net_device *dev,
				   struct ethtool_drvinfo *info)
{
	struct adapter *adapter = netdev2adap(dev);

	strlcpy(info->driver, cxgb4_driver_name, sizeof(info->driver));
	strlcpy(info->version, cxgb4_driver_version,
		sizeof(info->version));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}

static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
	.get_drvinfo = cxgb4_mgmt_get_drvinfo,
};
static void notify_fatal_err(struct work_struct *work)
{
	struct adapter *adap;

	adap = container_of(work, struct adapter, fatal_err_notify_task);
	notify_ulds(adap, CXGB4_STATE_FATAL_ERROR);
}

void t4_fatal_err(struct adapter *adap)
{
	int port;

	if (pci_channel_offline(adap->pdev))
		return;

	/* Disable the SGE since ULDs are going to free resources that
	 * could be exposed to the adapter.  RDMA MWs for example...
	 */
	t4_shutdown_adapter(adap);
	for_each_port(adap, port) {
		struct net_device *dev = adap->port[port];

		/* If we get here in very early initialization the network
		 * devices may not have been set up yet.
		 */
		if (!dev)
			continue;

		netif_tx_stop_all_queues(dev);
		netif_carrier_off(dev);
	}
	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
	queue_work(adap->workq, &adap->fatal_err_notify_task);
}
static void setup_memwin(struct adapter *adap)
{
	u32 nic_win_base = t4_get_util_window(adap);

	t4_setup_memwin(adap, nic_win_base, MEMWIN_NIC);
}
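/* Expose the on-chip RDMA queue (OCQ) region through PCIe memory access
 * window 3 so it can be memory-mapped; the WINDOW field of the register
 * takes log2 of the window size in KB.
 */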
static void setup_memwin_rdma(struct adapter *adap)
{
	if (adap->vres.ocq.size) {
		u32 start;
		unsigned int sz_kb;

		start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
		start &= PCI_BASE_ADDRESS_MEM_MASK;
		start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
			     start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
		t4_write_reg(adap,
			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
			     adap->vres.ocq.start);
		t4_read_reg(adap,
			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
	}
}
/* HMA Definitions */

/* The maximum number of addresses that can be sent in a single FW cmd */
#define HMA_MAX_ADDR_IN_CMD	5

#define HMA_PAGE_SIZE		PAGE_SIZE

#define HMA_MAX_NO_FW_ADDRESS	(16 << 10)  /* FW supports 16K addresses */

#define HMA_PAGE_ORDER					\
	((HMA_PAGE_SIZE < HMA_MAX_NO_FW_ADDRESS) ?	\
	ilog2(HMA_MAX_NO_FW_ADDRESS / HMA_PAGE_SIZE) : 0)

/* The minimum and maximum possible HMA sizes that can be specified in the FW
 * configuration (in units of MB).
 */
#define HMA_MIN_TOTAL_SIZE	1
#define HMA_MAX_TOTAL_SIZE				\
	(((HMA_PAGE_SIZE << HMA_PAGE_ORDER) *		\
	  HMA_MAX_NO_FW_ADDRESS) >> 20)
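/* Worked example, assuming a 4 KB PAGE_SIZE: HMA_PAGE_ORDER evaluates to
 * ilog2(16384 / 4096) = 2, so each chunk handed to the firmware is an
 * order-2 (16 KB) allocation, and HMA_MAX_TOTAL_SIZE comes out to
 * ((4 KB << 2) * 16K addresses) >> 20 = 256 MB.
 */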
static void adap_free_hma_mem(struct adapter *adapter)
{
	struct scatterlist *iter;
	struct page *page;
	int i;

	if (!adapter->hma.sgt)
		return;

	if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) {
		dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl,
			     adapter->hma.sgt->nents, PCI_DMA_BIDIRECTIONAL);
		adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG;
	}

	for_each_sg(adapter->hma.sgt->sgl, iter,
		    adapter->hma.sgt->orig_nents, i) {
		page = sg_page(iter);
		if (page)
			__free_pages(page, HMA_PAGE_ORDER);
	}

	kfree(adapter->hma.phy_addr);
	sg_free_table(adapter->hma.sgt);
	kfree(adapter->hma.sgt);
	adapter->hma.sgt = NULL;
}
static int adap_config_hma(struct adapter *adapter)
{
	struct scatterlist *sgl, *iter;
	struct sg_table *sgt;
	struct page *newpage;
	unsigned int i, j, k;
	u32 param, hma_size;
	unsigned int ncmds;
	size_t page_size;
	u32 page_order;
	int node, ret;

	/* HMA is supported only for T6+ cards.
	 * Avoid initializing HMA in kdump kernels.
	 */
	if (is_kdump_kernel() ||
	    CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
		return 0;

	/* Get the HMA region size required by fw */
	param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_HMA_SIZE));
	ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
			      1, &param, &hma_size);
	/* An error means card has its own memory or HMA is not supported by
	 * the firmware. Return without any errors.
	 */
	if (ret || !hma_size)
		return 0;

	if (hma_size < HMA_MIN_TOTAL_SIZE ||
	    hma_size > HMA_MAX_TOTAL_SIZE) {
		dev_err(adapter->pdev_dev,
			"HMA size %uMB beyond bounds (%u-%lu)MB\n",
			hma_size, HMA_MIN_TOTAL_SIZE, HMA_MAX_TOTAL_SIZE);
		return -EINVAL;
	}

	page_size = HMA_PAGE_SIZE;
	page_order = HMA_PAGE_ORDER;
	adapter->hma.sgt = kzalloc(sizeof(*adapter->hma.sgt), GFP_KERNEL);
	if (unlikely(!adapter->hma.sgt)) {
		dev_err(adapter->pdev_dev, "HMA SG table allocation failed\n");
		return -ENOMEM;
	}
	sgt = adapter->hma.sgt;
	/* FW returned value will be in MBs */
	sgt->orig_nents = (hma_size << 20) / (page_size << page_order);
	if (sg_alloc_table(sgt, sgt->orig_nents, GFP_KERNEL)) {
		dev_err(adapter->pdev_dev, "HMA SGL allocation failed\n");
		kfree(adapter->hma.sgt);
		adapter->hma.sgt = NULL;
		return -ENOMEM;
	}

	sgl = adapter->hma.sgt->sgl;
	node = dev_to_node(adapter->pdev_dev);
	for_each_sg(sgl, iter, sgt->orig_nents, i) {
		newpage = alloc_pages_node(node, __GFP_NOWARN | GFP_KERNEL |
					   __GFP_ZERO, page_order);
		if (!newpage) {
			dev_err(adapter->pdev_dev,
				"Not enough memory for HMA page allocation\n");
			ret = -ENOMEM;
			goto free_hma;
		}
		sg_set_page(iter, newpage, page_size << page_order, 0);
	}

	sgt->nents = dma_map_sg(adapter->pdev_dev, sgl, sgt->orig_nents,
				DMA_BIDIRECTIONAL);
	if (!sgt->nents) {
		dev_err(adapter->pdev_dev,
			"Not enough memory for HMA DMA mapping\n");
		ret = -ENOMEM;
		goto free_hma;
	}
	adapter->hma.flags |= HMA_DMA_MAPPED_FLAG;

	adapter->hma.phy_addr = kcalloc(sgt->nents, sizeof(dma_addr_t),
					GFP_KERNEL);
	if (unlikely(!adapter->hma.phy_addr)) {
		ret = -ENOMEM;
		goto free_hma;
	}

	for_each_sg(sgl, iter, sgt->nents, i)
		adapter->hma.phy_addr[i] = sg_dma_address(iter);

	ncmds = DIV_ROUND_UP(sgt->nents, HMA_MAX_ADDR_IN_CMD);
	/* Pass on the addresses to firmware */
	for (i = 0, k = 0; i < ncmds; i++, k += HMA_MAX_ADDR_IN_CMD) {
		struct fw_hma_cmd hma_cmd;
		u8 naddr = HMA_MAX_ADDR_IN_CMD;
		u8 soc = 0, eoc = 0;
		u8 hma_mode = 1; /* Presently we support only Page table mode */
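		/* Mark the first command in the sequence with SOC and the
		 * last with EOC so the firmware knows when the page-table
		 * address list is complete.
		 */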
		soc = (i == 0) ? 1 : 0;
		eoc = (i == ncmds - 1) ? 1 : 0;

		/* For last cmd, set naddr corresponding to remaining
		 * addresses
		 */
		if (i == ncmds - 1) {
			naddr = sgt->nents % HMA_MAX_ADDR_IN_CMD;
			naddr = naddr ? naddr : HMA_MAX_ADDR_IN_CMD;
		}
		memset(&hma_cmd, 0, sizeof(hma_cmd));
		hma_cmd.op_pkd = htonl(FW_CMD_OP_V(FW_HMA_CMD) |
				       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
		hma_cmd.retval_len16 = htonl(FW_LEN16(hma_cmd));

		hma_cmd.mode_to_pcie_params =
			htonl(FW_HMA_CMD_MODE_V(hma_mode) |
			      FW_HMA_CMD_SOC_V(soc) | FW_HMA_CMD_EOC_V(eoc));

		/* HMA cmd size specified in MBs */
		hma_cmd.naddr_size =
			htonl(FW_HMA_CMD_SIZE_V(hma_size) |
			      FW_HMA_CMD_NADDR_V(naddr));

		/* Total Page size specified in units of 4K */
		hma_cmd.addr_size_pkd =
			htonl(FW_HMA_CMD_ADDR_SIZE_V
				((page_size << page_order) >> 12));

		/* Fill the (up to HMA_MAX_ADDR_IN_CMD) addresses carried by
		 * this command
		 */
		for (j = 0; j < naddr; j++) {
			hma_cmd.phy_address[j] =
				cpu_to_be64(adapter->hma.phy_addr[j + k]);
		}
		ret = t4_wr_mbox(adapter, adapter->mbox, &hma_cmd,
				 sizeof(hma_cmd), &hma_cmd);
		if (ret) {
			dev_err(adapter->pdev_dev,
				"HMA FW command failed with err %d\n", ret);
			goto free_hma;
		}
	}

	if (!ret)
		dev_info(adapter->pdev_dev,
			 "Reserved %uMB host memory for HMA\n", hma_size);
	return ret;

free_hma:
	adap_free_hma_mem(adapter);
	return ret;
}
static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
{
	u32 v;
	int ret;

	/* get device capabilities */
	memset(c, 0, sizeof(*c));
	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_READ_F);
	c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
	if (ret < 0)
		return ret;

	c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			       FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
	ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
	if (ret < 0)
		return ret;

	ret = t4_config_glbl_rss(adap, adap->pf,
				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
	if (ret < 0)
		return ret;

	ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
			  MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
			  FW_CMD_CAP_PF);
	if (ret < 0)
		return ret;

	t4_sge_init(adap);

	/* tweak some settings */
	t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
	t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
	t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
	v = t4_read_reg(adap, TP_PIO_DATA_A);
	t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);

	/* first 4 Tx modulation queues point to consecutive Tx channels */
	adap->params.tp.tx_modq_map = 0xE4;
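	/* 0xE4 is 2#11_10_01_00: two bits per modulation queue, i.e. the
	 * identity mapping queue 0 -> channel 0 ... queue 3 -> channel 3.
	 */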
	t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
		     TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));

	/* associate each Tx modulation queue with consecutive Tx channels */
	v = 0x84218421;
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_HDR_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_FIFO_A);
	t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
			  &v, 1, TP_TX_SCHED_PCMD_A);

#define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
	if (is_offload(adap)) {
		t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
		t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
			     TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
			     TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
	}

	/* get basic stuff going */
	return t4_early_init(adap, adap->pf);
}
/*
 * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
 */
#define MAX_ATIDS 8192U

/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 *
 * If the firmware we're dealing with has Configuration File support, then
 * we use that to perform all configuration
 */

/*
 * Tweak configuration based on module parameters, etc.  Most of these have
 * defaults assigned to them by Firmware Configuration Files (if we're using
 * them) but need to be explicitly set if we're using hard-coded
 * initialization.  But even in the case of using Firmware Configuration
 * Files, we'd like to expose the ability to change these via module
 * parameters so these are essentially common tweaks/settings for
 * Configuration Files and hard-coded initialization ...
 */
static int adap_init0_tweaks(struct adapter *adapter)
{
	/*
	 * Fix up various Host-Dependent Parameters like Page Size, Cache
	 * Line Size, etc.  The firmware default is for a 4KB Page Size and
	 * 64B Cache Line Size ...
	 */
	t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);

	/*
	 * Process module parameters which affect early initialization.
	 */
	if (rx_dma_offset != 2 && rx_dma_offset != 0) {
		dev_err(&adapter->pdev->dev,
			"Ignoring illegal rx_dma_offset=%d, using 2\n",
			rx_dma_offset);
		rx_dma_offset = 2;
	}
	t4_set_reg_field(adapter, SGE_CONTROL_A,
			 PKTSHIFT_V(PKTSHIFT_M),
			 PKTSHIFT_V(rx_dma_offset));

	/*
	 * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
	 * adds the pseudo header itself.
	 */
	t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
			       CSUM_HAS_PSEUDO_HDR_F, 0);

	return 0;
}
/* 10Gb/s-BT PHY Support. chip-external 10Gb/s-BT PHYs are complex chips
 * unto themselves and they contain their own firmware to perform their
 * tasks ...
 */
static int phy_aq1202_version(const u8 *phy_fw_data,
			      size_t phy_fw_size)
{
	int offset;

	/* At offset 0x8 you're looking for the primary image's
	 * starting offset which is 3 Bytes wide
	 *
	 * At offset 0xa of the primary image, you look for the offset
	 * of the DRAM segment which is 3 Bytes wide.
	 *
	 * The FW version is at offset 0x27e of the DRAM and is 2 Bytes
	 * wide
	 */
	#define be16(__p) (((__p)[0] << 8) | (__p)[1])
	#define le16(__p) ((__p)[0] | ((__p)[1] << 8))
	#define le24(__p) (le16(__p) | ((__p)[2] << 16))

	offset = le24(phy_fw_data + 0x8) << 12;
	offset = le24(phy_fw_data + offset + 0xa);
	return be16(phy_fw_data + offset + 0x27e);

	#undef be16
	#undef le16
	#undef le24
}
static struct info_10gbt_phy_fw {
	unsigned int phy_fw_id;		/* PCI Device ID */
	char *phy_fw_file;		/* /lib/firmware/ PHY Firmware file */
	int (*phy_fw_version)(const u8 *phy_fw_data, size_t phy_fw_size);
	int phy_flash;			/* Has FLASH for PHY Firmware */
} phy_info_array[] = {
	{
		PHY_AQ1202_DEVICEID,
		PHY_AQ1202_FIRMWARE,
		phy_aq1202_version,
		1,
	},
	{
		PHY_BCM84834_DEVICEID,
		PHY_BCM84834_FIRMWARE,
		NULL,
		0,
	},
	{ 0, NULL, NULL },
};

static struct info_10gbt_phy_fw *find_phy_info(int devid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(phy_info_array); i++) {
		if (phy_info_array[i].phy_fw_id == devid)
			return &phy_info_array[i];
	}
	return NULL;
}
/* Handle updating of chip-external 10Gb/s-BT PHY firmware.  This needs to
 * happen after the FW_RESET_CMD but before the FW_INITIALIZE_CMD.  On error
 * we return a negative error number.  If we transfer new firmware we return 1
 * (from t4_load_phy_fw()).  If we don't do anything we return 0.
 */
static int adap_init0_phy(struct adapter *adap)
{
	const struct firmware *phyf;
	int ret;
	struct info_10gbt_phy_fw *phy_info;

	/* Use the device ID to determine which PHY file to flash.
	 */
	phy_info = find_phy_info(adap->pdev->device);
	if (!phy_info) {
		dev_warn(adap->pdev_dev,
			 "No PHY Firmware file found for this PHY\n");
		return -EOPNOTSUPP;
	}

	/* If we have a T4 PHY firmware file under /lib/firmware/cxgb4/, then
	 * use that. The adapter firmware provides us with a memory buffer
	 * where we can load a PHY firmware file from the host if we want to
	 * override the PHY firmware File in flash.
	 */
	ret = request_firmware_direct(&phyf, phy_info->phy_fw_file,
				      adap->pdev_dev);
	if (ret < 0) {
		/* For adapters without FLASH attached to PHY for their
		 * firmware, it's obviously a fatal error if we can't get the
		 * firmware to the adapter.  For adapters with PHY firmware
		 * FLASH storage, it's worth a warning if we can't find the
		 * PHY Firmware but we'll neuter the error ...
		 */
		dev_err(adap->pdev_dev, "unable to find PHY Firmware image "
			"/lib/firmware/%s, error %d\n",
			phy_info->phy_fw_file, -ret);
		if (phy_info->phy_flash) {
			int cur_phy_fw_ver = 0;

			t4_phy_fw_ver(adap, &cur_phy_fw_ver);
			dev_warn(adap->pdev_dev, "continuing with on-adapter "
				 "FLASH copy, version %#x\n", cur_phy_fw_ver);
			ret = 0;
		}

		return ret;
	}

	/* Load PHY Firmware onto adapter.
	 */
	ret = t4_load_phy_fw(adap, MEMWIN_NIC, &adap->win0_lock,
			     phy_info->phy_fw_version,
			     (u8 *)phyf->data, phyf->size);
	if (ret < 0)
		dev_err(adap->pdev_dev, "PHY Firmware transfer error %d\n",
			-ret);
	else if (ret > 0) {
		int new_phy_fw_ver = 0;

		if (phy_info->phy_fw_version)
			new_phy_fw_ver = phy_info->phy_fw_version(phyf->data,
								  phyf->size);
		dev_info(adap->pdev_dev, "Successfully transferred PHY "
			 "Firmware /lib/firmware/%s, version %#x\n",
			 phy_info->phy_fw_file, new_phy_fw_ver);
	}

	release_firmware(phyf);

	return ret;
}
/*
 * Attempt to initialize the adapter via a Firmware Configuration File.
 */
static int adap_init0_config(struct adapter *adapter, int reset)
{
	struct fw_caps_config_cmd caps_cmd;
	const struct firmware *cf;
	unsigned long mtype = 0, maddr = 0;
	u32 finiver, finicsum, cfcsum;
	int ret;
	int config_issued = 0;
	char *fw_config_file, fw_config_file_path[256];
	char *config_name = NULL;

	/*
	 * Reset device if necessary.
	 */
	if (reset) {
		ret = t4_fw_reset(adapter, adapter->mbox,
				  PIORSTMODE_F | PIORST_F);
		if (ret < 0)
			goto bye;
	}

	/* If this is a 10Gb/s-BT adapter make sure the chip-external
	 * 10Gb/s-BT PHYs have up-to-date firmware.  Note that this step needs
	 * to be performed after any global adapter RESET above since some
	 * PHYs only have local RAM copies of the PHY firmware.
	 */
	if (is_10gbt_device(adapter->pdev->device)) {
		ret = adap_init0_phy(adapter);
		if (ret < 0)
			goto bye;
	}
	/*
	 * If we have a T4 configuration file under /lib/firmware/cxgb4/,
	 * then use that.  Otherwise, use the configuration file stored
	 * in the adapter flash ...
	 */
	switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
	case CHELSIO_T4:
		fw_config_file = FW4_CFNAME;
		break;
	case CHELSIO_T5:
		fw_config_file = FW5_CFNAME;
		break;
	case CHELSIO_T6:
		fw_config_file = FW6_CFNAME;
		break;
	default:
		dev_err(adapter->pdev_dev, "Device %d is not supported\n",
			adapter->pdev->device);
		ret = -EINVAL;
		goto bye;
	}

	ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
	if (ret < 0) {
		config_name = "On FLASH";
		mtype = FW_MEMTYPE_CF_FLASH;
		maddr = t4_flash_cfg_addr(adapter);
	} else {
		u32 params[7], val[7];

		sprintf(fw_config_file_path,
			"/lib/firmware/%s", fw_config_file);
		config_name = fw_config_file_path;

		if (cf->size >= FLASH_CFG_MAX_SIZE)
			ret = -ENOMEM;
		else {
			params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
			ret = t4_query_params(adapter, adapter->mbox,
					      adapter->pf, 0, 1, params, val);
			if (ret == 0) {
				/*
				 * For t4_memory_rw() below addresses and
				 * sizes have to be in terms of multiples of 4
				 * bytes.  So, if the Configuration File isn't
				 * a multiple of 4 bytes in length we'll have
				 * to write that out separately since we can't
				 * guarantee that the bytes following the
				 * residual byte in the buffer returned by
				 * request_firmware() are zeroed out ...
				 */
				size_t resid = cf->size & 0x3;
				size_t size = cf->size & ~0x3;
				__be32 *data = (__be32 *)cf->data;

				mtype = FW_PARAMS_PARAM_Y_G(val[0]);
				maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;

				spin_lock(&adapter->win0_lock);
				ret = t4_memory_rw(adapter, 0, mtype, maddr,
						   size, data, T4_MEMORY_WRITE);
				if (ret == 0 && resid != 0) {
					union {
						__be32 word;
						char buf[4];
					} last;
					int i;

					last.word = data[size >> 2];
					for (i = resid; i < 4; i++)
						last.buf[i] = 0;
					ret = t4_memory_rw(adapter, 0, mtype,
							   maddr + size,
							   4, &last.word,
							   T4_MEMORY_WRITE);
				}
				spin_unlock(&adapter->win0_lock);
			}
		}

		release_firmware(cf);
		if (ret)
			goto bye;
	}

	/*
	 * Issue a Capability Configuration command to the firmware to get it
	 * to parse the Configuration File.  We don't use t4_fw_config_file()
	 * because we want the ability to modify various features after we've
	 * processed the configuration file ...
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 =
		htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
		      FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
		      FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
		      FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);

	/* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
	 * Configuration File in FLASH), our last gasp effort is to use the
	 * Firmware Configuration File which is embedded in the firmware.  A
	 * very few early versions of the firmware didn't have one embedded
	 * but we can ignore those.
	 */
	if (ret == -ENOENT) {
		memset(&caps_cmd, 0, sizeof(caps_cmd));
		caps_cmd.op_to_write =
			htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
			      FW_CMD_REQUEST_F |
			      FW_CMD_READ_F);
		caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
		ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
				 sizeof(caps_cmd), &caps_cmd);
		config_name = "Firmware Default";
	}

	config_issued = 1;
	if (ret < 0)
		goto bye;

	finiver = ntohl(caps_cmd.finiver);
	finicsum = ntohl(caps_cmd.finicsum);
	cfcsum = ntohl(caps_cmd.cfcsum);
	if (finicsum != cfcsum)
		dev_warn(adapter->pdev_dev, "Configuration File checksum "\
			 "mismatch: [fini] csum=%#x, computed csum=%#x\n",
			 finicsum, cfcsum);

	/*
	 * And now tell the firmware to use the configuration we just loaded.
	 */
	caps_cmd.op_to_write =
		htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
		      FW_CMD_REQUEST_F |
		      FW_CMD_WRITE_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
			 NULL);
	if (ret < 0)
		goto bye;

	/*
	 * Tweak configuration based on system architecture, module
	 * parameters, etc.
	 */
	ret = adap_init0_tweaks(adapter);
	if (ret < 0)
		goto bye;

	/* We will proceed even if HMA init fails. */
	ret = adap_config_hma(adapter);
	if (ret)
		dev_err(adapter->pdev_dev,
			"HMA configuration failed with error %d\n", ret);

	/*
	 * And finally tell the firmware to initialize itself using the
	 * parameters from the Configuration File.
	 */
	ret = t4_fw_initialize(adapter, adapter->mbox);
	if (ret < 0)
		goto bye;

	/* Emit Firmware Configuration File information and return
	 * successfully.
	 */
	dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
		 "Configuration File \"%s\", version %#x, computed checksum %#x\n",
		 config_name, finiver, cfcsum);
	return 0;

	/*
	 * Something bad happened.  Return the error ...  (If the "error"
	 * is that there's no Configuration File on the adapter we don't
	 * want to issue a warning since this is fairly common.)
	 */
bye:
	if (config_issued && ret != -ENOENT)
		dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
			 config_name, -ret);
	return ret;
}
static struct fw_info fw_info_array[] = {
	{
		.chip = CHELSIO_T4,
		.fs_name = FW4_CFNAME,
		.fw_mod_name = FW4_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T4,
			.fw_ver = __cpu_to_be32(FW_VERSION(T4)),
			.intfver_nic = FW_INTFVER(T4, NIC),
			.intfver_vnic = FW_INTFVER(T4, VNIC),
			.intfver_ri = FW_INTFVER(T4, RI),
			.intfver_iscsi = FW_INTFVER(T4, ISCSI),
			.intfver_fcoe = FW_INTFVER(T4, FCOE),
		},
	}, {
		.chip = CHELSIO_T5,
		.fs_name = FW5_CFNAME,
		.fw_mod_name = FW5_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T5,
			.fw_ver = __cpu_to_be32(FW_VERSION(T5)),
			.intfver_nic = FW_INTFVER(T5, NIC),
			.intfver_vnic = FW_INTFVER(T5, VNIC),
			.intfver_ri = FW_INTFVER(T5, RI),
			.intfver_iscsi = FW_INTFVER(T5, ISCSI),
			.intfver_fcoe = FW_INTFVER(T5, FCOE),
		},
	}, {
		.chip = CHELSIO_T6,
		.fs_name = FW6_CFNAME,
		.fw_mod_name = FW6_FNAME,
		.fw_hdr = {
			.chip = FW_HDR_CHIP_T6,
			.fw_ver = __cpu_to_be32(FW_VERSION(T6)),
			.intfver_nic = FW_INTFVER(T6, NIC),
			.intfver_vnic = FW_INTFVER(T6, VNIC),
			.intfver_ofld = FW_INTFVER(T6, OFLD),
			.intfver_ri = FW_INTFVER(T6, RI),
			.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
			.intfver_iscsi = FW_INTFVER(T6, ISCSI),
			.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
			.intfver_fcoe = FW_INTFVER(T6, FCOE),
		},
	}
};

static struct fw_info *find_fw_info(int chip)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
		if (fw_info_array[i].chip == chip)
			return &fw_info_array[i];
	}
	return NULL;
}
/*
 * Phase 0 of initialization: contact FW, obtain config, perform basic init.
 */
static int adap_init0(struct adapter *adap)
{
	int ret;
	u32 v, port_vec;
	enum dev_state state;
	u32 params[7], val[7];
	struct fw_caps_config_cmd caps_cmd;
	int reset = 1;

	/* Grab Firmware Device Log parameters as early as possible so we have
	 * access to it for debugging, etc.
	 */
	ret = t4_init_devlog_params(adap);
	if (ret < 0)
		return ret;

	/* Contact FW, advertising Master capability */
	ret = t4_fw_hello(adap, adap->mbox, adap->mbox,
			  is_kdump_kernel() ? MASTER_MUST : MASTER_MAY, &state);
	if (ret < 0) {
		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
			ret);
		return ret;
	}
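	/* On success t4_fw_hello() returns the mailbox of the PF holding
	 * mastership; if that is our own mailbox, we are the Master PF.
	 */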
	if (ret == adap->mbox)
		adap->flags |= MASTER_PF;

	/*
	 * If we're the Master PF Driver and the device is uninitialized,
	 * then let's consider upgrading the firmware ...  (We always want
	 * to check the firmware version number in order to A. get it for
	 * later reporting and B. to warn if the currently loaded firmware
	 * is excessively mismatched relative to the driver.)
	 */
	t4_get_version_info(adap);
	ret = t4_check_fw_version(adap);
	/* If firmware is too old (not supported by driver) force an update. */
	if (ret)
		state = DEV_STATE_UNINIT;
	if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
		struct fw_info *fw_info;
		struct fw_hdr *card_fw;
		const struct firmware *fw;
		const u8 *fw_data = NULL;
		unsigned int fw_size = 0;

		/* This is the firmware whose headers the driver was compiled
		 * against
		 */
		fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
		if (fw_info == NULL) {
			dev_err(adap->pdev_dev,
				"unable to get firmware info for chip %d.\n",
				CHELSIO_CHIP_VERSION(adap->params.chip));
			return -EINVAL;
		}

		/* allocate memory to read the header of the firmware on the
		 * card
		 */
		card_fw = kvzalloc(sizeof(*card_fw), GFP_KERNEL);
		if (!card_fw) {
			ret = -ENOMEM;
			goto bye;
		}

		/* Get FW from /lib/firmware/ */
		ret = request_firmware(&fw, fw_info->fw_mod_name,
				       adap->pdev_dev);
		if (ret < 0) {
			dev_err(adap->pdev_dev,
				"unable to load firmware image %s, error %d\n",
				fw_info->fw_mod_name, ret);
		} else {
			fw_data = fw->data;
			fw_size = fw->size;
		}

		/* upgrade FW logic */
		ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
				 state, &reset);

		/* Cleaning up */
		release_firmware(fw);
		kvfree(card_fw);

		if (ret < 0)
			goto bye;
	}
	/*
	 * Grab VPD parameters.  This should be done after we establish a
	 * connection to the firmware since some of the VPD parameters
	 * (notably the Core Clock frequency) are retrieved via requests to
	 * the firmware.  On the other hand, we need these fairly early on
	 * so we do this right after getting ahold of the firmware.
	 */
	ret = t4_get_vpd_params(adap, &adap->params.vpd);
	if (ret < 0)
		goto bye;

	/*
	 * Find out what ports are available to us.  Note that we need to do
	 * this before calling adap_init0_no_config() since it needs nports
	 * and portvec ...
	 */
	v =
	    FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
	    FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
	if (ret < 0)
		goto bye;

	adap->params.nports = hweight32(port_vec);
	adap->params.portvec = port_vec;

	/* If the firmware is initialized already, emit a simple note to that
	 * effect. Otherwise, it's time to try initializing the adapter.
	 */
	if (state == DEV_STATE_INIT) {
		ret = adap_config_hma(adap);
		if (ret)
			dev_err(adap->pdev_dev,
				"HMA configuration failed with error %d\n",
				ret);
		dev_info(adap->pdev_dev, "Coming up as %s: "\
			 "Adapter already initialized\n",
			 adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
	} else {
		dev_info(adap->pdev_dev, "Coming up as MASTER: "\
			 "Initializing adapter\n");

		/* Find out whether we're dealing with a version of the
		 * firmware which has configuration file support.
		 */
		params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
			     FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
				      params, val);

		/* If the firmware doesn't support Configuration Files,
		 * return an error.
		 */
		if (ret < 0) {
			dev_err(adap->pdev_dev, "firmware doesn't support "
				"Firmware Configuration Files\n");
			goto bye;
		}

		/* The firmware provides us with a memory buffer where we can
		 * load a Configuration File from the host if we want to
		 * override the Configuration File in flash.
		 */
		ret = adap_init0_config(adap, reset);
		if (ret == -ENOENT) {
			dev_err(adap->pdev_dev, "no Configuration File "
				"present on adapter.\n");
			goto bye;
		}
		if (ret < 0) {
			dev_err(adap->pdev_dev, "could not initialize "
				"adapter, error %d\n", -ret);
			goto bye;
		}
	}

	/* Give the SGE code a chance to pull in anything that it needs ...
	 * Note that this must be called after we retrieve our VPD parameters
	 * in order to know how to convert core ticks to seconds, etc.
	 */
	ret = t4_sge_init(adap);
	if (ret < 0)
		goto bye;

	if (is_bypass_device(adap->pdev->device))
		adap->params.bypass = 1;

	/*
	 * Grab some of our basic fundamental operating parameters.
	 */
#define FW_PARAM_DEV(param) \
	(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
	FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))

#define FW_PARAM_PFVF(param) \
	FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
	FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param) | \
	FW_PARAMS_PARAM_Y_V(0) | \
	FW_PARAMS_PARAM_Z_V(0)

	params[0] = FW_PARAM_PFVF(EQ_START);
	params[1] = FW_PARAM_PFVF(L2T_START);
	params[2] = FW_PARAM_PFVF(L2T_END);
	params[3] = FW_PARAM_PFVF(FILTER_START);
	params[4] = FW_PARAM_PFVF(FILTER_END);
	params[5] = FW_PARAM_PFVF(IQFLINT_START);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
	if (ret < 0)
		goto bye;
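	/* The *_START/*_END values above are inclusive indices, hence the
	 * "+ 1" when converting them to element counts below.
	 */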
	adap->sge.egr_start = val[0];
	adap->l2t_start = val[1];
	adap->l2t_end = val[2];
	adap->tids.ftid_base = val[3];
	adap->tids.nftids = val[4] - val[3] + 1;
	adap->sge.ingr_start = val[5];

	if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
		/* Read the raw mps entries. In T6, the last 2 tcam entries
		 * are reserved for raw mac addresses (rawf = 2, one per port).
		 */
		params[0] = FW_PARAM_PFVF(RAWF_START);
		params[1] = FW_PARAM_PFVF(RAWF_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
				      params, val);
		if (ret == 0) {
			adap->rawf_start = val[0];
			adap->rawf_cnt = val[1] - val[0] + 1;
		}
	}

	/* qids (ingress/egress) returned from firmware can be anywhere
	 * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
	 * Hence driver needs to allocate memory for this range to
	 * store the queue info. Get the highest IQFLINT/EQ index returned
	 * in FW_EQ_*_CMD.alloc command.
	 */
	params[0] = FW_PARAM_PFVF(EQ_END);
	params[1] = FW_PARAM_PFVF(IQFLINT_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	if (ret < 0)
		goto bye;
	adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
	adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;

	adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
				    sizeof(*adap->sge.egr_map), GFP_KERNEL);
	if (!adap->sge.egr_map) {
		ret = -ENOMEM;
		goto bye;
	}

	adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
				     sizeof(*adap->sge.ingr_map), GFP_KERNEL);
	if (!adap->sge.ingr_map) {
		ret = -ENOMEM;
		goto bye;
	}

	/* Allocate the memory for the various egress queue bitmaps,
	 * i.e. starving_fl, txq_maperr and blocked_fl.
	 */
	adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
					sizeof(long), GFP_KERNEL);
	if (!adap->sge.starving_fl) {
		ret = -ENOMEM;
		goto bye;
	}

	adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
				       sizeof(long), GFP_KERNEL);
	if (!adap->sge.txq_maperr) {
		ret = -ENOMEM;
		goto bye;
	}

#ifdef CONFIG_DEBUG_FS
	adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
				       sizeof(long), GFP_KERNEL);
	if (!adap->sge.blocked_fl) {
		ret = -ENOMEM;
		goto bye;
	}
#endif

	params[0] = FW_PARAM_PFVF(CLIP_START);
	params[1] = FW_PARAM_PFVF(CLIP_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	if (ret < 0)
		goto bye;
	adap->clipt_start = val[0];
	adap->clipt_end = val[1];

	/* We don't yet have a PARAMs calls to retrieve the number of Traffic
	 * Classes supported by the hardware/firmware so we hard code it here
	 * for now.
	 */
	adap->params.nsched_cls = is_t4(adap->params.chip) ? 15 : 16;

	/* query params related to active filter region */
	params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
	params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
	/* If Active filter size is set we enable establishing
	 * offload connection through firmware work request
	 */
	if ((val[0] != val[1]) && (ret >= 0)) {
		adap->flags |= FW_OFLD_CONN;
		adap->tids.aftid_base = val[0];
		adap->tids.aftid_end = val[1];
	}

	/* If we're running on newer firmware, let it know that we're
	 * prepared to deal with encapsulated CPL messages.  Older
	 * firmware won't understand this and we'll just get
	 * unencapsulated messages ...
	 */
	params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
	val[0] = 1;
	(void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);

	/*
	 * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
	 * capability.  Earlier versions of the firmware didn't have the
	 * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
	 * permission to use ULPTX MEMWRITE DSGL.
	 */
	if (is_t4(adap->params.chip)) {
		adap->params.ulptx_memwrite_dsgl = false;
	} else {
		params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
				      1, params, val);
		adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
	}

	/* See if FW supports FW_RI_FR_NSMR_TPTE_WR work request */
	params[0] = FW_PARAM_DEV(RI_FR_NSMR_TPTE_WR);
	ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
			      1, params, val);
	adap->params.fr_nsmr_tpte_wr_support = (ret == 0 && val[0] != 0);

	/* See if FW supports FW_FILTER2 work request */
	if (is_t4(adap->params.chip)) {
		adap->params.filter2_wr_support = 0;
	} else {
		params[0] = FW_PARAM_DEV(FILTER2_WR);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
				      1, params, val);
		adap->params.filter2_wr_support = (ret == 0 && val[0] != 0);
	}

	/*
	 * Get device capabilities so we can determine what resources we need
	 * to manage.
	 */
	memset(&caps_cmd, 0, sizeof(caps_cmd));
	caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
				     FW_CMD_REQUEST_F | FW_CMD_READ_F);
	caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
	ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
			 &caps_cmd);
	if (ret < 0)
		goto bye;

	if (caps_cmd.ofldcaps ||
	    (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER))) {
		/* query offload-related parameters */
		params[0] = FW_PARAM_DEV(NTID);
		params[1] = FW_PARAM_PFVF(SERVER_START);
		params[2] = FW_PARAM_PFVF(SERVER_END);
		params[3] = FW_PARAM_PFVF(TDDP_START);
		params[4] = FW_PARAM_PFVF(TDDP_END);
		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->tids.ntids = val[0];
		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
		adap->tids.stid_base = val[1];
		adap->tids.nstids = val[2] - val[1] + 1;
		/*
		 * Setup server filter region. Divide the available filter
		 * region into two parts. Regular filters get 1/3rd and server
		 * filters get 2/3rd part. This is only enabled if the
		 * workaround path is enabled.
		 * 1. For regular filters.
		 * 2. Server filters: these are special filters which are used
		 *    to redirect SYN packets to the offload queue.
		 */
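		/* For example, with 495 filter IDs the server filters get
		 * the top 330 IDs and regular filters keep the bottom 165.
		 */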
  3913. if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
  3914. adap->tids.sftid_base = adap->tids.ftid_base +
  3915. DIV_ROUND_UP(adap->tids.nftids, 3);
  3916. adap->tids.nsftids = adap->tids.nftids -
  3917. DIV_ROUND_UP(adap->tids.nftids, 3);
  3918. adap->tids.nftids = adap->tids.sftid_base -
  3919. adap->tids.ftid_base;
  3920. }
  3921. adap->vres.ddp.start = val[3];
		adap->vres.ddp.size = val[4] - val[3] + 1;
		adap->params.ofldq_wr_cred = val[5];

		if (caps_cmd.niccaps & htons(FW_CAPS_CONFIG_NIC_HASHFILTER)) {
			ret = init_hash_filter(adap);
			if (ret < 0)
				goto bye;
		} else {
			adap->params.offload = 1;
			adap->num_ofld_uld += 1;
		}
	}
	if (caps_cmd.rdmacaps) {
		params[0] = FW_PARAM_PFVF(STAG_START);
		params[1] = FW_PARAM_PFVF(STAG_END);
		params[2] = FW_PARAM_PFVF(RQ_START);
		params[3] = FW_PARAM_PFVF(RQ_END);
		params[4] = FW_PARAM_PFVF(PBL_START);
		params[5] = FW_PARAM_PFVF(PBL_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.stag.start = val[0];
		adap->vres.stag.size = val[1] - val[0] + 1;
		adap->vres.rq.start = val[2];
		adap->vres.rq.size = val[3] - val[2] + 1;
		adap->vres.pbl.start = val[4];
		adap->vres.pbl.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_PFVF(SRQ_START);
		params[1] = FW_PARAM_PFVF(SRQ_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
				      params, val);
		if (!ret) {
			adap->vres.srq.start = val[0];
			adap->vres.srq.size = val[1] - val[0] + 1;
		}
		if (adap->vres.srq.size) {
			adap->srq = t4_init_srq(adap->vres.srq.size);
			if (!adap->srq)
				dev_warn(&adap->pdev->dev, "could not allocate SRQ, continuing\n");
		}

		params[0] = FW_PARAM_PFVF(SQRQ_START);
		params[1] = FW_PARAM_PFVF(SQRQ_END);
		params[2] = FW_PARAM_PFVF(CQ_START);
		params[3] = FW_PARAM_PFVF(CQ_END);
		params[4] = FW_PARAM_PFVF(OCQ_START);
		params[5] = FW_PARAM_PFVF(OCQ_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
				      val);
		if (ret < 0)
			goto bye;
		adap->vres.qp.start = val[0];
		adap->vres.qp.size = val[1] - val[0] + 1;
		adap->vres.cq.start = val[2];
		adap->vres.cq.size = val[3] - val[2] + 1;
		adap->vres.ocq.start = val[4];
		adap->vres.ocq.size = val[5] - val[4] + 1;

		params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
		params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
				      val);
		if (ret < 0) {
			adap->params.max_ordird_qp = 8;
			adap->params.max_ird_adapter = 32 * adap->tids.ntids;
			ret = 0;
		} else {
			adap->params.max_ordird_qp = val[0];
			adap->params.max_ird_adapter = val[1];
		}
		dev_info(adap->pdev_dev,
			 "max_ordird_qp %d max_ird_adapter %d\n",
			 adap->params.max_ordird_qp,
			 adap->params.max_ird_adapter);

		/* Enable write_with_immediate if FW supports it */
		params[0] = FW_PARAM_DEV(RDMA_WRITE_WITH_IMM);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
				      val);
		adap->params.write_w_imm_support = (ret == 0 && val[0] != 0);

		/* Enable write_cmpl if FW supports it */
		params[0] = FW_PARAM_DEV(RI_WRITE_CMPL_WR);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params,
				      val);
		adap->params.write_cmpl_support = (ret == 0 && val[0] != 0);
		adap->num_ofld_uld += 2;
	}
	if (caps_cmd.iscsicaps) {
		params[0] = FW_PARAM_PFVF(ISCSI_START);
		params[1] = FW_PARAM_PFVF(ISCSI_END);
		ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
				      params, val);
		if (ret < 0)
			goto bye;
		adap->vres.iscsi.start = val[0];
		adap->vres.iscsi.size = val[1] - val[0] + 1;
		/* LIO target and cxgb4i initiator */
		adap->num_ofld_uld += 2;
	}
	if (caps_cmd.cryptocaps) {
		if (ntohs(caps_cmd.cryptocaps) &
		    FW_CAPS_CONFIG_CRYPTO_LOOKASIDE) {
			params[0] = FW_PARAM_PFVF(NCRYPTO_LOOKASIDE);
			ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
					      2, params, val);
			if (ret < 0) {
				if (ret != -EINVAL)
					goto bye;
			} else {
				adap->vres.ncrypto_fc = val[0];
			}
			adap->num_ofld_uld += 1;
		}
		if (ntohs(caps_cmd.cryptocaps) &
		    FW_CAPS_CONFIG_TLS_INLINE) {
			params[0] = FW_PARAM_PFVF(TLS_START);
			params[1] = FW_PARAM_PFVF(TLS_END);
			ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
					      2, params, val);
			if (ret < 0)
				goto bye;
			adap->vres.key.start = val[0];
			adap->vres.key.size = val[1] - val[0] + 1;
			adap->num_uld += 1;
		}
		adap->params.crypto = ntohs(caps_cmd.cryptocaps);
	}
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV
	/* The MTU/MSS Table is initialized by now, so load their values.  If
	 * we're initializing the adapter, then we'll make any modifications
	 * we want to the MTU/MSS Table and also initialize the congestion
	 * parameters.
	 */
	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
	if (state != DEV_STATE_INIT) {
		int i;

		/* The default MTU Table contains values 1492 and 1500.
		 * However, for TCP, it's better to have two values which are
		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
		 * This allows us to have a TCP Data Payload which is a
		 * multiple of 8 regardless of what combination of TCP Options
		 * are in use (always a multiple of 4 bytes) which is
		 * important for performance reasons.  For instance, if no
		 * options are in use, then we have a 20-byte IP header and a
		 * 20-byte TCP header.  In this case, a 1500-byte MSS would
		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
		 * which is not a multiple of 8.  So using an MSS of 1488 in
		 * this case results in a TCP Data Payload of 1448 bytes which
		 * is a multiple of 8.  On the other hand, if 12-byte TCP Time
		 * Stamps have been negotiated, then an MTU of 1500 bytes
		 * results in a TCP Data Payload of 1448 bytes which, as
		 * above, is a multiple of 8 bytes ...
		 */
		for (i = 0; i < NMTUS; i++)
			if (adap->params.mtus[i] == 1492) {
				adap->params.mtus[i] = 1488;
				break;
			}

		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
			     adap->params.b_wnd);
	}
	t4_init_sge_params(adap);
	adap->flags |= FW_OK;
	t4_init_tp_params(adap, true);
	return 0;
	/*
	 * Something bad happened.  If a command timed out or failed with
	 * -EIO, the firmware is either not operating within its spec or
	 * something catastrophic happened to the HW/FW; stop issuing
	 * commands.
	 */
bye:
	adap_free_hma_mem(adap);
	kfree(adap->sge.egr_map);
	kfree(adap->sge.ingr_map);
	kfree(adap->sge.starving_fl);
	kfree(adap->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
	kfree(adap->sge.blocked_fl);
#endif
	if (ret != -ETIMEDOUT && ret != -EIO)
		t4_fw_bye(adap, adap->mbox);
	return ret;
}

/* EEH callbacks */

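/* eeh_err_detected - called when a PCI channel error is detected.  Mark the
 * firmware as unavailable, tell the ULDs that recovery has started, detach
 * all net devices and quiesce interrupts, then disable the PCI device so
 * the EEH core can reset the slot.  Returns DISCONNECT on a permanent
 * failure, otherwise asks for a slot reset.
 */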
static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
					 pci_channel_state_t state)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		goto out;

	rtnl_lock();
	adap->flags &= ~FW_OK;
	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
	spin_lock(&adap->stats_lock);
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (dev) {
			netif_device_detach(dev);
			netif_carrier_off(dev);
		}
	}
	spin_unlock(&adap->stats_lock);
	disable_interrupts(adap);
	if (adap->flags & FULL_INIT_DONE)
		cxgb_down(adap);
	rtnl_unlock();
	if ((adap->flags & DEV_ENABLED)) {
		pci_disable_device(pdev);
		adap->flags &= ~DEV_ENABLED;
	}
out:	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

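/* eeh_slot_reset - called after the slot has been reset.  Re-enable the PCI
 * device if needed, restore its saved state, clear the AER error status,
 * re-contact the firmware and rerun basic initialization, then re-allocate
 * one virtual interface per port before bringing the queues back up.
 */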
static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
{
	int i, ret;
	struct fw_caps_config_cmd c;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap) {
		pci_restore_state(pdev);
		pci_save_state(pdev);
		return PCI_ERS_RESULT_RECOVERED;
	}

	if (!(adap->flags & DEV_ENABLED)) {
		if (pci_enable_device(pdev)) {
			dev_err(&pdev->dev, "Cannot reenable PCI "
					    "device after reset\n");
			return PCI_ERS_RESULT_DISCONNECT;
		}
		adap->flags |= DEV_ENABLED;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);
	pci_save_state(pdev);
	pci_cleanup_aer_uncorrect_error_status(pdev);

	if (t4_wait_dev_ready(adap->regs) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
		return PCI_ERS_RESULT_DISCONNECT;
	adap->flags |= FW_OK;
	if (adap_init1(adap, &c))
		return PCI_ERS_RESULT_DISCONNECT;

	for_each_port(adap, i) {
		struct port_info *p = adap2pinfo(adap, i);

		ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
				  NULL, NULL);
		if (ret < 0)
			return PCI_ERS_RESULT_DISCONNECT;
		p->viid = ret;
		p->xact_addr_filt = -1;
	}

	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
		     adap->params.b_wnd);
	setup_memwin(adap);
	if (cxgb_up(adap))
		return PCI_ERS_RESULT_DISCONNECT;
	return PCI_ERS_RESULT_RECOVERED;
}

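/* eeh_resume - called when traffic may resume after the reset.  Restart the
 * link and RX mode on every port that was running and re-attach all net
 * devices.
 */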
static void eeh_resume(struct pci_dev *pdev)
{
	int i;
	struct adapter *adap = pci_get_drvdata(pdev);

	if (!adap)
		return;

	rtnl_lock();
	for_each_port(adap, i) {
		struct net_device *dev = adap->port[i];

		if (dev) {
			if (netif_running(dev)) {
				link_start(dev);
				cxgb_set_rxmode(dev);
			}
			netif_device_attach(dev);
		}
	}
	rtnl_unlock();
}

static const struct pci_error_handlers cxgb4_eeh = {
	.error_detected = eeh_err_detected,
	.slot_reset     = eeh_slot_reset,
	.resume         = eeh_resume,
};
/* Return true if the Link Configuration supports "High Speeds" (those greater
 * than 1Gb/s).
 */
static inline bool is_x_10g_port(const struct link_config *lc)
{
	unsigned int speeds, high_speeds;

	speeds = FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_G(lc->pcaps));
	high_speeds = speeds &
		      ~(FW_PORT_CAP32_SPEED_100M | FW_PORT_CAP32_SPEED_1G);

	return high_speeds != 0;
}

/*
 * Perform default configuration of DMA queues depending on the number and type
 * of ports we found and the number of available CPUs.  Most settings can be
 * modified by the admin prior to actual use.
 */
static void cfg_queues(struct adapter *adap)
{
	struct sge *s = &adap->sge;
	int i = 0, n10g = 0, qidx = 0;
#ifndef CONFIG_CHELSIO_T4_DCB
	int q10g = 0;
#endif

	/* Reduce memory usage in kdump environment, disable all offload. */
	if (is_kdump_kernel() || (is_uld(adap) && t4_uld_mem_alloc(adap))) {
		adap->params.offload = 0;
		adap->params.crypto = 0;
	}

	for_each_port(adap, i)
		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging support we need to be able to support up
	 * to 8 Traffic Priorities; each of which will be assigned to its
	 * own TX Queue in order to prevent Head-Of-Line Blocking.
	 */
	if (adap->params.nports * 8 > MAX_ETH_QSETS) {
		dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
			MAX_ETH_QSETS, adap->params.nports * 8);
		BUG_ON(1);
	}

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_kdump_kernel() ? 1 : 8;
		qidx += pi->nqsets;
	}
#else /* !CONFIG_CHELSIO_T4_DCB */
	/*
	 * We default to 1 queue per non-10G port and up to # of cores queues
	 * per 10G port.
	 */
	if (n10g)
		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
	if (q10g > netif_get_num_default_rss_queues())
		q10g = netif_get_num_default_rss_queues();

	if (is_kdump_kernel())
		q10g = 1;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->first_qset = qidx;
		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
		qidx += pi->nqsets;
	}
#endif /* !CONFIG_CHELSIO_T4_DCB */

	s->ethqsets = qidx;
	s->max_ethqsets = qidx;   /* MSI-X may lower it later */

	if (is_uld(adap)) {
		/*
		 * For offload we use 1 queue/channel if all ports are up to
		 * 1G, otherwise we divide all available queues amongst the
		 * channels capped by the number of available cores.
		 */
		if (n10g) {
			i = min_t(int, MAX_OFLD_QSETS, num_online_cpus());
			s->ofldqsets = roundup(i, adap->params.nports);
		} else {
			s->ofldqsets = adap->params.nports;
		}
	}

	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
		struct sge_eth_rxq *r = &s->ethrxq[i];

		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
		r->fl.size = 72;
	}

	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
		s->ethtxq[i].q.size = 1024;

	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
		s->ctrlq[i].q.size = 512;

	if (!is_t4(adap->params.chip))
		s->ptptxq.q.size = 8;

	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
	init_rspq(adap, &s->intrq, 0, 1, 512, 64);
}

/*
 * Reduce the number of Ethernet queues across all ports to at most n.
 * n provides at least one queue per port.
 */
static void reduce_ethqs(struct adapter *adap, int n)
{
	int i;
	struct port_info *pi;

	while (n < adap->sge.ethqsets)
		for_each_port(adap, i) {
			pi = adap2pinfo(adap, i);
			if (pi->nqsets > 1) {
				pi->nqsets--;
				adap->sge.ethqsets--;
				if (adap->sge.ethqsets <= n)
					break;
			}
		}

	n = 0;
	for_each_port(adap, i) {
		pi = adap2pinfo(adap, i);
		pi->first_qset = n;
		n += pi->nqsets;
	}
}

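/* Allocate the per-ULD MSI-X descriptor array and the bitmap used to hand
 * vectors out to the ULDs.  Both are sized for the worst case of
 * MAX_OFLD_QSETS queues for each offload ULD and each PCI ULD.
 */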
static int get_msix_info(struct adapter *adap)
{
	struct uld_msix_info *msix_info;
	unsigned int max_ingq = 0;

	if (is_offload(adap))
		max_ingq += MAX_OFLD_QSETS * adap->num_ofld_uld;
	if (is_pci_uld(adap))
		max_ingq += MAX_OFLD_QSETS * adap->num_uld;
	if (!max_ingq)
		goto out;

	msix_info = kcalloc(max_ingq, sizeof(*msix_info), GFP_KERNEL);
	if (!msix_info)
		return -ENOMEM;

	adap->msix_bmap_ulds.msix_bmap = kcalloc(BITS_TO_LONGS(max_ingq),
						 sizeof(long), GFP_KERNEL);
	if (!adap->msix_bmap_ulds.msix_bmap) {
		kfree(msix_info);
		return -ENOMEM;
	}
	spin_lock_init(&adap->msix_bmap_ulds.lock);
	adap->msix_info_ulds = msix_info;
out:
	return 0;
}

static void free_msix_info(struct adapter *adap)
{
	if (!(adap->num_uld && adap->num_ofld_uld))
		return;

	kfree(adap->msix_info_ulds);
	kfree(adap->msix_bmap_ulds.msix_bmap);
}

/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
#define EXTRA_VECS 2
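/* Request MSI-X vectors for the Ethernet queue sets, the firmware event
 * queue, the non-data interrupts and any ULD queues, then distribute
 * whatever was granted: every group keeps its minimum requirement and the
 * NIC gets first claim on leftovers, shrinking the Ethernet queue sets via
 * reduce_ethqs() when necessary.
 */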
static int enable_msix(struct adapter *adap)
{
	int ofld_need = 0, uld_need = 0;
	int i, j, want, need, allocated;
	struct sge *s = &adap->sge;
	unsigned int nchan = adap->params.nports;
	struct msix_entry *entries;
	int max_ingq = MAX_INGQ;

	if (is_pci_uld(adap))
		max_ingq += (MAX_OFLD_QSETS * adap->num_uld);
	if (is_offload(adap))
		max_ingq += (MAX_OFLD_QSETS * adap->num_ofld_uld);
	entries = kmalloc_array(max_ingq + 1, sizeof(*entries),
				GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	/* map for msix */
	if (get_msix_info(adap)) {
		adap->params.offload = 0;
		adap->params.crypto = 0;
	}

	for (i = 0; i < max_ingq + 1; ++i)
		entries[i].entry = i;

	want = s->max_ethqsets + EXTRA_VECS;
	if (is_offload(adap)) {
		want += adap->num_ofld_uld * s->ofldqsets;
		ofld_need = adap->num_ofld_uld * nchan;
	}
	if (is_pci_uld(adap)) {
		want += adap->num_uld * s->ofldqsets;
		uld_need = adap->num_uld * nchan;
	}
#ifdef CONFIG_CHELSIO_T4_DCB
	/* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
	 * each port.
	 */
	need = 8 * adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
#else
	need = adap->params.nports + EXTRA_VECS + ofld_need + uld_need;
#endif
	allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
	if (allocated < 0) {
		dev_info(adap->pdev_dev, "not enough MSI-X vectors left,"
			 " not using MSI-X\n");
		kfree(entries);
		return allocated;
	}

	/* Distribute available vectors to the various queue groups.
	 * Every group gets its minimum requirement and NIC gets top
	 * priority for leftovers.
	 */
	i = allocated - EXTRA_VECS - ofld_need - uld_need;
	if (i < s->max_ethqsets) {
		s->max_ethqsets = i;
		if (i < s->ethqsets)
			reduce_ethqs(adap, i);
	}
	if (is_uld(adap)) {
		if (allocated < want)
			s->nqs_per_uld = nchan;
		else
			s->nqs_per_uld = s->ofldqsets;
	}

	for (i = 0; i < (s->max_ethqsets + EXTRA_VECS); ++i)
		adap->msix_info[i].vec = entries[i].vector;
	if (is_uld(adap)) {
		for (j = 0 ; i < allocated; ++i, j++) {
			adap->msix_info_ulds[j].vec = entries[i].vector;
			adap->msix_info_ulds[j].idx = i;
		}
		adap->msix_bmap_ulds.mapsize = j;
	}
	dev_info(adap->pdev_dev, "%d MSI-X vectors allocated, "
		 "nic %d per uld %d\n",
		 allocated, s->max_ethqsets, s->nqs_per_uld);

	kfree(entries);
	return 0;
}

#undef EXTRA_VECS
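/* Query the firmware for the RSS mode and allocate each port's RSS mapping
 * table (pi->rss_size entries of u16).
 */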
static int init_rss(struct adapter *adap)
{
	unsigned int i;
	int err;

	err = t4_init_rss_mode(adap, adap->mbox);
	if (err)
		return err;

	for_each_port(adap, i) {
		struct port_info *pi = adap2pinfo(adap, i);

		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
		if (!pi->rss)
			return -ENOMEM;
	}
	return 0;
}

/* Dump basic information about the adapter */
static void print_adapter_info(struct adapter *adapter)
{
	/* Hardware/Firmware/etc. Version/Revision IDs */
	t4_dump_version_info(adapter);

	/* Software/Hardware configuration */
	dev_info(adapter->pdev_dev, "Configuration: %sNIC %s, %s capable\n",
		 is_offload(adapter) ? "R" : "",
		 ((adapter->flags & USING_MSIX) ? "MSI-X" :
		  (adapter->flags & USING_MSI) ? "MSI" : ""),
		 is_offload(adapter) ? "Offload" : "non-Offload");
}

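/* Log the link speeds and module type a port supports, derived from its
 * 32-bit port capabilities.
 */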
static void print_port_info(const struct net_device *dev)
{
	char buf[80];
	char *bufp = buf;
	const char *spd = "";
	const struct port_info *pi = netdev_priv(dev);
	const struct adapter *adap = pi->adapter;

	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
		spd = " 2.5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
		spd = " 5 GT/s";
	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
		spd = " 8 GT/s";

	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100M)
		bufp += sprintf(bufp, "100M/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_1G)
		bufp += sprintf(bufp, "1G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_10G)
		bufp += sprintf(bufp, "10G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_25G)
		bufp += sprintf(bufp, "25G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_40G)
		bufp += sprintf(bufp, "40G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_50G)
		bufp += sprintf(bufp, "50G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_100G)
		bufp += sprintf(bufp, "100G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_200G)
		bufp += sprintf(bufp, "200G/");
	if (pi->link_cfg.pcaps & FW_PORT_CAP32_SPEED_400G)
		bufp += sprintf(bufp, "400G/");
	if (bufp != buf)
		--bufp;
	sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));

	netdev_info(dev, "%s: Chelsio %s (%s) %s\n",
		    dev->name, adap->params.vpd.id, adap->name, buf);
}

/*
 * Free the following resources:
 * - memory used for tables
 * - MSI/MSI-X
 * - net devices
 * - resources FW is holding for us
 */
static void free_some_resources(struct adapter *adapter)
{
	unsigned int i;

	kvfree(adapter->mps_encap);
	kvfree(adapter->smt);
	kvfree(adapter->l2t);
	kvfree(adapter->srq);
	t4_cleanup_sched(adapter);
	kvfree(adapter->tids.tid_tab);
	cxgb4_cleanup_tc_flower(adapter);
	cxgb4_cleanup_tc_u32(adapter);
	kfree(adapter->sge.egr_map);
	kfree(adapter->sge.ingr_map);
	kfree(adapter->sge.starving_fl);
	kfree(adapter->sge.txq_maperr);
#ifdef CONFIG_DEBUG_FS
	kfree(adapter->sge.blocked_fl);
#endif
	disable_msi(adapter);

	for_each_port(adapter, i)
		if (adapter->port[i]) {
			struct port_info *pi = adap2pinfo(adapter, i);

			if (pi->viid != 0)
				t4_free_vi(adapter, adapter->mbox, adapter->pf,
					   0, pi->viid);
			kfree(adap2pinfo(adapter, i)->rss);
			free_netdev(adapter->port[i]);
		}
	if (adapter->flags & FW_OK)
		t4_fw_bye(adapter, adapter->pf);
}

#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
#define SEGMENT_SIZE 128
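/* Translate the chip version encoded in the PCI device ID, plus the PL
 * revision register, into the driver's internal chip type code.
 */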
static int t4_get_chip_type(struct adapter *adap, int ver)
{
	u32 pl_rev = REV_G(t4_read_reg(adap, PL_REV_A));

	switch (ver) {
	case CHELSIO_T4:
		return CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
	case CHELSIO_T5:
		return CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
	case CHELSIO_T6:
		return CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
	default:
		break;
	}
	return -EINVAL;
}

#ifdef CONFIG_PCI_IOV
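/* Set up the netdev used purely as a management interface for the VFs: no
 * L2 addressing, no queues, just the management netdev_ops/ethtool_ops
 * hooks.
 */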
static void cxgb4_mgmt_setup(struct net_device *dev)
{
	dev->type = ARPHRD_NONE;
	dev->mtu = 0;
	dev->hard_header_len = 0;
	dev->addr_len = 0;
	dev->tx_queue_len = 0;
	dev->flags |= IFF_NOARP;
	dev->priv_flags |= IFF_NO_QUEUE;

	/* Initialize the device structure. */
	dev->netdev_ops = &cxgb4_mgmt_netdev_ops;
	dev->ethtool_ops = &cxgb4_mgmt_ethtool_ops;
}

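/* cxgb4_iov_configure - sriov_configure hook.  num_vfs == 0 tears SR-IOV
 * down and frees the management netdev and VF state; a non-zero count
 * verifies ARI support on the parent bridge, creates the "mgmtpf%d,%d"
 * management interface on first use, and then enables that many VFs.
 */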
static int cxgb4_iov_configure(struct pci_dev *pdev, int num_vfs)
{
	struct adapter *adap = pci_get_drvdata(pdev);
	int err = 0;
	int current_vfs = pci_num_vf(pdev);
	u32 pcie_fw;

	pcie_fw = readl(adap->regs + PCIE_FW_A);
	/* Check if fw is initialized */
	if (!(pcie_fw & PCIE_FW_INIT_F)) {
		dev_warn(&pdev->dev, "Device not initialized\n");
		return -EOPNOTSUPP;
	}

	/* If any of the VF's is already assigned to Guest OS, then
	 * SRIOV for the same cannot be modified
	 */
	if (current_vfs && pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
		return current_vfs;
	}
	/* Note that the upper-level code ensures that we're never called with
	 * a non-zero "num_vfs" when we already have VFs instantiated.  But
	 * it never hurts to code defensively.
	 */
	if (num_vfs != 0 && current_vfs != 0)
		return -EBUSY;

	/* Nothing to do for no change. */
	if (num_vfs == current_vfs)
		return num_vfs;

	/* Disable SRIOV when zero is passed. */
	if (!num_vfs) {
		pci_disable_sriov(pdev);
		/* free VF Management Interface */
		unregister_netdev(adap->port[0]);
		free_netdev(adap->port[0]);
		adap->port[0] = NULL;

		/* free VF resources */
		adap->num_vfs = 0;
		kfree(adap->vfinfo);
		adap->vfinfo = NULL;
		return 0;
	}

	if (!current_vfs) {
		struct fw_pfvf_cmd port_cmd, port_rpl;
		struct net_device *netdev;
		unsigned int pmask, port;
		struct pci_dev *pbridge;
		struct port_info *pi;
		char name[IFNAMSIZ];
		u32 devcap2;
		u16 flags;
		int pos;

		/* If we want to instantiate Virtual Functions, then our
		 * parent bridge's PCI-E needs to support Alternative Routing
		 * ID (ARI) because our VFs will show up at function offset 8
		 * and above.
		 */
		pbridge = pdev->bus->self;
		pos = pci_find_capability(pbridge, PCI_CAP_ID_EXP);
		pci_read_config_word(pbridge, pos + PCI_EXP_FLAGS, &flags);
		pci_read_config_dword(pbridge, pos + PCI_EXP_DEVCAP2, &devcap2);

		if ((flags & PCI_EXP_FLAGS_VERS) < 2 ||
		    !(devcap2 & PCI_EXP_DEVCAP2_ARI)) {
			/* Our parent bridge does not support ARI so issue a
			 * warning and skip instantiating the VFs.  They
			 * won't be reachable.
			 */
			dev_warn(&pdev->dev, "Parent bridge %02x:%02x.%x doesn't support ARI; can't instantiate Virtual Functions\n",
				 pbridge->bus->number, PCI_SLOT(pbridge->devfn),
				 PCI_FUNC(pbridge->devfn));
			return -ENOTSUPP;
		}
		memset(&port_cmd, 0, sizeof(port_cmd));
		port_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
						 FW_CMD_REQUEST_F |
						 FW_CMD_READ_F |
						 FW_PFVF_CMD_PFN_V(adap->pf) |
						 FW_PFVF_CMD_VFN_V(0));
		port_cmd.retval_len16 = cpu_to_be32(FW_LEN16(port_cmd));
		err = t4_wr_mbox(adap, adap->mbox, &port_cmd, sizeof(port_cmd),
				 &port_rpl);
		if (err)
			return err;
		pmask = FW_PFVF_CMD_PMASK_G(be32_to_cpu(port_rpl.type_to_neq));
		port = ffs(pmask) - 1;
		/* Allocate VF Management Interface. */
		snprintf(name, IFNAMSIZ, "mgmtpf%d,%d", adap->adap_idx,
			 adap->pf);
		netdev = alloc_netdev(sizeof(struct port_info),
				      name, NET_NAME_UNKNOWN, cxgb4_mgmt_setup);
		if (!netdev)
			return -ENOMEM;

		pi = netdev_priv(netdev);
		pi->adapter = adap;
		pi->lport = port;
		pi->tx_chan = port;
		SET_NETDEV_DEV(netdev, &pdev->dev);

		adap->port[0] = netdev;
		pi->port_id = 0;

		err = register_netdev(adap->port[0]);
		if (err) {
			pr_info("Unable to register VF mgmt netdev %s\n", name);
			free_netdev(adap->port[0]);
			adap->port[0] = NULL;
			return err;
		}
		/* Allocate and set up VF Information. */
		adap->vfinfo = kcalloc(pci_sriov_get_totalvfs(pdev),
				       sizeof(struct vf_info), GFP_KERNEL);
		if (!adap->vfinfo) {
			unregister_netdev(adap->port[0]);
			free_netdev(adap->port[0]);
			adap->port[0] = NULL;
			return -ENOMEM;
		}
		cxgb4_mgmt_fill_vf_station_mac_addr(adap);
	}
	/* Instantiate the requested number of VFs. */
	err = pci_enable_sriov(pdev, num_vfs);
	if (err) {
		pr_info("Unable to instantiate %d VFs\n", num_vfs);
		if (!current_vfs) {
			unregister_netdev(adap->port[0]);
			free_netdev(adap->port[0]);
			adap->port[0] = NULL;
			kfree(adap->vfinfo);
			adap->vfinfo = NULL;
		}
		return err;
	}

	adap->num_vfs = num_vfs;
	return num_vfs;
}
#endif /* CONFIG_PCI_IOV */

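/* init_one - PCI probe routine.  Maps BAR0, identifies the chip, sets up
 * the DMA masks and the adapter work queue, runs adap_init0() against the
 * firmware, allocates and registers one net device per port and finally
 * selects the interrupt mode (MSI-X, MSI or line interrupts).
 */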
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct adapter *adapter;
	static int adap_idx = 1;
	int s_qpp, qpp, num_seg;
	struct port_info *pi;
	bool highdma = false;
	enum chip_type chip;
	void __iomem *regs;
	int func, chip_ver;
	u16 device_id;
	int i, err;
	u32 whoami;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_unmap_bar0;
	}

	adapter->regs = regs;
	err = t4_wait_dev_ready(regs);
	if (err < 0)
		goto out_free_adapter;

	/* We control everything through one PF */
	whoami = t4_read_reg(adapter, PL_WHOAMI_A);
	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
	chip = t4_get_chip_type(adapter, CHELSIO_PCI_ID_VER(device_id));
	if (chip < 0) {
		dev_err(&pdev->dev, "Device %d is not supported\n", device_id);
		err = chip;
		goto out_free_adapter;
	}
	chip_ver = CHELSIO_CHIP_VERSION(chip);
	func = chip_ver <= CHELSIO_T5 ?
	       SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->name = pci_name(pdev);
	adapter->mbox = func;
	adapter->pf = func;
	adapter->params.chip = chip;
	adapter->adap_idx = adap_idx;
	adapter->msg_enable = DFLT_MSG_ENABLE;
	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
				    (sizeof(struct mbox_cmd) *
				     T4_OS_LOG_MBOX_CMDS),
				    GFP_KERNEL);
	if (!adapter->mbox_log) {
		err = -ENOMEM;
		goto out_free_adapter;
	}
	spin_lock_init(&adapter->mbox_lock);
	INIT_LIST_HEAD(&adapter->mlist.list);
	adapter->mbox_log->size = T4_OS_LOG_MBOX_CMDS;
	pci_set_drvdata(pdev, adapter);

	if (func != ent->driver_data) {
		pci_disable_device(pdev);
		pci_save_state(pdev);        /* to restore SR-IOV later */
		return 0;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_free_adapter;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_free_adapter;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);
	adap_idx++;
	adapter->workq = create_singlethread_workqueue("cxgb4");
	if (!adapter->workq) {
		err = -ENOMEM;
		goto out_free_adapter;
	}

	/* PCI device has been enabled */
	adapter->flags |= DEV_ENABLED;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
	/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
	 * Ingress Packet Data to Free List Buffers in order to allow for
	 * chipset performance optimizations between the Root Complex and
	 * Memory Controllers.  (Messages to the associated Ingress Queue
	 * notifying new Packet Placement in the Free Lists Buffers will be
	 * sent without the Relaxed Ordering Attribute, thus guaranteeing that
	 * all preceding PCIe Transaction Layer Packets will be processed
	 * first.)  But some Root Complexes have various issues with Upstream
	 * Transaction Layer Packets with the Relaxed Ordering Attribute set.
	 * PCIe devices sitting under such Root Complexes will have the
	 * Relaxed Ordering bit cleared in their configuration space, so we
	 * check our PCIe configuration space to see if it's flagged with
	 * advice against using Relaxed Ordering.
	 */
	if (!pcie_relaxed_ordering_enabled(pdev))
		adapter->flags |= ROOT_NO_RELAXED_ORDERING;

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);
	spin_lock_init(&adapter->win0_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
	INIT_WORK(&adapter->db_full_task, process_db_full);
	INIT_WORK(&adapter->db_drop_task, process_db_drop);
	INIT_WORK(&adapter->fatal_err_notify_task, notify_fatal_err);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_free_adapter;

	if (is_kdump_kernel()) {
		/* Collect hardware state and append to /proc/vmcore */
		err = cxgb4_cudbg_vmcore_add_dump(adapter);
		if (err) {
			dev_warn(adapter->pdev_dev,
				 "Fail collecting vmcore device dump, err: %d. Continuing\n",
				 err);
			err = 0;
		}
	}

	if (!is_t4(adapter->params.chip)) {
		s_qpp = (QUEUESPERPAGEPF0_S +
			 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
			 adapter->pf);
		qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
		      SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
		num_seg = PAGE_SIZE / SEGMENT_SIZE;
		/* Each segment size is 128B.  Write coalescing is enabled
		 * only when the SGE_EGRESS_QUEUES_PER_PAGE_PF register value
		 * for the queue is less than the number of segments that can
		 * be accommodated in a page.
		 */
		if (qpp > num_seg) {
			dev_err(&pdev->dev,
				"Incorrect number of egress queues per page\n");
			err = -EINVAL;
			goto out_free_adapter;
		}
		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
					   pci_resource_len(pdev, 2));
		if (!adapter->bar2) {
			dev_err(&pdev->dev, "cannot map device bar2 region\n");
			err = -ENOMEM;
			goto out_free_adapter;
		}
	}

	setup_memwin(adapter);
	err = adap_init0(adapter);
#ifdef CONFIG_DEBUG_FS
	bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
#endif
	setup_memwin_rdma(adapter);
	if (err)
		goto out_unmap_bar;

	/* configure SGE_STAT_CFG_A to read WC stats */
	if (!is_t4(adapter->params.chip))
		t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) |
			     (is_t5(adapter->params.chip) ? STATMODE_V(0) :
			      T6_STATMODE_V(0)));

	for_each_port(adapter, i) {
		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
			NETIF_F_HW_TC;

		if (chip_ver > CHELSIO_T5) {
			netdev->hw_enc_features |= NETIF_F_IP_CSUM |
						   NETIF_F_IPV6_CSUM |
						   NETIF_F_RXCSUM |
						   NETIF_F_GSO_UDP_TUNNEL |
						   NETIF_F_TSO | NETIF_F_TSO6;

			netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
		}

		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		/* MTU range: 81 - 9600 */
		netdev->min_mtu = 81;              /* accommodate SACK */
		netdev->max_mtu = MAX_MTU;

		netdev->netdev_ops = &cxgb4_netdev_ops;
#ifdef CONFIG_CHELSIO_T4_DCB
		netdev->dcbnl_ops = &cxgb4_dcb_ops;
		cxgb4_dcb_state_init(netdev);
#endif
		cxgb4_set_ethtool_ops(netdev);
	}

	cxgb4_init_ethtool_dump(adapter);

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	} else if (adapter->params.nports == 1) {
		/* If we don't have a connection to the firmware -- possibly
		 * because of an error -- grab the raw VPD parameters so we
		 * can set the proper MAC Address on the debug network
		 * interface that we've created.
		 */
		u8 hw_addr[ETH_ALEN];
		u8 *na = adapter->params.vpd.na;

		err = t4_get_raw_vpd_params(adapter, &adapter->params.vpd);
		if (!err) {
			for (i = 0; i < ETH_ALEN; i++)
				hw_addr[i] = (hex2val(na[2 * i + 0]) * 16 +
					      hex2val(na[2 * i + 1]));
			t4_set_hw_addr(adapter, 0, hw_addr);
		}
	}

	/* Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->smt = t4_init_smt();
	if (!adapter->smt) {
		/* We tolerate a lack of SMT, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate SMT, continuing\n");
	}

	adapter->l2t = t4_init_l2t(adapter->l2t_start, adapter->l2t_end);
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	adapter->mps_encap = kvcalloc(adapter->params.arch.mps_tcam_size,
				      sizeof(struct mps_encap_entry),
				      GFP_KERNEL);
	if (!adapter->mps_encap)
		dev_warn(&pdev->dev, "could not allocate MPS Encap entries, continuing\n");

#if IS_ENABLED(CONFIG_IPV6)
	if (chip_ver <= CHELSIO_T5 &&
	    (!(t4_read_reg(adapter, LE_DB_CONFIG_A) & ASLIPCOMPEN_F))) {
		/* CLIP functionality is not present in hardware,
		 * hence disable all offload features
		 */
		dev_warn(&pdev->dev,
			 "CLIP not enabled in hardware, continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
						  adapter->clipt_end);
		if (!adapter->clipt) {
			/* We tolerate a lack of clip_table, giving up
			 * some functionality
			 */
			dev_warn(&pdev->dev,
				 "could not allocate Clip table, continuing\n");
			adapter->params.offload = 0;
		}
	}
#endif

	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		pi->sched_tbl = t4_init_sched(adapter->params.nsched_cls);
		if (!pi->sched_tbl)
			dev_warn(&pdev->dev,
				 "could not activate scheduling on port %d\n",
				 i);
	}

	if (tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	} else {
		adapter->tc_u32 = cxgb4_init_tc_u32(adapter);
		if (!adapter->tc_u32)
			dev_warn(&pdev->dev,
				 "could not offload tc u32, continuing\n");

		if (cxgb4_init_tc_flower(adapter))
			dev_warn(&pdev->dev,
				 "could not offload tc flower, continuing\n");
	}

	if (is_offload(adapter) || is_hashfilter(adapter)) {
		if (t4_read_reg(adapter, LE_DB_CONFIG_A) & HASHEN_F) {
			u32 hash_base, hash_reg;

			if (chip_ver <= CHELSIO_T5) {
				hash_reg = LE_DB_TID_HASHBASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base / 4;
			} else {
				hash_reg = T6_LE_DB_HASH_TID_BASE_A;
				hash_base = t4_read_reg(adapter, hash_reg);
				adapter->tids.hash_base = hash_base;
			}
		}
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0) {
		adapter->flags |= USING_MSI;
		if (msi > 1)
			free_msix_info(adapter);
	}

	/* check for PCI Express bandwidth capabilities */
	pcie_print_link_status(pdev);

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	err = setup_fw_sge_queues(adapter);
	if (err) {
		dev_err(adapter->pdev_dev,
			"FW sge queue allocation failed, err %d", err);
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		adapter->port[i]->dev_port = pi->lport;
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		netif_carrier_off(adapter->port[i]);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_uld(adapter)) {
		mutex_lock(&uld_mutex);
		list_add_tail(&adapter->list_node, &adapter_list);
		mutex_unlock(&uld_mutex);
	}

	if (!is_t4(adapter->params.chip))
		cxgb4_ptp_init(adapter);

	print_adapter_info(adapter);
	return 0;

 out_free_dev:
	t4_free_sge_resources(adapter);
	free_some_resources(adapter);
	if (adapter->flags & USING_MSIX)
		free_msix_info(adapter);
	if (adapter->num_uld || adapter->num_ofld_uld)
		t4_uld_mem_free(adapter);
 out_unmap_bar:
	if (!is_t4(adapter->params.chip))
		iounmap(adapter->bar2);
 out_free_adapter:
	if (adapter->workq)
		destroy_workqueue(adapter->workq);

	kfree(adapter->mbox_log);
	kfree(adapter);
 out_unmap_bar0:
	iounmap(regs);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	return err;
}

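/* remove_one - PCI remove routine.  For the PF that completed the full
 * init_one() bring-up (PF4) this unwinds everything that was set up: work
 * queue, ULDs, filters, net devices, interrupts and tables; for any other
 * PF it only tears down whatever SR-IOV VFs were instantiated.
 */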
static void remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	if (!adapter) {
		pci_release_regions(pdev);
		return;
	}

	adapter->flags |= SHUTTING_DOWN;

	if (adapter->pf == 4) {
		int i;

		/* Tear down per-adapter Work Queue first since it can contain
		 * references to our adapter data structure.
		 */
		destroy_workqueue(adapter->workq);

		if (is_uld(adapter)) {
			detach_ulds(adapter);
			t4_uld_clean_up(adapter);
		}

		adap_free_hma_mem(adapter);

		disable_interrupts(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		debugfs_remove_recursive(adapter->debugfs_root);

		if (!is_t4(adapter->params.chip))
			cxgb4_ptp_stop(adapter);

		/* If we allocated filters, free up state associated with any
		 * valid filters ...
		 */
		clear_all_filters(adapter);

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		if (adapter->flags & USING_MSIX)
			free_msix_info(adapter);
		if (adapter->num_uld || adapter->num_ofld_uld)
			t4_uld_mem_free(adapter);
		free_some_resources(adapter);
#if IS_ENABLED(CONFIG_IPV6)
		t4_cleanup_clip_tbl(adapter);
#endif
		if (!is_t4(adapter->params.chip))
			iounmap(adapter->bar2);
	}
#ifdef CONFIG_PCI_IOV
	else {
		cxgb4_iov_configure(adapter->pdev, 0);
	}
#endif
	iounmap(adapter->regs);
	pci_disable_pcie_error_reporting(pdev);
	if ((adapter->flags & DEV_ENABLED)) {
		pci_disable_device(pdev);
		adapter->flags &= ~DEV_ENABLED;
	}
	pci_release_regions(pdev);
	kfree(adapter->mbox_log);
	synchronize_rcu();
	kfree(adapter);
}

/* "Shutdown" quiesces the device, stopping Ingress Packet and Interrupt
 * delivery.  This is essentially a stripped down version of the PCI remove()
 * function where we do the minimal amount of work necessary to shutdown any
 * further activity.
 */
static void shutdown_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	/* As with remove_one() above (see extended comment), we only want to
	 * do cleanup on PCI Devices which went all the way through init_one()
	 * ...
	 */
	if (!adapter) {
		pci_release_regions(pdev);
		return;
	}

	adapter->flags |= SHUTTING_DOWN;

	if (adapter->pf == 4) {
		int i;

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				cxgb_close(adapter->port[i]);

		if (is_uld(adapter)) {
			detach_ulds(adapter);
			t4_uld_clean_up(adapter);
		}

		disable_interrupts(adapter);
		disable_msi(adapter);

		t4_sge_stop(adapter);
		if (adapter->flags & FW_OK)
			t4_fw_bye(adapter, adapter->mbox);
	}
}

static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = remove_one,
	.shutdown = shutdown_one,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = cxgb4_iov_configure,
#endif
	.err_handler = &cxgb4_eeh,
};

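/* Module load/unload: create the optional debugfs root, register the PCI
 * driver, and register/unregister the inet6 address notifier when IPv6 is
 * enabled.
 */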
static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warn("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);

#if IS_ENABLED(CONFIG_IPV6)
	if (!inet6addr_registered) {
		register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = true;
	}
#endif

	return ret;
}

static void __exit cxgb4_cleanup_module(void)
{
#if IS_ENABLED(CONFIG_IPV6)
	if (inet6addr_registered) {
		unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
		inet6addr_registered = false;
	}
#endif
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);