bnxt.c

/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2015 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
#include <net/vxlan.h>
#endif
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)

static const char version[] =
	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET (NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256

#define BNXT_TX_PUSH_THRESH 92

enum board_idx {
	BCM57302,
	BCM57304,
	BCM57404,
	BCM57406,
	BCM57304_VF,
	BCM57404_VF,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	{ "Broadcom BCM57302 NetXtreme-C Single-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
	{ "Broadcom BCM57304 NetXtreme-C Dual-port 10Gb/25Gb/40Gb/50Gb Ethernet" },
	{ "Broadcom BCM57404 NetXtreme-E Dual-port 10Gb/25Gb Ethernet" },
	{ "Broadcom BCM57406 NetXtreme-E Dual-port 10Gb Ethernet" },
	{ "Broadcom BCM57304 NetXtreme-C Ethernet Virtual Function" },
	{ "Broadcom BCM57404 NetXtreme-E Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = BCM57304_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = BCM57404_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == BCM57304_VF || idx == BCM57404_VF);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_REARM(db, raw_cons)	\
	writel(DB_CP_REARM_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB(db, raw_cons)	\
	writel(DB_CP_FLAGS | RING_CMP(raw_cons), db)

#define BNXT_CP_DB_IRQ_DIS(db)	\
	writel(DB_CP_IRQ_DIS_FLAGS, db)

static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
{
	/* Tell compiler to fetch tx indices from memory. */
	barrier();

	return bp->tx_ring_size -
		((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask);
}

static const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};
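
/* Transmit entry point.  When the TX ring is completely idle and the packet
 * fits within tx_push_thresh, the packet data is copied inline into the push
 * buffer and written straight through the doorbell with __iowrite64_copy();
 * otherwise the normal path below maps the head and each fragment for DMA
 * and builds long TX BDs on the ring.
 */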
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_napi *bnapi;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	bnapi = bp->bnapi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = 0;
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_bd *push = txr->tx_push;
		struct tx_bd *tx_push = &push->txbd1;
		struct tx_bd_ext *tx_push1 = &push->txbd2;
		void *pdata = tx_push1 + 1;
		int j;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
				TX_BD_TYPE_LONG_TX_BD |
				TX_BD_FLAGS_LHINT_512_AND_SMALLER |
				TX_BD_FLAGS_COAL_NOW |
				TX_BD_FLAGS_PACKET_END |
				(2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
				cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		memcpy(txbd, tx_push, sizeof(*txbd));
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		netdev_tx_sent_queue(txq, skb->len);

		__iowrite64_copy(txr->tx_doorbell, push,
				(length + sizeof(*push) + 8) / 8);

		tx_buf->is_push = 1;

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
			tx_buf->skb = NULL;
			return NETDEV_TX_OK;
		}
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		tx_buf->skb = NULL;
		return NETDEV_TX_OK;
	}

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = 0;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags =
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	writel(DB_KEY_TX | prod, txr->tx_doorbell);
	writel(DB_KEY_TX | prod, txr->tx_doorbell);

tx_done:

	mmiowb();

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnxt_tx_avail() below, because in
		 * bnxt_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();

		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			skb_frag_size(&skb_shinfo(skb)->frags[i]),
			PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
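
/* TX completion: reclaim nr_pkts completed packets from the TX ring,
 * unmapping their DMA buffers and freeing the skbs, then wake the queue
 * if it was stopped and enough descriptors have been freed.
 */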
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
	int index = bnapi->index;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				PCI_DMA_TODEVICE);
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
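
/* RX buffer helpers: allocate a data buffer, map it for DMA and point an
 * RX BD at it, or recycle an existing buffer back onto the ring.
 */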
static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return NULL;

	*mapping = dma_map_single(&pdev->dev, data + BNXT_RX_DMA_OFFSET,
				  bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		kfree(data);
		data = NULL;
	}
	return data;
}

static inline int bnxt_alloc_rx_data(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	u8 *data;
	dma_addr_t mapping;

	data = __bnxt_alloc_rx_data(bp, &mapping, gfp);
	if (!data)
		return -ENOMEM;

	rx_buf->data = data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);

	return 0;
}

static void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons,
			       u8 *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}
static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;

	page = alloc_page(gfp);
	if (!page)
		return -ENOMEM;

	mapping = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}

static void bnxt_reuse_rx_agg_bufs(struct bnxt_napi *bnapi, u16 cp_cons,
				   u32 agg_bufs)
{
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}
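
/* Build an skb around a completed RX buffer.  A replacement buffer is
 * allocated for the ring first; if that fails the old buffer is recycled
 * and the packet is dropped.
 */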
static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   u16 prod, u8 *data, dma_addr_t dma_addr,
				   unsigned int len)
{
	int err;
	struct sk_buff *skb;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, 0);
	dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);
	if (!skb) {
		kfree(data);
		return NULL;
	}

	skb_reserve(skb, BNXT_RX_OFFSET);
	skb_put(skb, len);
	return skb;
}

static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, struct bnxt_napi *bnapi,
				     struct sk_buff *skb, u16 cp_cons,
				     u32 agg_bufs)
{
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page, 0, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = dma_unmap_addr(cons_rx_buf, mapping);
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			dev_kfree_skb(skb);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs - i);
			return NULL;
		}

		dma_unmap_page(&pdev->dev, mapping, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping,
				bp->rx_copy_thresh, PCI_DMA_FROMDEVICE);

	memcpy(skb->data - BNXT_RX_OFFSET, data, len + BNXT_RX_OFFSET);

	dma_sync_single_for_device(&pdev->dev, mapping,
				   bp->rx_copy_thresh,
				   PCI_DMA_FROMDEVICE);

	skb_put(skb, len);
	return skb;
}
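
/* TPA (hardware aggregation) start: stash the buffer, length, hash and
 * metadata for this aggregation ID until the TPA_END completion arrives.
 */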
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	u8 agg_id = TPA_START_AGG_ID(tpa_start);
	u16 cons, prod;
	struct bnxt_tpa_info *tpa_info;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	prod_rx_buf->data = tpa_info->data;

	mapping = tpa_info->mapping;
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = dma_unmap_addr(cons_rx_buf, mapping);

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
			RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3)
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		if (netif_msg_rx_err(bp))
			netdev_warn(bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt *bp, struct bnxt_napi *bnapi,
			   u16 cp_cons, u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);
}
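
/* For aggregated packets handed to GRO, locate the inner TCP header,
 * recompute the TCP pseudo-header checksum and set the gso_size/gso_type
 * fields that the stack expects, including tunnel GSO types when the
 * aggregation is UDP-encapsulated.
 */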
#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

static inline struct sk_buff *bnxt_gro_skb(struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int payload_off, tcp_opt_len = 0;
	int len, nw_off;

	NAPI_GRO_CB(skb)->count = TPA_END_TPA_SEGS(tpa_end);
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
		      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
	if (TPA_END_GRO_TS(tpa_end))
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	tcp_gro_complete(skb);

	if (nw_off) { /* tunnel */
		struct udphdr *uh = NULL;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}
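
/* TPA end: turn the state saved at TPA_START plus any aggregation buffers
 * into a complete skb, then apply the RSS hash, VLAN tag and checksum
 * status, and hand GRO-marked aggregations to bnxt_gro_skb().
 */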
static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
					   struct bnxt_napi *bnapi,
					   u32 *raw_cons,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   bool *agg_event)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
	u8 agg_id = TPA_END_AGG_ID(tpa_end);
	u8 *data, agg_bufs;
	u16 cp_cons = RING_CMP(*raw_cons);
	unsigned int len;
	struct bnxt_tpa_info *tpa_info;
	dma_addr_t mapping;
	struct sk_buff *skb;

	tpa_info = &rxr->rx_tpa[agg_id];
	data = tpa_info->data;
	prefetch(data);
	len = tpa_info->len;
	mapping = tpa_info->mapping;

	agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
			return ERR_PTR(-EBUSY);

		*agg_event = true;
		cp_cons = NEXT_CMP(cp_cons);
	}

	if (unlikely(agg_bufs > MAX_SKB_FRAGS)) {
		bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
		netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
			    agg_bufs, (int)MAX_SKB_FRAGS);
		return NULL;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data, len, mapping);
		if (!skb) {
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}
	} else {
		u8 *new_data;
		dma_addr_t new_mapping;

		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
		if (!new_data) {
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}

		tpa_info->data = new_data;
		tpa_info->mapping = new_mapping;

		skb = build_skb(data, 0);
		dma_unmap_single(&bp->pdev->dev, mapping, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);

		if (!skb) {
			kfree(data);
			bnxt_abort_tpa(bp, bnapi, cp_cons, agg_bufs);
			return NULL;
		}
		skb_reserve(skb, BNXT_RX_OFFSET);
		skb_put(skb, len);
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
		if (!skb) {
			/* Page reuse already handled by bnxt_rx_pages(). */
			return NULL;
		}
	}
	skb->protocol = eth_type_trans(skb, bp->dev);

	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

	if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
		netdev_features_t features = skb->dev->features;
		u16 vlan_proto = tpa_info->metadata >>
			RX_CMP_FLAGS2_METADATA_TPID_SFT;

		if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
		     vlan_proto == ETH_P_8021Q) ||
		    ((features & NETIF_F_HW_VLAN_STAG_RX) &&
		     vlan_proto == ETH_P_8021AD)) {
			__vlan_hwaccel_put_tag(skb, htons(vlan_proto),
					       tpa_info->metadata &
					       RX_CMP_FLAGS2_METADATA_VID_MASK);
		}
	}

	skb_checksum_none_assert(skb);
	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level =
			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
	}

	if (TPA_END_GRO(tpa_end))
		skb = bnxt_gro_skb(tpa_info, tpa_end, tpa_end1, skb);

	return skb;
}
/* returns the following:
 * 1 - 1 packet successfully received
 * 0 - successful TPA_START, packet not completed yet
 * -EBUSY - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO - packet aborted due to hw error indicated in BD
 */
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
		       bool *agg_event)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
	struct net_device *dev = bp->dev;
	struct rx_cmp *rxcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 tmp_raw_cons = *raw_cons;
	u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
	struct bnxt_sw_rx_bd *rx_buf;
	unsigned int len;
	u8 *data, agg_bufs, cmp_type;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	int rc = 0;

	rxcmp = (struct rx_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	cmp_type = RX_CMP_TYPE(rxcmp);

	prod = rxr->rx_prod;

	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
			       (struct rx_tpa_start_cmp_ext *)rxcmp1);

		goto next_rx_no_prod;

	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		skb = bnxt_tpa_end(bp, bnapi, &tmp_raw_cons,
				   (struct rx_tpa_end_cmp *)rxcmp,
				   (struct rx_tpa_end_cmp_ext *)rxcmp1,
				   agg_event);

		if (unlikely(IS_ERR(skb)))
			return -EBUSY;

		rc = -ENOMEM;
		if (likely(skb)) {
			skb_record_rx_queue(skb, bnapi->index);
			skb_mark_napi_id(skb, &bnapi->napi);
			if (bnxt_busy_polling(bnapi))
				netif_receive_skb(skb);
			else
				napi_gro_receive(&bnapi->napi, skb);
			rc = 1;
		}
		goto next_rx_no_prod;
	}

	cons = rxcmp->rx_cmp_opaque;
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data;
	prefetch(data);

	agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) & RX_CMP_AGG_BUFS) >>
			RX_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;

		cp_cons = NEXT_CMP(cp_cons);
		*agg_event = true;
	}

	rx_buf->data = NULL;
	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
		bnxt_reuse_rx_data(rxr, cons, data);
		if (agg_bufs)
			bnxt_reuse_rx_agg_bufs(bnapi, cp_cons, agg_bufs);

		rc = -EIO;
		goto next_rx;
	}

	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	dma_addr = dma_unmap_addr(rx_buf, mapping);

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data, len, dma_addr);
		bnxt_reuse_rx_data(rxr, cons, data);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	} else {
		skb = bnxt_rx_skb(bp, rxr, cons, prod, data, dma_addr, len);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, bnapi, skb, cp_cons, agg_bufs);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (RX_CMP_HASH_VALID(rxcmp)) {
		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
		enum pkt_hash_types type = PKT_HASH_TYPE_L4;

		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type != 1 && hash_type != 3)
			type = PKT_HASH_TYPE_L3;
		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
	}

	skb->protocol = eth_type_trans(skb, dev);

	if (rxcmp1->rx_cmp_flags2 &
	    cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) {
		netdev_features_t features = skb->dev->features;
		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;

		if (((features & NETIF_F_HW_VLAN_CTAG_RX) &&
		     vlan_proto == ETH_P_8021Q) ||
		    ((features & NETIF_F_HW_VLAN_STAG_RX) &&
		     vlan_proto == ETH_P_8021AD))
			__vlan_hwaccel_put_tag(skb, htons(vlan_proto),
					       meta_data &
					       RX_CMP_FLAGS2_METADATA_VID_MASK);
	}

	skb_checksum_none_assert(skb);
	if (RX_CMP_L4_CS_OK(rxcmp1)) {
		if (dev->features & NETIF_F_RXCSUM) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
		}
	} else {
		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS)
			cpr->rx_l4_csum_errors++;
	}

	skb_record_rx_queue(skb, bnapi->index);
	skb_mark_napi_id(skb, &bnapi->napi);
	if (bnxt_busy_polling(bnapi))
		netif_receive_skb(skb);
	else
		napi_gro_receive(&bnapi->napi, skb);
	rc = 1;

next_rx:
	rxr->rx_prod = NEXT_RX(prod);

next_rx_no_prod:
	*raw_cons = tmp_raw_cons;

	return rc;
}
  999. static int bnxt_async_event_process(struct bnxt *bp,
  1000. struct hwrm_async_event_cmpl *cmpl)
  1001. {
  1002. u16 event_id = le16_to_cpu(cmpl->event_id);
  1003. /* TODO CHIMP_FW: Define event id's for link change, error etc */
  1004. switch (event_id) {
  1005. case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
  1006. set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
  1007. schedule_work(&bp->sp_task);
  1008. break;
  1009. default:
  1010. netdev_err(bp->dev, "unhandled ASYNC event (id 0x%x)\n",
  1011. event_id);
  1012. break;
  1013. }
  1014. return 0;
  1015. }
  1016. static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
  1017. {
  1018. u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
  1019. struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
  1020. struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
  1021. (struct hwrm_fwd_req_cmpl *)txcmp;
  1022. switch (cmpl_type) {
  1023. case CMPL_BASE_TYPE_HWRM_DONE:
  1024. seq_id = le16_to_cpu(h_cmpl->sequence_id);
  1025. if (seq_id == bp->hwrm_intr_seq_id)
  1026. bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
  1027. else
  1028. netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
  1029. break;
  1030. case CMPL_BASE_TYPE_HWRM_FWD_REQ:
  1031. vf_id = le16_to_cpu(fwd_req_cmpl->source_id);
  1032. if ((vf_id < bp->pf.first_vf_id) ||
  1033. (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
  1034. netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
  1035. vf_id);
  1036. return -EINVAL;
  1037. }
  1038. set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
  1039. set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
  1040. schedule_work(&bp->sp_task);
  1041. break;
  1042. case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
  1043. bnxt_async_event_process(bp,
  1044. (struct hwrm_async_event_cmpl *)txcmp);
  1045. default:
  1046. break;
  1047. }
  1048. return 0;
  1049. }
  1050. static irqreturn_t bnxt_msix(int irq, void *dev_instance)
  1051. {
  1052. struct bnxt_napi *bnapi = dev_instance;
  1053. struct bnxt *bp = bnapi->bp;
  1054. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  1055. u32 cons = RING_CMP(cpr->cp_raw_cons);
  1056. prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
  1057. napi_schedule(&bnapi->napi);
  1058. return IRQ_HANDLED;
  1059. }
  1060. static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
  1061. {
  1062. u32 raw_cons = cpr->cp_raw_cons;
  1063. u16 cons = RING_CMP(raw_cons);
  1064. struct tx_cmp *txcmp;
  1065. txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
  1066. return TX_CMP_VALID(txcmp, raw_cons);
  1067. }
  1068. static irqreturn_t bnxt_inta(int irq, void *dev_instance)
  1069. {
  1070. struct bnxt_napi *bnapi = dev_instance;
  1071. struct bnxt *bp = bnapi->bp;
  1072. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  1073. u32 cons = RING_CMP(cpr->cp_raw_cons);
  1074. u32 int_status;
  1075. prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
  1076. if (!bnxt_has_work(bp, cpr)) {
  1077. int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
  1078. /* return if erroneous interrupt */
  1079. if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
  1080. return IRQ_NONE;
  1081. }
  1082. /* disable ring IRQ */
  1083. BNXT_CP_DB_IRQ_DIS(cpr->cp_doorbell);
  1084. /* Return here if interrupt is shared and is disabled. */
  1085. if (unlikely(atomic_read(&bp->intr_sem) != 0))
  1086. return IRQ_HANDLED;
  1087. napi_schedule(&bnapi->napi);
  1088. return IRQ_HANDLED;
  1089. }
  1090. static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
  1091. {
  1092. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  1093. u32 raw_cons = cpr->cp_raw_cons;
  1094. u32 cons;
  1095. int tx_pkts = 0;
  1096. int rx_pkts = 0;
  1097. bool rx_event = false;
  1098. bool agg_event = false;
  1099. struct tx_cmp *txcmp;
  1100. while (1) {
  1101. int rc;
  1102. cons = RING_CMP(raw_cons);
  1103. txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];
  1104. if (!TX_CMP_VALID(txcmp, raw_cons))
  1105. break;
  1106. if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
  1107. tx_pkts++;
  1108. /* return full budget so NAPI will complete. */
  1109. if (unlikely(tx_pkts > bp->tx_wake_thresh))
  1110. rx_pkts = budget;
  1111. } else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
  1112. rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
  1113. if (likely(rc >= 0))
  1114. rx_pkts += rc;
  1115. else if (rc == -EBUSY) /* partial completion */
  1116. break;
  1117. rx_event = true;
  1118. } else if (unlikely((TX_CMP_TYPE(txcmp) ==
  1119. CMPL_BASE_TYPE_HWRM_DONE) ||
  1120. (TX_CMP_TYPE(txcmp) ==
  1121. CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
  1122. (TX_CMP_TYPE(txcmp) ==
  1123. CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
  1124. bnxt_hwrm_handler(bp, txcmp);
  1125. }
  1126. raw_cons = NEXT_RAW_CMP(raw_cons);
  1127. if (rx_pkts == budget)
  1128. break;
  1129. }
  1130. cpr->cp_raw_cons = raw_cons;
  1131. /* ACK completion ring before freeing tx ring and producing new
  1132. * buffers in rx/agg rings to prevent overflowing the completion
  1133. * ring.
  1134. */
  1135. BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
  1136. if (tx_pkts)
  1137. bnxt_tx_int(bp, bnapi, tx_pkts);
  1138. if (rx_event) {
  1139. struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
  1140. writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
  1141. writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
  1142. if (agg_event) {
  1143. writel(DB_KEY_RX | rxr->rx_agg_prod,
  1144. rxr->rx_agg_doorbell);
  1145. writel(DB_KEY_RX | rxr->rx_agg_prod,
  1146. rxr->rx_agg_doorbell);
  1147. }
  1148. }
  1149. return rx_pkts;
  1150. }
  1151. static int bnxt_poll(struct napi_struct *napi, int budget)
  1152. {
  1153. struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
  1154. struct bnxt *bp = bnapi->bp;
  1155. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  1156. int work_done = 0;
  1157. if (!bnxt_lock_napi(bnapi))
  1158. return budget;
  1159. while (1) {
  1160. work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
  1161. if (work_done >= budget)
  1162. break;
  1163. if (!bnxt_has_work(bp, cpr)) {
  1164. napi_complete(napi);
  1165. BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
  1166. break;
  1167. }
  1168. }
  1169. mmiowb();
  1170. bnxt_unlock_napi(bnapi);
  1171. return work_done;
  1172. }
  1173. #ifdef CONFIG_NET_RX_BUSY_POLL
  1174. static int bnxt_busy_poll(struct napi_struct *napi)
  1175. {
  1176. struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
  1177. struct bnxt *bp = bnapi->bp;
  1178. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  1179. int rx_work, budget = 4;
  1180. if (atomic_read(&bp->intr_sem) != 0)
  1181. return LL_FLUSH_FAILED;
  1182. if (!bnxt_lock_poll(bnapi))
  1183. return LL_FLUSH_BUSY;
  1184. rx_work = bnxt_poll_work(bp, bnapi, budget);
  1185. BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
  1186. bnxt_unlock_poll(bnapi);
  1187. return rx_work;
  1188. }
  1189. #endif
  1190. static void bnxt_free_tx_skbs(struct bnxt *bp)
  1191. {
  1192. int i, max_idx;
  1193. struct pci_dev *pdev = bp->pdev;
  1194. if (!bp->bnapi)
  1195. return;
  1196. max_idx = bp->tx_nr_pages * TX_DESC_CNT;
  1197. for (i = 0; i < bp->tx_nr_rings; i++) {
  1198. struct bnxt_napi *bnapi = bp->bnapi[i];
  1199. struct bnxt_tx_ring_info *txr;
  1200. int j;
  1201. if (!bnapi)
  1202. continue;
  1203. txr = &bnapi->tx_ring;
  1204. for (j = 0; j < max_idx;) {
  1205. struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
  1206. struct sk_buff *skb = tx_buf->skb;
  1207. int k, last;
  1208. if (!skb) {
  1209. j++;
  1210. continue;
  1211. }
  1212. tx_buf->skb = NULL;
  1213. if (tx_buf->is_push) {
  1214. dev_kfree_skb(skb);
  1215. j += 2;
  1216. continue;
  1217. }
  1218. dma_unmap_single(&pdev->dev,
  1219. dma_unmap_addr(tx_buf, mapping),
  1220. skb_headlen(skb),
  1221. PCI_DMA_TODEVICE);
  1222. last = tx_buf->nr_frags;
  1223. j += 2;
  1224. for (k = 0; k < last; k++, j = NEXT_TX(j)) {
  1225. skb_frag_t *frag = &skb_shinfo(skb)->frags[k];
  1226. tx_buf = &txr->tx_buf_ring[j];
  1227. dma_unmap_page(
  1228. &pdev->dev,
  1229. dma_unmap_addr(tx_buf, mapping),
  1230. skb_frag_size(frag), PCI_DMA_TODEVICE);
  1231. }
  1232. dev_kfree_skb(skb);
  1233. }
  1234. netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
  1235. }
  1236. }
  1237. static void bnxt_free_rx_skbs(struct bnxt *bp)
  1238. {
  1239. int i, max_idx, max_agg_idx;
  1240. struct pci_dev *pdev = bp->pdev;
  1241. if (!bp->bnapi)
  1242. return;
  1243. max_idx = bp->rx_nr_pages * RX_DESC_CNT;
  1244. max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
  1245. for (i = 0; i < bp->rx_nr_rings; i++) {
  1246. struct bnxt_napi *bnapi = bp->bnapi[i];
  1247. struct bnxt_rx_ring_info *rxr;
  1248. int j;
  1249. if (!bnapi)
  1250. continue;
  1251. rxr = &bnapi->rx_ring;
  1252. if (rxr->rx_tpa) {
  1253. for (j = 0; j < MAX_TPA; j++) {
  1254. struct bnxt_tpa_info *tpa_info =
  1255. &rxr->rx_tpa[j];
  1256. u8 *data = tpa_info->data;
  1257. if (!data)
  1258. continue;
  1259. dma_unmap_single(
  1260. &pdev->dev,
  1261. dma_unmap_addr(tpa_info, mapping),
  1262. bp->rx_buf_use_size,
  1263. PCI_DMA_FROMDEVICE);
  1264. tpa_info->data = NULL;
  1265. kfree(data);
  1266. }
  1267. }
  1268. for (j = 0; j < max_idx; j++) {
  1269. struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
  1270. u8 *data = rx_buf->data;
  1271. if (!data)
  1272. continue;
  1273. dma_unmap_single(&pdev->dev,
  1274. dma_unmap_addr(rx_buf, mapping),
  1275. bp->rx_buf_use_size,
  1276. PCI_DMA_FROMDEVICE);
  1277. rx_buf->data = NULL;
  1278. kfree(data);
  1279. }
  1280. for (j = 0; j < max_agg_idx; j++) {
  1281. struct bnxt_sw_rx_agg_bd *rx_agg_buf =
  1282. &rxr->rx_agg_ring[j];
  1283. struct page *page = rx_agg_buf->page;
  1284. if (!page)
  1285. continue;
  1286. dma_unmap_page(&pdev->dev,
  1287. dma_unmap_addr(rx_agg_buf, mapping),
  1288. PAGE_SIZE, PCI_DMA_FROMDEVICE);
  1289. rx_agg_buf->page = NULL;
  1290. __clear_bit(j, rxr->rx_agg_bmap);
  1291. __free_page(page);
  1292. }
  1293. }
  1294. }
  1295. static void bnxt_free_skbs(struct bnxt *bp)
  1296. {
  1297. bnxt_free_tx_skbs(bp);
  1298. bnxt_free_rx_skbs(bp);
  1299. }
  1300. static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
  1301. {
  1302. struct pci_dev *pdev = bp->pdev;
  1303. int i;
  1304. for (i = 0; i < ring->nr_pages; i++) {
  1305. if (!ring->pg_arr[i])
  1306. continue;
  1307. dma_free_coherent(&pdev->dev, ring->page_size,
  1308. ring->pg_arr[i], ring->dma_arr[i]);
  1309. ring->pg_arr[i] = NULL;
  1310. }
  1311. if (ring->pg_tbl) {
  1312. dma_free_coherent(&pdev->dev, ring->nr_pages * 8,
  1313. ring->pg_tbl, ring->pg_tbl_map);
  1314. ring->pg_tbl = NULL;
  1315. }
  1316. if (ring->vmem_size && *ring->vmem) {
  1317. vfree(*ring->vmem);
  1318. *ring->vmem = NULL;
  1319. }
  1320. }
  1321. static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_struct *ring)
  1322. {
  1323. int i;
  1324. struct pci_dev *pdev = bp->pdev;
  1325. if (ring->nr_pages > 1) {
  1326. ring->pg_tbl = dma_alloc_coherent(&pdev->dev,
  1327. ring->nr_pages * 8,
  1328. &ring->pg_tbl_map,
  1329. GFP_KERNEL);
  1330. if (!ring->pg_tbl)
  1331. return -ENOMEM;
  1332. }
  1333. for (i = 0; i < ring->nr_pages; i++) {
  1334. ring->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
  1335. ring->page_size,
  1336. &ring->dma_arr[i],
  1337. GFP_KERNEL);
  1338. if (!ring->pg_arr[i])
  1339. return -ENOMEM;
  1340. if (ring->nr_pages > 1)
  1341. ring->pg_tbl[i] = cpu_to_le64(ring->dma_arr[i]);
  1342. }
  1343. if (ring->vmem_size) {
  1344. *ring->vmem = vzalloc(ring->vmem_size);
  1345. if (!(*ring->vmem))
  1346. return -ENOMEM;
  1347. }
  1348. return 0;
  1349. }
  1350. static void bnxt_free_rx_rings(struct bnxt *bp)
  1351. {
  1352. int i;
  1353. if (!bp->bnapi)
  1354. return;
  1355. for (i = 0; i < bp->rx_nr_rings; i++) {
  1356. struct bnxt_napi *bnapi = bp->bnapi[i];
  1357. struct bnxt_rx_ring_info *rxr;
  1358. struct bnxt_ring_struct *ring;
  1359. if (!bnapi)
  1360. continue;
  1361. rxr = &bnapi->rx_ring;
  1362. kfree(rxr->rx_tpa);
  1363. rxr->rx_tpa = NULL;
  1364. kfree(rxr->rx_agg_bmap);
  1365. rxr->rx_agg_bmap = NULL;
  1366. ring = &rxr->rx_ring_struct;
  1367. bnxt_free_ring(bp, ring);
  1368. ring = &rxr->rx_agg_ring_struct;
  1369. bnxt_free_ring(bp, ring);
  1370. }
  1371. }
  1372. static int bnxt_alloc_rx_rings(struct bnxt *bp)
  1373. {
  1374. int i, rc, agg_rings = 0, tpa_rings = 0;
  1375. if (bp->flags & BNXT_FLAG_AGG_RINGS)
  1376. agg_rings = 1;
  1377. if (bp->flags & BNXT_FLAG_TPA)
  1378. tpa_rings = 1;
  1379. for (i = 0; i < bp->rx_nr_rings; i++) {
  1380. struct bnxt_napi *bnapi = bp->bnapi[i];
  1381. struct bnxt_rx_ring_info *rxr;
  1382. struct bnxt_ring_struct *ring;
  1383. if (!bnapi)
  1384. continue;
  1385. rxr = &bnapi->rx_ring;
  1386. ring = &rxr->rx_ring_struct;
  1387. rc = bnxt_alloc_ring(bp, ring);
  1388. if (rc)
  1389. return rc;
  1390. if (agg_rings) {
  1391. u16 mem_size;
  1392. ring = &rxr->rx_agg_ring_struct;
  1393. rc = bnxt_alloc_ring(bp, ring);
  1394. if (rc)
  1395. return rc;
  1396. rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
  1397. mem_size = rxr->rx_agg_bmap_size / 8;
  1398. rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
  1399. if (!rxr->rx_agg_bmap)
  1400. return -ENOMEM;
  1401. if (tpa_rings) {
  1402. rxr->rx_tpa = kcalloc(MAX_TPA,
  1403. sizeof(struct bnxt_tpa_info),
  1404. GFP_KERNEL);
  1405. if (!rxr->rx_tpa)
  1406. return -ENOMEM;
  1407. }
  1408. }
  1409. }
  1410. return 0;
  1411. }
  1412. static void bnxt_free_tx_rings(struct bnxt *bp)
  1413. {
  1414. int i;
  1415. struct pci_dev *pdev = bp->pdev;
  1416. if (!bp->bnapi)
  1417. return;
  1418. for (i = 0; i < bp->tx_nr_rings; i++) {
  1419. struct bnxt_napi *bnapi = bp->bnapi[i];
  1420. struct bnxt_tx_ring_info *txr;
  1421. struct bnxt_ring_struct *ring;
  1422. if (!bnapi)
  1423. continue;
  1424. txr = &bnapi->tx_ring;
  1425. if (txr->tx_push) {
  1426. dma_free_coherent(&pdev->dev, bp->tx_push_size,
  1427. txr->tx_push, txr->tx_push_mapping);
  1428. txr->tx_push = NULL;
  1429. }
  1430. ring = &txr->tx_ring_struct;
  1431. bnxt_free_ring(bp, ring);
  1432. }
  1433. }
  1434. static int bnxt_alloc_tx_rings(struct bnxt *bp)
  1435. {
  1436. int i, j, rc;
  1437. struct pci_dev *pdev = bp->pdev;
  1438. bp->tx_push_size = 0;
  1439. if (bp->tx_push_thresh) {
  1440. int push_size;
  1441. push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
  1442. bp->tx_push_thresh);
  1443. if (push_size > 128) {
  1444. push_size = 0;
  1445. bp->tx_push_thresh = 0;
  1446. }
  1447. bp->tx_push_size = push_size;
  1448. }
  1449. for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
  1450. struct bnxt_napi *bnapi = bp->bnapi[i];
  1451. struct bnxt_tx_ring_info *txr;
  1452. struct bnxt_ring_struct *ring;
  1453. if (!bnapi)
  1454. continue;
  1455. txr = &bnapi->tx_ring;
  1456. ring = &txr->tx_ring_struct;
  1457. rc = bnxt_alloc_ring(bp, ring);
  1458. if (rc)
  1459. return rc;
  1460. if (bp->tx_push_size) {
  1461. struct tx_bd *txbd;
  1462. dma_addr_t mapping;
  1463. /* One pre-allocated DMA buffer to backup
  1464. * TX push operation
  1465. */
  1466. txr->tx_push = dma_alloc_coherent(&pdev->dev,
  1467. bp->tx_push_size,
  1468. &txr->tx_push_mapping,
  1469. GFP_KERNEL);
  1470. if (!txr->tx_push)
  1471. return -ENOMEM;
  1472. txbd = &txr->tx_push->txbd1;
  1473. mapping = txr->tx_push_mapping +
  1474. sizeof(struct tx_push_bd);
  1475. txbd->tx_bd_haddr = cpu_to_le64(mapping);
  1476. memset(txbd + 1, 0, sizeof(struct tx_bd_ext));
  1477. }
  1478. ring->queue_id = bp->q_info[j].queue_id;
  1479. if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
  1480. j++;
  1481. }
  1482. return 0;
  1483. }
  1484. static void bnxt_free_cp_rings(struct bnxt *bp)
  1485. {
  1486. int i;
  1487. if (!bp->bnapi)
  1488. return;
  1489. for (i = 0; i < bp->cp_nr_rings; i++) {
  1490. struct bnxt_napi *bnapi = bp->bnapi[i];
  1491. struct bnxt_cp_ring_info *cpr;
  1492. struct bnxt_ring_struct *ring;
  1493. if (!bnapi)
  1494. continue;
  1495. cpr = &bnapi->cp_ring;
  1496. ring = &cpr->cp_ring_struct;
  1497. bnxt_free_ring(bp, ring);
  1498. }
  1499. }
  1500. static int bnxt_alloc_cp_rings(struct bnxt *bp)
  1501. {
  1502. int i, rc;
  1503. for (i = 0; i < bp->cp_nr_rings; i++) {
  1504. struct bnxt_napi *bnapi = bp->bnapi[i];
  1505. struct bnxt_cp_ring_info *cpr;
  1506. struct bnxt_ring_struct *ring;
  1507. if (!bnapi)
  1508. continue;
  1509. cpr = &bnapi->cp_ring;
  1510. ring = &cpr->cp_ring_struct;
  1511. rc = bnxt_alloc_ring(bp, ring);
  1512. if (rc)
  1513. return rc;
  1514. }
  1515. return 0;
  1516. }
  1517. static void bnxt_init_ring_struct(struct bnxt *bp)
  1518. {
  1519. int i;
  1520. for (i = 0; i < bp->cp_nr_rings; i++) {
  1521. struct bnxt_napi *bnapi = bp->bnapi[i];
  1522. struct bnxt_cp_ring_info *cpr;
  1523. struct bnxt_rx_ring_info *rxr;
  1524. struct bnxt_tx_ring_info *txr;
  1525. struct bnxt_ring_struct *ring;
  1526. if (!bnapi)
  1527. continue;
  1528. cpr = &bnapi->cp_ring;
  1529. ring = &cpr->cp_ring_struct;
  1530. ring->nr_pages = bp->cp_nr_pages;
  1531. ring->page_size = HW_CMPD_RING_SIZE;
  1532. ring->pg_arr = (void **)cpr->cp_desc_ring;
  1533. ring->dma_arr = cpr->cp_desc_mapping;
  1534. ring->vmem_size = 0;
  1535. rxr = &bnapi->rx_ring;
  1536. ring = &rxr->rx_ring_struct;
  1537. ring->nr_pages = bp->rx_nr_pages;
  1538. ring->page_size = HW_RXBD_RING_SIZE;
  1539. ring->pg_arr = (void **)rxr->rx_desc_ring;
  1540. ring->dma_arr = rxr->rx_desc_mapping;
  1541. ring->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
  1542. ring->vmem = (void **)&rxr->rx_buf_ring;
  1543. ring = &rxr->rx_agg_ring_struct;
  1544. ring->nr_pages = bp->rx_agg_nr_pages;
  1545. ring->page_size = HW_RXBD_RING_SIZE;
  1546. ring->pg_arr = (void **)rxr->rx_agg_desc_ring;
  1547. ring->dma_arr = rxr->rx_agg_desc_mapping;
  1548. ring->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
  1549. ring->vmem = (void **)&rxr->rx_agg_ring;
  1550. txr = &bnapi->tx_ring;
  1551. ring = &txr->tx_ring_struct;
  1552. ring->nr_pages = bp->tx_nr_pages;
  1553. ring->page_size = HW_RXBD_RING_SIZE;
  1554. ring->pg_arr = (void **)txr->tx_desc_ring;
  1555. ring->dma_arr = txr->tx_desc_mapping;
  1556. ring->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
  1557. ring->vmem = (void **)&txr->tx_buf_ring;
  1558. }
  1559. }
  1560. static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
  1561. {
  1562. int i;
  1563. u32 prod;
  1564. struct rx_bd **rx_buf_ring;
  1565. rx_buf_ring = (struct rx_bd **)ring->pg_arr;
  1566. for (i = 0, prod = 0; i < ring->nr_pages; i++) {
  1567. int j;
  1568. struct rx_bd *rxbd;
  1569. rxbd = rx_buf_ring[i];
  1570. if (!rxbd)
  1571. continue;
  1572. for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
  1573. rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
  1574. rxbd->rx_bd_opaque = prod;
  1575. }
  1576. }
  1577. }
  1578. static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
  1579. {
  1580. struct net_device *dev = bp->dev;
  1581. struct bnxt_napi *bnapi = bp->bnapi[ring_nr];
  1582. struct bnxt_rx_ring_info *rxr;
  1583. struct bnxt_ring_struct *ring;
  1584. u32 prod, type;
  1585. int i;
  1586. if (!bnapi)
  1587. return -EINVAL;
  1588. type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
  1589. RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;
  1590. if (NET_IP_ALIGN == 2)
  1591. type |= RX_BD_FLAGS_SOP;
  1592. rxr = &bnapi->rx_ring;
  1593. ring = &rxr->rx_ring_struct;
  1594. bnxt_init_rxbd_pages(ring, type);
  1595. prod = rxr->rx_prod;
  1596. for (i = 0; i < bp->rx_ring_size; i++) {
  1597. if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
  1598. netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
  1599. ring_nr, i, bp->rx_ring_size);
  1600. break;
  1601. }
  1602. prod = NEXT_RX(prod);
  1603. }
  1604. rxr->rx_prod = prod;
  1605. ring->fw_ring_id = INVALID_HW_RING_ID;
  1606. if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
  1607. return 0;
  1608. ring = &rxr->rx_agg_ring_struct;
  1609. type = ((u32)PAGE_SIZE << RX_BD_LEN_SHIFT) |
  1610. RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;
  1611. bnxt_init_rxbd_pages(ring, type);
  1612. prod = rxr->rx_agg_prod;
  1613. for (i = 0; i < bp->rx_agg_ring_size; i++) {
  1614. if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
  1615. netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
  1616. ring_nr, i, bp->rx_ring_size);
  1617. break;
  1618. }
  1619. prod = NEXT_RX_AGG(prod);
  1620. }
  1621. rxr->rx_agg_prod = prod;
  1622. ring->fw_ring_id = INVALID_HW_RING_ID;
  1623. if (bp->flags & BNXT_FLAG_TPA) {
  1624. if (rxr->rx_tpa) {
  1625. u8 *data;
  1626. dma_addr_t mapping;
  1627. for (i = 0; i < MAX_TPA; i++) {
  1628. data = __bnxt_alloc_rx_data(bp, &mapping,
  1629. GFP_KERNEL);
  1630. if (!data)
  1631. return -ENOMEM;
  1632. rxr->rx_tpa[i].data = data;
  1633. rxr->rx_tpa[i].mapping = mapping;
  1634. }
  1635. } else {
  1636. netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
  1637. return -ENOMEM;
  1638. }
  1639. }
  1640. return 0;
  1641. }
  1642. static int bnxt_init_rx_rings(struct bnxt *bp)
  1643. {
  1644. int i, rc = 0;
  1645. for (i = 0; i < bp->rx_nr_rings; i++) {
  1646. rc = bnxt_init_one_rx_ring(bp, i);
  1647. if (rc)
  1648. break;
  1649. }
  1650. return rc;
  1651. }
  1652. static int bnxt_init_tx_rings(struct bnxt *bp)
  1653. {
  1654. u16 i;
  1655. bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
  1656. MAX_SKB_FRAGS + 1);
  1657. for (i = 0; i < bp->tx_nr_rings; i++) {
  1658. struct bnxt_napi *bnapi = bp->bnapi[i];
  1659. struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
  1660. struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
  1661. ring->fw_ring_id = INVALID_HW_RING_ID;
  1662. }
  1663. return 0;
  1664. }
  1665. static void bnxt_free_ring_grps(struct bnxt *bp)
  1666. {
  1667. kfree(bp->grp_info);
  1668. bp->grp_info = NULL;
  1669. }
  1670. static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
  1671. {
  1672. int i;
  1673. if (irq_re_init) {
  1674. bp->grp_info = kcalloc(bp->cp_nr_rings,
  1675. sizeof(struct bnxt_ring_grp_info),
  1676. GFP_KERNEL);
  1677. if (!bp->grp_info)
  1678. return -ENOMEM;
  1679. }
  1680. for (i = 0; i < bp->cp_nr_rings; i++) {
  1681. if (irq_re_init)
  1682. bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
  1683. bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
  1684. bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
  1685. bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
  1686. bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
  1687. }
  1688. return 0;
  1689. }
  1690. static void bnxt_free_vnics(struct bnxt *bp)
  1691. {
  1692. kfree(bp->vnic_info);
  1693. bp->vnic_info = NULL;
  1694. bp->nr_vnics = 0;
  1695. }
  1696. static int bnxt_alloc_vnics(struct bnxt *bp)
  1697. {
  1698. int num_vnics = 1;
  1699. #ifdef CONFIG_RFS_ACCEL
  1700. if (bp->flags & BNXT_FLAG_RFS)
  1701. num_vnics += bp->rx_nr_rings;
  1702. #endif
  1703. bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
  1704. GFP_KERNEL);
  1705. if (!bp->vnic_info)
  1706. return -ENOMEM;
  1707. bp->nr_vnics = num_vnics;
  1708. return 0;
  1709. }
  1710. static void bnxt_init_vnics(struct bnxt *bp)
  1711. {
  1712. int i;
  1713. for (i = 0; i < bp->nr_vnics; i++) {
  1714. struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
  1715. vnic->fw_vnic_id = INVALID_HW_RING_ID;
  1716. vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
  1717. vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
  1718. if (bp->vnic_info[i].rss_hash_key) {
  1719. if (i == 0)
  1720. prandom_bytes(vnic->rss_hash_key,
  1721. HW_HASH_KEY_SIZE);
  1722. else
  1723. memcpy(vnic->rss_hash_key,
  1724. bp->vnic_info[0].rss_hash_key,
  1725. HW_HASH_KEY_SIZE);
  1726. }
  1727. }
  1728. }
  1729. static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
  1730. {
  1731. int pages;
  1732. pages = ring_size / desc_per_pg;
  1733. if (!pages)
  1734. return 1;
  1735. pages++;
  1736. while (pages & (pages - 1))
  1737. pages++;
  1738. return pages;
  1739. }
  1740. static void bnxt_set_tpa_flags(struct bnxt *bp)
  1741. {
  1742. bp->flags &= ~BNXT_FLAG_TPA;
  1743. if (bp->dev->features & NETIF_F_LRO)
  1744. bp->flags |= BNXT_FLAG_LRO;
  1745. if ((bp->dev->features & NETIF_F_GRO) && (bp->pdev->revision > 0))
  1746. bp->flags |= BNXT_FLAG_GRO;
  1747. }
  1748. /* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
  1749. * be set on entry.
  1750. */
  1751. void bnxt_set_ring_params(struct bnxt *bp)
  1752. {
  1753. u32 ring_size, rx_size, rx_space;
  1754. u32 agg_factor = 0, agg_ring_size = 0;
  1755. /* 8 for CRC and VLAN */
  1756. rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
  1757. rx_space = rx_size + NET_SKB_PAD +
  1758. SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
  1759. bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
  1760. ring_size = bp->rx_ring_size;
  1761. bp->rx_agg_ring_size = 0;
  1762. bp->rx_agg_nr_pages = 0;
  1763. if (bp->flags & BNXT_FLAG_TPA)
  1764. agg_factor = 4;
  1765. bp->flags &= ~BNXT_FLAG_JUMBO;
  1766. if (rx_space > PAGE_SIZE) {
  1767. u32 jumbo_factor;
  1768. bp->flags |= BNXT_FLAG_JUMBO;
  1769. jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
  1770. if (jumbo_factor > agg_factor)
  1771. agg_factor = jumbo_factor;
  1772. }
  1773. agg_ring_size = ring_size * agg_factor;
  1774. if (agg_ring_size) {
  1775. bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
  1776. RX_DESC_CNT);
  1777. if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
  1778. u32 tmp = agg_ring_size;
  1779. bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
  1780. agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
  1781. netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
  1782. tmp, agg_ring_size);
  1783. }
  1784. bp->rx_agg_ring_size = agg_ring_size;
  1785. bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
  1786. rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
  1787. rx_space = rx_size + NET_SKB_PAD +
  1788. SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
  1789. }
  1790. bp->rx_buf_use_size = rx_size;
  1791. bp->rx_buf_size = rx_space;
  1792. bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
  1793. bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
  1794. ring_size = bp->tx_ring_size;
  1795. bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
  1796. bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
  1797. ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
  1798. bp->cp_ring_size = ring_size;
  1799. bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
  1800. if (bp->cp_nr_pages > MAX_CP_PAGES) {
  1801. bp->cp_nr_pages = MAX_CP_PAGES;
  1802. bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
  1803. netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
  1804. ring_size, bp->cp_ring_size);
  1805. }
  1806. bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
  1807. bp->cp_ring_mask = bp->cp_bit - 1;
  1808. }
  1809. static void bnxt_free_vnic_attributes(struct bnxt *bp)
  1810. {
  1811. int i;
  1812. struct bnxt_vnic_info *vnic;
  1813. struct pci_dev *pdev = bp->pdev;
  1814. if (!bp->vnic_info)
  1815. return;
  1816. for (i = 0; i < bp->nr_vnics; i++) {
  1817. vnic = &bp->vnic_info[i];
  1818. kfree(vnic->fw_grp_ids);
  1819. vnic->fw_grp_ids = NULL;
  1820. kfree(vnic->uc_list);
  1821. vnic->uc_list = NULL;
  1822. if (vnic->mc_list) {
  1823. dma_free_coherent(&pdev->dev, vnic->mc_list_size,
  1824. vnic->mc_list, vnic->mc_list_mapping);
  1825. vnic->mc_list = NULL;
  1826. }
  1827. if (vnic->rss_table) {
  1828. dma_free_coherent(&pdev->dev, PAGE_SIZE,
  1829. vnic->rss_table,
  1830. vnic->rss_table_dma_addr);
  1831. vnic->rss_table = NULL;
  1832. }
  1833. vnic->rss_hash_key = NULL;
  1834. vnic->flags = 0;
  1835. }
  1836. }
  1837. static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
  1838. {
  1839. int i, rc = 0, size;
  1840. struct bnxt_vnic_info *vnic;
  1841. struct pci_dev *pdev = bp->pdev;
  1842. int max_rings;
  1843. for (i = 0; i < bp->nr_vnics; i++) {
  1844. vnic = &bp->vnic_info[i];
  1845. if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
  1846. int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
  1847. if (mem_size > 0) {
  1848. vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
  1849. if (!vnic->uc_list) {
  1850. rc = -ENOMEM;
  1851. goto out;
  1852. }
  1853. }
  1854. }
  1855. if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
  1856. vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
  1857. vnic->mc_list =
  1858. dma_alloc_coherent(&pdev->dev,
  1859. vnic->mc_list_size,
  1860. &vnic->mc_list_mapping,
  1861. GFP_KERNEL);
  1862. if (!vnic->mc_list) {
  1863. rc = -ENOMEM;
  1864. goto out;
  1865. }
  1866. }
  1867. if (vnic->flags & BNXT_VNIC_RSS_FLAG)
  1868. max_rings = bp->rx_nr_rings;
  1869. else
  1870. max_rings = 1;
  1871. vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
  1872. if (!vnic->fw_grp_ids) {
  1873. rc = -ENOMEM;
  1874. goto out;
  1875. }
  1876. /* Allocate rss table and hash key */
  1877. vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
  1878. &vnic->rss_table_dma_addr,
  1879. GFP_KERNEL);
  1880. if (!vnic->rss_table) {
  1881. rc = -ENOMEM;
  1882. goto out;
  1883. }
  1884. size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
  1885. vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
  1886. vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
  1887. }
  1888. return 0;
  1889. out:
  1890. return rc;
  1891. }
  1892. static void bnxt_free_hwrm_resources(struct bnxt *bp)
  1893. {
  1894. struct pci_dev *pdev = bp->pdev;
  1895. dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
  1896. bp->hwrm_cmd_resp_dma_addr);
  1897. bp->hwrm_cmd_resp_addr = NULL;
  1898. if (bp->hwrm_dbg_resp_addr) {
  1899. dma_free_coherent(&pdev->dev, HWRM_DBG_REG_BUF_SIZE,
  1900. bp->hwrm_dbg_resp_addr,
  1901. bp->hwrm_dbg_resp_dma_addr);
  1902. bp->hwrm_dbg_resp_addr = NULL;
  1903. }
  1904. }
  1905. static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
  1906. {
  1907. struct pci_dev *pdev = bp->pdev;
  1908. bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
  1909. &bp->hwrm_cmd_resp_dma_addr,
  1910. GFP_KERNEL);
  1911. if (!bp->hwrm_cmd_resp_addr)
  1912. return -ENOMEM;
  1913. bp->hwrm_dbg_resp_addr = dma_alloc_coherent(&pdev->dev,
  1914. HWRM_DBG_REG_BUF_SIZE,
  1915. &bp->hwrm_dbg_resp_dma_addr,
  1916. GFP_KERNEL);
  1917. if (!bp->hwrm_dbg_resp_addr)
  1918. netdev_warn(bp->dev, "fail to alloc debug register dma mem\n");
  1919. return 0;
  1920. }
  1921. static void bnxt_free_stats(struct bnxt *bp)
  1922. {
  1923. u32 size, i;
  1924. struct pci_dev *pdev = bp->pdev;
  1925. if (!bp->bnapi)
  1926. return;
  1927. size = sizeof(struct ctx_hw_stats);
  1928. for (i = 0; i < bp->cp_nr_rings; i++) {
  1929. struct bnxt_napi *bnapi = bp->bnapi[i];
  1930. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  1931. if (cpr->hw_stats) {
  1932. dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
  1933. cpr->hw_stats_map);
  1934. cpr->hw_stats = NULL;
  1935. }
  1936. }
  1937. }
  1938. static int bnxt_alloc_stats(struct bnxt *bp)
  1939. {
  1940. u32 size, i;
  1941. struct pci_dev *pdev = bp->pdev;
  1942. size = sizeof(struct ctx_hw_stats);
  1943. for (i = 0; i < bp->cp_nr_rings; i++) {
  1944. struct bnxt_napi *bnapi = bp->bnapi[i];
  1945. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  1946. cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
  1947. &cpr->hw_stats_map,
  1948. GFP_KERNEL);
  1949. if (!cpr->hw_stats)
  1950. return -ENOMEM;
  1951. cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
  1952. }
  1953. return 0;
  1954. }
  1955. static void bnxt_clear_ring_indices(struct bnxt *bp)
  1956. {
  1957. int i;
  1958. if (!bp->bnapi)
  1959. return;
  1960. for (i = 0; i < bp->cp_nr_rings; i++) {
  1961. struct bnxt_napi *bnapi = bp->bnapi[i];
  1962. struct bnxt_cp_ring_info *cpr;
  1963. struct bnxt_rx_ring_info *rxr;
  1964. struct bnxt_tx_ring_info *txr;
  1965. if (!bnapi)
  1966. continue;
  1967. cpr = &bnapi->cp_ring;
  1968. cpr->cp_raw_cons = 0;
  1969. txr = &bnapi->tx_ring;
  1970. txr->tx_prod = 0;
  1971. txr->tx_cons = 0;
  1972. rxr = &bnapi->rx_ring;
  1973. rxr->rx_prod = 0;
  1974. rxr->rx_agg_prod = 0;
  1975. rxr->rx_sw_agg_prod = 0;
  1976. }
  1977. }
  1978. static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
  1979. {
  1980. #ifdef CONFIG_RFS_ACCEL
  1981. int i;
  1982. /* Under rtnl_lock and all our NAPIs have been disabled. It's
  1983. * safe to delete the hash table.
  1984. */
  1985. for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
  1986. struct hlist_head *head;
  1987. struct hlist_node *tmp;
  1988. struct bnxt_ntuple_filter *fltr;
  1989. head = &bp->ntp_fltr_hash_tbl[i];
  1990. hlist_for_each_entry_safe(fltr, tmp, head, hash) {
  1991. hlist_del(&fltr->hash);
  1992. kfree(fltr);
  1993. }
  1994. }
  1995. if (irq_reinit) {
  1996. kfree(bp->ntp_fltr_bmap);
  1997. bp->ntp_fltr_bmap = NULL;
  1998. }
  1999. bp->ntp_fltr_count = 0;
  2000. #endif
  2001. }
  2002. static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
  2003. {
  2004. #ifdef CONFIG_RFS_ACCEL
  2005. int i, rc = 0;
  2006. if (!(bp->flags & BNXT_FLAG_RFS))
  2007. return 0;
  2008. for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
  2009. INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
  2010. bp->ntp_fltr_count = 0;
  2011. bp->ntp_fltr_bmap = kzalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
  2012. GFP_KERNEL);
  2013. if (!bp->ntp_fltr_bmap)
  2014. rc = -ENOMEM;
  2015. return rc;
  2016. #else
  2017. return 0;
  2018. #endif
  2019. }
  2020. static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
  2021. {
  2022. bnxt_free_vnic_attributes(bp);
  2023. bnxt_free_tx_rings(bp);
  2024. bnxt_free_rx_rings(bp);
  2025. bnxt_free_cp_rings(bp);
  2026. bnxt_free_ntp_fltrs(bp, irq_re_init);
  2027. if (irq_re_init) {
  2028. bnxt_free_stats(bp);
  2029. bnxt_free_ring_grps(bp);
  2030. bnxt_free_vnics(bp);
  2031. kfree(bp->bnapi);
  2032. bp->bnapi = NULL;
  2033. } else {
  2034. bnxt_clear_ring_indices(bp);
  2035. }
  2036. }
  2037. static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
  2038. {
  2039. int i, rc, size, arr_size;
  2040. void *bnapi;
  2041. if (irq_re_init) {
  2042. /* Allocate bnapi mem pointer array and mem block for
  2043. * all queues
  2044. */
  2045. arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
  2046. bp->cp_nr_rings);
  2047. size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
  2048. bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
  2049. if (!bnapi)
  2050. return -ENOMEM;
  2051. bp->bnapi = bnapi;
  2052. bnapi += arr_size;
  2053. for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
  2054. bp->bnapi[i] = bnapi;
  2055. bp->bnapi[i]->index = i;
  2056. bp->bnapi[i]->bp = bp;
  2057. }
  2058. rc = bnxt_alloc_stats(bp);
  2059. if (rc)
  2060. goto alloc_mem_err;
  2061. rc = bnxt_alloc_ntp_fltrs(bp);
  2062. if (rc)
  2063. goto alloc_mem_err;
  2064. rc = bnxt_alloc_vnics(bp);
  2065. if (rc)
  2066. goto alloc_mem_err;
  2067. }
  2068. bnxt_init_ring_struct(bp);
  2069. rc = bnxt_alloc_rx_rings(bp);
  2070. if (rc)
  2071. goto alloc_mem_err;
  2072. rc = bnxt_alloc_tx_rings(bp);
  2073. if (rc)
  2074. goto alloc_mem_err;
  2075. rc = bnxt_alloc_cp_rings(bp);
  2076. if (rc)
  2077. goto alloc_mem_err;
  2078. bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
  2079. BNXT_VNIC_UCAST_FLAG;
  2080. rc = bnxt_alloc_vnic_attributes(bp);
  2081. if (rc)
  2082. goto alloc_mem_err;
  2083. return 0;
  2084. alloc_mem_err:
  2085. bnxt_free_mem(bp, true);
  2086. return rc;
  2087. }
  2088. void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
  2089. u16 cmpl_ring, u16 target_id)
  2090. {
  2091. struct hwrm_cmd_req_hdr *req = request;
  2092. req->cmpl_ring_req_type =
  2093. cpu_to_le32(req_type | (cmpl_ring << HWRM_CMPL_RING_SFT));
  2094. req->target_id_seq_id = cpu_to_le32(target_id << HWRM_TARGET_FID_SFT);
  2095. req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
  2096. }
  2097. int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
  2098. {
  2099. int i, intr_process, rc;
  2100. struct hwrm_cmd_req_hdr *req = msg;
  2101. u32 *data = msg;
  2102. __le32 *resp_len, *valid;
  2103. u16 cp_ring_id, len = 0;
  2104. struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
  2105. req->target_id_seq_id |= cpu_to_le32(bp->hwrm_cmd_seq++);
  2106. memset(resp, 0, PAGE_SIZE);
  2107. cp_ring_id = (le32_to_cpu(req->cmpl_ring_req_type) &
  2108. HWRM_CMPL_RING_MASK) >>
  2109. HWRM_CMPL_RING_SFT;
  2110. intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
  2111. /* Write request msg to hwrm channel */
  2112. __iowrite32_copy(bp->bar0, data, msg_len / 4);
  2113. /* currently supports only one outstanding message */
  2114. if (intr_process)
  2115. bp->hwrm_intr_seq_id = le32_to_cpu(req->target_id_seq_id) &
  2116. HWRM_SEQ_ID_MASK;
  2117. /* Ring channel doorbell */
  2118. writel(1, bp->bar0 + 0x100);
  2119. i = 0;
  2120. if (intr_process) {
  2121. /* Wait until hwrm response cmpl interrupt is processed */
  2122. while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
  2123. i++ < timeout) {
  2124. usleep_range(600, 800);
  2125. }
  2126. if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
  2127. netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
  2128. req->cmpl_ring_req_type);
  2129. return -1;
  2130. }
  2131. } else {
  2132. /* Check if response len is updated */
  2133. resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
  2134. for (i = 0; i < timeout; i++) {
  2135. len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
  2136. HWRM_RESP_LEN_SFT;
  2137. if (len)
  2138. break;
  2139. usleep_range(600, 800);
  2140. }
  2141. if (i >= timeout) {
  2142. netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
  2143. timeout, req->cmpl_ring_req_type,
  2144. req->target_id_seq_id, *resp_len);
  2145. return -1;
  2146. }
  2147. /* Last word of resp contains valid bit */
  2148. valid = bp->hwrm_cmd_resp_addr + len - 4;
  2149. for (i = 0; i < timeout; i++) {
  2150. if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
  2151. break;
  2152. usleep_range(600, 800);
  2153. }
  2154. if (i >= timeout) {
  2155. netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
  2156. timeout, req->cmpl_ring_req_type,
  2157. req->target_id_seq_id, len, *valid);
  2158. return -1;
  2159. }
  2160. }
  2161. rc = le16_to_cpu(resp->error_code);
  2162. if (rc) {
  2163. netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
  2164. le16_to_cpu(resp->req_type),
  2165. le16_to_cpu(resp->seq_id), rc);
  2166. return rc;
  2167. }
  2168. return 0;
  2169. }
  2170. int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
  2171. {
  2172. int rc;
  2173. mutex_lock(&bp->hwrm_cmd_lock);
  2174. rc = _hwrm_send_message(bp, msg, msg_len, timeout);
  2175. mutex_unlock(&bp->hwrm_cmd_lock);
  2176. return rc;
  2177. }
  2178. static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
  2179. {
  2180. struct hwrm_func_drv_rgtr_input req = {0};
  2181. int i;
  2182. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
  2183. req.enables =
  2184. cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
  2185. FUNC_DRV_RGTR_REQ_ENABLES_VER |
  2186. FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
  2187. /* TODO: current async event fwd bits are not defined and the firmware
  2188. * only checks if it is non-zero to enable async event forwarding
  2189. */
  2190. req.async_event_fwd[0] |= cpu_to_le32(1);
  2191. req.os_type = cpu_to_le16(1);
  2192. req.ver_maj = DRV_VER_MAJ;
  2193. req.ver_min = DRV_VER_MIN;
  2194. req.ver_upd = DRV_VER_UPD;
  2195. if (BNXT_PF(bp)) {
  2196. unsigned long vf_req_snif_bmap[4];
  2197. u32 *data = (u32 *)vf_req_snif_bmap;
  2198. memset(vf_req_snif_bmap, 0, 32);
  2199. for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
  2200. __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);
  2201. for (i = 0; i < 8; i++) {
  2202. req.vf_req_fwd[i] = cpu_to_le32(*data);
  2203. data++;
  2204. }
  2205. req.enables |=
  2206. cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
  2207. }
  2208. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2209. }
  2210. static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
  2211. {
  2212. u32 rc = 0;
  2213. struct hwrm_tunnel_dst_port_free_input req = {0};
  2214. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
  2215. req.tunnel_type = tunnel_type;
  2216. switch (tunnel_type) {
  2217. case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
  2218. req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
  2219. break;
  2220. case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
  2221. req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
  2222. break;
  2223. default:
  2224. break;
  2225. }
  2226. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2227. if (rc)
  2228. netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
  2229. rc);
  2230. return rc;
  2231. }
  2232. static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
  2233. u8 tunnel_type)
  2234. {
  2235. u32 rc = 0;
  2236. struct hwrm_tunnel_dst_port_alloc_input req = {0};
  2237. struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
  2238. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
  2239. req.tunnel_type = tunnel_type;
  2240. req.tunnel_dst_port_val = port;
  2241. mutex_lock(&bp->hwrm_cmd_lock);
  2242. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2243. if (rc) {
  2244. netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
  2245. rc);
  2246. goto err_out;
  2247. }
  2248. if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN)
  2249. bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
  2250. else if (tunnel_type & TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE)
  2251. bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
  2252. err_out:
  2253. mutex_unlock(&bp->hwrm_cmd_lock);
  2254. return rc;
  2255. }
  2256. static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
  2257. {
  2258. struct hwrm_cfa_l2_set_rx_mask_input req = {0};
  2259. struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
  2260. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
  2261. req.dflt_vnic_id = cpu_to_le32(vnic->fw_vnic_id);
  2262. req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
  2263. req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
  2264. req.mask = cpu_to_le32(vnic->rx_mask);
  2265. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2266. }
  2267. #ifdef CONFIG_RFS_ACCEL
  2268. static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
  2269. struct bnxt_ntuple_filter *fltr)
  2270. {
  2271. struct hwrm_cfa_ntuple_filter_free_input req = {0};
  2272. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
  2273. req.ntuple_filter_id = fltr->filter_id;
  2274. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2275. }
  2276. #define BNXT_NTP_FLTR_FLAGS \
  2277. (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
  2278. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
  2279. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
  2280. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
  2281. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
  2282. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
  2283. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
  2284. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
  2285. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
  2286. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
  2287. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
  2288. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
  2289. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
  2290. CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID)
  2291. static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
  2292. struct bnxt_ntuple_filter *fltr)
  2293. {
  2294. int rc = 0;
  2295. struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
  2296. struct hwrm_cfa_ntuple_filter_alloc_output *resp =
  2297. bp->hwrm_cmd_resp_addr;
  2298. struct flow_keys *keys = &fltr->fkeys;
  2299. struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
  2300. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
  2301. req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[0];
  2302. req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
  2303. req.ethertype = htons(ETH_P_IP);
  2304. memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
  2305. req.ipaddr_type = 4;
  2306. req.ip_protocol = keys->basic.ip_proto;
  2307. req.src_ipaddr[0] = keys->addrs.v4addrs.src;
  2308. req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
  2309. req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
  2310. req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
  2311. req.src_port = keys->ports.src;
  2312. req.src_port_mask = cpu_to_be16(0xffff);
  2313. req.dst_port = keys->ports.dst;
  2314. req.dst_port_mask = cpu_to_be16(0xffff);
  2315. req.dst_vnic_id = cpu_to_le16(vnic->fw_vnic_id);
  2316. mutex_lock(&bp->hwrm_cmd_lock);
  2317. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2318. if (!rc)
  2319. fltr->filter_id = resp->ntuple_filter_id;
  2320. mutex_unlock(&bp->hwrm_cmd_lock);
  2321. return rc;
  2322. }
  2323. #endif
  2324. static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
  2325. u8 *mac_addr)
  2326. {
  2327. u32 rc = 0;
  2328. struct hwrm_cfa_l2_filter_alloc_input req = {0};
  2329. struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
  2330. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
  2331. req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX |
  2332. CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
  2333. req.dst_vnic_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
  2334. req.enables =
  2335. cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
  2336. CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_VNIC_ID |
  2337. CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
  2338. memcpy(req.l2_addr, mac_addr, ETH_ALEN);
  2339. req.l2_addr_mask[0] = 0xff;
  2340. req.l2_addr_mask[1] = 0xff;
  2341. req.l2_addr_mask[2] = 0xff;
  2342. req.l2_addr_mask[3] = 0xff;
  2343. req.l2_addr_mask[4] = 0xff;
  2344. req.l2_addr_mask[5] = 0xff;
  2345. mutex_lock(&bp->hwrm_cmd_lock);
  2346. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2347. if (!rc)
  2348. bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
  2349. resp->l2_filter_id;
  2350. mutex_unlock(&bp->hwrm_cmd_lock);
  2351. return rc;
  2352. }
  2353. static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
  2354. {
  2355. u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
  2356. int rc = 0;
  2357. /* Any associated ntuple filters will also be cleared by firmware. */
  2358. mutex_lock(&bp->hwrm_cmd_lock);
  2359. for (i = 0; i < num_of_vnics; i++) {
  2360. struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
  2361. for (j = 0; j < vnic->uc_filter_count; j++) {
  2362. struct hwrm_cfa_l2_filter_free_input req = {0};
  2363. bnxt_hwrm_cmd_hdr_init(bp, &req,
  2364. HWRM_CFA_L2_FILTER_FREE, -1, -1);
  2365. req.l2_filter_id = vnic->fw_l2_filter_id[j];
  2366. rc = _hwrm_send_message(bp, &req, sizeof(req),
  2367. HWRM_CMD_TIMEOUT);
  2368. }
  2369. vnic->uc_filter_count = 0;
  2370. }
  2371. mutex_unlock(&bp->hwrm_cmd_lock);
  2372. return rc;
  2373. }
  2374. static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
  2375. {
  2376. struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
  2377. struct hwrm_vnic_tpa_cfg_input req = {0};
  2378. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
  2379. if (tpa_flags) {
  2380. u16 mss = bp->dev->mtu - 40;
  2381. u32 nsegs, n, segs = 0, flags;
  2382. flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
  2383. VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
  2384. VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
  2385. VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
  2386. VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
  2387. if (tpa_flags & BNXT_FLAG_GRO)
  2388. flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
  2389. req.flags = cpu_to_le32(flags);
  2390. req.enables =
  2391. cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
  2392. VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS);
  2393. /* Number of segs are log2 units, and first packet is not
  2394. * included as part of this units.
  2395. */
  2396. if (mss <= PAGE_SIZE) {
  2397. n = PAGE_SIZE / mss;
  2398. nsegs = (MAX_SKB_FRAGS - 1) * n;
  2399. } else {
  2400. n = mss / PAGE_SIZE;
  2401. if (mss & (PAGE_SIZE - 1))
  2402. n++;
  2403. nsegs = (MAX_SKB_FRAGS - n) / n;
  2404. }
  2405. segs = ilog2(nsegs);
  2406. req.max_agg_segs = cpu_to_le16(segs);
  2407. req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
  2408. }
  2409. req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
  2410. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2411. }
  2412. static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
  2413. {
  2414. u32 i, j, max_rings;
  2415. struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
  2416. struct hwrm_vnic_rss_cfg_input req = {0};
  2417. if (vnic->fw_rss_cos_lb_ctx == INVALID_HW_RING_ID)
  2418. return 0;
  2419. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
  2420. if (set_rss) {
  2421. vnic->hash_type = BNXT_RSS_HASH_TYPE_FLAG_IPV4 |
  2422. BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV4 |
  2423. BNXT_RSS_HASH_TYPE_FLAG_IPV6 |
  2424. BNXT_RSS_HASH_TYPE_FLAG_TCP_IPV6;
  2425. req.hash_type = cpu_to_le32(vnic->hash_type);
  2426. if (vnic->flags & BNXT_VNIC_RSS_FLAG)
  2427. max_rings = bp->rx_nr_rings;
  2428. else
  2429. max_rings = 1;
  2430. /* Fill the RSS indirection table with ring group ids */
  2431. for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
  2432. if (j == max_rings)
  2433. j = 0;
  2434. vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
  2435. }
  2436. req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
  2437. req.hash_key_tbl_addr =
  2438. cpu_to_le64(vnic->rss_hash_key_dma_addr);
  2439. }
  2440. req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
  2441. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2442. }
  2443. static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
  2444. {
  2445. struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
  2446. struct hwrm_vnic_plcmodes_cfg_input req = {0};
  2447. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
  2448. req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
  2449. VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
  2450. VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
  2451. req.enables =
  2452. cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
  2453. VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
  2454. /* thresholds not implemented in firmware yet */
  2455. req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
  2456. req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
  2457. req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
  2458. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2459. }
  2460. static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id)
  2461. {
  2462. struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};
  2463. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
  2464. req.rss_cos_lb_ctx_id =
  2465. cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx);
  2466. hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2467. bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
  2468. }
  2469. static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
  2470. {
  2471. int i;
  2472. for (i = 0; i < bp->nr_vnics; i++) {
  2473. struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
  2474. if (vnic->fw_rss_cos_lb_ctx != INVALID_HW_RING_ID)
  2475. bnxt_hwrm_vnic_ctx_free_one(bp, i);
  2476. }
  2477. bp->rsscos_nr_ctxs = 0;
  2478. }
  2479. static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id)
  2480. {
  2481. int rc;
  2482. struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
  2483. struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
  2484. bp->hwrm_cmd_resp_addr;
  2485. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
  2486. -1);
  2487. mutex_lock(&bp->hwrm_cmd_lock);
  2488. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2489. if (!rc)
  2490. bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx =
  2491. le16_to_cpu(resp->rss_cos_lb_ctx_id);
  2492. mutex_unlock(&bp->hwrm_cmd_lock);
  2493. return rc;
  2494. }
  2495. static int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
  2496. {
  2497. int grp_idx = 0;
  2498. struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
  2499. struct hwrm_vnic_cfg_input req = {0};
  2500. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);
  2501. /* Only RSS support for now TBD: COS & LB */
  2502. req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP |
  2503. VNIC_CFG_REQ_ENABLES_RSS_RULE);
  2504. req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx);
  2505. req.cos_rule = cpu_to_le16(0xffff);
  2506. if (vnic->flags & BNXT_VNIC_RSS_FLAG)
  2507. grp_idx = 0;
  2508. else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
  2509. grp_idx = vnic_id - 1;
  2510. req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
  2511. req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
  2512. req.lb_rule = cpu_to_le16(0xffff);
  2513. req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
  2514. VLAN_HLEN);
  2515. if (bp->flags & BNXT_FLAG_STRIP_VLAN)
  2516. req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
  2517. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2518. }
  2519. static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
  2520. {
  2521. u32 rc = 0;
  2522. if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
  2523. struct hwrm_vnic_free_input req = {0};
  2524. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
  2525. req.vnic_id =
  2526. cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);
  2527. rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2528. if (rc)
  2529. return rc;
  2530. bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
  2531. }
  2532. return rc;
  2533. }
  2534. static void bnxt_hwrm_vnic_free(struct bnxt *bp)
  2535. {
  2536. u16 i;
  2537. for (i = 0; i < bp->nr_vnics; i++)
  2538. bnxt_hwrm_vnic_free_one(bp, i);
  2539. }
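/* Allocate a VNIC in firmware and record the returned fw_vnic_id.  The ring
 * groups in [start_grp_id, end_grp_id) are mapped to this VNIC, and
 * vnic_id 0 is flagged as the default VNIC.
 */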
  2540. static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, u16 start_grp_id,
  2541. u16 end_grp_id)
  2542. {
  2543. u32 rc = 0, i, j;
  2544. struct hwrm_vnic_alloc_input req = {0};
  2545. struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
  2546. /* map ring groups to this vnic */
  2547. for (i = start_grp_id, j = 0; i < end_grp_id; i++, j++) {
  2548. if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) {
  2549. netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
  2550. j, (end_grp_id - start_grp_id));
  2551. break;
  2552. }
  2553. bp->vnic_info[vnic_id].fw_grp_ids[j] =
  2554. bp->grp_info[i].fw_grp_id;
  2555. }
  2556. bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
  2557. if (vnic_id == 0)
  2558. req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);
  2559. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);
  2560. mutex_lock(&bp->hwrm_cmd_lock);
  2561. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2562. if (!rc)
  2563. bp->vnic_info[vnic_id].fw_vnic_id = le32_to_cpu(resp->vnic_id);
  2564. mutex_unlock(&bp->hwrm_cmd_lock);
  2565. return rc;
  2566. }
  2567. static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
  2568. {
  2569. u16 i;
  2570. u32 rc = 0;
  2571. mutex_lock(&bp->hwrm_cmd_lock);
  2572. for (i = 0; i < bp->rx_nr_rings; i++) {
  2573. struct hwrm_ring_grp_alloc_input req = {0};
  2574. struct hwrm_ring_grp_alloc_output *resp =
  2575. bp->hwrm_cmd_resp_addr;
  2576. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);
  2577. req.cr = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
  2578. req.rr = cpu_to_le16(bp->grp_info[i].rx_fw_ring_id);
  2579. req.ar = cpu_to_le16(bp->grp_info[i].agg_fw_ring_id);
  2580. req.sc = cpu_to_le16(bp->grp_info[i].fw_stats_ctx);
  2581. rc = _hwrm_send_message(bp, &req, sizeof(req),
  2582. HWRM_CMD_TIMEOUT);
  2583. if (rc)
  2584. break;
  2585. bp->grp_info[i].fw_grp_id = le32_to_cpu(resp->ring_group_id);
  2586. }
  2587. mutex_unlock(&bp->hwrm_cmd_lock);
  2588. return rc;
  2589. }
  2590. static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
  2591. {
  2592. u16 i;
  2593. u32 rc = 0;
  2594. struct hwrm_ring_grp_free_input req = {0};
  2595. if (!bp->grp_info)
  2596. return 0;
  2597. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);
  2598. mutex_lock(&bp->hwrm_cmd_lock);
  2599. for (i = 0; i < bp->cp_nr_rings; i++) {
  2600. if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
  2601. continue;
  2602. req.ring_group_id =
  2603. cpu_to_le32(bp->grp_info[i].fw_grp_id);
  2604. rc = _hwrm_send_message(bp, &req, sizeof(req),
  2605. HWRM_CMD_TIMEOUT);
  2606. if (rc)
  2607. break;
  2608. bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
  2609. }
  2610. mutex_unlock(&bp->hwrm_cmd_lock);
  2611. return rc;
  2612. }
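/* Send HWRM_RING_ALLOC for one TX, RX, aggregation or completion ring.
 * Multi-page rings pass a page table; single-page rings pass the page
 * address directly.  On success the firmware ring id is stored in
 * ring->fw_ring_id.
 */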
  2613. static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
  2614. struct bnxt_ring_struct *ring,
  2615. u32 ring_type, u32 map_index,
  2616. u32 stats_ctx_id)
  2617. {
  2618. int rc = 0, err = 0;
  2619. struct hwrm_ring_alloc_input req = {0};
  2620. struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
  2621. u16 ring_id;
  2622. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
  2623. req.enables = 0;
  2624. if (ring->nr_pages > 1) {
  2625. req.page_tbl_addr = cpu_to_le64(ring->pg_tbl_map);
  2626. /* Page size is in log2 units */
  2627. req.page_size = BNXT_PAGE_SHIFT;
  2628. req.page_tbl_depth = 1;
  2629. } else {
  2630. req.page_tbl_addr = cpu_to_le64(ring->dma_arr[0]);
  2631. }
  2632. req.fbo = 0;
  2633. /* Association of ring index with doorbell index and MSIX number */
  2634. req.logical_id = cpu_to_le16(map_index);
  2635. switch (ring_type) {
  2636. case HWRM_RING_ALLOC_TX:
  2637. req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
  2638. /* Association of transmit ring with completion ring */
  2639. req.cmpl_ring_id =
  2640. cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
  2641. req.length = cpu_to_le32(bp->tx_ring_mask + 1);
  2642. req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
  2643. req.queue_id = cpu_to_le16(ring->queue_id);
  2644. break;
  2645. case HWRM_RING_ALLOC_RX:
  2646. req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
  2647. req.length = cpu_to_le32(bp->rx_ring_mask + 1);
  2648. break;
  2649. case HWRM_RING_ALLOC_AGG:
  2650. req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
  2651. req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
  2652. break;
  2653. case HWRM_RING_ALLOC_CMPL:
  2654. req.ring_type = RING_ALLOC_REQ_RING_TYPE_CMPL;
  2655. req.length = cpu_to_le32(bp->cp_ring_mask + 1);
  2656. if (bp->flags & BNXT_FLAG_USING_MSIX)
  2657. req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
  2658. break;
  2659. default:
  2660. netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
  2661. ring_type);
  2662. return -1;
  2663. }
  2664. mutex_lock(&bp->hwrm_cmd_lock);
  2665. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2666. err = le16_to_cpu(resp->error_code);
  2667. ring_id = le16_to_cpu(resp->ring_id);
  2668. mutex_unlock(&bp->hwrm_cmd_lock);
  2669. if (rc || err) {
  2670. switch (ring_type) {
  2671. case RING_FREE_REQ_RING_TYPE_CMPL:
  2672. netdev_err(bp->dev, "hwrm_ring_alloc cp failed. rc:%x err:%x\n",
  2673. rc, err);
  2674. return -1;
  2675. case RING_FREE_REQ_RING_TYPE_RX:
  2676. netdev_err(bp->dev, "hwrm_ring_alloc rx failed. rc:%x err:%x\n",
  2677. rc, err);
  2678. return -1;
  2679. case RING_FREE_REQ_RING_TYPE_TX:
  2680. netdev_err(bp->dev, "hwrm_ring_alloc tx failed. rc:%x err:%x\n",
  2681. rc, err);
  2682. return -1;
  2683. default:
  2684. netdev_err(bp->dev, "Invalid ring\n");
  2685. return -1;
  2686. }
  2687. }
  2688. ring->fw_ring_id = ring_id;
  2689. return rc;
  2690. }
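/* Allocate all firmware rings: completion rings first (their ids are needed
 * by the TX ring allocations and the ring groups), then TX, RX and, when
 * aggregation rings are enabled, the RX aggregation rings.  Doorbell
 * addresses are derived from BAR1 at 0x80-byte strides.
 */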
  2691. static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
  2692. {
  2693. int i, rc = 0;
  2694. if (bp->cp_nr_rings) {
  2695. for (i = 0; i < bp->cp_nr_rings; i++) {
  2696. struct bnxt_napi *bnapi = bp->bnapi[i];
  2697. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  2698. struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
  2699. rc = hwrm_ring_alloc_send_msg(bp, ring,
  2700. HWRM_RING_ALLOC_CMPL, i,
  2701. INVALID_STATS_CTX_ID);
  2702. if (rc)
  2703. goto err_out;
  2704. cpr->cp_doorbell = bp->bar1 + i * 0x80;
  2705. BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
  2706. bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
  2707. }
  2708. }
  2709. if (bp->tx_nr_rings) {
  2710. for (i = 0; i < bp->tx_nr_rings; i++) {
  2711. struct bnxt_napi *bnapi = bp->bnapi[i];
  2712. struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
  2713. struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
  2714. u16 fw_stats_ctx = bp->grp_info[i].fw_stats_ctx;
  2715. rc = hwrm_ring_alloc_send_msg(bp, ring,
  2716. HWRM_RING_ALLOC_TX, i,
  2717. fw_stats_ctx);
  2718. if (rc)
  2719. goto err_out;
  2720. txr->tx_doorbell = bp->bar1 + i * 0x80;
  2721. }
  2722. }
  2723. if (bp->rx_nr_rings) {
  2724. for (i = 0; i < bp->rx_nr_rings; i++) {
  2725. struct bnxt_napi *bnapi = bp->bnapi[i];
  2726. struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
  2727. struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
  2728. rc = hwrm_ring_alloc_send_msg(bp, ring,
  2729. HWRM_RING_ALLOC_RX, i,
  2730. INVALID_STATS_CTX_ID);
  2731. if (rc)
  2732. goto err_out;
  2733. rxr->rx_doorbell = bp->bar1 + i * 0x80;
  2734. writel(DB_KEY_RX | rxr->rx_prod, rxr->rx_doorbell);
  2735. bp->grp_info[i].rx_fw_ring_id = ring->fw_ring_id;
  2736. }
  2737. }
  2738. if (bp->flags & BNXT_FLAG_AGG_RINGS) {
  2739. for (i = 0; i < bp->rx_nr_rings; i++) {
  2740. struct bnxt_napi *bnapi = bp->bnapi[i];
  2741. struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
  2742. struct bnxt_ring_struct *ring =
  2743. &rxr->rx_agg_ring_struct;
  2744. rc = hwrm_ring_alloc_send_msg(bp, ring,
  2745. HWRM_RING_ALLOC_AGG,
  2746. bp->rx_nr_rings + i,
  2747. INVALID_STATS_CTX_ID);
  2748. if (rc)
  2749. goto err_out;
  2750. rxr->rx_agg_doorbell =
  2751. bp->bar1 + (bp->rx_nr_rings + i) * 0x80;
  2752. writel(DB_KEY_RX | rxr->rx_agg_prod,
  2753. rxr->rx_agg_doorbell);
  2754. bp->grp_info[i].agg_fw_ring_id = ring->fw_ring_id;
  2755. }
  2756. }
  2757. err_out:
  2758. return rc;
  2759. }
  2760. static int hwrm_ring_free_send_msg(struct bnxt *bp,
  2761. struct bnxt_ring_struct *ring,
  2762. u32 ring_type, int cmpl_ring_id)
  2763. {
  2764. int rc;
  2765. struct hwrm_ring_free_input req = {0};
  2766. struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
  2767. u16 error_code;
  2768. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, -1, -1);
  2769. req.ring_type = ring_type;
  2770. req.ring_id = cpu_to_le16(ring->fw_ring_id);
  2771. mutex_lock(&bp->hwrm_cmd_lock);
  2772. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2773. error_code = le16_to_cpu(resp->error_code);
  2774. mutex_unlock(&bp->hwrm_cmd_lock);
  2775. if (rc || error_code) {
  2776. switch (ring_type) {
  2777. case RING_FREE_REQ_RING_TYPE_CMPL:
  2778. netdev_err(bp->dev, "hwrm_ring_free cp failed. rc:%d\n",
  2779. rc);
  2780. return rc;
  2781. case RING_FREE_REQ_RING_TYPE_RX:
  2782. netdev_err(bp->dev, "hwrm_ring_free rx failed. rc:%d\n",
  2783. rc);
  2784. return rc;
  2785. case RING_FREE_REQ_RING_TYPE_TX:
  2786. netdev_err(bp->dev, "hwrm_ring_free tx failed. rc:%d\n",
  2787. rc);
  2788. return rc;
  2789. default:
  2790. netdev_err(bp->dev, "Invalid ring\n");
  2791. return -1;
  2792. }
  2793. }
  2794. return 0;
  2795. }
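/* Free all firmware rings and invalidate the cached ring ids.  TX, RX and
 * aggregation rings are freed before the completion rings they depend on.
 */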
  2796. static int bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
  2797. {
  2798. int i, rc = 0;
  2799. if (!bp->bnapi)
  2800. return 0;
  2801. if (bp->tx_nr_rings) {
  2802. for (i = 0; i < bp->tx_nr_rings; i++) {
  2803. struct bnxt_napi *bnapi = bp->bnapi[i];
  2804. struct bnxt_tx_ring_info *txr = &bnapi->tx_ring;
  2805. struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
  2806. u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
  2807. if (ring->fw_ring_id != INVALID_HW_RING_ID) {
  2808. hwrm_ring_free_send_msg(
  2809. bp, ring,
  2810. RING_FREE_REQ_RING_TYPE_TX,
  2811. close_path ? cmpl_ring_id :
  2812. INVALID_HW_RING_ID);
  2813. ring->fw_ring_id = INVALID_HW_RING_ID;
  2814. }
  2815. }
  2816. }
  2817. if (bp->rx_nr_rings) {
  2818. for (i = 0; i < bp->rx_nr_rings; i++) {
  2819. struct bnxt_napi *bnapi = bp->bnapi[i];
  2820. struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
  2821. struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
  2822. u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
  2823. if (ring->fw_ring_id != INVALID_HW_RING_ID) {
  2824. hwrm_ring_free_send_msg(
  2825. bp, ring,
  2826. RING_FREE_REQ_RING_TYPE_RX,
  2827. close_path ? cmpl_ring_id :
  2828. INVALID_HW_RING_ID);
  2829. ring->fw_ring_id = INVALID_HW_RING_ID;
  2830. bp->grp_info[i].rx_fw_ring_id =
  2831. INVALID_HW_RING_ID;
  2832. }
  2833. }
  2834. }
  2835. if (bp->rx_agg_nr_pages) {
  2836. for (i = 0; i < bp->rx_nr_rings; i++) {
  2837. struct bnxt_napi *bnapi = bp->bnapi[i];
  2838. struct bnxt_rx_ring_info *rxr = &bnapi->rx_ring;
  2839. struct bnxt_ring_struct *ring =
  2840. &rxr->rx_agg_ring_struct;
  2841. u32 cmpl_ring_id = bp->grp_info[i].cp_fw_ring_id;
  2842. if (ring->fw_ring_id != INVALID_HW_RING_ID) {
  2843. hwrm_ring_free_send_msg(
  2844. bp, ring,
  2845. RING_FREE_REQ_RING_TYPE_RX,
  2846. close_path ? cmpl_ring_id :
  2847. INVALID_HW_RING_ID);
  2848. ring->fw_ring_id = INVALID_HW_RING_ID;
  2849. bp->grp_info[i].agg_fw_ring_id =
  2850. INVALID_HW_RING_ID;
  2851. }
  2852. }
  2853. }
  2854. if (bp->cp_nr_rings) {
  2855. for (i = 0; i < bp->cp_nr_rings; i++) {
  2856. struct bnxt_napi *bnapi = bp->bnapi[i];
  2857. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  2858. struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
  2859. if (ring->fw_ring_id != INVALID_HW_RING_ID) {
  2860. hwrm_ring_free_send_msg(
  2861. bp, ring,
  2862. RING_FREE_REQ_RING_TYPE_CMPL,
  2863. INVALID_HW_RING_ID);
  2864. ring->fw_ring_id = INVALID_HW_RING_ID;
  2865. bp->grp_info[i].cp_fw_ring_id =
  2866. INVALID_HW_RING_ID;
  2867. }
  2868. }
  2869. }
  2870. return rc;
  2871. }
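/* Program interrupt coalescing parameters on every completion ring from the
 * coal_ticks/coal_bufs settings.  Buffer counts are clamped to 1-63, and
 * RING_IDLE is enabled for very low (< 25 us) timer settings to reduce
 * latency at the cost of more interrupts.
 */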
  2872. int bnxt_hwrm_set_coal(struct bnxt *bp)
  2873. {
  2874. int i, rc = 0;
  2875. struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
  2876. u16 max_buf, max_buf_irq;
  2877. u16 buf_tmr, buf_tmr_irq;
  2878. u32 flags;
  2879. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
  2880. -1, -1);
  2881. /* Each rx completion (2 records) should be DMAed immediately */
  2882. max_buf = min_t(u16, bp->coal_bufs / 4, 2);
  2883. /* max_buf must not be zero */
  2884. max_buf = clamp_t(u16, max_buf, 1, 63);
  2885. max_buf_irq = clamp_t(u16, bp->coal_bufs_irq, 1, 63);
  2886. buf_tmr = max_t(u16, bp->coal_ticks / 4, 1);
  2887. buf_tmr_irq = max_t(u16, bp->coal_ticks_irq, 1);
  2888. flags = RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
  2889. /* RING_IDLE generates more IRQs for lower latency. Enable it only
  2890. * if coal_ticks is less than 25 us.
  2891. */
  2892. if (BNXT_COAL_TIMER_TO_USEC(bp->coal_ticks) < 25)
  2893. flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
  2894. req.flags = cpu_to_le16(flags);
  2895. req.num_cmpl_dma_aggr = cpu_to_le16(max_buf);
  2896. req.num_cmpl_dma_aggr_during_int = cpu_to_le16(max_buf_irq);
  2897. req.cmpl_aggr_dma_tmr = cpu_to_le16(buf_tmr);
  2898. req.cmpl_aggr_dma_tmr_during_int = cpu_to_le16(buf_tmr_irq);
  2899. req.int_lat_tmr_min = cpu_to_le16(buf_tmr);
  2900. req.int_lat_tmr_max = cpu_to_le16(bp->coal_ticks);
  2901. req.num_cmpl_aggr_int = cpu_to_le16(bp->coal_bufs);
  2902. mutex_lock(&bp->hwrm_cmd_lock);
  2903. for (i = 0; i < bp->cp_nr_rings; i++) {
  2904. req.ring_id = cpu_to_le16(bp->grp_info[i].cp_fw_ring_id);
  2905. rc = _hwrm_send_message(bp, &req, sizeof(req),
  2906. HWRM_CMD_TIMEOUT);
  2907. if (rc)
  2908. break;
  2909. }
  2910. mutex_unlock(&bp->hwrm_cmd_lock);
  2911. return rc;
  2912. }
  2913. static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
  2914. {
  2915. int rc = 0, i;
  2916. struct hwrm_stat_ctx_free_input req = {0};
  2917. if (!bp->bnapi)
  2918. return 0;
  2919. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
  2920. mutex_lock(&bp->hwrm_cmd_lock);
  2921. for (i = 0; i < bp->cp_nr_rings; i++) {
  2922. struct bnxt_napi *bnapi = bp->bnapi[i];
  2923. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  2924. if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
  2925. req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
  2926. rc = _hwrm_send_message(bp, &req, sizeof(req),
  2927. HWRM_CMD_TIMEOUT);
  2928. if (rc)
  2929. break;
  2930. cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
  2931. }
  2932. }
  2933. mutex_unlock(&bp->hwrm_cmd_lock);
  2934. return rc;
  2935. }
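/* Allocate a statistics context for each completion ring and point it at
 * the ring's DMA stats buffer; firmware updates the counters every 1000 ms.
 */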
  2936. static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
  2937. {
  2938. int rc = 0, i;
  2939. struct hwrm_stat_ctx_alloc_input req = {0};
  2940. struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
  2941. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
  2942. req.update_period_ms = cpu_to_le32(1000);
  2943. mutex_lock(&bp->hwrm_cmd_lock);
  2944. for (i = 0; i < bp->cp_nr_rings; i++) {
  2945. struct bnxt_napi *bnapi = bp->bnapi[i];
  2946. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  2947. req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
  2948. rc = _hwrm_send_message(bp, &req, sizeof(req),
  2949. HWRM_CMD_TIMEOUT);
  2950. if (rc)
  2951. break;
  2952. cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
  2953. bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
  2954. }
  2955. mutex_unlock(&bp->hwrm_cmd_lock);
2956. return rc;
  2957. }
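/* Query function capabilities and cache the per-PF or per-VF resource
 * limits (rings, VNICs, contexts, VF ranges) and the permanent MAC address.
 * Also detect whether TX push mode is supported.
 */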
  2958. static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
  2959. {
  2960. int rc = 0;
  2961. struct hwrm_func_qcaps_input req = {0};
  2962. struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
  2963. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
  2964. req.fid = cpu_to_le16(0xffff);
  2965. mutex_lock(&bp->hwrm_cmd_lock);
  2966. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  2967. if (rc)
  2968. goto hwrm_func_qcaps_exit;
  2969. if (BNXT_PF(bp)) {
  2970. struct bnxt_pf_info *pf = &bp->pf;
  2971. pf->fw_fid = le16_to_cpu(resp->fid);
  2972. pf->port_id = le16_to_cpu(resp->port_id);
  2973. memcpy(pf->mac_addr, resp->perm_mac_address, ETH_ALEN);
  2974. pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
  2975. pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
  2976. pf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
  2977. pf->max_pf_tx_rings = pf->max_tx_rings;
  2978. pf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
  2979. pf->max_pf_rx_rings = pf->max_rx_rings;
  2980. pf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
  2981. pf->max_vnics = le16_to_cpu(resp->max_vnics);
  2982. pf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
  2983. pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
  2984. pf->max_vfs = le16_to_cpu(resp->max_vfs);
  2985. pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
  2986. pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
  2987. pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
  2988. pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
  2989. pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
  2990. pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
  2991. } else {
  2992. #ifdef CONFIG_BNXT_SRIOV
  2993. struct bnxt_vf_info *vf = &bp->vf;
  2994. vf->fw_fid = le16_to_cpu(resp->fid);
  2995. memcpy(vf->mac_addr, resp->perm_mac_address, ETH_ALEN);
  2996. if (!is_valid_ether_addr(vf->mac_addr))
  2997. random_ether_addr(vf->mac_addr);
  2998. vf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
  2999. vf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
  3000. vf->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
  3001. vf->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
  3002. vf->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
  3003. vf->max_vnics = le16_to_cpu(resp->max_vnics);
  3004. vf->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
  3005. #endif
  3006. }
  3007. bp->tx_push_thresh = 0;
  3008. if (resp->flags &
  3009. cpu_to_le32(FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED))
  3010. bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
  3011. hwrm_func_qcaps_exit:
  3012. mutex_unlock(&bp->hwrm_cmd_lock);
  3013. return rc;
  3014. }
  3015. static int bnxt_hwrm_func_reset(struct bnxt *bp)
  3016. {
  3017. struct hwrm_func_reset_input req = {0};
  3018. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
  3019. req.enables = 0;
  3020. return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
  3021. }
  3022. static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
  3023. {
  3024. int rc = 0;
  3025. struct hwrm_queue_qportcfg_input req = {0};
  3026. struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
  3027. u8 i, *qptr;
  3028. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
  3029. mutex_lock(&bp->hwrm_cmd_lock);
  3030. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3031. if (rc)
  3032. goto qportcfg_exit;
  3033. if (!resp->max_configurable_queues) {
  3034. rc = -EINVAL;
  3035. goto qportcfg_exit;
  3036. }
  3037. bp->max_tc = resp->max_configurable_queues;
  3038. if (bp->max_tc > BNXT_MAX_QUEUE)
  3039. bp->max_tc = BNXT_MAX_QUEUE;
  3040. qptr = &resp->queue_id0;
  3041. for (i = 0; i < bp->max_tc; i++) {
  3042. bp->q_info[i].queue_id = *qptr++;
  3043. bp->q_info[i].queue_profile = *qptr++;
  3044. }
  3045. qportcfg_exit:
  3046. mutex_unlock(&bp->hwrm_cmd_lock);
  3047. return rc;
  3048. }
  3049. static int bnxt_hwrm_ver_get(struct bnxt *bp)
  3050. {
  3051. int rc;
  3052. struct hwrm_ver_get_input req = {0};
  3053. struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
  3054. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
  3055. req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
  3056. req.hwrm_intf_min = HWRM_VERSION_MINOR;
  3057. req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
  3058. mutex_lock(&bp->hwrm_cmd_lock);
  3059. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3060. if (rc)
  3061. goto hwrm_ver_get_exit;
  3062. memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
  3063. if (req.hwrm_intf_maj != resp->hwrm_intf_maj ||
  3064. req.hwrm_intf_min != resp->hwrm_intf_min ||
  3065. req.hwrm_intf_upd != resp->hwrm_intf_upd) {
  3066. netdev_warn(bp->dev, "HWRM interface %d.%d.%d does not match driver interface %d.%d.%d.\n",
  3067. resp->hwrm_intf_maj, resp->hwrm_intf_min,
  3068. resp->hwrm_intf_upd, req.hwrm_intf_maj,
  3069. req.hwrm_intf_min, req.hwrm_intf_upd);
  3070. netdev_warn(bp->dev, "Please update driver or firmware with matching interface versions.\n");
  3071. }
  3072. snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "bc %d.%d.%d rm %d.%d.%d",
  3073. resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld,
  3074. resp->hwrm_intf_maj, resp->hwrm_intf_min, resp->hwrm_intf_upd);
  3075. hwrm_ver_get_exit:
  3076. mutex_unlock(&bp->hwrm_cmd_lock);
  3077. return rc;
  3078. }
  3079. static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
  3080. {
  3081. if (bp->vxlan_port_cnt) {
  3082. bnxt_hwrm_tunnel_dst_port_free(
  3083. bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
  3084. }
  3085. bp->vxlan_port_cnt = 0;
  3086. if (bp->nge_port_cnt) {
  3087. bnxt_hwrm_tunnel_dst_port_free(
  3088. bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
  3089. }
  3090. bp->nge_port_cnt = 0;
  3091. }
  3092. static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
  3093. {
  3094. int rc, i;
  3095. u32 tpa_flags = 0;
  3096. if (set_tpa)
  3097. tpa_flags = bp->flags & BNXT_FLAG_TPA;
  3098. for (i = 0; i < bp->nr_vnics; i++) {
  3099. rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
  3100. if (rc) {
  3101. netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
  3102. rc, i);
  3103. return rc;
  3104. }
  3105. }
  3106. return 0;
  3107. }
  3108. static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
  3109. {
  3110. int i;
  3111. for (i = 0; i < bp->nr_vnics; i++)
  3112. bnxt_hwrm_vnic_set_rss(bp, i, false);
  3113. }
  3114. static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
  3115. bool irq_re_init)
  3116. {
  3117. if (bp->vnic_info) {
  3118. bnxt_hwrm_clear_vnic_filter(bp);
3119. /* clear all RSS settings before freeing the vnic ctx */
  3120. bnxt_hwrm_clear_vnic_rss(bp);
  3121. bnxt_hwrm_vnic_ctx_free(bp);
3122. /* before freeing the vnic, undo the vnic tpa settings */
  3123. if (bp->flags & BNXT_FLAG_TPA)
  3124. bnxt_set_tpa(bp, false);
  3125. bnxt_hwrm_vnic_free(bp);
  3126. }
  3127. bnxt_hwrm_ring_free(bp, close_path);
  3128. bnxt_hwrm_ring_grp_free(bp);
  3129. if (irq_re_init) {
  3130. bnxt_hwrm_stat_ctx_free(bp);
  3131. bnxt_hwrm_free_tunnel_ports(bp);
  3132. }
  3133. }
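/* Bring up one VNIC: allocate its RSS/COS/LB context, configure the VNIC,
 * enable RSS hashing, and enable header-data split when aggregation rings
 * are in use.
 */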
  3134. static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
  3135. {
  3136. int rc;
  3137. /* allocate context for vnic */
  3138. rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id);
  3139. if (rc) {
  3140. netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
  3141. vnic_id, rc);
  3142. goto vnic_setup_err;
  3143. }
  3144. bp->rsscos_nr_ctxs++;
  3145. /* configure default vnic, ring grp */
  3146. rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
  3147. if (rc) {
  3148. netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
  3149. vnic_id, rc);
  3150. goto vnic_setup_err;
  3151. }
  3152. /* Enable RSS hashing on vnic */
  3153. rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
  3154. if (rc) {
  3155. netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
  3156. vnic_id, rc);
  3157. goto vnic_setup_err;
  3158. }
  3159. if (bp->flags & BNXT_FLAG_AGG_RINGS) {
  3160. rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
  3161. if (rc) {
  3162. netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
  3163. vnic_id, rc);
  3164. }
  3165. }
  3166. vnic_setup_err:
  3167. return rc;
  3168. }
  3169. static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
  3170. {
  3171. #ifdef CONFIG_RFS_ACCEL
  3172. int i, rc = 0;
  3173. for (i = 0; i < bp->rx_nr_rings; i++) {
  3174. u16 vnic_id = i + 1;
  3175. u16 ring_id = i;
  3176. if (vnic_id >= bp->nr_vnics)
  3177. break;
  3178. bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
  3179. rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, ring_id + 1);
  3180. if (rc) {
  3181. netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
  3182. vnic_id, rc);
  3183. break;
  3184. }
  3185. rc = bnxt_setup_vnic(bp, vnic_id);
  3186. if (rc)
  3187. break;
  3188. }
  3189. return rc;
  3190. #else
  3191. return 0;
  3192. #endif
  3193. }
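/* Full HWRM bring-up of the chip: stats contexts (on IRQ re-init), rings,
 * ring groups, default VNIC 0 plus optional RFS VNICs, TPA, the L2 filter
 * and RX mask for VNIC 0, and finally interrupt coalescing.  On failure,
 * all HWRM resources allocated so far are freed.
 */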
  3194. static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
  3195. {
  3196. int rc = 0;
  3197. if (irq_re_init) {
  3198. rc = bnxt_hwrm_stat_ctx_alloc(bp);
  3199. if (rc) {
  3200. netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
  3201. rc);
  3202. goto err_out;
  3203. }
  3204. }
  3205. rc = bnxt_hwrm_ring_alloc(bp);
  3206. if (rc) {
  3207. netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
  3208. goto err_out;
  3209. }
  3210. rc = bnxt_hwrm_ring_grp_alloc(bp);
  3211. if (rc) {
  3212. netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
  3213. goto err_out;
  3214. }
  3215. /* default vnic 0 */
  3216. rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, bp->rx_nr_rings);
  3217. if (rc) {
  3218. netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
  3219. goto err_out;
  3220. }
  3221. rc = bnxt_setup_vnic(bp, 0);
  3222. if (rc)
  3223. goto err_out;
  3224. if (bp->flags & BNXT_FLAG_RFS) {
  3225. rc = bnxt_alloc_rfs_vnics(bp);
  3226. if (rc)
  3227. goto err_out;
  3228. }
  3229. if (bp->flags & BNXT_FLAG_TPA) {
  3230. rc = bnxt_set_tpa(bp, true);
  3231. if (rc)
  3232. goto err_out;
  3233. }
  3234. if (BNXT_VF(bp))
  3235. bnxt_update_vf_mac(bp);
  3236. /* Filter for default vnic 0 */
  3237. rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
  3238. if (rc) {
  3239. netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
  3240. goto err_out;
  3241. }
  3242. bp->vnic_info[0].uc_filter_count = 1;
  3243. bp->vnic_info[0].rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_UNICAST |
  3244. CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
  3245. if ((bp->dev->flags & IFF_PROMISC) && BNXT_PF(bp))
  3246. bp->vnic_info[0].rx_mask |=
  3247. CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
  3248. rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
  3249. if (rc) {
  3250. netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n", rc);
  3251. goto err_out;
  3252. }
  3253. rc = bnxt_hwrm_set_coal(bp);
  3254. if (rc)
  3255. netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
  3256. rc);
  3257. return 0;
  3258. err_out:
  3259. bnxt_hwrm_resource_free(bp, 0, true);
  3260. return rc;
  3261. }
  3262. static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
  3263. {
  3264. bnxt_hwrm_resource_free(bp, 1, irq_re_init);
  3265. return 0;
  3266. }
  3267. static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
  3268. {
  3269. bnxt_init_rx_rings(bp);
  3270. bnxt_init_tx_rings(bp);
  3271. bnxt_init_ring_grps(bp, irq_re_init);
  3272. bnxt_init_vnics(bp);
  3273. return bnxt_init_chip(bp, irq_re_init);
  3274. }
  3275. static void bnxt_disable_int(struct bnxt *bp)
  3276. {
  3277. int i;
  3278. if (!bp->bnapi)
  3279. return;
  3280. for (i = 0; i < bp->cp_nr_rings; i++) {
  3281. struct bnxt_napi *bnapi = bp->bnapi[i];
  3282. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  3283. BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
  3284. }
  3285. }
  3286. static void bnxt_enable_int(struct bnxt *bp)
  3287. {
  3288. int i;
  3289. atomic_set(&bp->intr_sem, 0);
  3290. for (i = 0; i < bp->cp_nr_rings; i++) {
  3291. struct bnxt_napi *bnapi = bp->bnapi[i];
  3292. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  3293. BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
  3294. }
  3295. }
  3296. static int bnxt_set_real_num_queues(struct bnxt *bp)
  3297. {
  3298. int rc;
  3299. struct net_device *dev = bp->dev;
  3300. rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings);
  3301. if (rc)
  3302. return rc;
  3303. rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
  3304. if (rc)
  3305. return rc;
  3306. #ifdef CONFIG_RFS_ACCEL
  3307. if (bp->rx_nr_rings)
  3308. dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
  3309. if (!dev->rx_cpu_rmap)
  3310. rc = -ENOMEM;
  3311. #endif
  3312. return rc;
  3313. }
  3314. static int bnxt_setup_msix(struct bnxt *bp)
  3315. {
  3316. struct msix_entry *msix_ent;
  3317. struct net_device *dev = bp->dev;
  3318. int i, total_vecs, rc = 0;
  3319. const int len = sizeof(bp->irq_tbl[0].name);
  3320. bp->flags &= ~BNXT_FLAG_USING_MSIX;
  3321. total_vecs = bp->cp_nr_rings;
  3322. msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
  3323. if (!msix_ent)
  3324. return -ENOMEM;
  3325. for (i = 0; i < total_vecs; i++) {
  3326. msix_ent[i].entry = i;
  3327. msix_ent[i].vector = 0;
  3328. }
  3329. total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, 1, total_vecs);
  3330. if (total_vecs < 0) {
  3331. rc = -ENODEV;
  3332. goto msix_setup_exit;
  3333. }
  3334. bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
  3335. if (bp->irq_tbl) {
  3336. int tcs;
  3337. /* Trim rings based upon num of vectors allocated */
  3338. bp->rx_nr_rings = min_t(int, total_vecs, bp->rx_nr_rings);
  3339. bp->tx_nr_rings = min_t(int, total_vecs, bp->tx_nr_rings);
  3340. bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
  3341. tcs = netdev_get_num_tc(dev);
  3342. if (tcs > 1) {
  3343. bp->tx_nr_rings_per_tc = bp->tx_nr_rings / tcs;
  3344. if (bp->tx_nr_rings_per_tc == 0) {
  3345. netdev_reset_tc(dev);
  3346. bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
  3347. } else {
  3348. int i, off, count;
  3349. bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
  3350. for (i = 0; i < tcs; i++) {
  3351. count = bp->tx_nr_rings_per_tc;
  3352. off = i * count;
  3353. netdev_set_tc_queue(dev, i, count, off);
  3354. }
  3355. }
  3356. }
  3357. bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings);
  3358. for (i = 0; i < bp->cp_nr_rings; i++) {
  3359. bp->irq_tbl[i].vector = msix_ent[i].vector;
  3360. snprintf(bp->irq_tbl[i].name, len,
  3361. "%s-%s-%d", dev->name, "TxRx", i);
  3362. bp->irq_tbl[i].handler = bnxt_msix;
  3363. }
  3364. rc = bnxt_set_real_num_queues(bp);
  3365. if (rc)
  3366. goto msix_setup_exit;
  3367. } else {
  3368. rc = -ENOMEM;
  3369. goto msix_setup_exit;
  3370. }
  3371. bp->flags |= BNXT_FLAG_USING_MSIX;
  3372. kfree(msix_ent);
  3373. return 0;
  3374. msix_setup_exit:
  3375. netdev_err(bp->dev, "bnxt_setup_msix err: %x\n", rc);
  3376. pci_disable_msix(bp->pdev);
  3377. kfree(msix_ent);
  3378. return rc;
  3379. }
  3380. static int bnxt_setup_inta(struct bnxt *bp)
  3381. {
  3382. int rc;
  3383. const int len = sizeof(bp->irq_tbl[0].name);
  3384. if (netdev_get_num_tc(bp->dev))
  3385. netdev_reset_tc(bp->dev);
  3386. bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
  3387. if (!bp->irq_tbl) {
  3388. rc = -ENOMEM;
  3389. return rc;
  3390. }
  3391. bp->rx_nr_rings = 1;
  3392. bp->tx_nr_rings = 1;
  3393. bp->cp_nr_rings = 1;
  3394. bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
  3395. bp->irq_tbl[0].vector = bp->pdev->irq;
  3396. snprintf(bp->irq_tbl[0].name, len,
  3397. "%s-%s-%d", bp->dev->name, "TxRx", 0);
  3398. bp->irq_tbl[0].handler = bnxt_inta;
  3399. rc = bnxt_set_real_num_queues(bp);
  3400. return rc;
  3401. }
  3402. static int bnxt_setup_int_mode(struct bnxt *bp)
  3403. {
  3404. int rc = 0;
  3405. if (bp->flags & BNXT_FLAG_MSIX_CAP)
  3406. rc = bnxt_setup_msix(bp);
  3407. if (!(bp->flags & BNXT_FLAG_USING_MSIX)) {
  3408. /* fallback to INTA */
  3409. rc = bnxt_setup_inta(bp);
  3410. }
  3411. return rc;
  3412. }
  3413. static void bnxt_free_irq(struct bnxt *bp)
  3414. {
  3415. struct bnxt_irq *irq;
  3416. int i;
  3417. #ifdef CONFIG_RFS_ACCEL
  3418. free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
  3419. bp->dev->rx_cpu_rmap = NULL;
  3420. #endif
  3421. if (!bp->irq_tbl)
  3422. return;
  3423. for (i = 0; i < bp->cp_nr_rings; i++) {
  3424. irq = &bp->irq_tbl[i];
  3425. if (irq->requested)
  3426. free_irq(irq->vector, bp->bnapi[i]);
  3427. irq->requested = 0;
  3428. }
  3429. if (bp->flags & BNXT_FLAG_USING_MSIX)
  3430. pci_disable_msix(bp->pdev);
  3431. kfree(bp->irq_tbl);
  3432. bp->irq_tbl = NULL;
  3433. }
  3434. static int bnxt_request_irq(struct bnxt *bp)
  3435. {
  3436. int i, rc = 0;
  3437. unsigned long flags = 0;
  3438. #ifdef CONFIG_RFS_ACCEL
  3439. struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
  3440. #endif
  3441. if (!(bp->flags & BNXT_FLAG_USING_MSIX))
  3442. flags = IRQF_SHARED;
  3443. for (i = 0; i < bp->cp_nr_rings; i++) {
  3444. struct bnxt_irq *irq = &bp->irq_tbl[i];
  3445. #ifdef CONFIG_RFS_ACCEL
  3446. if (rmap && (i < bp->rx_nr_rings)) {
  3447. rc = irq_cpu_rmap_add(rmap, irq->vector);
  3448. if (rc)
  3449. netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
  3450. i);
  3451. }
  3452. #endif
  3453. rc = request_irq(irq->vector, irq->handler, flags, irq->name,
  3454. bp->bnapi[i]);
  3455. if (rc)
  3456. break;
  3457. irq->requested = 1;
  3458. }
  3459. return rc;
  3460. }
  3461. static void bnxt_del_napi(struct bnxt *bp)
  3462. {
  3463. int i;
  3464. if (!bp->bnapi)
  3465. return;
  3466. for (i = 0; i < bp->cp_nr_rings; i++) {
  3467. struct bnxt_napi *bnapi = bp->bnapi[i];
  3468. napi_hash_del(&bnapi->napi);
  3469. netif_napi_del(&bnapi->napi);
  3470. }
  3471. }
  3472. static void bnxt_init_napi(struct bnxt *bp)
  3473. {
  3474. int i;
  3475. struct bnxt_napi *bnapi;
  3476. if (bp->flags & BNXT_FLAG_USING_MSIX) {
  3477. for (i = 0; i < bp->cp_nr_rings; i++) {
  3478. bnapi = bp->bnapi[i];
  3479. netif_napi_add(bp->dev, &bnapi->napi,
  3480. bnxt_poll, 64);
  3481. napi_hash_add(&bnapi->napi);
  3482. }
  3483. } else {
  3484. bnapi = bp->bnapi[0];
  3485. netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
  3486. napi_hash_add(&bnapi->napi);
  3487. }
  3488. }
  3489. static void bnxt_disable_napi(struct bnxt *bp)
  3490. {
  3491. int i;
  3492. if (!bp->bnapi)
  3493. return;
  3494. for (i = 0; i < bp->cp_nr_rings; i++) {
  3495. napi_disable(&bp->bnapi[i]->napi);
  3496. bnxt_disable_poll(bp->bnapi[i]);
  3497. }
  3498. }
  3499. static void bnxt_enable_napi(struct bnxt *bp)
  3500. {
  3501. int i;
  3502. for (i = 0; i < bp->cp_nr_rings; i++) {
  3503. bnxt_enable_poll(bp->bnapi[i]);
  3504. napi_enable(&bp->bnapi[i]->napi);
  3505. }
  3506. }
  3507. static void bnxt_tx_disable(struct bnxt *bp)
  3508. {
  3509. int i;
  3510. struct bnxt_napi *bnapi;
  3511. struct bnxt_tx_ring_info *txr;
  3512. struct netdev_queue *txq;
  3513. if (bp->bnapi) {
  3514. for (i = 0; i < bp->tx_nr_rings; i++) {
  3515. bnapi = bp->bnapi[i];
  3516. txr = &bnapi->tx_ring;
  3517. txq = netdev_get_tx_queue(bp->dev, i);
  3518. __netif_tx_lock(txq, smp_processor_id());
  3519. txr->dev_state = BNXT_DEV_STATE_CLOSING;
  3520. __netif_tx_unlock(txq);
  3521. }
  3522. }
  3523. /* Stop all TX queues */
  3524. netif_tx_disable(bp->dev);
  3525. netif_carrier_off(bp->dev);
  3526. }
  3527. static void bnxt_tx_enable(struct bnxt *bp)
  3528. {
  3529. int i;
  3530. struct bnxt_napi *bnapi;
  3531. struct bnxt_tx_ring_info *txr;
  3532. struct netdev_queue *txq;
  3533. for (i = 0; i < bp->tx_nr_rings; i++) {
  3534. bnapi = bp->bnapi[i];
  3535. txr = &bnapi->tx_ring;
  3536. txq = netdev_get_tx_queue(bp->dev, i);
  3537. txr->dev_state = 0;
  3538. }
  3539. netif_tx_wake_all_queues(bp->dev);
  3540. if (bp->link_info.link_up)
  3541. netif_carrier_on(bp->dev);
  3542. }
  3543. static void bnxt_report_link(struct bnxt *bp)
  3544. {
  3545. if (bp->link_info.link_up) {
  3546. const char *duplex;
  3547. const char *flow_ctrl;
  3548. u16 speed;
  3549. netif_carrier_on(bp->dev);
  3550. if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
  3551. duplex = "full";
  3552. else
  3553. duplex = "half";
  3554. if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
  3555. flow_ctrl = "ON - receive & transmit";
  3556. else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
  3557. flow_ctrl = "ON - transmit";
  3558. else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
  3559. flow_ctrl = "ON - receive";
  3560. else
  3561. flow_ctrl = "none";
  3562. speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
  3563. netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
  3564. speed, duplex, flow_ctrl);
  3565. } else {
  3566. netif_carrier_off(bp->dev);
  3567. netdev_err(bp->dev, "NIC Link is Down\n");
  3568. }
  3569. }
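/* Query PORT_PHY_QCFG and refresh the cached link state (speed, duplex,
 * pause, supported/advertised speeds, PHY info).  When chng_link_state is
 * set, a change in link status is reported via bnxt_report_link().
 */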
  3570. static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
  3571. {
  3572. int rc = 0;
  3573. struct bnxt_link_info *link_info = &bp->link_info;
  3574. struct hwrm_port_phy_qcfg_input req = {0};
  3575. struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
  3576. u8 link_up = link_info->link_up;
  3577. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
  3578. mutex_lock(&bp->hwrm_cmd_lock);
  3579. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3580. if (rc) {
  3581. mutex_unlock(&bp->hwrm_cmd_lock);
  3582. return rc;
  3583. }
  3584. memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
  3585. link_info->phy_link_status = resp->link;
  3586. link_info->duplex = resp->duplex;
  3587. link_info->pause = resp->pause;
  3588. link_info->auto_mode = resp->auto_mode;
  3589. link_info->auto_pause_setting = resp->auto_pause;
  3590. link_info->force_pause_setting = resp->force_pause;
  3591. link_info->duplex_setting = resp->duplex_setting;
  3592. if (link_info->phy_link_status == BNXT_LINK_LINK)
  3593. link_info->link_speed = le16_to_cpu(resp->link_speed);
  3594. else
  3595. link_info->link_speed = 0;
  3596. link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
  3597. link_info->auto_link_speed = le16_to_cpu(resp->auto_link_speed);
  3598. link_info->support_speeds = le16_to_cpu(resp->support_speeds);
  3599. link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
  3600. link_info->preemphasis = le32_to_cpu(resp->preemphasis);
  3601. link_info->phy_ver[0] = resp->phy_maj;
  3602. link_info->phy_ver[1] = resp->phy_min;
  3603. link_info->phy_ver[2] = resp->phy_bld;
  3604. link_info->media_type = resp->media_type;
  3605. link_info->transceiver = resp->transceiver_type;
  3606. link_info->phy_addr = resp->phy_addr;
  3607. /* TODO: need to add more logic to report VF link */
  3608. if (chng_link_state) {
  3609. if (link_info->phy_link_status == BNXT_LINK_LINK)
  3610. link_info->link_up = 1;
  3611. else
  3612. link_info->link_up = 0;
  3613. if (link_up != link_info->link_up)
  3614. bnxt_report_link(bp);
  3615. } else {
3616. /* always report link down if not required to update the link state */
  3617. link_info->link_up = 0;
  3618. }
  3619. mutex_unlock(&bp->hwrm_cmd_lock);
  3620. return 0;
  3621. }
  3622. static void
  3623. bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
  3624. {
  3625. if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
  3626. if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
  3627. req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
  3628. if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
3629. req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
  3630. req->enables |=
  3631. cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
  3632. } else {
  3633. if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
  3634. req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
  3635. if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
  3636. req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
  3637. req->enables |=
  3638. cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
  3639. }
  3640. }
  3641. static void bnxt_hwrm_set_link_common(struct bnxt *bp,
  3642. struct hwrm_port_phy_cfg_input *req)
  3643. {
  3644. u8 autoneg = bp->link_info.autoneg;
  3645. u16 fw_link_speed = bp->link_info.req_link_speed;
  3646. u32 advertising = bp->link_info.advertising;
  3647. if (autoneg & BNXT_AUTONEG_SPEED) {
  3648. req->auto_mode |=
  3649. PORT_PHY_CFG_REQ_AUTO_MODE_MASK;
  3650. req->enables |= cpu_to_le32(
  3651. PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
  3652. req->auto_link_speed_mask = cpu_to_le16(advertising);
  3653. req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
  3654. req->flags |=
  3655. cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
  3656. } else {
  3657. req->force_link_speed = cpu_to_le16(fw_link_speed);
  3658. req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
  3659. }
  3660. /* currently don't support half duplex */
  3661. req->auto_duplex = PORT_PHY_CFG_REQ_AUTO_DUPLEX_FULL;
  3662. req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_DUPLEX);
  3663. /* tell chimp that the setting takes effect immediately */
  3664. req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
  3665. }
  3666. int bnxt_hwrm_set_pause(struct bnxt *bp)
  3667. {
  3668. struct hwrm_port_phy_cfg_input req = {0};
  3669. int rc;
  3670. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
  3671. bnxt_hwrm_set_pause_common(bp, &req);
  3672. if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
  3673. bp->link_info.force_link_chng)
  3674. bnxt_hwrm_set_link_common(bp, &req);
  3675. mutex_lock(&bp->hwrm_cmd_lock);
  3676. rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3677. if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
  3678. /* since changing of pause setting doesn't trigger any link
  3679. * change event, the driver needs to update the current pause
3680. * result upon successful return of the phy_cfg command
  3681. */
  3682. bp->link_info.pause =
  3683. bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
  3684. bp->link_info.auto_pause_setting = 0;
  3685. if (!bp->link_info.force_link_chng)
  3686. bnxt_report_link(bp);
  3687. }
  3688. bp->link_info.force_link_chng = false;
  3689. mutex_unlock(&bp->hwrm_cmd_lock);
  3690. return rc;
  3691. }
  3692. int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause)
  3693. {
  3694. struct hwrm_port_phy_cfg_input req = {0};
  3695. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
  3696. if (set_pause)
  3697. bnxt_hwrm_set_pause_common(bp, &req);
  3698. bnxt_hwrm_set_link_common(bp, &req);
  3699. return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
  3700. }
  3701. static int bnxt_update_phy_setting(struct bnxt *bp)
  3702. {
  3703. int rc;
  3704. bool update_link = false;
  3705. bool update_pause = false;
  3706. struct bnxt_link_info *link_info = &bp->link_info;
  3707. rc = bnxt_update_link(bp, true);
  3708. if (rc) {
  3709. netdev_err(bp->dev, "failed to update link (rc: %x)\n",
  3710. rc);
  3711. return rc;
  3712. }
  3713. if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
  3714. link_info->auto_pause_setting != link_info->req_flow_ctrl)
  3715. update_pause = true;
  3716. if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
  3717. link_info->force_pause_setting != link_info->req_flow_ctrl)
  3718. update_pause = true;
  3719. if (link_info->req_duplex != link_info->duplex_setting)
  3720. update_link = true;
  3721. if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
  3722. if (BNXT_AUTO_MODE(link_info->auto_mode))
  3723. update_link = true;
  3724. if (link_info->req_link_speed != link_info->force_link_speed)
  3725. update_link = true;
  3726. } else {
  3727. if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
  3728. update_link = true;
  3729. if (link_info->advertising != link_info->auto_link_speeds)
  3730. update_link = true;
  3731. if (link_info->req_link_speed != link_info->auto_link_speed)
  3732. update_link = true;
  3733. }
  3734. if (update_link)
  3735. rc = bnxt_hwrm_set_link_setting(bp, update_pause);
  3736. else if (update_pause)
  3737. rc = bnxt_hwrm_set_pause(bp);
  3738. if (rc) {
  3739. netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
  3740. rc);
  3741. return rc;
  3742. }
  3743. return rc;
  3744. }
  3745. /* Common routine to pre-map certain register block to different GRC window.
3746. * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15
3747. * windows in the PF and 3 windows in the VF can be customized to map
3748. * different register blocks.
  3749. */
  3750. static void bnxt_preset_reg_win(struct bnxt *bp)
  3751. {
  3752. if (BNXT_PF(bp)) {
  3753. /* CAG registers map to GRC window #4 */
  3754. writel(BNXT_CAG_REG_BASE,
  3755. bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
  3756. }
  3757. }
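/* Core open path: set up the interrupt mode, allocate memory, NAPI and
 * IRQs, initialize the NIC through HWRM, update PHY settings if requested,
 * register tunnel ports, then enable interrupts and the TX queues.
 */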
  3758. static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
  3759. {
  3760. int rc = 0;
  3761. bnxt_preset_reg_win(bp);
  3762. netif_carrier_off(bp->dev);
  3763. if (irq_re_init) {
  3764. rc = bnxt_setup_int_mode(bp);
  3765. if (rc) {
  3766. netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
  3767. rc);
  3768. return rc;
  3769. }
  3770. }
  3771. if ((bp->flags & BNXT_FLAG_RFS) &&
  3772. !(bp->flags & BNXT_FLAG_USING_MSIX)) {
  3773. /* disable RFS if falling back to INTA */
  3774. bp->dev->hw_features &= ~NETIF_F_NTUPLE;
  3775. bp->flags &= ~BNXT_FLAG_RFS;
  3776. }
  3777. rc = bnxt_alloc_mem(bp, irq_re_init);
  3778. if (rc) {
  3779. netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
  3780. goto open_err_free_mem;
  3781. }
  3782. if (irq_re_init) {
  3783. bnxt_init_napi(bp);
  3784. rc = bnxt_request_irq(bp);
  3785. if (rc) {
  3786. netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
  3787. goto open_err;
  3788. }
  3789. }
  3790. bnxt_enable_napi(bp);
  3791. rc = bnxt_init_nic(bp, irq_re_init);
  3792. if (rc) {
  3793. netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
  3794. goto open_err;
  3795. }
  3796. if (link_re_init) {
  3797. rc = bnxt_update_phy_setting(bp);
  3798. if (rc)
  3799. goto open_err;
  3800. }
  3801. if (irq_re_init) {
  3802. #if defined(CONFIG_VXLAN) || defined(CONFIG_VXLAN_MODULE)
  3803. vxlan_get_rx_port(bp->dev);
  3804. #endif
  3805. if (!bnxt_hwrm_tunnel_dst_port_alloc(
  3806. bp, htons(0x17c1),
  3807. TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE))
  3808. bp->nge_port_cnt = 1;
  3809. }
  3810. bp->state = BNXT_STATE_OPEN;
  3811. bnxt_enable_int(bp);
  3812. /* Enable TX queues */
  3813. bnxt_tx_enable(bp);
  3814. mod_timer(&bp->timer, jiffies + bp->current_interval);
  3815. return 0;
  3816. open_err:
  3817. bnxt_disable_napi(bp);
  3818. bnxt_del_napi(bp);
  3819. open_err_free_mem:
  3820. bnxt_free_skbs(bp);
  3821. bnxt_free_irq(bp);
  3822. bnxt_free_mem(bp, true);
  3823. return rc;
  3824. }
  3825. /* rtnl_lock held */
  3826. int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
  3827. {
  3828. int rc = 0;
  3829. rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
  3830. if (rc) {
  3831. netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
  3832. dev_close(bp->dev);
  3833. }
  3834. return rc;
  3835. }
  3836. static int bnxt_open(struct net_device *dev)
  3837. {
  3838. struct bnxt *bp = netdev_priv(dev);
  3839. int rc = 0;
  3840. rc = bnxt_hwrm_func_reset(bp);
  3841. if (rc) {
  3842. netdev_err(bp->dev, "hwrm chip reset failure rc: %x\n",
  3843. rc);
  3844. rc = -1;
  3845. return rc;
  3846. }
  3847. return __bnxt_open_nic(bp, true, true);
  3848. }
  3849. static void bnxt_disable_int_sync(struct bnxt *bp)
  3850. {
  3851. int i;
  3852. atomic_inc(&bp->intr_sem);
  3853. if (!netif_running(bp->dev))
  3854. return;
  3855. bnxt_disable_int(bp);
  3856. for (i = 0; i < bp->cp_nr_rings; i++)
  3857. synchronize_irq(bp->irq_tbl[i].vector);
  3858. }
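/* Core close path: quiesce TX, flush and free HWRM resources, disable NAPI
 * and interrupts, then free SKBs, IRQs and memory.  Any pending SR-IOV
 * configuration is allowed to finish first.
 */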
  3859. int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
  3860. {
  3861. int rc = 0;
  3862. #ifdef CONFIG_BNXT_SRIOV
  3863. if (bp->sriov_cfg) {
  3864. rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
  3865. !bp->sriov_cfg,
  3866. BNXT_SRIOV_CFG_WAIT_TMO);
  3867. if (rc)
  3868. netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
  3869. }
  3870. #endif
3871. /* Change device state to avoid TX queue wake-ups */
  3872. bnxt_tx_disable(bp);
  3873. bp->state = BNXT_STATE_CLOSED;
  3874. cancel_work_sync(&bp->sp_task);
  3875. /* Flush rings before disabling interrupts */
  3876. bnxt_shutdown_nic(bp, irq_re_init);
  3877. /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
  3878. bnxt_disable_napi(bp);
  3879. bnxt_disable_int_sync(bp);
  3880. del_timer_sync(&bp->timer);
  3881. bnxt_free_skbs(bp);
  3882. if (irq_re_init) {
  3883. bnxt_free_irq(bp);
  3884. bnxt_del_napi(bp);
  3885. }
  3886. bnxt_free_mem(bp, irq_re_init);
  3887. return rc;
  3888. }
  3889. static int bnxt_close(struct net_device *dev)
  3890. {
  3891. struct bnxt *bp = netdev_priv(dev);
  3892. bnxt_close_nic(bp, true, true);
  3893. return 0;
  3894. }
  3895. /* rtnl_lock held */
  3896. static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
  3897. {
  3898. switch (cmd) {
  3899. case SIOCGMIIPHY:
  3900. /* fallthru */
  3901. case SIOCGMIIREG: {
  3902. if (!netif_running(dev))
  3903. return -EAGAIN;
  3904. return 0;
  3905. }
  3906. case SIOCSMIIREG:
  3907. if (!netif_running(dev))
  3908. return -EAGAIN;
  3909. return 0;
  3910. default:
  3911. /* do nothing */
  3912. break;
  3913. }
  3914. return -EOPNOTSUPP;
  3915. }
  3916. static struct rtnl_link_stats64 *
  3917. bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
  3918. {
  3919. u32 i;
  3920. struct bnxt *bp = netdev_priv(dev);
  3921. memset(stats, 0, sizeof(struct rtnl_link_stats64));
  3922. if (!bp->bnapi)
  3923. return stats;
  3924. /* TODO check if we need to synchronize with bnxt_close path */
  3925. for (i = 0; i < bp->cp_nr_rings; i++) {
  3926. struct bnxt_napi *bnapi = bp->bnapi[i];
  3927. struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
  3928. struct ctx_hw_stats *hw_stats = cpr->hw_stats;
  3929. stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
  3930. stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
  3931. stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
  3932. stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
  3933. stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
  3934. stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
  3935. stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
  3936. stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
  3937. stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
  3938. stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
  3939. stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
  3940. stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
  3941. stats->rx_missed_errors +=
  3942. le64_to_cpu(hw_stats->rx_discard_pkts);
  3943. stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
  3944. stats->rx_dropped += le64_to_cpu(hw_stats->rx_drop_pkts);
  3945. stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
  3946. }
  3947. return stats;
  3948. }
  3949. static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
  3950. {
  3951. struct net_device *dev = bp->dev;
  3952. struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
  3953. struct netdev_hw_addr *ha;
  3954. u8 *haddr;
  3955. int mc_count = 0;
  3956. bool update = false;
  3957. int off = 0;
  3958. netdev_for_each_mc_addr(ha, dev) {
  3959. if (mc_count >= BNXT_MAX_MC_ADDRS) {
  3960. *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
  3961. vnic->mc_list_count = 0;
  3962. return false;
  3963. }
  3964. haddr = ha->addr;
  3965. if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
  3966. memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
  3967. update = true;
  3968. }
  3969. off += ETH_ALEN;
  3970. mc_count++;
  3971. }
  3972. if (mc_count)
  3973. *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
  3974. if (mc_count != vnic->mc_list_count) {
  3975. vnic->mc_list_count = mc_count;
  3976. update = true;
  3977. }
  3978. return update;
  3979. }
  3980. static bool bnxt_uc_list_updated(struct bnxt *bp)
  3981. {
  3982. struct net_device *dev = bp->dev;
  3983. struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
  3984. struct netdev_hw_addr *ha;
  3985. int off = 0;
  3986. if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
  3987. return true;
  3988. netdev_for_each_uc_addr(ha, dev) {
  3989. if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
  3990. return true;
  3991. off += ETH_ALEN;
  3992. }
  3993. return false;
  3994. }
  3995. static void bnxt_set_rx_mode(struct net_device *dev)
  3996. {
  3997. struct bnxt *bp = netdev_priv(dev);
  3998. struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
  3999. u32 mask = vnic->rx_mask;
  4000. bool mc_update = false;
  4001. bool uc_update;
  4002. if (!netif_running(dev))
  4003. return;
  4004. mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
  4005. CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
  4006. CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
  4007. /* Only allow PF to be in promiscuous mode */
  4008. if ((dev->flags & IFF_PROMISC) && BNXT_PF(bp))
  4009. mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
  4010. uc_update = bnxt_uc_list_updated(bp);
  4011. if (dev->flags & IFF_ALLMULTI) {
  4012. mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
  4013. vnic->mc_list_count = 0;
  4014. } else {
  4015. mc_update = bnxt_mc_list_updated(bp, &mask);
  4016. }
  4017. if (mask != vnic->rx_mask || uc_update || mc_update) {
  4018. vnic->rx_mask = mask;
  4019. set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
  4020. schedule_work(&bp->sp_task);
  4021. }
  4022. }
  4023. static void bnxt_cfg_rx_mode(struct bnxt *bp)
  4024. {
  4025. struct net_device *dev = bp->dev;
  4026. struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
  4027. struct netdev_hw_addr *ha;
  4028. int i, off = 0, rc;
  4029. bool uc_update;
  4030. netif_addr_lock_bh(dev);
  4031. uc_update = bnxt_uc_list_updated(bp);
  4032. netif_addr_unlock_bh(dev);
  4033. if (!uc_update)
  4034. goto skip_uc;
  4035. mutex_lock(&bp->hwrm_cmd_lock);
  4036. for (i = 1; i < vnic->uc_filter_count; i++) {
  4037. struct hwrm_cfa_l2_filter_free_input req = {0};
  4038. bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
  4039. -1);
  4040. req.l2_filter_id = vnic->fw_l2_filter_id[i];
  4041. rc = _hwrm_send_message(bp, &req, sizeof(req),
  4042. HWRM_CMD_TIMEOUT);
  4043. }
  4044. mutex_unlock(&bp->hwrm_cmd_lock);
  4045. vnic->uc_filter_count = 1;
  4046. netif_addr_lock_bh(dev);
  4047. if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
  4048. vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
  4049. } else {
  4050. netdev_for_each_uc_addr(ha, dev) {
  4051. memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
  4052. off += ETH_ALEN;
  4053. vnic->uc_filter_count++;
  4054. }
  4055. }
  4056. netif_addr_unlock_bh(dev);
  4057. for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
  4058. rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
  4059. if (rc) {
  4060. netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
  4061. rc);
  4062. vnic->uc_filter_count = i;
  4063. }
  4064. }
  4065. skip_uc:
  4066. rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
  4067. if (rc)
  4068. netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
  4069. rc);
  4070. }
static netdev_features_t bnxt_fix_features(struct net_device *dev,
					   netdev_features_t features)
{
	return features;
}
static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnxt *bp = netdev_priv(dev);
	u32 flags = bp->flags;
	u32 changes;
	int rc = 0;
	bool re_init = false;
	bool update_tpa = false;

	flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
	if ((features & NETIF_F_GRO) && (bp->pdev->revision > 0))
		flags |= BNXT_FLAG_GRO;
	if (features & NETIF_F_LRO)
		flags |= BNXT_FLAG_LRO;

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		flags |= BNXT_FLAG_STRIP_VLAN;

	if (features & NETIF_F_NTUPLE)
		flags |= BNXT_FLAG_RFS;

	changes = flags ^ bp->flags;
	if (changes & BNXT_FLAG_TPA) {
		update_tpa = true;
		if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
		    (flags & BNXT_FLAG_TPA) == 0)
			re_init = true;
	}

	if (changes & ~BNXT_FLAG_TPA)
		re_init = true;

	if (flags != bp->flags) {
		u32 old_flags = bp->flags;

		bp->flags = flags;

		if (!netif_running(dev)) {
			if (update_tpa)
				bnxt_set_ring_params(bp);
			return rc;
		}

		if (re_init) {
			bnxt_close_nic(bp, false, false);
			if (update_tpa)
				bnxt_set_ring_params(bp);

			return bnxt_open_nic(bp, false, false);
		}
		if (update_tpa) {
			rc = bnxt_set_tpa(bp,
					  (flags & BNXT_FLAG_TPA) ?
					  true : false);
			if (rc)
				bp->flags = old_flags;
		}
	}
	return rc;
}
static void bnxt_dbg_dump_states(struct bnxt *bp)
{
	int i;
	struct bnxt_napi *bnapi;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_cp_ring_info *cpr;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		bnapi = bp->bnapi[i];
		txr = &bnapi->tx_ring;
		rxr = &bnapi->rx_ring;
		cpr = &bnapi->cp_ring;
		if (netif_msg_drv(bp)) {
			netdev_info(bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
				    i, txr->tx_ring_struct.fw_ring_id,
				    txr->tx_prod, txr->tx_cons);
			netdev_info(bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
				    i, rxr->rx_ring_struct.fw_ring_id,
				    rxr->rx_prod,
				    rxr->rx_agg_ring_struct.fw_ring_id,
				    rxr->rx_agg_prod, rxr->rx_sw_agg_prod);
			netdev_info(bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
				    i, cpr->cp_ring_struct.fw_ring_id,
				    cpr->cp_raw_cons);
		}
	}
}
static void bnxt_reset_task(struct bnxt *bp)
{
	bnxt_dbg_dump_states(bp);
	if (netif_running(bp->dev))
		bnxt_tx_disable(bp); /* prevent tx timeout again */
}
static void bnxt_tx_timeout(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");

	set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
	schedule_work(&bp->sp_task);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void bnxt_poll_controller(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, bp->bnapi[i]);
		enable_irq(irq->vector);
	}
}
#endif
static void bnxt_timer(unsigned long data)
{
	struct bnxt *bp = (struct bnxt *)data;
	struct net_device *dev = bp->dev;

	if (!netif_running(dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto bnxt_restart_timer;

bnxt_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
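
/* Slow-path work handler.  bnxt_sp_task() runs the deferred operations
 * whose event bits were set from atomic context (RX mode changes,
 * ntuple filter maintenance, link changes, VF request forwarding, VXLAN
 * port add/delete, reset), since the HWRM calls they issue can sleep.
 */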
static void bnxt_cfg_ntp_filters(struct bnxt *);

static void bnxt_sp_task(struct work_struct *work)
{
	struct bnxt *bp = container_of(work, struct bnxt, sp_task);
	int rc;

	if (bp->state != BNXT_STATE_OPEN)
		return;

	if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
		bnxt_cfg_rx_mode(bp);

	if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
		bnxt_cfg_ntp_filters(bp);
	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
		rc = bnxt_update_link(bp, true);
		if (rc)
			netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
				   rc);
	}
	if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
		bnxt_hwrm_exec_fwd_req(bp);
	if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
		bnxt_hwrm_tunnel_dst_port_alloc(
			bp, bp->vxlan_port,
			TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
	}
	if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
		bnxt_hwrm_tunnel_dst_port_free(
			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
	}
	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
		bnxt_reset_task(bp);
}
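
/* One-time PCI bring-up for the device: enable it, request its regions,
 * set the DMA mask, and map BAR 0 (device registers), BAR 2 (doorbells)
 * and BAR 4.  Also initializes the slow-path work item, default ring
 * sizes, interrupt coalescing defaults and the periodic timer.
 */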
static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	int rc;
	struct bnxt *bp = netdev_priv(dev);

	SET_NETDEV_DEV(dev, &pdev->dev);

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		goto init_err;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto init_err_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto init_err_disable;
	}

	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
		rc = -EIO;
		goto init_err_release;
	}

	pci_set_master(pdev);

	bp->dev = dev;
	bp->pdev = pdev;

	bp->bar0 = pci_ioremap_bar(pdev, 0);
	if (!bp->bar0) {
		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}

	bp->bar1 = pci_ioremap_bar(pdev, 2);
	if (!bp->bar1) {
		dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}

	bp->bar2 = pci_ioremap_bar(pdev, 4);
	if (!bp->bar2) {
		dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
		rc = -ENOMEM;
		goto init_err_release;
	}

	INIT_WORK(&bp->sp_task, bnxt_sp_task);

	spin_lock_init(&bp->ntp_fltr_lock);

	bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
	bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;

	bp->coal_ticks = BNXT_USEC_TO_COAL_TIMER(4);
	bp->coal_bufs = 20;
	bp->coal_ticks_irq = BNXT_USEC_TO_COAL_TIMER(1);
	bp->coal_bufs_irq = 2;

	init_timer(&bp->timer);
	bp->timer.data = (unsigned long)bp;
	bp->timer.function = bnxt_timer;
	bp->current_interval = BNXT_TIMER_INTERVAL;

	bp->state = BNXT_STATE_CLOSED;

	return 0;

init_err_release:
	if (bp->bar2) {
		pci_iounmap(pdev, bp->bar2);
		bp->bar2 = NULL;
	}

	if (bp->bar1) {
		pci_iounmap(pdev, bp->bar1);
		bp->bar1 = NULL;
	}

	if (bp->bar0) {
		pci_iounmap(pdev, bp->bar0);
		bp->bar0 = NULL;
	}

	pci_release_regions(pdev);

init_err_disable:
	pci_disable_device(pdev);

init_err:
	return rc;
}
/* rtnl_lock held */
static int bnxt_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);

	return 0;
}
/* rtnl_lock held */
static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnxt *bp = netdev_priv(dev);

	if (new_mtu < 60 || new_mtu > 9000)
		return -EINVAL;

	if (netif_running(dev))
		bnxt_close_nic(bp, false, false);

	dev->mtu = new_mtu;
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, false, false);

	return 0;
}
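
/* .ndo_setup_tc handler.  Validates the requested number of traffic
 * classes against max_tc and the available TX rings, then closes and
 * reopens the NIC with tx_nr_rings scaled by the TC count so the
 * hardware resources can be re-allocated.
 */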
static int bnxt_setup_tc(struct net_device *dev, u8 tc)
{
	struct bnxt *bp = netdev_priv(dev);

	if (tc > bp->max_tc) {
		netdev_err(dev, "too many traffic classes requested: %d Max supported is %d\n",
			   tc, bp->max_tc);
		return -EINVAL;
	}

	if (netdev_get_num_tc(dev) == tc)
		return 0;

	if (tc) {
		int max_rx_rings, max_tx_rings;

		bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
		if (bp->tx_nr_rings_per_tc * tc > max_tx_rings)
			return -ENOMEM;
	}

	/* Needs to close the device and do hw resource re-allocations */
	if (netif_running(bp->dev))
		bnxt_close_nic(bp, true, false);

	if (tc) {
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
		netdev_set_num_tc(dev, tc);
	} else {
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
		netdev_reset_tc(dev);
	}
	bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
	bp->num_stat_ctxs = bp->cp_nr_rings;

	if (netif_running(bp->dev))
		return bnxt_open_nic(bp, true, false);

	return 0;
}
#ifdef CONFIG_RFS_ACCEL
static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
			    struct bnxt_ntuple_filter *f2)
{
	struct flow_keys *keys1 = &f1->fkeys;
	struct flow_keys *keys2 = &f2->fkeys;

	if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
	    keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
	    keys1->ports.ports == keys2->ports.ports &&
	    keys1->basic.ip_proto == keys2->basic.ip_proto &&
	    keys1->basic.n_proto == keys2->basic.n_proto &&
	    ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr))
		return true;

	return false;
}
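
/* aRFS .ndo_rx_flow_steer handler.  Dissects the flow, rejects
 * encapsulated and non-TCP/UDP IPv4 traffic, de-duplicates against the
 * ntp_fltr hash table, reserves a software filter ID from the bitmap and
 * defers the HWRM ntuple filter allocation to bnxt_sp_task().
 */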
static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
			      u16 rxq_index, u32 flow_id)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_ntuple_filter *fltr, *new_fltr;
	struct flow_keys *fkeys;
	struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
	int rc = 0, idx, bit_id;
	struct hlist_head *head;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
	if (!new_fltr)
		return -ENOMEM;

	fkeys = &new_fltr->fkeys;
	if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}

	if ((fkeys->basic.n_proto != htons(ETH_P_IP)) ||
	    ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
	     (fkeys->basic.ip_proto != IPPROTO_UDP))) {
		rc = -EPROTONOSUPPORT;
		goto err_free;
	}

	memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);

	idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
	head = &bp->ntp_fltr_hash_tbl[idx];
	rcu_read_lock();
	hlist_for_each_entry_rcu(fltr, head, hash) {
		if (bnxt_fltr_match(fltr, new_fltr)) {
			rcu_read_unlock();
			rc = 0;
			goto err_free;
		}
	}
	rcu_read_unlock();

	spin_lock_bh(&bp->ntp_fltr_lock);
	bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
					 BNXT_NTP_FLTR_MAX_FLTR, 0);
	if (bit_id < 0) {
		spin_unlock_bh(&bp->ntp_fltr_lock);
		rc = -ENOMEM;
		goto err_free;
	}

	new_fltr->sw_id = (u16)bit_id;
	new_fltr->flow_id = flow_id;
	new_fltr->rxq = rxq_index;
	hlist_add_head_rcu(&new_fltr->hash, head);
	bp->ntp_fltr_count++;
	spin_unlock_bh(&bp->ntp_fltr_lock);

	set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
	schedule_work(&bp->sp_task);

	return new_fltr->sw_id;

err_free:
	kfree(new_fltr);
	return rc;
}
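
/* Ntuple filter maintenance, run from bnxt_sp_task().  Filters not yet
 * committed to the hardware are allocated via HWRM; committed filters
 * that the RPS core reports as expired are freed and removed from the
 * hash table.
 */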
static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
	int i;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct hlist_node *tmp;
		struct bnxt_ntuple_filter *fltr;
		int rc;

		head = &bp->ntp_fltr_hash_tbl[i];
		hlist_for_each_entry_safe(fltr, tmp, head, hash) {
			bool del = false;

			if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
				if (rps_may_expire_flow(bp->dev, fltr->rxq,
							fltr->flow_id,
							fltr->sw_id)) {
					bnxt_hwrm_cfa_ntuple_filter_free(bp,
									 fltr);
					del = true;
				}
			} else {
				rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
								       fltr);
				if (rc)
					del = true;
				else
					set_bit(BNXT_FLTR_VALID, &fltr->state);
			}

			if (del) {
				spin_lock_bh(&bp->ntp_fltr_lock);
				hlist_del_rcu(&fltr->hash);
				bp->ntp_fltr_count--;
				spin_unlock_bh(&bp->ntp_fltr_lock);
				synchronize_rcu();
				clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
				kfree(fltr);
			}
		}
	}
}

#else

static void bnxt_cfg_ntp_filters(struct bnxt *bp)
{
}

#endif /* CONFIG_RFS_ACCEL */
static void bnxt_add_vxlan_port(struct net_device *dev, sa_family_t sa_family,
				__be16 port)
{
	struct bnxt *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	if (sa_family != AF_INET6 && sa_family != AF_INET)
		return;

	if (bp->vxlan_port_cnt && bp->vxlan_port != port)
		return;

	bp->vxlan_port_cnt++;
	if (bp->vxlan_port_cnt == 1) {
		bp->vxlan_port = port;
		set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
		schedule_work(&bp->sp_task);
	}
}
static void bnxt_del_vxlan_port(struct net_device *dev, sa_family_t sa_family,
				__be16 port)
{
	struct bnxt *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	if (sa_family != AF_INET6 && sa_family != AF_INET)
		return;

	if (bp->vxlan_port_cnt && bp->vxlan_port == port) {
		bp->vxlan_port_cnt--;

		if (bp->vxlan_port_cnt == 0) {
			set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
			schedule_work(&bp->sp_task);
		}
	}
}
static const struct net_device_ops bnxt_netdev_ops = {
	.ndo_open		= bnxt_open,
	.ndo_start_xmit		= bnxt_start_xmit,
	.ndo_stop		= bnxt_close,
	.ndo_get_stats64	= bnxt_get_stats64,
	.ndo_set_rx_mode	= bnxt_set_rx_mode,
	.ndo_do_ioctl		= bnxt_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnxt_change_mac_addr,
	.ndo_change_mtu		= bnxt_change_mtu,
	.ndo_fix_features	= bnxt_fix_features,
	.ndo_set_features	= bnxt_set_features,
	.ndo_tx_timeout		= bnxt_tx_timeout,
#ifdef CONFIG_BNXT_SRIOV
	.ndo_get_vf_config	= bnxt_get_vf_config,
	.ndo_set_vf_mac		= bnxt_set_vf_mac,
	.ndo_set_vf_vlan	= bnxt_set_vf_vlan,
	.ndo_set_vf_rate	= bnxt_set_vf_bw,
	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bnxt_poll_controller,
#endif
	.ndo_setup_tc		= bnxt_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= bnxt_rx_flow_steer,
#endif
	.ndo_add_vxlan_port	= bnxt_add_vxlan_port,
	.ndo_del_vxlan_port	= bnxt_del_vxlan_port,
#ifdef CONFIG_NET_RX_BUSY_POLL
	.ndo_busy_poll		= bnxt_busy_poll,
#endif
};
static void bnxt_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_PF(bp))
		bnxt_sriov_disable(bp);

	unregister_netdev(dev);
	cancel_work_sync(&bp->sp_task);
	bp->sp_event = 0;

	bnxt_free_hwrm_resources(bp);
	pci_iounmap(pdev, bp->bar2);
	pci_iounmap(pdev, bp->bar1);
	pci_iounmap(pdev, bp->bar0);
	free_netdev(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
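
/* Read the current link state from firmware and seed the ethtool-visible
 * settings (autoneg, flow control, requested speed/duplex, advertised
 * speeds) from the NVM defaults.  Also appends the PHY version to the
 * firmware version string.
 */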
static int bnxt_probe_phy(struct bnxt *bp)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;
	char phy_ver[PHY_VER_STR_LEN];

	rc = bnxt_update_link(bp, false);
	if (rc) {
		netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
			   rc);
		return rc;
	}

	/* Initialize the ethtool setting copy with NVM settings */
	if (BNXT_AUTO_MODE(link_info->auto_mode))
		link_info->autoneg |= BNXT_AUTONEG_SPEED;

	if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) {
		if (link_info->auto_pause_setting == BNXT_LINK_PAUSE_BOTH)
			link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl = link_info->auto_pause_setting;
	} else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
		link_info->req_flow_ctrl = link_info->force_pause_setting;
	}
	link_info->req_duplex = link_info->duplex_setting;
	if (link_info->autoneg & BNXT_AUTONEG_SPEED)
		link_info->req_link_speed = link_info->auto_link_speed;
	else
		link_info->req_link_speed = link_info->force_link_speed;
	link_info->advertising = link_info->auto_link_speeds;
	snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d",
		 link_info->phy_ver[0],
		 link_info->phy_ver[1],
		 link_info->phy_ver[2]);
	strcat(bp->fw_ver_str, phy_ver);
	return rc;
}
static int bnxt_get_max_irq(struct pci_dev *pdev)
{
	u16 ctrl;

	if (!pdev->msix_cap)
		return 1;

	pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
	return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
}
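
/* Derive the usable RX/TX ring counts from the PF or VF resource limits,
 * additionally bounded by the available completion rings, IRQs and
 * statistics contexts.  With aggregation rings enabled, each RX ring
 * also needs an aggregation ring, so *max_rx is halved.
 */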
void bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx)
{
	int max_rings = 0;

	if (BNXT_PF(bp)) {
		*max_tx = bp->pf.max_pf_tx_rings;
		*max_rx = bp->pf.max_pf_rx_rings;
		max_rings = min_t(int, bp->pf.max_irqs, bp->pf.max_cp_rings);
		max_rings = min_t(int, max_rings, bp->pf.max_stat_ctxs);
	} else {
#ifdef CONFIG_BNXT_SRIOV
		*max_tx = bp->vf.max_tx_rings;
		*max_rx = bp->vf.max_rx_rings;
		max_rings = min_t(int, bp->vf.max_irqs, bp->vf.max_cp_rings);
		max_rings = min_t(int, max_rings, bp->vf.max_stat_ctxs);
#endif
	}
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		*max_rx >>= 1;

	*max_rx = min_t(int, *max_rx, max_rings);
	*max_tx = min_t(int, *max_tx, max_rings);
}
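
/* PCI probe entry point.  Allocates the netdev, brings up the PCI
 * function via bnxt_init_board(), registers the driver with firmware,
 * queries function capabilities and queue configuration, sizes the
 * default rings, probes the PHY and finally registers the netdev.
 */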
static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev;
	struct bnxt *bp;
	int rc, max_rx_rings, max_tx_rings, max_irqs, dflt_rings;

	if (version_printed++ == 0)
		pr_info("%s", version);

	max_irqs = bnxt_get_max_irq(pdev);
	dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
	if (!dev)
		return -ENOMEM;

	bp = netdev_priv(dev);

	if (bnxt_vf_pciid(ent->driver_data))
		bp->flags |= BNXT_FLAG_VF;

	if (pdev->msix_cap) {
		bp->flags |= BNXT_FLAG_MSIX_CAP;
		if (BNXT_PF(bp))
			bp->flags |= BNXT_FLAG_RFS;
	}

	rc = bnxt_init_board(pdev, dev);
	if (rc < 0)
		goto init_err_free;

	dev->netdev_ops = &bnxt_netdev_ops;
	dev->watchdog_timeo = BNXT_TX_TIMEOUT;
	dev->ethtool_ops = &bnxt_ethtool_ops;

	pci_set_drvdata(pdev, dev);

	dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			   NETIF_F_TSO | NETIF_F_TSO6 |
			   NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			   NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT |
			   NETIF_F_RXHASH |
			   NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO;

	if (bp->flags & BNXT_FLAG_RFS)
		dev->hw_features |= NETIF_F_NTUPLE;

	dev->hw_enc_features =
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
			NETIF_F_TSO | NETIF_F_TSO6 |
			NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
			NETIF_F_GSO_IPIP | NETIF_F_GSO_SIT;
	dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
	dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
	dev->priv_flags |= IFF_UNICAST_FLT;

#ifdef CONFIG_BNXT_SRIOV
	init_waitqueue_head(&bp->sriov_cfg_wait);
#endif
	rc = bnxt_alloc_hwrm_resources(bp);
	if (rc)
		goto init_err;

	mutex_init(&bp->hwrm_cmd_lock);
	bnxt_hwrm_ver_get(bp);

	rc = bnxt_hwrm_func_drv_rgtr(bp);
	if (rc)
		goto init_err;

	/* Get the MAX capabilities for this function */
	rc = bnxt_hwrm_func_qcaps(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
			   rc);
		rc = -1;
		goto init_err;
	}

	rc = bnxt_hwrm_queue_qportcfg(bp);
	if (rc) {
		netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
			   rc);
		rc = -1;
		goto init_err;
	}

	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);
	dflt_rings = netif_get_num_default_rss_queues();
	if (BNXT_PF(bp)) {
		memcpy(dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
		bp->pf.max_irqs = max_irqs;
	} else {
#if defined(CONFIG_BNXT_SRIOV)
		memcpy(dev->dev_addr, bp->vf.mac_addr, ETH_ALEN);
		bp->vf.max_irqs = max_irqs;
#endif
	}
	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings);
	bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
	bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
	bp->cp_nr_rings = max_t(int, bp->rx_nr_rings, bp->tx_nr_rings);
	bp->num_stat_ctxs = bp->cp_nr_rings;

	if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
		bp->flags |= BNXT_FLAG_STRIP_VLAN;

	rc = bnxt_probe_phy(bp);
	if (rc)
		goto init_err;

	rc = register_netdev(dev);
	if (rc)
		goto init_err;

	netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (long)pci_resource_start(pdev, 0), dev->dev_addr);

	return 0;

init_err:
	pci_iounmap(pdev, bp->bar0);
	pci_release_regions(pdev);
	pci_disable_device(pdev);

init_err_free:
	free_netdev(dev);
	return rc;
}
static struct pci_driver bnxt_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnxt_pci_tbl,
	.probe		= bnxt_init_one,
	.remove		= bnxt_remove_one,
#if defined(CONFIG_BNXT_SRIOV)
	.sriov_configure	= bnxt_sriov_configure,
#endif
};

module_pci_driver(bnxt_pci_driver);