- /*
- * This file is part of the Chelsio T4 Ethernet driver for Linux.
- *
- * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
- #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
- #include <linux/bitmap.h>
- #include <linux/crc32.h>
- #include <linux/ctype.h>
- #include <linux/debugfs.h>
- #include <linux/err.h>
- #include <linux/etherdevice.h>
- #include <linux/firmware.h>
- #include <linux/if.h>
- #include <linux/if_vlan.h>
- #include <linux/init.h>
- #include <linux/log2.h>
- #include <linux/mdio.h>
- #include <linux/module.h>
- #include <linux/moduleparam.h>
- #include <linux/mutex.h>
- #include <linux/netdevice.h>
- #include <linux/pci.h>
- #include <linux/aer.h>
- #include <linux/rtnetlink.h>
- #include <linux/sched.h>
- #include <linux/seq_file.h>
- #include <linux/sockios.h>
- #include <linux/vmalloc.h>
- #include <linux/workqueue.h>
- #include <net/neighbour.h>
- #include <net/netevent.h>
- #include <net/addrconf.h>
- #include <net/bonding.h>
- #include <asm/uaccess.h>
- #include "cxgb4.h"
- #include "t4_regs.h"
- #include "t4_values.h"
- #include "t4_msg.h"
- #include "t4fw_api.h"
- #include "t4fw_version.h"
- #include "cxgb4_dcb.h"
- #include "cxgb4_debugfs.h"
- #include "clip_tbl.h"
- #include "l2t.h"
- char cxgb4_driver_name[] = KBUILD_MODNAME;
- #ifdef DRV_VERSION
- #undef DRV_VERSION
- #endif
- #define DRV_VERSION "2.0.0-ko"
- const char cxgb4_driver_version[] = DRV_VERSION;
- #define DRV_DESC "Chelsio T4/T5 Network Driver"
- /* Host shadow copy of ingress filter entry. This is in host native format
- * and doesn't match the ordering or bit order, etc. of the hardware or the
- * firmware command. The use of bit-field structure elements is purely to
- * remind ourselves of the field size limitations and save memory in the case
- * where the filter table is large.
- */
- struct filter_entry {
- /* Administrative fields for filter.
- */
- u32 valid:1; /* filter allocated and valid */
- u32 locked:1; /* filter is administratively locked */
- u32 pending:1; /* filter action is pending firmware reply */
- u32 smtidx:8; /* Source MAC Table index for smac */
- struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
- /* The filter itself. Most of this is a straight copy of information
- * provided by the extended ioctl(). Some fields are translated to
- * internal forms -- for instance the Ingress Queue ID passed in from
- * the ioctl() is translated into the Absolute Ingress Queue ID.
- */
- struct ch_filter_specification fs;
- };
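- /* For illustration only (a hedged sketch, not part of the driver): using
- * field names referenced later in set_filter_wr(), a drop filter matching
- * TCP packets to local port 80 could look roughly like:
- *
- * struct ch_filter_specification fs = { 0 };
- * fs.val.lport = 80; fs.mask.lport = 0xffff; (local/destination port)
- * fs.val.proto = IPPROTO_TCP; fs.mask.proto = 0xff; (assumed field widths)
- * fs.action = FILTER_DROP;
- */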
- #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
- NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
- NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
- /* Macros needed to support the PCI Device ID Table ...
- */
- #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
- static const struct pci_device_id cxgb4_pci_tbl[] = {
- #define CH_PCI_DEVICE_ID_FUNCTION 0x4
- /* Include PCI Device IDs for both PF4 and PF0-3 so our PCI probe() routine is
- * called for both.
- */
- #define CH_PCI_DEVICE_ID_FUNCTION2 0x0
- #define CH_PCI_ID_TABLE_ENTRY(devid) \
- {PCI_VDEVICE(CHELSIO, (devid)), 4}
- #define CH_PCI_DEVICE_ID_TABLE_DEFINE_END \
- { 0, } \
- }
- #include "t4_pci_id_tbl.h"
- #define FW4_FNAME "cxgb4/t4fw.bin"
- #define FW5_FNAME "cxgb4/t5fw.bin"
- #define FW4_CFNAME "cxgb4/t4-config.txt"
- #define FW5_CFNAME "cxgb4/t5-config.txt"
- MODULE_DESCRIPTION(DRV_DESC);
- MODULE_AUTHOR("Chelsio Communications");
- MODULE_LICENSE("Dual BSD/GPL");
- MODULE_VERSION(DRV_VERSION);
- MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
- MODULE_FIRMWARE(FW4_FNAME);
- MODULE_FIRMWARE(FW5_FNAME);
- /*
- * Normally we're willing to become the firmware's Master PF but will be happy
- * if another PF has already become the Master and initialized the adapter.
- * Setting "force_init" will cause this driver to forcibly establish itself as
- * the Master PF and initialize the adapter.
- */
- static uint force_init;
- module_param(force_init, uint, 0644);
- MODULE_PARM_DESC(force_init, "Forcibly become Master PF and initialize adapter");
- /*
- * Normally if the firmware we connect to has Configuration File support, we
- * use that and only fall back to the old Driver-based initialization if the
- * Configuration File fails for some reason. If force_old_init is set, then
- * we'll always use the old Driver-based initialization sequence.
- */
- static uint force_old_init;
- module_param(force_old_init, uint, 0644);
- MODULE_PARM_DESC(force_old_init, "Force old initialization sequence, deprecated"
- " parameter");
- static int dflt_msg_enable = DFLT_MSG_ENABLE;
- module_param(dflt_msg_enable, int, 0644);
- MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
- /*
- * The driver uses the best interrupt scheme available on a platform in the
- * order MSI-X, MSI, legacy INTx interrupts. This parameter determines which
- * of these schemes the driver may consider as follows:
- *
- * msi = 2: choose from among all three options
- * msi = 1: only consider MSI and INTx interrupts
- * msi = 0: force INTx interrupts
- */
- static int msi = 2;
- module_param(msi, int, 0644);
- MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
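- /* For example (illustrative): loading the module with "modprobe cxgb4 msi=1"
- * keeps the driver from attempting MSI-X, so it falls back to MSI and, if
- * that fails, to INTx.
- */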
- /*
- * Queue interrupt hold-off timer values. Queues default to the first of these
- * upon creation.
- */
- static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
- module_param_array(intr_holdoff, uint, NULL, 0644);
- MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
- "0..4 in microseconds, deprecated parameter");
- static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
- module_param_array(intr_cnt, uint, NULL, 0644);
- MODULE_PARM_DESC(intr_cnt,
- "thresholds 1..3 for queue interrupt packet counters, "
- "deprecated parameter");
- /*
- * Normally we tell the chip to deliver Ingress Packets into our DMA buffers
- * offset by 2 bytes in order to have the IP headers line up on 4-byte
- * boundaries. This is a requirement for many architectures which will throw
- * a machine check fault if an attempt is made to access one of the 4-byte IP
- * header fields on a non-4-byte boundary. And it's a major performance issue
- * even on some architectures which allow it like some implementations of the
- * x86 ISA. However, some architectures don't mind this and for some very
- * edge-case performance sensitive applications (like forwarding large volumes
- * of small packets), setting this DMA offset to 0 will decrease the number of
- * PCI-E Bus transfers enough to measurably affect performance.
- */
- static int rx_dma_offset = 2;
- static bool vf_acls;
- #ifdef CONFIG_PCI_IOV
- module_param(vf_acls, bool, 0644);
- MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement, "
- "deprecated parameter");
- /* Configure the number of PCI-E Virtual Functions which are to be instantiated
- * on SR-IOV Capable Physical Functions.
- */
- static unsigned int num_vf[NUM_OF_PF_WITH_SRIOV];
- module_param_array(num_vf, uint, NULL, 0644);
- MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
- #endif
- /* TX Queue select used to determine which algorithm to use for selecting TX
- * queue. Select between the kernel-provided function (select_queue=0) and the
- * driver's cxgb_select_queue function (select_queue=1)
- *
- * Default: select_queue=0
- */
- static int select_queue;
- module_param(select_queue, int, 0644);
- MODULE_PARM_DESC(select_queue,
- "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
- static unsigned int tp_vlan_pri_map = HW_TPL_FR_MT_PR_IV_P_FC;
- module_param(tp_vlan_pri_map, uint, 0644);
- MODULE_PARM_DESC(tp_vlan_pri_map, "global compressed filter configuration, "
- "deprecated parameter");
- static struct dentry *cxgb4_debugfs_root;
- static LIST_HEAD(adapter_list);
- static DEFINE_MUTEX(uld_mutex);
- /* Adapter list to be accessed from atomic context */
- static LIST_HEAD(adap_rcu_list);
- static DEFINE_SPINLOCK(adap_rcu_lock);
- static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
- static const char *uld_str[] = { "RDMA", "iSCSI" };
- static void link_report(struct net_device *dev)
- {
- if (!netif_carrier_ok(dev))
- netdev_info(dev, "link down\n");
- else {
- static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
- const char *s = "10Mbps";
- const struct port_info *p = netdev_priv(dev);
- switch (p->link_cfg.speed) {
- case 10000:
- s = "10Gbps";
- break;
- case 1000:
- s = "1000Mbps";
- break;
- case 100:
- s = "100Mbps";
- break;
- case 40000:
- s = "40Gbps";
- break;
- }
- netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
- fc[p->link_cfg.fc]);
- }
- }
- #ifdef CONFIG_CHELSIO_T4_DCB
- /* Set up/tear down Data Center Bridging Priority mapping for a net device. */
- static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
- {
- struct port_info *pi = netdev_priv(dev);
- struct adapter *adap = pi->adapter;
- struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
- int i;
- /* We use a simple mapping of Port TX Queue Index to DCB
- * Priority when we're enabling DCB.
- */
- for (i = 0; i < pi->nqsets; i++, txq++) {
- u32 name, value;
- int err;
- name = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
- FW_PARAMS_PARAM_X_V(
- FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
- FW_PARAMS_PARAM_YZ_V(txq->q.cntxt_id));
- value = enable ? i : 0xffffffff;
- /* Since we can be called while atomic (from "interrupt
- * level") we need to issue the Set Parameters Commannd
- * without sleeping (timeout < 0).
- */
- err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
- &name, &value);
- if (err)
- dev_err(adap->pdev_dev,
- "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
- enable ? "set" : "unset", pi->port_id, i, -err);
- else
- txq->dcb_prio = value;
- }
- }
- #endif /* CONFIG_CHELSIO_T4_DCB */
- void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
- {
- struct net_device *dev = adapter->port[port_id];
- /* Skip changes from disabled ports. */
- if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
- if (link_stat)
- netif_carrier_on(dev);
- else {
- #ifdef CONFIG_CHELSIO_T4_DCB
- cxgb4_dcb_state_init(dev);
- dcb_tx_queue_prio_enable(dev, false);
- #endif /* CONFIG_CHELSIO_T4_DCB */
- netif_carrier_off(dev);
- }
- link_report(dev);
- }
- }
- void t4_os_portmod_changed(const struct adapter *adap, int port_id)
- {
- static const char *mod_str[] = {
- NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
- };
- const struct net_device *dev = adap->port[port_id];
- const struct port_info *pi = netdev_priv(dev);
- if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
- netdev_info(dev, "port module unplugged\n");
- else if (pi->mod_type < ARRAY_SIZE(mod_str))
- netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
- }
- /*
- * Configure the exact and hash address filters to handle a port's multicast
- * and secondary unicast MAC addresses.
- */
- static int set_addr_filters(const struct net_device *dev, bool sleep)
- {
- u64 mhash = 0;
- u64 uhash = 0;
- bool free = true;
- u16 filt_idx[7];
- const u8 *addr[7];
- int ret, naddr = 0;
- const struct netdev_hw_addr *ha;
- int uc_cnt = netdev_uc_count(dev);
- int mc_cnt = netdev_mc_count(dev);
- const struct port_info *pi = netdev_priv(dev);
- unsigned int mb = pi->adapter->fn;
- /* first do the secondary unicast addresses */
- netdev_for_each_uc_addr(ha, dev) {
- addr[naddr++] = ha->addr;
- if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
- ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
- naddr, addr, filt_idx, &uhash, sleep);
- if (ret < 0)
- return ret;
- free = false;
- naddr = 0;
- }
- }
- /* next set up the multicast addresses */
- netdev_for_each_mc_addr(ha, dev) {
- addr[naddr++] = ha->addr;
- if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
- ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
- naddr, addr, filt_idx, &mhash, sleep);
- if (ret < 0)
- return ret;
- free = false;
- naddr = 0;
- }
- }
- return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
- uhash | mhash, sleep);
- }
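- /* Worked example for the batching above (hypothetical count): with 10
- * secondary unicast addresses and ARRAY_SIZE(addr) == 7, the unicast loop
- * calls t4_alloc_mac_filt() twice: once with 7 addresses and free == true
- * (replacing any old filters), then once with the remaining 3 addresses and
- * free == false so the first batch is preserved.
- */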
- int dbfifo_int_thresh = 10; /* 10 == 640 entry threshold */
- module_param(dbfifo_int_thresh, int, 0644);
- MODULE_PARM_DESC(dbfifo_int_thresh, "doorbell fifo interrupt threshold");
- /*
- * usecs to sleep while draining the dbfifo
- */
- static int dbfifo_drain_delay = 1000;
- module_param(dbfifo_drain_delay, int, 0644);
- MODULE_PARM_DESC(dbfifo_drain_delay,
- "usecs to sleep while draining the dbfifo");
- /*
- * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
- * If @mtu is -1 it is left unchanged.
- */
- static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
- {
- int ret;
- struct port_info *pi = netdev_priv(dev);
- ret = set_addr_filters(dev, sleep_ok);
- if (ret == 0)
- ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
- (dev->flags & IFF_PROMISC) ? 1 : 0,
- (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
- sleep_ok);
- return ret;
- }
- /**
- * link_start - enable a port
- * @dev: the port to enable
- *
- * Performs the MAC and PHY actions needed to enable a port.
- */
- static int link_start(struct net_device *dev)
- {
- int ret;
- struct port_info *pi = netdev_priv(dev);
- unsigned int mb = pi->adapter->fn;
- /*
- * We do not set address filters and promiscuity here, the stack does
- * that step explicitly.
- */
- ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
- !!(dev->features & NETIF_F_HW_VLAN_CTAG_RX), true);
- if (ret == 0) {
- ret = t4_change_mac(pi->adapter, mb, pi->viid,
- pi->xact_addr_filt, dev->dev_addr, true,
- true);
- if (ret >= 0) {
- pi->xact_addr_filt = ret;
- ret = 0;
- }
- }
- if (ret == 0)
- ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
- &pi->link_cfg);
- if (ret == 0) {
- local_bh_disable();
- ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
- true, CXGB4_DCB_ENABLED);
- local_bh_enable();
- }
- return ret;
- }
- int cxgb4_dcb_enabled(const struct net_device *dev)
- {
- #ifdef CONFIG_CHELSIO_T4_DCB
- struct port_info *pi = netdev_priv(dev);
- if (!pi->dcb.enabled)
- return 0;
- return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
- (pi->dcb.state == CXGB4_DCB_STATE_HOST));
- #else
- return 0;
- #endif
- }
- EXPORT_SYMBOL(cxgb4_dcb_enabled);
- #ifdef CONFIG_CHELSIO_T4_DCB
- /* Handle a Data Center Bridging update message from the firmware. */
- static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
- {
- int port = FW_PORT_CMD_PORTID_G(ntohl(pcmd->op_to_portid));
- struct net_device *dev = adap->port[port];
- int old_dcb_enabled = cxgb4_dcb_enabled(dev);
- int new_dcb_enabled;
- cxgb4_dcb_handle_fw_update(adap, pcmd);
- new_dcb_enabled = cxgb4_dcb_enabled(dev);
- /* If the DCB has become enabled or disabled on the port then we're
- * going to need to set up/tear down DCB Priority parameters for the
- * TX Queues associated with the port.
- */
- if (new_dcb_enabled != old_dcb_enabled)
- dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
- }
- #endif /* CONFIG_CHELSIO_T4_DCB */
- /* Clear a filter and release any of its resources that we own. This also
- * clears the filter's "pending" status.
- */
- static void clear_filter(struct adapter *adap, struct filter_entry *f)
- {
- /* If the new or old filter has loopback rewriting rules then we'll
- * need to free any existing Layer Two Table (L2T) entries of the old
- * filter rule. The firmware will handle freeing up any Source MAC
- * Table (SMT) entries used for rewriting Source MAC Addresses in
- * loopback rules.
- */
- if (f->l2t)
- cxgb4_l2t_release(f->l2t);
- /* The zeroing of the filter rule below clears the filter valid,
- * pending, locked flags, l2t pointer, etc. so it's all we need for
- * this operation.
- */
- memset(f, 0, sizeof(*f));
- }
- /* Handle a filter write/deletion reply.
- */
- static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
- {
- unsigned int idx = GET_TID(rpl);
- unsigned int nidx = idx - adap->tids.ftid_base;
- unsigned int ret;
- struct filter_entry *f;
- if (idx >= adap->tids.ftid_base && nidx <
- (adap->tids.nftids + adap->tids.nsftids)) {
- idx = nidx;
- ret = TCB_COOKIE_G(rpl->cookie);
- f = &adap->tids.ftid_tab[idx];
- if (ret == FW_FILTER_WR_FLT_DELETED) {
- /* Clear the filter when we get confirmation from the
- * hardware that the filter has been deleted.
- */
- clear_filter(adap, f);
- } else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
- dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
- idx);
- clear_filter(adap, f);
- } else if (ret == FW_FILTER_WR_FLT_ADDED) {
- f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
- f->pending = 0; /* asynchronous setup completed */
- f->valid = 1;
- } else {
- /* Something went wrong. Issue a warning about the
- * problem and clear everything out.
- */
- dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
- idx, ret);
- clear_filter(adap, f);
- }
- }
- }
- /* Response queue handler for the FW event queue.
- */
- static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
- const struct pkt_gl *gl)
- {
- u8 opcode = ((const struct rss_header *)rsp)->opcode;
- rsp++; /* skip RSS header */
- /* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
- */
- if (unlikely(opcode == CPL_FW4_MSG &&
- ((const struct cpl_fw4_msg *)rsp)->type == FW_TYPE_RSSCPL)) {
- rsp++;
- opcode = ((const struct rss_header *)rsp)->opcode;
- rsp++;
- if (opcode != CPL_SGE_EGR_UPDATE) {
- dev_err(q->adap->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
- , opcode);
- goto out;
- }
- }
- if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
- const struct cpl_sge_egr_update *p = (void *)rsp;
- unsigned int qid = EGR_QID_G(ntohl(p->opcode_qid));
- struct sge_txq *txq;
- txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
- txq->restarts++;
- if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
- struct sge_eth_txq *eq;
- eq = container_of(txq, struct sge_eth_txq, q);
- netif_tx_wake_queue(eq->txq);
- } else {
- struct sge_ofld_txq *oq;
- oq = container_of(txq, struct sge_ofld_txq, q);
- tasklet_schedule(&oq->qresume_tsk);
- }
- } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
- const struct cpl_fw6_msg *p = (void *)rsp;
- #ifdef CONFIG_CHELSIO_T4_DCB
- const struct fw_port_cmd *pcmd = (const void *)p->data;
- unsigned int cmd = FW_CMD_OP_G(ntohl(pcmd->op_to_portid));
- unsigned int action =
- FW_PORT_CMD_ACTION_G(ntohl(pcmd->action_to_len16));
- if (cmd == FW_PORT_CMD &&
- action == FW_PORT_ACTION_GET_PORT_INFO) {
- int port = FW_PORT_CMD_PORTID_G(
- be32_to_cpu(pcmd->op_to_portid));
- struct net_device *dev = q->adap->port[port];
- int state_input = ((pcmd->u.info.dcbxdis_pkd &
- FW_PORT_CMD_DCBXDIS_F)
- ? CXGB4_DCB_INPUT_FW_DISABLED
- : CXGB4_DCB_INPUT_FW_ENABLED);
- cxgb4_dcb_state_fsm(dev, state_input);
- }
- if (cmd == FW_PORT_CMD &&
- action == FW_PORT_ACTION_L2_DCB_CFG)
- dcb_rpl(q->adap, pcmd);
- else
- #endif
- if (p->type == 0)
- t4_handle_fw_rpl(q->adap, p->data);
- } else if (opcode == CPL_L2T_WRITE_RPL) {
- const struct cpl_l2t_write_rpl *p = (void *)rsp;
- do_l2t_write_rpl(q->adap, p);
- } else if (opcode == CPL_SET_TCB_RPL) {
- const struct cpl_set_tcb_rpl *p = (void *)rsp;
- filter_rpl(q->adap, p);
- } else
- dev_err(q->adap->pdev_dev,
- "unexpected CPL %#x on FW event queue\n", opcode);
- out:
- return 0;
- }
- /**
- * uldrx_handler - response queue handler for ULD queues
- * @q: the response queue that received the packet
- * @rsp: the response queue descriptor holding the offload message
- * @gl: the gather list of packet fragments
- *
- * Deliver an ingress offload packet to a ULD. All processing is done by
- * the ULD, we just maintain statistics.
- */
- static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
- const struct pkt_gl *gl)
- {
- struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
- /* FW can send CPLs encapsulated in a CPL_FW4_MSG.
- */
- if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
- ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
- rsp += 2;
- if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
- rxq->stats.nomem++;
- return -1;
- }
- if (gl == NULL)
- rxq->stats.imm++;
- else if (gl == CXGB4_MSG_AN)
- rxq->stats.an++;
- else
- rxq->stats.pkts++;
- return 0;
- }
- static void disable_msi(struct adapter *adapter)
- {
- if (adapter->flags & USING_MSIX) {
- pci_disable_msix(adapter->pdev);
- adapter->flags &= ~USING_MSIX;
- } else if (adapter->flags & USING_MSI) {
- pci_disable_msi(adapter->pdev);
- adapter->flags &= ~USING_MSI;
- }
- }
- /*
- * Interrupt handler for non-data events used with MSI-X.
- */
- static irqreturn_t t4_nondata_intr(int irq, void *cookie)
- {
- struct adapter *adap = cookie;
- u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A));
- if (v & PFSW_F) {
- adap->swintr = 1;
- t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE_A), v);
- }
- if (adap->flags & MASTER_PF)
- t4_slow_intr_handler(adap);
- return IRQ_HANDLED;
- }
- /*
- * Name the MSI-X interrupts.
- */
- static void name_msix_vecs(struct adapter *adap)
- {
- int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
- /* non-data interrupts */
- snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
- /* FW events */
- snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
- adap->port[0]->name);
- /* Ethernet queues */
- for_each_port(adap, j) {
- struct net_device *d = adap->port[j];
- const struct port_info *pi = netdev_priv(d);
- for (i = 0; i < pi->nqsets; i++, msi_idx++)
- snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
- d->name, i);
- }
- /* offload queues */
- for_each_ofldrxq(&adap->sge, i)
- snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
- adap->port[0]->name, i);
- for_each_rdmarxq(&adap->sge, i)
- snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
- adap->port[0]->name, i);
- for_each_rdmaciq(&adap->sge, i)
- snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
- adap->port[0]->name, i);
- }
- static int request_msix_queue_irqs(struct adapter *adap)
- {
- struct sge *s = &adap->sge;
- int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
- int msi_index = 2;
- err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
- adap->msix_info[1].desc, &s->fw_evtq);
- if (err)
- return err;
- for_each_ethrxq(s, ethqidx) {
- err = request_irq(adap->msix_info[msi_index].vec,
- t4_sge_intr_msix, 0,
- adap->msix_info[msi_index].desc,
- &s->ethrxq[ethqidx].rspq);
- if (err)
- goto unwind;
- msi_index++;
- }
- for_each_ofldrxq(s, ofldqidx) {
- err = request_irq(adap->msix_info[msi_index].vec,
- t4_sge_intr_msix, 0,
- adap->msix_info[msi_index].desc,
- &s->ofldrxq[ofldqidx].rspq);
- if (err)
- goto unwind;
- msi_index++;
- }
- for_each_rdmarxq(s, rdmaqidx) {
- err = request_irq(adap->msix_info[msi_index].vec,
- t4_sge_intr_msix, 0,
- adap->msix_info[msi_index].desc,
- &s->rdmarxq[rdmaqidx].rspq);
- if (err)
- goto unwind;
- msi_index++;
- }
- for_each_rdmaciq(s, rdmaciqqidx) {
- err = request_irq(adap->msix_info[msi_index].vec,
- t4_sge_intr_msix, 0,
- adap->msix_info[msi_index].desc,
- &s->rdmaciq[rdmaciqqidx].rspq);
- if (err)
- goto unwind;
- msi_index++;
- }
- return 0;
- unwind:
- while (--rdmaciqqidx >= 0)
- free_irq(adap->msix_info[--msi_index].vec,
- &s->rdmaciq[rdmaciqqidx].rspq);
- while (--rdmaqidx >= 0)
- free_irq(adap->msix_info[--msi_index].vec,
- &s->rdmarxq[rdmaqidx].rspq);
- while (--ofldqidx >= 0)
- free_irq(adap->msix_info[--msi_index].vec,
- &s->ofldrxq[ofldqidx].rspq);
- while (--ethqidx >= 0)
- free_irq(adap->msix_info[--msi_index].vec,
- &s->ethrxq[ethqidx].rspq);
- free_irq(adap->msix_info[1].vec, &s->fw_evtq);
- return err;
- }
- static void free_msix_queue_irqs(struct adapter *adap)
- {
- int i, msi_index = 2;
- struct sge *s = &adap->sge;
- free_irq(adap->msix_info[1].vec, &s->fw_evtq);
- for_each_ethrxq(s, i)
- free_irq(adap->msix_info[msi_index++].vec, &s->ethrxq[i].rspq);
- for_each_ofldrxq(s, i)
- free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
- for_each_rdmarxq(s, i)
- free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
- for_each_rdmaciq(s, i)
- free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
- }
- /**
- * cxgb4_write_rss - write the RSS table for a given port
- * @pi: the port
- * @queues: array of queue indices for RSS
- *
- * Sets up the portion of the HW RSS table for the port's VI to distribute
- * packets to the Rx queues in @queues.
- */
- int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
- {
- u16 *rss;
- int i, err;
- const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
- rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
- if (!rss)
- return -ENOMEM;
- /* map the queue indices to queue ids */
- for (i = 0; i < pi->rss_size; i++, queues++)
- rss[i] = q[*queues].rspq.abs_id;
- err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
- pi->rss_size, rss, pi->rss_size);
- kfree(rss);
- return err;
- }
- /**
- * setup_rss - configure RSS
- * @adap: the adapter
- *
- * Sets up RSS for each port.
- */
- static int setup_rss(struct adapter *adap)
- {
- int i, err;
- for_each_port(adap, i) {
- const struct port_info *pi = adap2pinfo(adap, i);
- err = cxgb4_write_rss(pi, pi->rss);
- if (err)
- return err;
- }
- return 0;
- }
- /*
- * Return the channel of the ingress queue with the given qid.
- */
- static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
- {
- qid -= p->ingr_start;
- return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
- }
- /*
- * Wait until all NAPI handlers are descheduled.
- */
- static void quiesce_rx(struct adapter *adap)
- {
- int i;
- for (i = 0; i < adap->sge.ingr_sz; i++) {
- struct sge_rspq *q = adap->sge.ingr_map[i];
- if (q && q->handler) {
- napi_disable(&q->napi);
- local_bh_disable();
- while (!cxgb_poll_lock_napi(q))
- mdelay(1);
- local_bh_enable();
- }
- }
- }
- /* Disable interrupt and napi handler */
- static void disable_interrupts(struct adapter *adap)
- {
- if (adap->flags & FULL_INIT_DONE) {
- t4_intr_disable(adap);
- if (adap->flags & USING_MSIX) {
- free_msix_queue_irqs(adap);
- free_irq(adap->msix_info[0].vec, adap);
- } else {
- free_irq(adap->pdev->irq, adap);
- }
- quiesce_rx(adap);
- }
- }
- /*
- * Enable NAPI scheduling and interrupt generation for all Rx queues.
- */
- static void enable_rx(struct adapter *adap)
- {
- int i;
- for (i = 0; i < adap->sge.ingr_sz; i++) {
- struct sge_rspq *q = adap->sge.ingr_map[i];
- if (!q)
- continue;
- if (q->handler) {
- cxgb_busy_poll_init_lock(q);
- napi_enable(&q->napi);
- }
- /* 0-increment GTS to start the timer and enable interrupts */
- t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
- SEINTARM_V(q->intr_params) |
- INGRESSQID_V(q->cntxt_id));
- }
- }
- static int alloc_ofld_rxqs(struct adapter *adap, struct sge_ofld_rxq *q,
- unsigned int nq, unsigned int per_chan, int msi_idx,
- u16 *ids)
- {
- int i, err;
- for (i = 0; i < nq; i++, q++) {
- if (msi_idx > 0)
- msi_idx++;
- err = t4_sge_alloc_rxq(adap, &q->rspq, false,
- adap->port[i / per_chan],
- msi_idx, q->fl.size ? &q->fl : NULL,
- uldrx_handler);
- if (err)
- return err;
- memset(&q->stats, 0, sizeof(q->stats));
- if (ids)
- ids[i] = q->rspq.abs_id;
- }
- return 0;
- }
- /**
- * setup_sge_queues - configure SGE Tx/Rx/response queues
- * @adap: the adapter
- *
- * Determines how many sets of SGE queues to use and initializes them.
- * We support multiple queue sets per port if we have MSI-X, otherwise
- * just one queue set per port.
- */
- static int setup_sge_queues(struct adapter *adap)
- {
- int err, msi_idx, i, j;
- struct sge *s = &adap->sge;
- bitmap_zero(s->starving_fl, s->egr_sz);
- bitmap_zero(s->txq_maperr, s->egr_sz);
- if (adap->flags & USING_MSIX)
- msi_idx = 1; /* vector 0 is for non-queue interrupts */
- else {
- err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
- NULL, NULL);
- if (err)
- return err;
- msi_idx = -((int)s->intrq.abs_id + 1);
- }
- /* NOTE: If you add/delete any Ingress/Egress Queue allocations in here,
- * don't forget to update the following which need to be
- * synchronized to any changes here.
- *
- * 1. The calculations of MAX_INGQ in cxgb4.h.
- *
- * 2. Update enable_msix/name_msix_vecs/request_msix_queue_irqs
- * to accommodate any new/deleted Ingress Queues
- * which need MSI-X Vectors.
- *
- * 3. Update sge_qinfo_show() to include information on the
- * new/deleted queues.
- */
- err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
- msi_idx, NULL, fwevtq_handler);
- if (err) {
- freeout: t4_free_sge_resources(adap);
- return err;
- }
- for_each_port(adap, i) {
- struct net_device *dev = adap->port[i];
- struct port_info *pi = netdev_priv(dev);
- struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
- struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
- for (j = 0; j < pi->nqsets; j++, q++) {
- if (msi_idx > 0)
- msi_idx++;
- err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
- msi_idx, &q->fl,
- t4_ethrx_handler);
- if (err)
- goto freeout;
- q->rspq.idx = j;
- memset(&q->stats, 0, sizeof(q->stats));
- }
- for (j = 0; j < pi->nqsets; j++, t++) {
- err = t4_sge_alloc_eth_txq(adap, t, dev,
- netdev_get_tx_queue(dev, j),
- s->fw_evtq.cntxt_id);
- if (err)
- goto freeout;
- }
- }
- j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
- for_each_ofldrxq(s, i) {
- err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i],
- adap->port[i / j],
- s->fw_evtq.cntxt_id);
- if (err)
- goto freeout;
- }
- #define ALLOC_OFLD_RXQS(firstq, nq, per_chan, ids) do { \
- err = alloc_ofld_rxqs(adap, firstq, nq, per_chan, msi_idx, ids); \
- if (err) \
- goto freeout; \
- if (msi_idx > 0) \
- msi_idx += nq; \
- } while (0)
- ALLOC_OFLD_RXQS(s->ofldrxq, s->ofldqsets, j, s->ofld_rxq);
- ALLOC_OFLD_RXQS(s->rdmarxq, s->rdmaqs, 1, s->rdma_rxq);
- j = s->rdmaciqs / adap->params.nports; /* rdmaciq queues per channel */
- ALLOC_OFLD_RXQS(s->rdmaciq, s->rdmaciqs, j, s->rdma_ciq);
- #undef ALLOC_OFLD_RXQS
- for_each_port(adap, i) {
- /*
- * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
- * have RDMA queues, and that's the right value.
- */
- err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
- s->fw_evtq.cntxt_id,
- s->rdmarxq[i].rspq.cntxt_id);
- if (err)
- goto freeout;
- }
- t4_write_reg(adap, is_t4(adap->params.chip) ?
- MPS_TRC_RSS_CONTROL_A :
- MPS_T5_TRC_RSS_CONTROL_A,
- RSSCONTROL_V(netdev2pinfo(adap->port[0])->tx_chan) |
- QUEUENUMBER_V(s->ethrxq[0].rspq.abs_id));
- return 0;
- }
- /*
- * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
- * The allocated memory is cleared.
- */
- void *t4_alloc_mem(size_t size)
- {
- void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
- if (!p)
- p = vzalloc(size);
- return p;
- }
- /*
- * Free memory allocated through t4_alloc_mem().
- */
- void t4_free_mem(void *addr)
- {
- if (is_vmalloc_addr(addr))
- vfree(addr);
- else
- kfree(addr);
- }
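- /* Illustrative pairing of the two helpers above (a sketch with hypothetical
- * names): a large table can be allocated without requiring physically
- * contiguous memory and freed with the matching helper:
- *
- * struct filter_entry *tab = t4_alloc_mem(n * sizeof(*tab));
- * if (!tab)
- * return -ENOMEM;
- * ...
- * t4_free_mem(tab); (handles both kzalloc'ed and vzalloc'ed memory)
- */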
- /* Send a Work Request to write the filter at a specified index. We construct
- * a Firmware Filter Work Request to have the work done and put the indicated
- * filter into "pending" mode which will prevent any further actions against
- * it till we get a reply from the firmware on the completion status of the
- * request.
- */
- static int set_filter_wr(struct adapter *adapter, int fidx)
- {
- struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
- struct sk_buff *skb;
- struct fw_filter_wr *fwr;
- unsigned int ftid;
- skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
- /* If the new filter requires loopback Destination MAC and/or VLAN
- * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
- * the filter.
- */
- if (f->fs.newdmac || f->fs.newvlan) {
- /* allocate L2T entry for new filter */
- f->l2t = t4_l2t_alloc_switching(adapter->l2t);
- if (f->l2t == NULL) {
- kfree_skb(skb);
- return -EAGAIN;
- }
- if (t4_l2t_set_switching(adapter, f->l2t, f->fs.vlan,
- f->fs.eport, f->fs.dmac)) {
- cxgb4_l2t_release(f->l2t);
- f->l2t = NULL;
- kfree_skb(skb);
- return -ENOMEM;
- }
- }
- ftid = adapter->tids.ftid_base + fidx;
- fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
- memset(fwr, 0, sizeof(*fwr));
- /* It would be nice to put most of the following in t4_hw.c but most
- * of the work is translating the cxgbtool ch_filter_specification
- * into the Work Request and the definition of that structure is
- * currently in cxgbtool.h which isn't appropriate to pull into the
- * common code. We may eventually try to come up with a more neutral
- * filter specification structure but for now it's easiest to simply
- * put this fairly direct code in line ...
- */
- fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
- fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
- fwr->tid_to_iq =
- htonl(FW_FILTER_WR_TID_V(ftid) |
- FW_FILTER_WR_RQTYPE_V(f->fs.type) |
- FW_FILTER_WR_NOREPLY_V(0) |
- FW_FILTER_WR_IQ_V(f->fs.iq));
- fwr->del_filter_to_l2tix =
- htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
- FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
- FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
- FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
- FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
- FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
- FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
- FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
- FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
- f->fs.newvlan == VLAN_REWRITE) |
- FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
- f->fs.newvlan == VLAN_REWRITE) |
- FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
- FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
- FW_FILTER_WR_PRIO_V(f->fs.prio) |
- FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
- fwr->ethtype = htons(f->fs.val.ethtype);
- fwr->ethtypem = htons(f->fs.mask.ethtype);
- fwr->frag_to_ovlan_vldm =
- (FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
- FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
- FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
- FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
- FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
- FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
- fwr->smac_sel = 0;
- fwr->rx_chan_rx_rpl_iq =
- htons(FW_FILTER_WR_RX_CHAN_V(0) |
- FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
- fwr->maci_to_matchtypem =
- htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
- FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
- FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
- FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
- FW_FILTER_WR_PORT_V(f->fs.val.iport) |
- FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
- FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
- FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
- fwr->ptcl = f->fs.val.proto;
- fwr->ptclm = f->fs.mask.proto;
- fwr->ttyp = f->fs.val.tos;
- fwr->ttypm = f->fs.mask.tos;
- fwr->ivlan = htons(f->fs.val.ivlan);
- fwr->ivlanm = htons(f->fs.mask.ivlan);
- fwr->ovlan = htons(f->fs.val.ovlan);
- fwr->ovlanm = htons(f->fs.mask.ovlan);
- memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
- memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
- memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
- memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
- fwr->lp = htons(f->fs.val.lport);
- fwr->lpm = htons(f->fs.mask.lport);
- fwr->fp = htons(f->fs.val.fport);
- fwr->fpm = htons(f->fs.mask.fport);
- if (f->fs.newsmac)
- memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
- /* Mark the filter as "pending" and ship off the Filter Work Request.
- * When we get the Work Request Reply we'll clear the pending status.
- */
- f->pending = 1;
- set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
- t4_ofld_send(adapter, skb);
- return 0;
- }
- /* Delete the filter at a specified index.
- */
- static int del_filter_wr(struct adapter *adapter, int fidx)
- {
- struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
- struct sk_buff *skb;
- struct fw_filter_wr *fwr;
- unsigned int len, ftid;
- len = sizeof(*fwr);
- ftid = adapter->tids.ftid_base + fidx;
- skb = alloc_skb(len, GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
- fwr = (struct fw_filter_wr *)__skb_put(skb, len);
- t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
- /* Mark the filter as "pending" and ship off the Filter Work Request.
- * When we get the Work Request Reply we'll clear the pending status.
- */
- f->pending = 1;
- t4_mgmt_tx(adapter, skb);
- return 0;
- }
- static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv, select_queue_fallback_t fallback)
- {
- int txq;
- #ifdef CONFIG_CHELSIO_T4_DCB
- /* If Data Center Bridging has been successfully negotiated on this
- * link then we'll use the skb's priority to map it to a TX Queue.
- * The skb's priority is determined via the VLAN Tag Priority Code
- * Point field.
- */
- if (cxgb4_dcb_enabled(dev)) {
- u16 vlan_tci;
- int err;
- err = vlan_get_tag(skb, &vlan_tci);
- if (unlikely(err)) {
- if (net_ratelimit())
- netdev_warn(dev,
- "TX Packet without VLAN Tag on DCB Link\n");
- txq = 0;
- } else {
- txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
- #ifdef CONFIG_CHELSIO_T4_FCOE
- if (skb->protocol == htons(ETH_P_FCOE))
- txq = skb->priority & 0x7;
- #endif /* CONFIG_CHELSIO_T4_FCOE */
- }
- return txq;
- }
- #endif /* CONFIG_CHELSIO_T4_DCB */
- if (select_queue) {
- txq = (skb_rx_queue_recorded(skb)
- ? skb_get_rx_queue(skb)
- : smp_processor_id());
- while (unlikely(txq >= dev->real_num_tx_queues))
- txq -= dev->real_num_tx_queues;
- return txq;
- }
- return fallback(dev, skb) % dev->real_num_tx_queues;
- }
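- /* Worked example for the DCB path above (illustrative, not driver
- * code): a VLAN tag of 0x6005 carries PCP 3 in its top three bits,
- * so (0x6005 & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT == 3 and the skb
- * is steered to TX Queue 3.
- */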
- static inline int is_offload(const struct adapter *adap)
- {
- return adap->params.offload;
- }
- static int closest_timer(const struct sge *s, int time)
- {
- int i, delta, match = 0, min_delta = INT_MAX;
- for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
- delta = time - s->timer_val[i];
- if (delta < 0)
- delta = -delta;
- if (delta < min_delta) {
- min_delta = delta;
- match = i;
- }
- }
- return match;
- }
- static int closest_thres(const struct sge *s, int thres)
- {
- int i, delta, match = 0, min_delta = INT_MAX;
- for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
- delta = thres - s->counter_val[i];
- if (delta < 0)
- delta = -delta;
- if (delta < min_delta) {
- min_delta = delta;
- match = i;
- }
- }
- return match;
- }
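- /* Both helpers above do a simple nearest-value search over the small
- * fixed SGE tables. For example, with an illustrative timer table of
- * {1, 5, 10, 50, 100, 200} microseconds, closest_timer(s, 7) returns
- * index 1 since |7 - 5| = 2 beats |7 - 10| = 3; ties resolve to the
- * lower index because the comparison is strict.
- */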
- /**
- * cxgb4_set_rspq_intr_params - set a queue's interrupt holdoff parameters
- * @q: the Rx queue
- * @us: the hold-off time in us, or 0 to disable timer
- * @cnt: the hold-off packet count, or 0 to disable counter
- *
- * Sets an Rx queue's interrupt hold-off time and packet count. At least
- * one of the two needs to be enabled for the queue to generate interrupts.
- */
- int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
- unsigned int us, unsigned int cnt)
- {
- struct adapter *adap = q->adap;
- if ((us | cnt) == 0)
- cnt = 1;
- if (cnt) {
- int err;
- u32 v, new_idx;
- new_idx = closest_thres(&adap->sge, cnt);
- if (q->desc && q->pktcnt_idx != new_idx) {
- /* the queue has already been created, update it */
- v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
- FW_PARAMS_PARAM_X_V(
- FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
- FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
- err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
- &new_idx);
- if (err)
- return err;
- }
- q->pktcnt_idx = new_idx;
- }
- us = us == 0 ? 6 : closest_timer(&adap->sge, us);
- q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
- return 0;
- }
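- /* Illustrative usage (a sketch, not driver code): cap the interrupt
- * rate of an Rx queue at one interrupt per 50us or per 8 packets,
- * whichever threshold is reached first:
- *
- *	err = cxgb4_set_rspq_intr_params(q, 50, 8);
- *
- * Note that passing us == 0 and cnt == 0 does not disable both
- * mechanisms; the code above quietly falls back to a packet count of
- * 1 so the queue keeps generating interrupts.
- */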
- static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
- {
- const struct port_info *pi = netdev_priv(dev);
- netdev_features_t changed = dev->features ^ features;
- int err;
- if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
- return 0;
- err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
- -1, -1, -1,
- !!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
- if (unlikely(err))
- dev->features = features ^ NETIF_F_HW_VLAN_CTAG_RX;
- return err;
- }
- static int setup_debugfs(struct adapter *adap)
- {
- if (IS_ERR_OR_NULL(adap->debugfs_root))
- return -1;
- #ifdef CONFIG_DEBUG_FS
- t4_setup_debugfs(adap);
- #endif
- return 0;
- }
- /*
- * upper-layer driver support
- */
- /*
- * Allocate an active-open TID and set it to the supplied value.
- */
- int cxgb4_alloc_atid(struct tid_info *t, void *data)
- {
- int atid = -1;
- spin_lock_bh(&t->atid_lock);
- if (t->afree) {
- union aopen_entry *p = t->afree;
- atid = (p - t->atid_tab) + t->atid_base;
- t->afree = p->next;
- p->data = data;
- t->atids_in_use++;
- }
- spin_unlock_bh(&t->atid_lock);
- return atid;
- }
- EXPORT_SYMBOL(cxgb4_alloc_atid);
- /*
- * Release an active-open TID.
- */
- void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
- {
- union aopen_entry *p = &t->atid_tab[atid - t->atid_base];
- spin_lock_bh(&t->atid_lock);
- p->next = t->afree;
- t->afree = p;
- t->atids_in_use--;
- spin_unlock_bh(&t->atid_lock);
- }
- EXPORT_SYMBOL(cxgb4_free_atid);
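- /* Illustrative ULD usage of the pair above (a sketch): the pointer
- * stashed at allocation time is recovered from the atid when the
- * connection reply arrives. "my_ctx" is a hypothetical caller context.
- *
- *	atid = cxgb4_alloc_atid(&adap->tids, my_ctx);
- *	if (atid < 0)
- *		return -ENOMEM;
- *	...
- *	cxgb4_free_atid(&adap->tids, atid);
- */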
- /*
- * Allocate a server TID and set it to the supplied value.
- */
- int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
- {
- int stid;
- spin_lock_bh(&t->stid_lock);
- if (family == PF_INET) {
- stid = find_first_zero_bit(t->stid_bmap, t->nstids);
- if (stid < t->nstids)
- __set_bit(stid, t->stid_bmap);
- else
- stid = -1;
- } else {
- stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
- if (stid < 0)
- stid = -1;
- }
- if (stid >= 0) {
- t->stid_tab[stid].data = data;
- stid += t->stid_base;
- /* IPv6 requires a max of 520 bits or 16 cells in the TCAM.
- * This is equivalent to 4 TIDs; with CLIP enabled it
- * needs 2 TIDs.
- */
- if (family == PF_INET)
- t->stids_in_use++;
- else
- t->stids_in_use += 4;
- }
- spin_unlock_bh(&t->stid_lock);
- return stid;
- }
- EXPORT_SYMBOL(cxgb4_alloc_stid);
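- /* Note on the accounting above: an IPv4 server consumes a single
- * stid, while an IPv6 server reserves an order-2 (4-entry, naturally
- * aligned) region of the bitmap to cover its wider TCAM footprint,
- * which is why stids_in_use grows by 4 in that case.
- */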
- /* Allocate a server filter TID and set it to the supplied value.
- */
- int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
- {
- int stid;
- spin_lock_bh(&t->stid_lock);
- if (family == PF_INET) {
- stid = find_next_zero_bit(t->stid_bmap,
- t->nstids + t->nsftids, t->nstids);
- if (stid < (t->nstids + t->nsftids))
- __set_bit(stid, t->stid_bmap);
- else
- stid = -1;
- } else {
- stid = -1;
- }
- if (stid >= 0) {
- t->stid_tab[stid].data = data;
- stid -= t->nstids;
- stid += t->sftid_base;
- t->stids_in_use++;
- }
- spin_unlock_bh(&t->stid_lock);
- return stid;
- }
- EXPORT_SYMBOL(cxgb4_alloc_sftid);
- /* Release a server TID.
- */
- void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
- {
- /* Is it a server filter TID? */
- if (t->nsftids && (stid >= t->sftid_base)) {
- stid -= t->sftid_base;
- stid += t->nstids;
- } else {
- stid -= t->stid_base;
- }
- spin_lock_bh(&t->stid_lock);
- if (family == PF_INET)
- __clear_bit(stid, t->stid_bmap);
- else
- bitmap_release_region(t->stid_bmap, stid, 2);
- t->stid_tab[stid].data = NULL;
- if (family == PF_INET)
- t->stids_in_use--;
- else
- t->stids_in_use -= 4;
- spin_unlock_bh(&t->stid_lock);
- }
- EXPORT_SYMBOL(cxgb4_free_stid);
- /*
- * Populate a TID_RELEASE WR. Caller must properly size the skb.
- */
- static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
- unsigned int tid)
- {
- struct cpl_tid_release *req;
- set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
- req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
- INIT_TP_WR(req, tid);
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
- }
- /*
- * Queue a TID release request and if necessary schedule a work queue to
- * process it.
- */
- static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
- unsigned int tid)
- {
- void **p = &t->tid_tab[tid];
- struct adapter *adap = container_of(t, struct adapter, tids);
- spin_lock_bh(&adap->tid_release_lock);
- *p = adap->tid_release_head;
- /* Low 2 bits encode the Tx channel number */
- adap->tid_release_head = (void **)((uintptr_t)p | chan);
- if (!adap->tid_release_task_busy) {
- adap->tid_release_task_busy = true;
- queue_work(adap->workq, &adap->tid_release_task);
- }
- spin_unlock_bh(&adap->tid_release_lock);
- }
- /*
- * Process the list of pending TID release requests.
- */
- static void process_tid_release_list(struct work_struct *work)
- {
- struct sk_buff *skb;
- struct adapter *adap;
- adap = container_of(work, struct adapter, tid_release_task);
- spin_lock_bh(&adap->tid_release_lock);
- while (adap->tid_release_head) {
- void **p = adap->tid_release_head;
- unsigned int chan = (uintptr_t)p & 3;
- p = (void *)p - chan;
- adap->tid_release_head = *p;
- *p = NULL;
- spin_unlock_bh(&adap->tid_release_lock);
- while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
- GFP_KERNEL)))
- schedule_timeout_uninterruptible(1);
- mk_tid_release(skb, chan, p - adap->tids.tid_tab);
- t4_ofld_send(adap, skb);
- spin_lock_bh(&adap->tid_release_lock);
- }
- adap->tid_release_task_busy = false;
- spin_unlock_bh(&adap->tid_release_lock);
- }
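- /* A note on the encoding shared by the two functions above: entries
- * of tid_tab are pointer-aligned, so the low 2 bits of a
- * &tid_tab[tid] address are always zero and are free to carry the Tx
- * channel. E.g. for chan == 2, (uintptr_t)p | 2 tags the pointer and
- * (uintptr_t)p & 3 recovers the channel when the list is drained.
- */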
- /*
- * Release a TID and inform HW. If we are unable to allocate the release
- * message we defer to a work queue.
- */
- void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
- {
- void *old;
- struct sk_buff *skb;
- struct adapter *adap = container_of(t, struct adapter, tids);
- old = t->tid_tab[tid];
- skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
- if (likely(skb)) {
- t->tid_tab[tid] = NULL;
- mk_tid_release(skb, chan, tid);
- t4_ofld_send(adap, skb);
- } else
- cxgb4_queue_tid_release(t, chan, tid);
- if (old)
- atomic_dec(&t->tids_in_use);
- }
- EXPORT_SYMBOL(cxgb4_remove_tid);
- /*
- * Allocate and initialize the TID tables. Returns 0 on success.
- */
- static int tid_init(struct tid_info *t)
- {
- size_t size;
- unsigned int stid_bmap_size;
- unsigned int natids = t->natids;
- struct adapter *adap = container_of(t, struct adapter, tids);
- stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
- size = t->ntids * sizeof(*t->tid_tab) +
- natids * sizeof(*t->atid_tab) +
- t->nstids * sizeof(*t->stid_tab) +
- t->nsftids * sizeof(*t->stid_tab) +
- stid_bmap_size * sizeof(long) +
- t->nftids * sizeof(*t->ftid_tab) +
- t->nsftids * sizeof(*t->ftid_tab);
- t->tid_tab = t4_alloc_mem(size);
- if (!t->tid_tab)
- return -ENOMEM;
- t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
- t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
- t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
- t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
- spin_lock_init(&t->stid_lock);
- spin_lock_init(&t->atid_lock);
- t->stids_in_use = 0;
- t->afree = NULL;
- t->atids_in_use = 0;
- atomic_set(&t->tids_in_use, 0);
- /* Setup the free list for atid_tab and clear the stid bitmap. */
- if (natids) {
- while (--natids)
- t->atid_tab[natids - 1].next = &t->atid_tab[natids];
- t->afree = t->atid_tab;
- }
- bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
- /* Reserve stid 0 for T4/T5 adapters */
- if (!t->stid_base &&
- (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
- __set_bit(0, t->stid_bmap);
- return 0;
- }
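- /* The single t4_alloc_mem() block above is carved up in place:
- *
- *	tid_tab[ntids] | atid_tab[natids] | stid_tab[nstids + nsftids]
- *	| stid_bmap | ftid_tab[nftids + nsftids]
- *
- * so one allocation (and later one free) covers every TID table.
- */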
- /**
- * cxgb4_create_server - create an IP server
- * @dev: the device
- * @stid: the server TID
- * @sip: local IP address to bind server to
- * @sport: the server's TCP port
- * @queue: queue to direct messages from this server to
- *
- * Create an IP server for the given port and address.
- * Returns <0 on error and one of the %NET_XMIT_* values on success.
- */
- int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
- __be32 sip, __be16 sport, __be16 vlan,
- unsigned int queue)
- {
- unsigned int chan;
- struct sk_buff *skb;
- struct adapter *adap;
- struct cpl_pass_open_req *req;
- int ret;
- skb = alloc_skb(sizeof(*req), GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
- adap = netdev2adap(dev);
- req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
- INIT_TP_WR(req, 0);
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
- req->local_port = sport;
- req->peer_port = htons(0);
- req->local_ip = sip;
- req->peer_ip = htonl(0);
- chan = rxq_to_chan(&adap->sge, queue);
- req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
- req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
- SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
- ret = t4_mgmt_tx(adap, skb);
- return net_xmit_eval(ret);
- }
- EXPORT_SYMBOL(cxgb4_create_server);
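- /* Illustrative usage (a sketch, not driver code): create a wildcard
- * IPv4 listener on TCP port 80 steered to Rx queue 0, with the stid
- * obtained from cxgb4_alloc_stid() beforehand:
- *
- *	ret = cxgb4_create_server(dev, stid, htonl(INADDR_ANY),
- *	htons(80), 0, 0);
- *
- * Note the vlan argument is accepted but unused in the body above.
- */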
- /**
- * cxgb4_create_server6 - create an IPv6 server
- * @dev: the device
- * @stid: the server TID
- * @sip: local IPv6 address to bind server to
- * @sport: the server's TCP port
- * @queue: queue to direct messages from this server to
- *
- * Create an IPv6 server for the given port and address.
- * Returns <0 on error and one of the %NET_XMIT_* values on success.
- */
- int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
- const struct in6_addr *sip, __be16 sport,
- unsigned int queue)
- {
- unsigned int chan;
- struct sk_buff *skb;
- struct adapter *adap;
- struct cpl_pass_open_req6 *req;
- int ret;
- skb = alloc_skb(sizeof(*req), GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
- adap = netdev2adap(dev);
- req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
- INIT_TP_WR(req, 0);
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
- req->local_port = sport;
- req->peer_port = htons(0);
- req->local_ip_hi = *(__be64 *)(sip->s6_addr);
- req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
- req->peer_ip_hi = cpu_to_be64(0);
- req->peer_ip_lo = cpu_to_be64(0);
- chan = rxq_to_chan(&adap->sge, queue);
- req->opt0 = cpu_to_be64(TX_CHAN_V(chan));
- req->opt1 = cpu_to_be64(CONN_POLICY_V(CPL_CONN_POLICY_ASK) |
- SYN_RSS_ENABLE_F | SYN_RSS_QUEUE_V(queue));
- ret = t4_mgmt_tx(adap, skb);
- return net_xmit_eval(ret);
- }
- EXPORT_SYMBOL(cxgb4_create_server6);
- int cxgb4_remove_server(const struct net_device *dev, unsigned int stid,
- unsigned int queue, bool ipv6)
- {
- struct sk_buff *skb;
- struct adapter *adap;
- struct cpl_close_listsvr_req *req;
- int ret;
- adap = netdev2adap(dev);
- skb = alloc_skb(sizeof(*req), GFP_KERNEL);
- if (!skb)
- return -ENOMEM;
- req = (struct cpl_close_listsvr_req *)__skb_put(skb, sizeof(*req));
- INIT_TP_WR(req, 0);
- OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, stid));
- req->reply_ctrl = htons(NO_REPLY_V(0) | (ipv6 ? LISTSVR_IPV6_V(1) :
- LISTSVR_IPV6_V(0)) | QUEUENO_V(queue));
- ret = t4_mgmt_tx(adap, skb);
- return net_xmit_eval(ret);
- }
- EXPORT_SYMBOL(cxgb4_remove_server);
- /**
- * cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
- * @mtus: the HW MTU table
- * @mtu: the target MTU
- * @idx: index of selected entry in the MTU table
- *
- * Returns the index and the value in the HW MTU table that is closest to
- * but does not exceed @mtu, unless @mtu is smaller than any value in the
- * table, in which case that smallest available value is selected.
- */
- unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
- unsigned int *idx)
- {
- unsigned int i = 0;
- while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
- ++i;
- if (idx)
- *idx = i;
- return mtus[i];
- }
- EXPORT_SYMBOL(cxgb4_best_mtu);
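- /* Worked example (illustrative table): with mtus[] beginning
- * {576, 1500, 9000, ...}, cxgb4_best_mtu(mtus, 4000, &idx) returns
- * 1500 with idx == 1, the largest entry that does not exceed 4000,
- * while a request of 100 returns the smallest entry, 576.
- */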
- /**
- * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
- * @mtus: the HW MTU table
- * @header_size: Header Size
- * @data_size_max: maximum Data Segment Size
- * @data_size_align: desired Data Segment Size Alignment (2^N)
- * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
- *
- * Similar to cxgb4_best_mtu() but instead of searching the Hardware
- * MTU Table based solely on a Maximum MTU parameter, we break that
- * parameter up into a Header Size and Maximum Data Segment Size, and
- * provide a desired Data Segment Size Alignment. If we find an MTU in
- * the Hardware MTU Table which will result in a Data Segment Size with
- * the requested alignment _and_ that MTU isn't "too far" from the
- * closest MTU, then we'll return that rather than the closest MTU.
- */
- unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
- unsigned short header_size,
- unsigned short data_size_max,
- unsigned short data_size_align,
- unsigned int *mtu_idxp)
- {
- unsigned short max_mtu = header_size + data_size_max;
- unsigned short data_size_align_mask = data_size_align - 1;
- int mtu_idx, aligned_mtu_idx;
- /* Scan the MTU Table till we find an MTU which is larger than our
- * Maximum MTU or we reach the end of the table. Along the way,
- * record the last MTU found, if any, which will result in a Data
- * Segment Length matching the requested alignment.
- */
- for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
- unsigned short data_size = mtus[mtu_idx] - header_size;
- /* If this MTU minus the Header Size would result in a
- * Data Segment Size of the desired alignment, remember it.
- */
- if ((data_size & data_size_align_mask) == 0)
- aligned_mtu_idx = mtu_idx;
- /* If we're not at the end of the Hardware MTU Table and the
- * next element is larger than our Maximum MTU, drop out of
- * the loop.
- */
- if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
- break;
- }
- /* If we fell out of the loop because we ran to the end of the table,
- * then we just have to use the last [largest] entry.
- */
- if (mtu_idx == NMTUS)
- mtu_idx--;
- /* If we found an MTU which resulted in the requested Data Segment
- * Length alignment and that's "not far" from the largest MTU which is
- * less than or equal to the maximum MTU, then use that.
- */
- if (aligned_mtu_idx >= 0 &&
- mtu_idx - aligned_mtu_idx <= 1)
- mtu_idx = aligned_mtu_idx;
- /* If the caller has passed in an MTU Index pointer, pass the
- * MTU Index back. Return the MTU value.
- */
- if (mtu_idxp)
- *mtu_idxp = mtu_idx;
- return mtus[mtu_idx];
- }
- EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
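- /* Worked example (illustrative values): with header_size = 40 and
- * data_size_align = 8, an MTU of 1500 yields a Data Segment Size of
- * 1460 (1460 & 7 == 4, unaligned) while 1488 yields 1448 == 181 * 8.
- * If both appear in the table as neighbours and 1500 is the closest
- * fit, the function returns 1488 instead, since the aligned entry is
- * within one index of the closest.
- */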
- /**
- * cxgb4_port_chan - get the HW channel of a port
- * @dev: the net device for the port
- *
- * Return the HW Tx channel of the given port.
- */
- unsigned int cxgb4_port_chan(const struct net_device *dev)
- {
- return netdev2pinfo(dev)->tx_chan;
- }
- EXPORT_SYMBOL(cxgb4_port_chan);
- unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
- {
- struct adapter *adap = netdev2adap(dev);
- u32 v1, v2, lp_count, hp_count;
- v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
- v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
- if (is_t4(adap->params.chip)) {
- lp_count = LP_COUNT_G(v1);
- hp_count = HP_COUNT_G(v1);
- } else {
- lp_count = LP_COUNT_T5_G(v1);
- hp_count = HP_COUNT_T5_G(v2);
- }
- return lpfifo ? lp_count : hp_count;
- }
- EXPORT_SYMBOL(cxgb4_dbfifo_count);
- /**
- * cxgb4_port_viid - get the VI id of a port
- * @dev: the net device for the port
- *
- * Return the VI id of the given port.
- */
- unsigned int cxgb4_port_viid(const struct net_device *dev)
- {
- return netdev2pinfo(dev)->viid;
- }
- EXPORT_SYMBOL(cxgb4_port_viid);
- /**
- * cxgb4_port_idx - get the index of a port
- * @dev: the net device for the port
- *
- * Return the index of the given port.
- */
- unsigned int cxgb4_port_idx(const struct net_device *dev)
- {
- return netdev2pinfo(dev)->port_id;
- }
- EXPORT_SYMBOL(cxgb4_port_idx);
- void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
- struct tp_tcp_stats *v6)
- {
- struct adapter *adap = pci_get_drvdata(pdev);
- spin_lock(&adap->stats_lock);
- t4_tp_get_tcp_stats(adap, v4, v6);
- spin_unlock(&adap->stats_lock);
- }
- EXPORT_SYMBOL(cxgb4_get_tcp_stats);
- void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
- const unsigned int *pgsz_order)
- {
- struct adapter *adap = netdev2adap(dev);
- t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK_A, tag_mask);
- t4_write_reg(adap, ULP_RX_ISCSI_PSZ_A, HPZ0_V(pgsz_order[0]) |
- HPZ1_V(pgsz_order[1]) | HPZ2_V(pgsz_order[2]) |
- HPZ3_V(pgsz_order[3]));
- }
- EXPORT_SYMBOL(cxgb4_iscsi_init);
- int cxgb4_flush_eq_cache(struct net_device *dev)
- {
- struct adapter *adap = netdev2adap(dev);
- int ret;
- ret = t4_fwaddrspace_write(adap, adap->mbox,
- 0xe1000000 + SGE_CTXT_CMD_A, 0x20000000);
- return ret;
- }
- EXPORT_SYMBOL(cxgb4_flush_eq_cache);
- static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
- {
- u32 addr = t4_read_reg(adap, SGE_DBQ_CTXT_BADDR_A) + 24 * qid + 8;
- __be64 indices;
- int ret;
- spin_lock(&adap->win0_lock);
- ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
- sizeof(indices), (__be32 *)&indices,
- T4_MEMORY_READ);
- spin_unlock(&adap->win0_lock);
- if (!ret) {
- *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
- *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
- }
- return ret;
- }
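- /* The hardware Egress Queue context read above is 24 bytes per QID
- * starting at SGE_DBQ_CTXT_BADDR_A in EDC0; the 64-bit word at byte
- * offset 8 packs the consumer index at bit 25 and the producer index
- * at bit 9, which is what the shifts unpack.
- */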
- int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx,
- u16 size)
- {
- struct adapter *adap = netdev2adap(dev);
- u16 hw_pidx, hw_cidx;
- int ret;
- ret = read_eq_indices(adap, qid, &hw_pidx, &hw_cidx);
- if (ret)
- goto out;
- if (pidx != hw_pidx) {
- u16 delta;
- u32 val;
- if (pidx >= hw_pidx)
- delta = pidx - hw_pidx;
- else
- delta = size - hw_pidx + pidx;
- if (is_t4(adap->params.chip))
- val = PIDX_V(delta);
- else
- val = PIDX_T5_V(delta);
- wmb();
- t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
- QID_V(qid) | val);
- }
- out:
- return ret;
- }
- EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
- void cxgb4_disable_db_coalescing(struct net_device *dev)
- {
- struct adapter *adap;
- adap = netdev2adap(dev);
- t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F,
- NOCOALESCE_F);
- }
- EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
- void cxgb4_enable_db_coalescing(struct net_device *dev)
- {
- struct adapter *adap;
- adap = netdev2adap(dev);
- t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, 0);
- }
- EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
- int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
- {
- struct adapter *adap;
- u32 offset, memtype, memaddr;
- u32 edc0_size, edc1_size, mc0_size, mc1_size, size;
- u32 edc0_end, edc1_end, mc0_end, mc1_end;
- int ret;
- adap = netdev2adap(dev);
- offset = ((stag >> 8) * 32) + adap->vres.stag.start;
- /* Figure out where the offset lands in the Memory Type/Address scheme.
- * This code assumes that the memory is laid out starting at offset 0
- * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
- * and EDC1. Some cards will have neither MC0 nor MC1, most cards have
- * MC0, and some have both MC0 and MC1.
- */
- size = t4_read_reg(adap, MA_EDRAM0_BAR_A);
- edc0_size = EDRAM0_SIZE_G(size) << 20;
- size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
- edc1_size = EDRAM1_SIZE_G(size) << 20;
- size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
- mc0_size = EXT_MEM0_SIZE_G(size) << 20;
- edc0_end = edc0_size;
- edc1_end = edc0_end + edc1_size;
- mc0_end = edc1_end + mc0_size;
- if (offset < edc0_end) {
- memtype = MEM_EDC0;
- memaddr = offset;
- } else if (offset < edc1_end) {
- memtype = MEM_EDC1;
- memaddr = offset - edc0_end;
- } else {
- if (offset < mc0_end) {
- memtype = MEM_MC0;
- memaddr = offset - edc1_end;
- } else if (is_t4(adap->params.chip)) {
- /* T4 only has a single memory channel */
- goto err;
- } else {
- size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
- mc1_size = EXT_MEM1_SIZE_G(size) << 20;
- mc1_end = mc0_end + mc1_size;
- if (offset < mc1_end) {
- memtype = MEM_MC1;
- memaddr = offset - mc0_end;
- } else {
- /* offset beyond the end of any memory */
- goto err;
- }
- }
- }
- spin_lock(&adap->win0_lock);
- ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
- spin_unlock(&adap->win0_lock);
- return ret;
- err:
- dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
- stag, offset);
- return -EINVAL;
- }
- EXPORT_SYMBOL(cxgb4_read_tpte);
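- /* Worked example of the decode above (illustrative sizes): with
- * 256MB of EDC0 and 256MB of EDC1, edc0_end = 256MB and
- * edc1_end = 512MB, so a stag offset of 600MB falls past both EDCs
- * and resolves to MEM_MC0 at memaddr 600MB - 512MB = 88MB.
- */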
- u64 cxgb4_read_sge_timestamp(struct net_device *dev)
- {
- u32 hi, lo;
- struct adapter *adap;
- adap = netdev2adap(dev);
- lo = t4_read_reg(adap, SGE_TIMESTAMP_LO_A);
- hi = TSVAL_G(t4_read_reg(adap, SGE_TIMESTAMP_HI_A));
- return ((u64)hi << 32) | (u64)lo;
- }
- EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
- int cxgb4_bar2_sge_qregs(struct net_device *dev,
- unsigned int qid,
- enum cxgb4_bar2_qtype qtype,
- u64 *pbar2_qoffset,
- unsigned int *pbar2_qid)
- {
- return cxgb4_t4_bar2_sge_qregs(netdev2adap(dev),
- qid,
- (qtype == CXGB4_BAR2_QTYPE_EGRESS
- ? T4_BAR2_QTYPE_EGRESS
- : T4_BAR2_QTYPE_INGRESS),
- pbar2_qoffset,
- pbar2_qid);
- }
- EXPORT_SYMBOL(cxgb4_bar2_sge_qregs);
- static struct pci_driver cxgb4_driver;
- static void check_neigh_update(struct neighbour *neigh)
- {
- const struct device *parent;
- const struct net_device *netdev = neigh->dev;
- if (netdev->priv_flags & IFF_802_1Q_VLAN)
- netdev = vlan_dev_real_dev(netdev);
- parent = netdev->dev.parent;
- if (parent && parent->driver == &cxgb4_driver.driver)
- t4_l2t_update(dev_get_drvdata(parent), neigh);
- }
- static int netevent_cb(struct notifier_block *nb, unsigned long event,
- void *data)
- {
- switch (event) {
- case NETEVENT_NEIGH_UPDATE:
- check_neigh_update(data);
- break;
- case NETEVENT_REDIRECT:
- default:
- break;
- }
- return 0;
- }
- static bool netevent_registered;
- static struct notifier_block cxgb4_netevent_nb = {
- .notifier_call = netevent_cb
- };
- static void drain_db_fifo(struct adapter *adap, int usecs)
- {
- u32 v1, v2, lp_count, hp_count;
- do {
- v1 = t4_read_reg(adap, SGE_DBFIFO_STATUS_A);
- v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2_A);
- if (is_t4(adap->params.chip)) {
- lp_count = LP_COUNT_G(v1);
- hp_count = HP_COUNT_G(v1);
- } else {
- lp_count = LP_COUNT_T5_G(v1);
- hp_count = HP_COUNT_T5_G(v2);
- }
- if (lp_count == 0 && hp_count == 0)
- break;
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(usecs_to_jiffies(usecs));
- } while (1);
- }
- static void disable_txq_db(struct sge_txq *q)
- {
- unsigned long flags;
- spin_lock_irqsave(&q->db_lock, flags);
- q->db_disabled = 1;
- spin_unlock_irqrestore(&q->db_lock, flags);
- }
- static void enable_txq_db(struct adapter *adap, struct sge_txq *q)
- {
- spin_lock_irq(&q->db_lock);
- if (q->db_pidx_inc) {
- /* Make sure that all writes to the TX descriptors
- * are committed before we tell HW about them.
- */
- wmb();
- t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
- QID_V(q->cntxt_id) | PIDX_V(q->db_pidx_inc));
- q->db_pidx_inc = 0;
- }
- q->db_disabled = 0;
- spin_unlock_irq(&q->db_lock);
- }
- static void disable_dbs(struct adapter *adap)
- {
- int i;
- for_each_ethrxq(&adap->sge, i)
- disable_txq_db(&adap->sge.ethtxq[i].q);
- for_each_ofldrxq(&adap->sge, i)
- disable_txq_db(&adap->sge.ofldtxq[i].q);
- for_each_port(adap, i)
- disable_txq_db(&adap->sge.ctrlq[i].q);
- }
- static void enable_dbs(struct adapter *adap)
- {
- int i;
- for_each_ethrxq(&adap->sge, i)
- enable_txq_db(adap, &adap->sge.ethtxq[i].q);
- for_each_ofldrxq(&adap->sge, i)
- enable_txq_db(adap, &adap->sge.ofldtxq[i].q);
- for_each_port(adap, i)
- enable_txq_db(adap, &adap->sge.ctrlq[i].q);
- }
- static void notify_rdma_uld(struct adapter *adap, enum cxgb4_control cmd)
- {
- if (adap->uld_handle[CXGB4_ULD_RDMA])
- ulds[CXGB4_ULD_RDMA].control(adap->uld_handle[CXGB4_ULD_RDMA],
- cmd);
- }
- static void process_db_full(struct work_struct *work)
- {
- struct adapter *adap;
- adap = container_of(work, struct adapter, db_full_task);
- drain_db_fifo(adap, dbfifo_drain_delay);
- enable_dbs(adap);
- notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
- t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
- DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
- DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
- }
- static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
- {
- u16 hw_pidx, hw_cidx;
- int ret;
- spin_lock_irq(&q->db_lock);
- ret = read_eq_indices(adap, (u16)q->cntxt_id, &hw_pidx, &hw_cidx);
- if (ret)
- goto out;
- if (q->db_pidx != hw_pidx) {
- u16 delta;
- u32 val;
- if (q->db_pidx >= hw_pidx)
- delta = q->db_pidx - hw_pidx;
- else
- delta = q->size - hw_pidx + q->db_pidx;
- if (is_t4(adap->params.chip))
- val = PIDX_V(delta);
- else
- val = PIDX_T5_V(delta);
- wmb();
- t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL_A),
- QID_V(q->cntxt_id) | val);
- }
- out:
- q->db_disabled = 0;
- q->db_pidx_inc = 0;
- spin_unlock_irq(&q->db_lock);
- if (ret)
- CH_WARN(adap, "DB drop recovery failed.\n");
- }
- static void recover_all_queues(struct adapter *adap)
- {
- int i;
- for_each_ethrxq(&adap->sge, i)
- sync_txq_pidx(adap, &adap->sge.ethtxq[i].q);
- for_each_ofldrxq(&adap->sge, i)
- sync_txq_pidx(adap, &adap->sge.ofldtxq[i].q);
- for_each_port(adap, i)
- sync_txq_pidx(adap, &adap->sge.ctrlq[i].q);
- }
- static void process_db_drop(struct work_struct *work)
- {
- struct adapter *adap;
- adap = container_of(work, struct adapter, db_drop_task);
- if (is_t4(adap->params.chip)) {
- drain_db_fifo(adap, dbfifo_drain_delay);
- notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
- drain_db_fifo(adap, dbfifo_drain_delay);
- recover_all_queues(adap);
- drain_db_fifo(adap, dbfifo_drain_delay);
- enable_dbs(adap);
- notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
- } else {
- u32 dropped_db = t4_read_reg(adap, 0x010ac);
- u16 qid = (dropped_db >> 15) & 0x1ffff;
- u16 pidx_inc = dropped_db & 0x1fff;
- u64 bar2_qoffset;
- unsigned int bar2_qid;
- int ret;
- ret = cxgb4_t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
- &bar2_qoffset, &bar2_qid);
- if (ret)
- dev_err(adap->pdev_dev, "doorbell drop recovery: "
- "qid=%d, pidx_inc=%d\n", qid, pidx_inc);
- else
- writel(PIDX_T5_V(pidx_inc) | QID_V(bar2_qid),
- adap->bar2 + bar2_qoffset + SGE_UDB_KDOORBELL);
- /* Re-enable BAR2 WC */
- t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
- }
- t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
- }
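- /* Summary of the two recovery paths above: on T4 the doorbell FIFO
- * is drained and every TX queue's PIDX is resynchronized from the
- * hardware context, while on T5+ the dropped doorbell itself is
- * re-issued through the queue's BAR2 doorbell register, so no global
- * replay is needed.
- */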
- void t4_db_full(struct adapter *adap)
- {
- if (is_t4(adap->params.chip)) {
- disable_dbs(adap);
- notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
- t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
- DBFIFO_HP_INT_F | DBFIFO_LP_INT_F, 0);
- queue_work(adap->workq, &adap->db_full_task);
- }
- }
- void t4_db_dropped(struct adapter *adap)
- {
- if (is_t4(adap->params.chip)) {
- disable_dbs(adap);
- notify_rdma_uld(adap, CXGB4_CONTROL_DB_FULL);
- }
- queue_work(adap->workq, &adap->db_drop_task);
- }
- static void uld_attach(struct adapter *adap, unsigned int uld)
- {
- void *handle;
- struct cxgb4_lld_info lli;
- unsigned short i;
- lli.pdev = adap->pdev;
- lli.pf = adap->fn;
- lli.l2t = adap->l2t;
- lli.tids = &adap->tids;
- lli.ports = adap->port;
- lli.vr = &adap->vres;
- lli.mtus = adap->params.mtus;
- if (uld == CXGB4_ULD_RDMA) {
- lli.rxq_ids = adap->sge.rdma_rxq;
- lli.ciq_ids = adap->sge.rdma_ciq;
- lli.nrxq = adap->sge.rdmaqs;
- lli.nciq = adap->sge.rdmaciqs;
- } else if (uld == CXGB4_ULD_ISCSI) {
- lli.rxq_ids = adap->sge.ofld_rxq;
- lli.nrxq = adap->sge.ofldqsets;
- }
- lli.ntxq = adap->sge.ofldqsets;
- lli.nchan = adap->params.nports;
- lli.nports = adap->params.nports;
- lli.wr_cred = adap->params.ofldq_wr_cred;
- lli.adapter_type = adap->params.chip;
- lli.iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
- lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
- lli.udb_density = 1 << adap->params.sge.eq_qpp;
- lli.ucq_density = 1 << adap->params.sge.iq_qpp;
- lli.filt_mode = adap->params.tp.vlan_pri_map;
- /* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
- for (i = 0; i < NCHAN; i++)
- lli.tx_modq[i] = i;
- lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
- lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
- lli.fw_vers = adap->params.fw_vers;
- lli.dbfifo_int_thresh = dbfifo_int_thresh;
- lli.sge_ingpadboundary = adap->sge.fl_align;
- lli.sge_egrstatuspagesize = adap->sge.stat_len;
- lli.sge_pktshift = adap->sge.pktshift;
- lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
- lli.max_ordird_qp = adap->params.max_ordird_qp;
- lli.max_ird_adapter = adap->params.max_ird_adapter;
- lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
- handle = ulds[uld].add(&lli);
- if (IS_ERR(handle)) {
- dev_warn(adap->pdev_dev,
- "could not attach to the %s driver, error %ld\n",
- uld_str[uld], PTR_ERR(handle));
- return;
- }
- adap->uld_handle[uld] = handle;
- if (!netevent_registered) {
- register_netevent_notifier(&cxgb4_netevent_nb);
- netevent_registered = true;
- }
- if (adap->flags & FULL_INIT_DONE)
- ulds[uld].state_change(handle, CXGB4_STATE_UP);
- }
- static void attach_ulds(struct adapter *adap)
- {
- unsigned int i;
- spin_lock(&adap_rcu_lock);
- list_add_tail_rcu(&adap->rcu_node, &adap_rcu_list);
- spin_unlock(&adap_rcu_lock);
- mutex_lock(&uld_mutex);
- list_add_tail(&adap->list_node, &adapter_list);
- for (i = 0; i < CXGB4_ULD_MAX; i++)
- if (ulds[i].add)
- uld_attach(adap, i);
- mutex_unlock(&uld_mutex);
- }
- static void detach_ulds(struct adapter *adap)
- {
- unsigned int i;
- mutex_lock(&uld_mutex);
- list_del(&adap->list_node);
- for (i = 0; i < CXGB4_ULD_MAX; i++)
- if (adap->uld_handle[i]) {
- ulds[i].state_change(adap->uld_handle[i],
- CXGB4_STATE_DETACH);
- adap->uld_handle[i] = NULL;
- }
- if (netevent_registered && list_empty(&adapter_list)) {
- unregister_netevent_notifier(&cxgb4_netevent_nb);
- netevent_registered = false;
- }
- mutex_unlock(&uld_mutex);
- spin_lock(&adap_rcu_lock);
- list_del_rcu(&adap->rcu_node);
- spin_unlock(&adap_rcu_lock);
- }
- static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
- {
- unsigned int i;
- mutex_lock(&uld_mutex);
- for (i = 0; i < CXGB4_ULD_MAX; i++)
- if (adap->uld_handle[i])
- ulds[i].state_change(adap->uld_handle[i], new_state);
- mutex_unlock(&uld_mutex);
- }
- /**
- * cxgb4_register_uld - register an upper-layer driver
- * @type: the ULD type
- * @p: the ULD methods
- *
- * Registers an upper-layer driver with this driver and notifies the ULD
- * about any presently available devices that support its type. Returns
- * %-EBUSY if a ULD of the same type is already registered.
- */
- int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
- {
- int ret = 0;
- struct adapter *adap;
- if (type >= CXGB4_ULD_MAX)
- return -EINVAL;
- mutex_lock(&uld_mutex);
- if (ulds[type].add) {
- ret = -EBUSY;
- goto out;
- }
- ulds[type] = *p;
- list_for_each_entry(adap, &adapter_list, list_node)
- uld_attach(adap, type);
- out: mutex_unlock(&uld_mutex);
- return ret;
- }
- EXPORT_SYMBOL(cxgb4_register_uld);
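- /* Illustrative registration (a sketch): a ULD module supplies at
- * least the methods this file invokes (.add, .state_change,
- * .control). The names below are hypothetical.
- *
- *	static struct cxgb4_uld_info my_uld_info = {
- *	.add = my_add,
- *	.state_change = my_state_change,
- *	.control = my_control,
- *	};
- *	...
- *	ret = cxgb4_register_uld(CXGB4_ULD_RDMA, &my_uld_info);
- *
- * A second registration of the same type fails with -EBUSY until
- * cxgb4_unregister_uld() is called.
- */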
- /**
- * cxgb4_unregister_uld - unregister an upper-layer driver
- * @type: the ULD type
- *
- * Unregisters an existing upper-layer driver.
- */
- int cxgb4_unregister_uld(enum cxgb4_uld type)
- {
- struct adapter *adap;
- if (type >= CXGB4_ULD_MAX)
- return -EINVAL;
- mutex_lock(&uld_mutex);
- list_for_each_entry(adap, &adapter_list, list_node)
- adap->uld_handle[type] = NULL;
- ulds[type].add = NULL;
- mutex_unlock(&uld_mutex);
- return 0;
- }
- EXPORT_SYMBOL(cxgb4_unregister_uld);
- #if IS_ENABLED(CONFIG_IPV6)
- static int cxgb4_inet6addr_handler(struct notifier_block *this,
- unsigned long event, void *data)
- {
- struct inet6_ifaddr *ifa = data;
- struct net_device *event_dev = ifa->idev->dev;
- const struct device *parent = NULL;
- #if IS_ENABLED(CONFIG_BONDING)
- struct adapter *adap;
- #endif
- if (event_dev->priv_flags & IFF_802_1Q_VLAN)
- event_dev = vlan_dev_real_dev(event_dev);
- #if IS_ENABLED(CONFIG_BONDING)
- if (event_dev->flags & IFF_MASTER) {
- list_for_each_entry(adap, &adapter_list, list_node) {
- switch (event) {
- case NETDEV_UP:
- cxgb4_clip_get(adap->port[0],
- (const u32 *)ifa, 1);
- break;
- case NETDEV_DOWN:
- cxgb4_clip_release(adap->port[0],
- (const u32 *)ifa, 1);
- break;
- default:
- break;
- }
- }
- return NOTIFY_OK;
- }
- #endif
- if (event_dev)
- parent = event_dev->dev.parent;
- if (parent && parent->driver == &cxgb4_driver.driver) {
- switch (event) {
- case NETDEV_UP:
- cxgb4_clip_get(event_dev, (const u32 *)ifa, 1);
- break;
- case NETDEV_DOWN:
- cxgb4_clip_release(event_dev, (const u32 *)ifa, 1);
- break;
- default:
- break;
- }
- }
- return NOTIFY_OK;
- }
- static bool inet6addr_registered;
- static struct notifier_block cxgb4_inet6addr_notifier = {
- .notifier_call = cxgb4_inet6addr_handler
- };
- static void update_clip(const struct adapter *adap)
- {
- int i;
- struct net_device *dev;
- int ret;
- rcu_read_lock();
- for (i = 0; i < MAX_NPORTS; i++) {
- dev = adap->port[i];
- ret = 0;
- if (dev)
- ret = cxgb4_update_root_dev_clip(dev);
- if (ret < 0)
- break;
- }
- rcu_read_unlock();
- }
- #endif /* IS_ENABLED(CONFIG_IPV6) */
- /**
- * cxgb_up - enable the adapter
- * @adap: adapter being enabled
- *
- * Called when the first port is enabled, this function performs the
- * actions necessary to make an adapter operational, such as completing
- * the initialization of HW modules, and enabling interrupts.
- *
- * Must be called with the rtnl lock held.
- */
- static int cxgb_up(struct adapter *adap)
- {
- int err;
- err = setup_sge_queues(adap);
- if (err)
- goto out;
- err = setup_rss(adap);
- if (err)
- goto freeq;
- if (adap->flags & USING_MSIX) {
- name_msix_vecs(adap);
- err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
- adap->msix_info[0].desc, adap);
- if (err)
- goto irq_err;
- err = request_msix_queue_irqs(adap);
- if (err) {
- free_irq(adap->msix_info[0].vec, adap);
- goto irq_err;
- }
- } else {
- err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
- (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
- adap->port[0]->name, adap);
- if (err)
- goto irq_err;
- }
- enable_rx(adap);
- t4_sge_start(adap);
- t4_intr_enable(adap);
- adap->flags |= FULL_INIT_DONE;
- notify_ulds(adap, CXGB4_STATE_UP);
- #if IS_ENABLED(CONFIG_IPV6)
- update_clip(adap);
- #endif
- out:
- return err;
- irq_err:
- dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
- freeq:
- t4_free_sge_resources(adap);
- goto out;
- }
- static void cxgb_down(struct adapter *adapter)
- {
- cancel_work_sync(&adapter->tid_release_task);
- cancel_work_sync(&adapter->db_full_task);
- cancel_work_sync(&adapter->db_drop_task);
- adapter->tid_release_task_busy = false;
- adapter->tid_release_head = NULL;
- t4_sge_stop(adapter);
- t4_free_sge_resources(adapter);
- adapter->flags &= ~FULL_INIT_DONE;
- }
- /*
- * net_device operations
- */
- static int cxgb_open(struct net_device *dev)
- {
- int err;
- struct port_info *pi = netdev_priv(dev);
- struct adapter *adapter = pi->adapter;
- netif_carrier_off(dev);
- if (!(adapter->flags & FULL_INIT_DONE)) {
- err = cxgb_up(adapter);
- if (err < 0)
- return err;
- }
- err = link_start(dev);
- if (!err)
- netif_tx_start_all_queues(dev);
- return err;
- }
- static int cxgb_close(struct net_device *dev)
- {
- struct port_info *pi = netdev_priv(dev);
- struct adapter *adapter = pi->adapter;
- netif_tx_stop_all_queues(dev);
- netif_carrier_off(dev);
- return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
- }
- /* Return an error number if the indicated filter isn't writable ...
- */
- static int writable_filter(struct filter_entry *f)
- {
- if (f->locked)
- return -EPERM;
- if (f->pending)
- return -EBUSY;
- return 0;
- }
- /* Delete the filter at the specified index (if valid). This checks for all
- * the common problems with doing this, like the filter being locked, currently
- * pending in another operation, etc.
- */
- static int delete_filter(struct adapter *adapter, unsigned int fidx)
- {
- struct filter_entry *f;
- int ret;
- if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
- return -EINVAL;
- f = &adapter->tids.ftid_tab[fidx];
- ret = writable_filter(f);
- if (ret)
- return ret;
- if (f->valid)
- return del_filter_wr(adapter, fidx);
- return 0;
- }
- int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
- __be32 sip, __be16 sport, __be16 vlan,
- unsigned int queue, unsigned char port, unsigned char mask)
- {
- int ret;
- struct filter_entry *f;
- struct adapter *adap;
- int i;
- u8 *val;
- adap = netdev2adap(dev);
- /* Adjust stid to correct filter index */
- stid -= adap->tids.sftid_base;
- stid += adap->tids.nftids;
- /* Check to make sure the filter requested is writable ...
- */
- f = &adap->tids.ftid_tab[stid];
- ret = writable_filter(f);
- if (ret)
- return ret;
- /* Clear out any old resources being used by the filter before
- * we start constructing the new filter.
- */
- if (f->valid)
- clear_filter(adap, f);
- /* Clear out filter specifications */
- memset(&f->fs, 0, sizeof(struct ch_filter_specification));
- f->fs.val.lport = cpu_to_be16(sport);
- f->fs.mask.lport = ~0;
- val = (u8 *)&sip;
- if ((val[0] | val[1] | val[2] | val[3]) != 0) {
- for (i = 0; i < 4; i++) {
- f->fs.val.lip[i] = val[i];
- f->fs.mask.lip[i] = ~0;
- }
- if (adap->params.tp.vlan_pri_map & PORT_F) {
- f->fs.val.iport = port;
- f->fs.mask.iport = mask;
- }
- }
- if (adap->params.tp.vlan_pri_map & PROTOCOL_F) {
- f->fs.val.proto = IPPROTO_TCP;
- f->fs.mask.proto = ~0;
- }
- f->fs.dirsteer = 1;
- f->fs.iq = queue;
- /* Mark filter as locked */
- f->locked = 1;
- f->fs.rpttid = 1;
- ret = set_filter_wr(adap, stid);
- if (ret) {
- clear_filter(adap, f);
- return ret;
- }
- return 0;
- }
- EXPORT_SYMBOL(cxgb4_create_server_filter);
- int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
- unsigned int queue, bool ipv6)
- {
- int ret;
- struct filter_entry *f;
- struct adapter *adap;
- adap = netdev2adap(dev);
- /* Adjust stid to correct filter index */
- stid -= adap->tids.sftid_base;
- stid += adap->tids.nftids;
- f = &adap->tids.ftid_tab[stid];
- /* Unlock the filter */
- f->locked = 0;
- ret = delete_filter(adap, stid);
- if (ret)
- return ret;
- return 0;
- }
- EXPORT_SYMBOL(cxgb4_remove_server_filter);
- static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
- struct rtnl_link_stats64 *ns)
- {
- struct port_stats stats;
- struct port_info *p = netdev_priv(dev);
- struct adapter *adapter = p->adapter;
- /* Block retrieving statistics during EEH error
- * recovery. Otherwise, the recovery might fail
- * and the PCI device will be removed permanently
- */
- spin_lock(&adapter->stats_lock);
- if (!netif_device_present(dev)) {
- spin_unlock(&adapter->stats_lock);
- return ns;
- }
- t4_get_port_stats(adapter, p->tx_chan, &stats);
- spin_unlock(&adapter->stats_lock);
- ns->tx_bytes = stats.tx_octets;
- ns->tx_packets = stats.tx_frames;
- ns->rx_bytes = stats.rx_octets;
- ns->rx_packets = stats.rx_frames;
- ns->multicast = stats.rx_mcast_frames;
- /* detailed rx_errors */
- ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
- stats.rx_runt;
- ns->rx_over_errors = 0;
- ns->rx_crc_errors = stats.rx_fcs_err;
- ns->rx_frame_errors = stats.rx_symbol_err;
- ns->rx_fifo_errors = stats.rx_ovflow0 + stats.rx_ovflow1 +
- stats.rx_ovflow2 + stats.rx_ovflow3 +
- stats.rx_trunc0 + stats.rx_trunc1 +
- stats.rx_trunc2 + stats.rx_trunc3;
- ns->rx_missed_errors = 0;
- /* detailed tx_errors */
- ns->tx_aborted_errors = 0;
- ns->tx_carrier_errors = 0;
- ns->tx_fifo_errors = 0;
- ns->tx_heartbeat_errors = 0;
- ns->tx_window_errors = 0;
- ns->tx_errors = stats.tx_error_frames;
- ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
- ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
- return ns;
- }
- static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
- {
- unsigned int mbox;
- int ret = 0, prtad, devad;
- struct port_info *pi = netdev_priv(dev);
- struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
- switch (cmd) {
- case SIOCGMIIPHY:
- if (pi->mdio_addr < 0)
- return -EOPNOTSUPP;
- data->phy_id = pi->mdio_addr;
- break;
- case SIOCGMIIREG:
- case SIOCSMIIREG:
- if (mdio_phy_id_is_c45(data->phy_id)) {
- prtad = mdio_phy_id_prtad(data->phy_id);
- devad = mdio_phy_id_devad(data->phy_id);
- } else if (data->phy_id < 32) {
- prtad = data->phy_id;
- devad = 0;
- data->reg_num &= 0x1f;
- } else
- return -EINVAL;
- mbox = pi->adapter->fn;
- if (cmd == SIOCGMIIREG)
- ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
- data->reg_num, &data->val_out);
- else
- ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
- data->reg_num, data->val_in);
- break;
- default:
- return -EOPNOTSUPP;
- }
- return ret;
- }
- static void cxgb_set_rxmode(struct net_device *dev)
- {
- /* unfortunately we can't return errors to the stack */
- set_rxmode(dev, -1, false);
- }
- static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
- {
- int ret;
- struct port_info *pi = netdev_priv(dev);
- if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
- return -EINVAL;
- ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
- -1, -1, -1, true);
- if (!ret)
- dev->mtu = new_mtu;
- return ret;
- }
- static int cxgb_set_mac_addr(struct net_device *dev, void *p)
- {
- int ret;
- struct sockaddr *addr = p;
- struct port_info *pi = netdev_priv(dev);
- if (!is_valid_ether_addr(addr->sa_data))
- return -EADDRNOTAVAIL;
- ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
- pi->xact_addr_filt, addr->sa_data, true, true);
- if (ret < 0)
- return ret;
- memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
- pi->xact_addr_filt = ret;
- return 0;
- }
- #ifdef CONFIG_NET_POLL_CONTROLLER
- static void cxgb_netpoll(struct net_device *dev)
- {
- struct port_info *pi = netdev_priv(dev);
- struct adapter *adap = pi->adapter;
- if (adap->flags & USING_MSIX) {
- int i;
- struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
- for (i = pi->nqsets; i; i--, rx++)
- t4_sge_intr_msix(0, &rx->rspq);
- } else
- t4_intr_handler(adap)(0, adap);
- }
- #endif
- static const struct net_device_ops cxgb4_netdev_ops = {
- .ndo_open = cxgb_open,
- .ndo_stop = cxgb_close,
- .ndo_start_xmit = t4_eth_xmit,
- .ndo_select_queue = cxgb_select_queue,
- .ndo_get_stats64 = cxgb_get_stats,
- .ndo_set_rx_mode = cxgb_set_rxmode,
- .ndo_set_mac_address = cxgb_set_mac_addr,
- .ndo_set_features = cxgb_set_features,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_do_ioctl = cxgb_ioctl,
- .ndo_change_mtu = cxgb_change_mtu,
- #ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = cxgb_netpoll,
- #endif
- #ifdef CONFIG_CHELSIO_T4_FCOE
- .ndo_fcoe_enable = cxgb_fcoe_enable,
- .ndo_fcoe_disable = cxgb_fcoe_disable,
- #endif /* CONFIG_CHELSIO_T4_FCOE */
- #ifdef CONFIG_NET_RX_BUSY_POLL
- .ndo_busy_poll = cxgb_busy_poll,
- #endif
- };
- void t4_fatal_err(struct adapter *adap)
- {
- t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
- t4_intr_disable(adap);
- dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
- }
- /* Return the specified PCI-E Configuration Space register from our Physical
- * Function. We try first via a Firmware LDST Command since we prefer to let
- * the firmware own all of these registers, but if that fails we read it
- * directly ourselves.
- */
- static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
- {
- struct fw_ldst_cmd ldst_cmd;
- u32 val;
- int ret;
- /* Construct and send the Firmware LDST Command to retrieve the
- * specified PCI-E Configuration Space register.
- */
- memset(&ldst_cmd, 0, sizeof(ldst_cmd));
- ldst_cmd.op_to_addrspace =
- htonl(FW_CMD_OP_V(FW_LDST_CMD) |
- FW_CMD_REQUEST_F |
- FW_CMD_READ_F |
- FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE));
- ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
- ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
- ldst_cmd.u.pcie.ctrl_to_fn =
- (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->fn));
- ldst_cmd.u.pcie.r = reg;
- ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
- &ldst_cmd);
- /* If the LDST Command succeeded, extract the returned register
- * value. Otherwise read it directly ourselves.
- */
- if (ret == 0)
- val = ntohl(ldst_cmd.u.pcie.data[0]);
- else
- t4_hw_pci_read_cfg4(adap, reg, &val);
- return val;
- }
- static void setup_memwin(struct adapter *adap)
- {
- u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;
- if (is_t4(adap->params.chip)) {
- u32 bar0;
- /* Truncation intentional: we only read the bottom 32 bits of
- * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
- * mechanism to read BAR0 instead of using
- * pci_resource_start() because we could be operating from
- * within a Virtual Machine which is trapping our accesses to
- * our Configuration Space and we need to set up the PCI-E
- * Memory Window decoders with the actual addresses which will
- * be coming across the PCI-E link.
- */
- bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
- bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
- adap->t4_bar0 = bar0;
- mem_win0_base = bar0 + MEMWIN0_BASE;
- mem_win1_base = bar0 + MEMWIN1_BASE;
- mem_win2_base = bar0 + MEMWIN2_BASE;
- mem_win2_aperture = MEMWIN2_APERTURE;
- } else {
- /* For T5, only relative offset inside the PCIe BAR is passed */
- mem_win0_base = MEMWIN0_BASE;
- mem_win1_base = MEMWIN1_BASE;
- mem_win2_base = MEMWIN2_BASE_T5;
- mem_win2_aperture = MEMWIN2_APERTURE_T5;
- }
- t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 0),
- mem_win0_base | BIR_V(0) |
- WINDOW_V(ilog2(MEMWIN0_APERTURE) - 10));
- t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 1),
- mem_win1_base | BIR_V(0) |
- WINDOW_V(ilog2(MEMWIN1_APERTURE) - 10));
- t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2),
- mem_win2_base | BIR_V(0) |
- WINDOW_V(ilog2(mem_win2_aperture) - 10));
- t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 2));
- }
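- /* The WINDOW_V() encodings above express each aperture as a power of
- * two in KB units: for an illustrative 64KB window,
- * ilog2(65536) - 10 == 6 is the value programmed into the register.
- */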
- static void setup_memwin_rdma(struct adapter *adap)
- {
- if (adap->vres.ocq.size) {
- u32 start;
- unsigned int sz_kb;
- start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
- start &= PCI_BASE_ADDRESS_MEM_MASK;
- start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
- sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
- t4_write_reg(adap,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, 3),
- start | BIR_V(1) | WINDOW_V(ilog2(sz_kb)));
- t4_write_reg(adap,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3),
- adap->vres.ocq.start);
- t4_read_reg(adap,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, 3));
- }
- }
- static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
- {
- u32 v;
- int ret;
- /* get device capabilities */
- memset(c, 0, sizeof(*c));
- c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_READ_F);
- c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
- ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
- if (ret < 0)
- return ret;
- /* select capabilities we'll be using */
- if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
- if (!vf_acls)
- c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
- else
- c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
- } else if (vf_acls) {
- dev_err(adap->pdev_dev, "virtualization ACLs not supported");
- return ret;
- }
- c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
- ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
- if (ret < 0)
- return ret;
- ret = t4_config_glbl_rss(adap, adap->fn,
- FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
- FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
- FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
- if (ret < 0)
- return ret;
- ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, adap->sge.egr_sz, 64,
- MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
- FW_CMD_CAP_PF);
- if (ret < 0)
- return ret;
- t4_sge_init(adap);
- /* tweak some settings */
- t4_write_reg(adap, TP_SHIFT_CNT_A, 0x64f8849);
- t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(PAGE_SHIFT - 12));
- t4_write_reg(adap, TP_PIO_ADDR_A, TP_INGRESS_CONFIG_A);
- v = t4_read_reg(adap, TP_PIO_DATA_A);
- t4_write_reg(adap, TP_PIO_DATA_A, v & ~CSUM_HAS_PSEUDO_HDR_F);
- /* first 4 Tx modulation queues point to consecutive Tx channels */
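- /* 0xE4 is 0b11100100: read as four 2-bit fields it maps modulation
- * queue 0 -> channel 0, 1 -> 1, 2 -> 2, 3 -> 3 (the identity map).
- */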
- adap->params.tp.tx_modq_map = 0xE4;
- t4_write_reg(adap, TP_TX_MOD_QUEUE_REQ_MAP_A,
- TX_MOD_QUEUE_REQ_MAP_V(adap->params.tp.tx_modq_map));
- /* associate each Tx modulation queue with consecutive Tx channels */
- v = 0x84218421;
- t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- &v, 1, TP_TX_SCHED_HDR_A);
- t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- &v, 1, TP_TX_SCHED_FIFO_A);
- t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A,
- &v, 1, TP_TX_SCHED_PCMD_A);
- #define T4_TX_MODQ_10G_WEIGHT_DEFAULT 16 /* in KB units */
- if (is_offload(adap)) {
- t4_write_reg(adap, TP_TX_MOD_QUEUE_WEIGHT0_A,
- TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
- TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
- TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
- TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
- t4_write_reg(adap, TP_TX_MOD_CHANNEL_WEIGHT_A,
- TX_MODQ_WEIGHT0_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
- TX_MODQ_WEIGHT1_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
- TX_MODQ_WEIGHT2_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT) |
- TX_MODQ_WEIGHT3_V(T4_TX_MODQ_10G_WEIGHT_DEFAULT));
- }
- /* get basic stuff going */
- return t4_early_init(adap, adap->fn);
- }
- /*
- * Max # of ATIDs. The absolute HW max is 16K but we keep it lower.
- */
- #define MAX_ATIDS 8192U
- /*
- * Tweak configuration based on module parameters, etc. Most of these have
- * defaults assigned to them by Firmware Configuration Files (if we're using
- * them) but need to be explicitly set if we're using hard-coded
- * initialization. But even in the case of using Firmware Configuration
- * Files, we'd like to expose the ability to change these via module
- * parameters so these are essentially common tweaks/settings for
- * Configuration Files and hard-coded initialization ...
- */
- static int adap_init0_tweaks(struct adapter *adapter)
- {
- /*
- * Fix up various Host-Dependent Parameters like Page Size, Cache
- * Line Size, etc. The firmware default is for a 4KB Page Size and
- * 64B Cache Line Size ...
- */
- t4_fixup_host_params(adapter, PAGE_SIZE, L1_CACHE_BYTES);
- /*
- * Process module parameters which affect early initialization.
- */
- if (rx_dma_offset != 2 && rx_dma_offset != 0) {
- dev_err(&adapter->pdev->dev,
- "Ignoring illegal rx_dma_offset=%d, using 2\n",
- rx_dma_offset);
- rx_dma_offset = 2;
- }
- t4_set_reg_field(adapter, SGE_CONTROL_A,
- PKTSHIFT_V(PKTSHIFT_M),
- PKTSHIFT_V(rx_dma_offset));
- /*
- * Don't include the "IP Pseudo Header" in CPL_RX_PKT checksums: Linux
- * adds the pseudo header itself.
- */
- t4_tp_wr_bits_indirect(adapter, TP_INGRESS_CONFIG_A,
- CSUM_HAS_PSEUDO_HDR_F, 0);
- return 0;
- }
- /*
- * Attempt to initialize the adapter via a Firmware Configuration File.
- */
- static int adap_init0_config(struct adapter *adapter, int reset)
- {
- struct fw_caps_config_cmd caps_cmd;
- const struct firmware *cf;
- unsigned long mtype = 0, maddr = 0;
- u32 finiver, finicsum, cfcsum;
- int ret;
- int config_issued = 0;
- char *fw_config_file, fw_config_file_path[256];
- char *config_name = NULL;
- /*
- * Reset device if necessary.
- */
- if (reset) {
- ret = t4_fw_reset(adapter, adapter->mbox,
- PIORSTMODE_F | PIORST_F);
- if (ret < 0)
- goto bye;
- }
- /*
- * If we have a T4 configuration file under /lib/firmware/cxgb4/,
- * then use that. Otherwise, use the configuration file stored
- * in the adapter flash ...
- */
- switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
- case CHELSIO_T4:
- fw_config_file = FW4_CFNAME;
- break;
- case CHELSIO_T5:
- fw_config_file = FW5_CFNAME;
- break;
- default:
- dev_err(adapter->pdev_dev, "Device %d is not supported\n",
- adapter->pdev->device);
- ret = -EINVAL;
- goto bye;
- }
- ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
- if (ret < 0) {
- config_name = "On FLASH";
- mtype = FW_MEMTYPE_CF_FLASH;
- maddr = t4_flash_cfg_addr(adapter);
- } else {
- u32 params[7], val[7];
- sprintf(fw_config_file_path,
- "/lib/firmware/%s", fw_config_file);
- config_name = fw_config_file_path;
- if (cf->size >= FLASH_CFG_MAX_SIZE)
- ret = -ENOMEM;
- else {
- params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
- ret = t4_query_params(adapter, adapter->mbox,
- adapter->fn, 0, 1, params, val);
- if (ret == 0) {
- /*
- * For t4_memory_rw() below addresses and
- * sizes have to be in terms of multiples of 4
- * bytes. So, if the Configuration File isn't
- * a multiple of 4 bytes in length we'll have
- * to write that out separately since we can't
- * guarantee that the bytes following the
- * residual byte in the buffer returned by
- * request_firmware() are zeroed out ...
- */
- size_t resid = cf->size & 0x3;
- size_t size = cf->size & ~0x3;
- __be32 *data = (__be32 *)cf->data;
- mtype = FW_PARAMS_PARAM_Y_G(val[0]);
- maddr = FW_PARAMS_PARAM_Z_G(val[0]) << 16;
- spin_lock(&adapter->win0_lock);
- ret = t4_memory_rw(adapter, 0, mtype, maddr,
- size, data, T4_MEMORY_WRITE);
- if (ret == 0 && resid != 0) {
- union {
- __be32 word;
- char buf[4];
- } last;
- int i;
- last.word = data[size >> 2];
- for (i = resid; i < 4; i++)
- last.buf[i] = 0;
- ret = t4_memory_rw(adapter, 0, mtype,
- maddr + size,
- 4, &last.word,
- T4_MEMORY_WRITE);
- }
- spin_unlock(&adapter->win0_lock);
- }
- }
- release_firmware(cf);
- if (ret)
- goto bye;
- }
- /*
- * Issue a Capability Configuration command to the firmware to get it
- * to parse the Configuration File. We don't use t4_fw_config_file()
- * because we want the ability to modify various features after we've
- * processed the configuration file ...
- */
- memset(&caps_cmd, 0, sizeof(caps_cmd));
- caps_cmd.op_to_write =
- htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST_F |
- FW_CMD_READ_F);
- caps_cmd.cfvalid_to_len16 =
- htonl(FW_CAPS_CONFIG_CMD_CFVALID_F |
- FW_CAPS_CONFIG_CMD_MEMTYPE_CF_V(mtype) |
- FW_CAPS_CONFIG_CMD_MEMADDR64K_CF_V(maddr >> 16) |
- FW_LEN16(caps_cmd));
- ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
- &caps_cmd);
- /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
- * Configuration File in FLASH), our last gasp effort is to use the
- * Firmware Configuration File which is embedded in the firmware. A
- * very few early versions of the firmware didn't have one embedded
- * but we can ignore those.
- */
- if (ret == -ENOENT) {
- memset(&caps_cmd, 0, sizeof(caps_cmd));
- caps_cmd.op_to_write =
- htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST_F |
- FW_CMD_READ_F);
- caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
- ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
- sizeof(caps_cmd), &caps_cmd);
- config_name = "Firmware Default";
- }
- config_issued = 1;
- if (ret < 0)
- goto bye;
- finiver = ntohl(caps_cmd.finiver);
- finicsum = ntohl(caps_cmd.finicsum);
- cfcsum = ntohl(caps_cmd.cfcsum);
- if (finicsum != cfcsum)
- dev_warn(adapter->pdev_dev, "Configuration File checksum "\
- "mismatch: [fini] csum=%#x, computed csum=%#x\n",
- finicsum, cfcsum);
- /*
- * And now tell the firmware to use the configuration we just loaded.
- */
- caps_cmd.op_to_write =
- htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST_F |
- FW_CMD_WRITE_F);
- caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
- ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
- NULL);
- if (ret < 0)
- goto bye;
- /*
- * Tweak configuration based on system architecture, module
- * parameters, etc.
- */
- ret = adap_init0_tweaks(adapter);
- if (ret < 0)
- goto bye;
- /*
- * And finally tell the firmware to initialize itself using the
- * parameters from the Configuration File.
- */
- ret = t4_fw_initialize(adapter, adapter->mbox);
- if (ret < 0)
- goto bye;
- /* Emit Firmware Configuration File information and return
- * successfully.
- */
- dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
- "Configuration File \"%s\", version %#x, computed checksum %#x\n",
- config_name, finiver, cfcsum);
- return 0;
- /*
- * Something bad happened. Return the error ... (If the "error"
- * is that there's no Configuration File on the adapter we don't
- * want to issue a warning since this is fairly common.)
- */
- bye:
- if (config_issued && ret != -ENOENT)
- dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
- config_name, -ret);
- return ret;
- }
- static struct fw_info fw_info_array[] = {
- {
- .chip = CHELSIO_T4,
- .fs_name = FW4_CFNAME,
- .fw_mod_name = FW4_FNAME,
- .fw_hdr = {
- .chip = FW_HDR_CHIP_T4,
- .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
- .intfver_nic = FW_INTFVER(T4, NIC),
- .intfver_vnic = FW_INTFVER(T4, VNIC),
- .intfver_ri = FW_INTFVER(T4, RI),
- .intfver_iscsi = FW_INTFVER(T4, ISCSI),
- .intfver_fcoe = FW_INTFVER(T4, FCOE),
- },
- }, {
- .chip = CHELSIO_T5,
- .fs_name = FW5_CFNAME,
- .fw_mod_name = FW5_FNAME,
- .fw_hdr = {
- .chip = FW_HDR_CHIP_T5,
- .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
- .intfver_nic = FW_INTFVER(T5, NIC),
- .intfver_vnic = FW_INTFVER(T5, VNIC),
- .intfver_ri = FW_INTFVER(T5, RI),
- .intfver_iscsi = FW_INTFVER(T5, ISCSI),
- .intfver_fcoe = FW_INTFVER(T5, FCOE),
- },
- }
- };
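- /* Return the fw_info entry for the given chip version, or NULL if
- * the chip isn't listed in fw_info_array[] above.
- */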
- static struct fw_info *find_fw_info(int chip)
- {
- int i;
- for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
- if (fw_info_array[i].chip == chip)
- return &fw_info_array[i];
- }
- return NULL;
- }
- /*
- * Phase 0 of initialization: contact FW, obtain config, perform basic init.
- */
- static int adap_init0(struct adapter *adap)
- {
- int ret;
- u32 v, port_vec;
- enum dev_state state;
- u32 params[7], val[7];
- struct fw_caps_config_cmd caps_cmd;
- int reset = 1;
- /* Grab Firmware Device Log parameters as early as possible so we
- * have access to the device log for debugging, etc.
- */
- ret = t4_init_devlog_params(adap);
- if (ret < 0)
- return ret;
- /* Contact FW, advertising Master capability */
- ret = t4_fw_hello(adap, adap->mbox, adap->mbox, MASTER_MAY, &state);
- if (ret < 0) {
- dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
- ret);
- return ret;
- }
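- /* On success t4_fw_hello() returns the mailbox of the device's
- * Master PF; if it matches ours, we are the Master PF Driver.
- */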
- if (ret == adap->mbox)
- adap->flags |= MASTER_PF;
- /*
- * If we're the Master PF Driver and the device is uninitialized,
- * then let's consider upgrading the firmware ... (We always want
- * to check the firmware version number in order to (a) get it for
- * later reporting and (b) warn if the currently loaded firmware
- * is excessively mismatched relative to the driver.)
- */
- t4_get_fw_version(adap, &adap->params.fw_vers);
- t4_get_tp_version(adap, &adap->params.tp_vers);
- if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
- struct fw_info *fw_info;
- struct fw_hdr *card_fw;
- const struct firmware *fw;
- const u8 *fw_data = NULL;
- unsigned int fw_size = 0;
- /* This is the firmware whose headers the driver was compiled
- * against
- */
- fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
- if (fw_info == NULL) {
- dev_err(adap->pdev_dev,
- "unable to get firmware info for chip %d.\n",
- CHELSIO_CHIP_VERSION(adap->params.chip));
- return -EINVAL;
- }
- /* allocate memory to read the header of the firmware on the
- * card
- */
- card_fw = t4_alloc_mem(sizeof(*card_fw));
- if (!card_fw) {
- ret = -ENOMEM;
- goto bye;
- }
- /* Get FW from /lib/firmware/ */
- ret = request_firmware(&fw, fw_info->fw_mod_name,
- adap->pdev_dev);
- if (ret < 0) {
- dev_err(adap->pdev_dev,
- "unable to load firmware image %s, error %d\n",
- fw_info->fw_mod_name, ret);
- } else {
- fw_data = fw->data;
- fw_size = fw->size;
- }
- /* Run the firmware-upgrade logic: install/upgrade if necessary */
- ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
- state, &reset);
- /* Cleaning up */
- release_firmware(fw);
- t4_free_mem(card_fw);
- if (ret < 0)
- goto bye;
- }
- /*
- * Grab VPD parameters. This should be done after we establish a
- * connection to the firmware since some of the VPD parameters
- * (notably the Core Clock frequency) are retrieved via requests to
- * the firmware. On the other hand, we need these fairly early on
- * so we do this right after getting ahold of the firmware.
- */
- ret = get_vpd_params(adap, &adap->params.vpd);
- if (ret < 0)
- goto bye;
- /*
- * Find out what ports are available to us. Note that we need to do
- * this before initializing the adapter below since the later code
- * needs nports and portvec ...
- */
- v =
- FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
- if (ret < 0)
- goto bye;
- adap->params.nports = hweight32(port_vec);
- adap->params.portvec = port_vec;
- /* If the firmware is initialized already, emit a simple note to that
- * effect. Otherwise, it's time to try initializing the adapter.
- */
- if (state == DEV_STATE_INIT) {
- dev_info(adap->pdev_dev, "Coming up as %s: "\
- "Adapter already initialized\n",
- adap->flags & MASTER_PF ? "MASTER" : "SLAVE");
- } else {
- dev_info(adap->pdev_dev, "Coming up as MASTER: "\
- "Initializing adapter\n");
- /* Find out whether we're dealing with a version of the
- * firmware which has configuration file support.
- */
- params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
- FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
- params, val);
- /* If the firmware doesn't support Configuration Files,
- * return an error.
- */
- if (ret < 0) {
- dev_err(adap->pdev_dev, "firmware doesn't support "
- "Firmware Configuration Files\n");
- goto bye;
- }
- /* The firmware provides us with a memory buffer where we can
- * load a Configuration File from the host if we want to
- * override the Configuration File in flash.
- */
- ret = adap_init0_config(adap, reset);
- if (ret == -ENOENT) {
- dev_err(adap->pdev_dev, "no Configuration File "
- "present on adapter.\n");
- goto bye;
- }
- if (ret < 0) {
- dev_err(adap->pdev_dev, "could not initialize "
- "adapter, error %d\n", -ret);
- goto bye;
- }
- }
- /* Give the SGE code a chance to pull in anything that it needs ...
- * Note that this must be called after we retrieve our VPD parameters
- * in order to know how to convert core ticks to seconds, etc.
- */
- ret = t4_sge_init(adap);
- if (ret < 0)
- goto bye;
- if (is_bypass_device(adap->pdev->device))
- adap->params.bypass = 1;
- /*
- * Grab some of our basic fundamental operating parameters.
- */
- #define FW_PARAM_DEV(param) \
- (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) | \
- FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_##param))
- #define FW_PARAM_PFVF(param) \
- FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) | \
- FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_##param)| \
- FW_PARAMS_PARAM_Y_V(0) | \
- FW_PARAMS_PARAM_Z_V(0)
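- /* These macros compose the 32-bit parameter identifiers passed to
- * t4_query_params(). For example, FW_PARAM_PFVF(L2T_START) selects
- * the PFVF mnemonic with index FW_PARAMS_PARAM_PFVF_L2T_START and
- * zeroed Y/Z fields.
- */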
- params[0] = FW_PARAM_PFVF(EQ_START);
- params[1] = FW_PARAM_PFVF(L2T_START);
- params[2] = FW_PARAM_PFVF(L2T_END);
- params[3] = FW_PARAM_PFVF(FILTER_START);
- params[4] = FW_PARAM_PFVF(FILTER_END);
- params[5] = FW_PARAM_PFVF(IQFLINT_START);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
- if (ret < 0)
- goto bye;
- adap->sge.egr_start = val[0];
- adap->l2t_start = val[1];
- adap->l2t_end = val[2];
- adap->tids.ftid_base = val[3];
- adap->tids.nftids = val[4] - val[3] + 1;
- adap->sge.ingr_start = val[5];
- /* qids (ingress/egress) returned from firmware can be anywhere
- * in the range from EQ(IQFLINT)_START to EQ(IQFLINT)_END.
- * Hence the driver needs to allocate memory for this range to
- * store the queue info. Get the highest IQFLINT/EQ index returned
- * in FW_EQ_*_CMD.alloc command.
- */
- params[0] = FW_PARAM_PFVF(EQ_END);
- params[1] = FW_PARAM_PFVF(IQFLINT_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
- if (ret < 0)
- goto bye;
- adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
- adap->sge.ingr_sz = val[1] - adap->sge.ingr_start + 1;
- adap->sge.egr_map = kcalloc(adap->sge.egr_sz,
- sizeof(*adap->sge.egr_map), GFP_KERNEL);
- if (!adap->sge.egr_map) {
- ret = -ENOMEM;
- goto bye;
- }
- adap->sge.ingr_map = kcalloc(adap->sge.ingr_sz,
- sizeof(*adap->sge.ingr_map), GFP_KERNEL);
- if (!adap->sge.ingr_map) {
- ret = -ENOMEM;
- goto bye;
- }
- /* Allocate the memory for the various egress queue bitmaps,
- * i.e. starving_fl and txq_maperr.
- */
- adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
- sizeof(long), GFP_KERNEL);
- if (!adap->sge.starving_fl) {
- ret = -ENOMEM;
- goto bye;
- }
- adap->sge.txq_maperr = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
- sizeof(long), GFP_KERNEL);
- if (!adap->sge.txq_maperr) {
- ret = -ENOMEM;
- goto bye;
- }
- params[0] = FW_PARAM_PFVF(CLIP_START);
- params[1] = FW_PARAM_PFVF(CLIP_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
- if (ret < 0)
- goto bye;
- adap->clipt_start = val[0];
- adap->clipt_end = val[1];
- /* query params related to active filter region */
- params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
- params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
- /* If the active filter region is non-empty, we enable establishing
- * offload connections through firmware work requests.
- */
- if ((val[0] != val[1]) && (ret >= 0)) {
- adap->flags |= FW_OFLD_CONN;
- adap->tids.aftid_base = val[0];
- adap->tids.aftid_end = val[1];
- }
- /* If we're running on newer firmware, let it know that we're
- * prepared to deal with encapsulated CPL messages. Older
- * firmware won't understand this and we'll just get
- * unencapsulated messages ...
- */
- params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
- val[0] = 1;
- (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
- /*
- * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
- * capability. Earlier versions of the firmware didn't have the
- * ULPTX_MEMWRITE_DSGL parameter, so we'll interpret a query failure
- * as no permission to use ULPTX MEMWRITE DSGL.
- */
- if (is_t4(adap->params.chip)) {
- adap->params.ulptx_memwrite_dsgl = false;
- } else {
- params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
- 1, params, val);
- adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
- }
- /*
- * Get device capabilities so we can determine what resources we need
- * to manage.
- */
- memset(&caps_cmd, 0, sizeof(caps_cmd));
- caps_cmd.op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_READ_F);
- caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
- ret = t4_wr_mbox(adap, adap->mbox, &caps_cmd, sizeof(caps_cmd),
- &caps_cmd);
- if (ret < 0)
- goto bye;
- if (caps_cmd.ofldcaps) {
- /* query offload-related parameters */
- params[0] = FW_PARAM_DEV(NTID);
- params[1] = FW_PARAM_PFVF(SERVER_START);
- params[2] = FW_PARAM_PFVF(SERVER_END);
- params[3] = FW_PARAM_PFVF(TDDP_START);
- params[4] = FW_PARAM_PFVF(TDDP_END);
- params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
- params, val);
- if (ret < 0)
- goto bye;
- adap->tids.ntids = val[0];
- adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
- adap->tids.stid_base = val[1];
- adap->tids.nstids = val[2] - val[1] + 1;
- /*
- * Set up the server filter region. Divide the available filter
- * region into two parts: regular filters get one third and server
- * filters get the remaining two thirds. The split is only applied
- * when the FW_OFLD_CONN workaround path is enabled and the adapter
- * is not a bypass device.
- * 1. Regular filters: ordinary packet filters.
- * 2. Server filters: special filters used to redirect SYN packets
- * to the offload queue.
- */
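- /* For example, with nftids == 96 the split below leaves
- * DIV_ROUND_UP(96, 3) == 32 IDs as regular filters and turns the
- * remaining 64 into server filter IDs.
- */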
- if (adap->flags & FW_OFLD_CONN && !is_bypass(adap)) {
- adap->tids.sftid_base = adap->tids.ftid_base +
- DIV_ROUND_UP(adap->tids.nftids, 3);
- adap->tids.nsftids = adap->tids.nftids -
- DIV_ROUND_UP(adap->tids.nftids, 3);
- adap->tids.nftids = adap->tids.sftid_base -
- adap->tids.ftid_base;
- }
- adap->vres.ddp.start = val[3];
- adap->vres.ddp.size = val[4] - val[3] + 1;
- adap->params.ofldq_wr_cred = val[5];
- adap->params.offload = 1;
- }
- if (caps_cmd.rdmacaps) {
- params[0] = FW_PARAM_PFVF(STAG_START);
- params[1] = FW_PARAM_PFVF(STAG_END);
- params[2] = FW_PARAM_PFVF(RQ_START);
- params[3] = FW_PARAM_PFVF(RQ_END);
- params[4] = FW_PARAM_PFVF(PBL_START);
- params[5] = FW_PARAM_PFVF(PBL_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
- params, val);
- if (ret < 0)
- goto bye;
- adap->vres.stag.start = val[0];
- adap->vres.stag.size = val[1] - val[0] + 1;
- adap->vres.rq.start = val[2];
- adap->vres.rq.size = val[3] - val[2] + 1;
- adap->vres.pbl.start = val[4];
- adap->vres.pbl.size = val[5] - val[4] + 1;
- params[0] = FW_PARAM_PFVF(SQRQ_START);
- params[1] = FW_PARAM_PFVF(SQRQ_END);
- params[2] = FW_PARAM_PFVF(CQ_START);
- params[3] = FW_PARAM_PFVF(CQ_END);
- params[4] = FW_PARAM_PFVF(OCQ_START);
- params[5] = FW_PARAM_PFVF(OCQ_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
- val);
- if (ret < 0)
- goto bye;
- adap->vres.qp.start = val[0];
- adap->vres.qp.size = val[1] - val[0] + 1;
- adap->vres.cq.start = val[2];
- adap->vres.cq.size = val[3] - val[2] + 1;
- adap->vres.ocq.start = val[4];
- adap->vres.ocq.size = val[5] - val[4] + 1;
- params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
- params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
- val);
- if (ret < 0) {
- adap->params.max_ordird_qp = 8;
- adap->params.max_ird_adapter = 32 * adap->tids.ntids;
- ret = 0;
- } else {
- adap->params.max_ordird_qp = val[0];
- adap->params.max_ird_adapter = val[1];
- }
- dev_info(adap->pdev_dev,
- "max_ordird_qp %d max_ird_adapter %d\n",
- adap->params.max_ordird_qp,
- adap->params.max_ird_adapter);
- }
- if (caps_cmd.iscsicaps) {
- params[0] = FW_PARAM_PFVF(ISCSI_START);
- params[1] = FW_PARAM_PFVF(ISCSI_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
- params, val);
- if (ret < 0)
- goto bye;
- adap->vres.iscsi.start = val[0];
- adap->vres.iscsi.size = val[1] - val[0] + 1;
- }
- #undef FW_PARAM_PFVF
- #undef FW_PARAM_DEV
- /* The MTU/MSS Table is initialized by now, so load its values. If
- * we're initializing the adapter, then we'll make any modifications
- * we want to the MTU/MSS Table and also initialize the congestion
- * parameters.
- */
- t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
- if (state != DEV_STATE_INIT) {
- int i;
- /* The default MTU Table contains values 1492 and 1500.
- * However, for TCP, it's better to have two values which are
- * a multiple of 8 +/- 4 bytes apart near this popular MTU.
- * This allows us to have a TCP Data Payload which is a
- * multiple of 8 regardless of what combination of TCP Options
- * are in use (always a multiple of 4 bytes) which is
- * important for performance reasons. For instance, if no
- * options are in use, then we have a 20-byte IP header and a
- * 20-byte TCP header. In this case, a 1500-byte MSS would
- * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
- * which is not a multiple of 8. So using an MSS of 1488 in
- * this case results in a TCP Data Payload of 1448 bytes which
- * is a multiple of 8. On the other hand, if 12-byte TCP Time
- * Stamps have been negotiated, then an MTU of 1500 bytes
- * results in a TCP Data Payload of 1448 bytes which, as
- * above, is a multiple of 8 bytes ...
- */
- for (i = 0; i < NMTUS; i++)
- if (adap->params.mtus[i] == 1492) {
- adap->params.mtus[i] = 1488;
- break;
- }
- t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
- adap->params.b_wnd);
- }
- t4_init_sge_params(adap);
- t4_init_tp_params(adap);
- adap->flags |= FW_OK;
- return 0;
- /*
- * Something bad happened. If a command timed out or failed with
- * EIO, the firmware is not operating within its spec or something
- * catastrophic happened to the HW/FW, so stop issuing commands.
- */
- bye:
- kfree(adap->sge.egr_map);
- kfree(adap->sge.ingr_map);
- kfree(adap->sge.starving_fl);
- kfree(adap->sge.txq_maperr);
- if (ret != -ETIMEDOUT && ret != -EIO)
- t4_fw_bye(adap, adap->mbox);
- return ret;
- }
- /* EEH callbacks */
- static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
- {
- int i;
- struct adapter *adap = pci_get_drvdata(pdev);
- if (!adap)
- goto out;
- rtnl_lock();
- adap->flags &= ~FW_OK;
- notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
- spin_lock(&adap->stats_lock);
- for_each_port(adap, i) {
- struct net_device *dev = adap->port[i];
- netif_device_detach(dev);
- netif_carrier_off(dev);
- }
- spin_unlock(&adap->stats_lock);
- disable_interrupts(adap);
- if (adap->flags & FULL_INIT_DONE)
- cxgb_down(adap);
- rtnl_unlock();
- if ((adap->flags & DEV_ENABLED)) {
- pci_disable_device(pdev);
- adap->flags &= ~DEV_ENABLED;
- }
- out: return state == pci_channel_io_perm_failure ?
- PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
- }
- static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
- {
- int i, ret;
- struct fw_caps_config_cmd c;
- struct adapter *adap = pci_get_drvdata(pdev);
- if (!adap) {
- pci_restore_state(pdev);
- pci_save_state(pdev);
- return PCI_ERS_RESULT_RECOVERED;
- }
- if (!(adap->flags & DEV_ENABLED)) {
- if (pci_enable_device(pdev)) {
- dev_err(&pdev->dev, "Cannot reenable PCI "
- "device after reset\n");
- return PCI_ERS_RESULT_DISCONNECT;
- }
- adap->flags |= DEV_ENABLED;
- }
- pci_set_master(pdev);
- pci_restore_state(pdev);
- pci_save_state(pdev);
- pci_cleanup_aer_uncorrect_error_status(pdev);
- if (t4_wait_dev_ready(adap->regs) < 0)
- return PCI_ERS_RESULT_DISCONNECT;
- if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
- return PCI_ERS_RESULT_DISCONNECT;
- adap->flags |= FW_OK;
- if (adap_init1(adap, &c))
- return PCI_ERS_RESULT_DISCONNECT;
- for_each_port(adap, i) {
- struct port_info *p = adap2pinfo(adap, i);
- ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
- NULL, NULL);
- if (ret < 0)
- return PCI_ERS_RESULT_DISCONNECT;
- p->viid = ret;
- p->xact_addr_filt = -1;
- }
- t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
- adap->params.b_wnd);
- setup_memwin(adap);
- if (cxgb_up(adap))
- return PCI_ERS_RESULT_DISCONNECT;
- return PCI_ERS_RESULT_RECOVERED;
- }
- static void eeh_resume(struct pci_dev *pdev)
- {
- int i;
- struct adapter *adap = pci_get_drvdata(pdev);
- if (!adap)
- return;
- rtnl_lock();
- for_each_port(adap, i) {
- struct net_device *dev = adap->port[i];
- if (netif_running(dev)) {
- link_start(dev);
- cxgb_set_rxmode(dev);
- }
- netif_device_attach(dev);
- }
- rtnl_unlock();
- }
- static const struct pci_error_handlers cxgb4_eeh = {
- .error_detected = eeh_err_detected,
- .slot_reset = eeh_slot_reset,
- .resume = eeh_resume,
- };
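- /* Return true if the port supports a 10Gb/s or 40Gb/s link. */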
- static inline bool is_x_10g_port(const struct link_config *lc)
- {
- return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
- (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
- }
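- /* Fill in a response queue's basic parameters: owning adapter,
- * interrupt holdoff (microseconds and packet-count threshold),
- * capacity and entry size.
- */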
- static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
- unsigned int us, unsigned int cnt,
- unsigned int size, unsigned int iqe_size)
- {
- q->adap = adap;
- cxgb4_set_rspq_intr_params(q, us, cnt);
- q->iqe_len = iqe_size;
- q->size = size;
- }
- /*
- * Perform default configuration of DMA queues depending on the number and type
- * of ports we found and the number of available CPUs. Most settings can be
- * modified by the admin prior to actual use.
- */
- static void cfg_queues(struct adapter *adap)
- {
- struct sge *s = &adap->sge;
- int i, n10g = 0, qidx = 0;
- #ifndef CONFIG_CHELSIO_T4_DCB
- int q10g = 0;
- #endif
- int ciq_size;
- for_each_port(adap, i)
- n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
- #ifdef CONFIG_CHELSIO_T4_DCB
- /* For Data Center Bridging support we need to be able to support up
- * to 8 Traffic Priorities; each of which will be assigned to its
- * own TX Queue in order to prevent Head-Of-Line Blocking.
- */
- if (adap->params.nports * 8 > MAX_ETH_QSETS) {
- dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
- MAX_ETH_QSETS, adap->params.nports * 8);
- BUG_ON(1);
- }
- for_each_port(adap, i) {
- struct port_info *pi = adap2pinfo(adap, i);
- pi->first_qset = qidx;
- pi->nqsets = 8;
- qidx += pi->nqsets;
- }
- #else /* !CONFIG_CHELSIO_T4_DCB */
- /*
- * We default to one queue set per non-10G port and up to as many
- * queue sets as there are CPU cores per 10G port.
- */
- if (n10g)
- q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
- if (q10g > netif_get_num_default_rss_queues())
- q10g = netif_get_num_default_rss_queues();
- for_each_port(adap, i) {
- struct port_info *pi = adap2pinfo(adap, i);
- pi->first_qset = qidx;
- pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
- qidx += pi->nqsets;
- }
- #endif /* !CONFIG_CHELSIO_T4_DCB */
- s->ethqsets = qidx;
- s->max_ethqsets = qidx; /* MSI-X may lower it later */
- if (is_offload(adap)) {
- /*
- * For offload we use one queue per channel if all ports are up to
- * 1G; otherwise we divide all available queues amongst the
- * channels, capped by the number of available cores.
- */
- if (n10g) {
- i = min_t(int, ARRAY_SIZE(s->ofldrxq),
- num_online_cpus());
- s->ofldqsets = roundup(i, adap->params.nports);
- } else
- s->ofldqsets = adap->params.nports;
- /* For RDMA one Rx queue per channel suffices */
- s->rdmaqs = adap->params.nports;
- /* Try to allow at least 1 CIQ per cpu, rounding down to a
- * multiple of the number of ports, with a minimum of 1 per port.
- * A 2 port card in a 6 cpu system: 6 CIQs, 3 / port.
- * A 4 port card in a 6 cpu system: 4 CIQs, 1 / port.
- * A 4 port card in a 2 cpu system: 4 CIQs, 1 / port.
- */
- s->rdmaciqs = min_t(int, MAX_RDMA_CIQS, num_online_cpus());
- s->rdmaciqs = (s->rdmaciqs / adap->params.nports) *
- adap->params.nports;
- s->rdmaciqs = max_t(int, s->rdmaciqs, adap->params.nports);
- }
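- /* Default sizing below (see init_rspq()'s us/cnt/size/iqe_size
- * arguments): Ethernet response queues get a 5us interrupt holdoff
- * with a 10-packet count threshold and 1024 64-byte entries; free
- * lists default to 72 entries and TX queues to 1024.
- */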
- for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
- struct sge_eth_rxq *r = &s->ethrxq[i];
- init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
- r->fl.size = 72;
- }
- for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
- s->ethtxq[i].q.size = 1024;
- for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
- s->ctrlq[i].q.size = 512;
- for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
- s->ofldtxq[i].q.size = 1024;
- for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
- struct sge_ofld_rxq *r = &s->ofldrxq[i];
- init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
- r->rspq.uld = CXGB4_ULD_ISCSI;
- r->fl.size = 72;
- }
- for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
- struct sge_ofld_rxq *r = &s->rdmarxq[i];
- init_rspq(adap, &r->rspq, 5, 1, 511, 64);
- r->rspq.uld = CXGB4_ULD_RDMA;
- r->fl.size = 72;
- }
- ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
- if (ciq_size > SGE_MAX_IQ_SIZE) {
- CH_WARN(adap, "CIQ size too small for available IQs\n");
- ciq_size = SGE_MAX_IQ_SIZE;
- }
- for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
- struct sge_ofld_rxq *r = &s->rdmaciq[i];
- init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
- r->rspq.uld = CXGB4_ULD_RDMA;
- }
- init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
- init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
- }
- /*
- * Reduce the number of Ethernet queues across all ports to at most n.
- * n provides at least one queue per port.
- */
- static void reduce_ethqs(struct adapter *adap, int n)
- {
- int i;
- struct port_info *pi;
- while (n < adap->sge.ethqsets)
- for_each_port(adap, i) {
- pi = adap2pinfo(adap, i);
- if (pi->nqsets > 1) {
- pi->nqsets--;
- adap->sge.ethqsets--;
- if (adap->sge.ethqsets <= n)
- break;
- }
- }
- n = 0;
- for_each_port(adap, i) {
- pi = adap2pinfo(adap, i);
- pi->first_qset = n;
- n += pi->nqsets;
- }
- }
- /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
- #define EXTRA_VECS 2
- static int enable_msix(struct adapter *adap)
- {
- int ofld_need = 0;
- int i, want, need, allocated;
- struct sge *s = &adap->sge;
- unsigned int nchan = adap->params.nports;
- struct msix_entry *entries;
- entries = kmalloc(sizeof(*entries) * (MAX_INGQ + 1),
- GFP_KERNEL);
- if (!entries)
- return -ENOMEM;
- for (i = 0; i < MAX_INGQ + 1; ++i)
- entries[i].entry = i;
- want = s->max_ethqsets + EXTRA_VECS;
- if (is_offload(adap)) {
- want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
- /* need nchan for each possible ULD */
- ofld_need = 3 * nchan;
- }
- #ifdef CONFIG_CHELSIO_T4_DCB
- /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
- * each port.
- */
- need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
- #else
- need = adap->params.nports + EXTRA_VECS + ofld_need;
- #endif
- allocated = pci_enable_msix_range(adap->pdev, entries, need, want);
- if (allocated < 0) {
- dev_info(adap->pdev_dev, "not enough MSI-X vectors left,"
- " not using MSI-X\n");
- kfree(entries);
- return allocated;
- }
- /* Distribute available vectors to the various queue groups.
- * Every group gets its minimum requirement and the NIC gets top
- * priority for leftovers.
- */
- i = allocated - EXTRA_VECS - ofld_need;
- if (i < s->max_ethqsets) {
- s->max_ethqsets = i;
- if (i < s->ethqsets)
- reduce_ethqs(adap, i);
- }
- if (is_offload(adap)) {
- if (allocated < want) {
- s->rdmaqs = nchan;
- s->rdmaciqs = nchan;
- }
- /* leftovers go to OFLD */
- i = allocated - EXTRA_VECS - s->max_ethqsets -
- s->rdmaqs - s->rdmaciqs;
- s->ofldqsets = (i / nchan) * nchan; /* round down */
- }
- for (i = 0; i < allocated; ++i)
- adap->msix_info[i].vec = entries[i].vector;
- kfree(entries);
- return 0;
- }
- #undef EXTRA_VECS
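- /* Allocate each port's RSS indirection table and fill it with the
- * default round-robin spread across the port's queue sets.
- */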
- static int init_rss(struct adapter *adap)
- {
- unsigned int i, j;
- for_each_port(adap, i) {
- struct port_info *pi = adap2pinfo(adap, i);
- pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
- if (!pi->rss)
- return -ENOMEM;
- for (j = 0; j < pi->rss_size; j++)
- pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
- }
- return 0;
- }
- static void print_port_info(const struct net_device *dev)
- {
- char buf[80];
- char *bufp = buf;
- const char *spd = "";
- const struct port_info *pi = netdev_priv(dev);
- const struct adapter *adap = pi->adapter;
- if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
- spd = " 2.5 GT/s";
- else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
- spd = " 5 GT/s";
- else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_8_0GB)
- spd = " 8 GT/s";
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
- bufp += sprintf(bufp, "100/");
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
- bufp += sprintf(bufp, "1000/");
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
- bufp += sprintf(bufp, "10G/");
- if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
- bufp += sprintf(bufp, "40G/");
- if (bufp != buf)
- --bufp;
- sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
- netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
- adap->params.vpd.id,
- CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
- is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
- (adap->flags & USING_MSIX) ? " MSI-X" :
- (adap->flags & USING_MSI) ? " MSI" : "");
- netdev_info(dev, "S/N: %s, P/N: %s\n",
- adap->params.vpd.sn, adap->params.vpd.pn);
- }
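- /* Set the Relaxed Ordering Enable bit in the PCIe Device Control
- * register so the adapter may issue relaxed-ordered TLPs.
- */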
- static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
- {
- pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
- }
- /*
- * Free the following resources:
- * - memory used for tables
- * - MSI/MSI-X
- * - net devices
- * - resources FW is holding for us
- */
- static void free_some_resources(struct adapter *adapter)
- {
- unsigned int i;
- t4_free_mem(adapter->l2t);
- t4_free_mem(adapter->tids.tid_tab);
- kfree(adapter->sge.egr_map);
- kfree(adapter->sge.ingr_map);
- kfree(adapter->sge.starving_fl);
- kfree(adapter->sge.txq_maperr);
- disable_msi(adapter);
- for_each_port(adapter, i)
- if (adapter->port[i]) {
- kfree(adap2pinfo(adapter, i)->rss);
- free_netdev(adapter->port[i]);
- }
- if (adapter->flags & FW_OK)
- t4_fw_bye(adapter, adapter->fn);
- }
- #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
- #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
- NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
- #define SEGMENT_SIZE 128
- static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
- {
- int func, i, err, s_qpp, qpp, num_seg;
- struct port_info *pi;
- bool highdma = false;
- struct adapter *adapter = NULL;
- void __iomem *regs;
- printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
- err = pci_request_regions(pdev, KBUILD_MODNAME);
- if (err) {
- /* Just info, some other driver may have claimed the device. */
- dev_info(&pdev->dev, "cannot obtain PCI resources\n");
- return err;
- }
- err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "cannot enable PCI device\n");
- goto out_release_regions;
- }
- regs = pci_ioremap_bar(pdev, 0);
- if (!regs) {
- dev_err(&pdev->dev, "cannot map device registers\n");
- err = -ENOMEM;
- goto out_disable_device;
- }
- err = t4_wait_dev_ready(regs);
- if (err < 0)
- goto out_unmap_bar0;
- /* We control everything through one PF */
- func = SOURCEPF_G(readl(regs + PL_WHOAMI_A));
- if (func != ent->driver_data) {
- iounmap(regs);
- pci_disable_device(pdev);
- pci_save_state(pdev); /* to restore SR-IOV later */
- goto sriov;
- }
- if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- highdma = true;
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err) {
- dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
- "coherent allocations\n");
- goto out_unmap_bar0;
- }
- } else {
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev, "no usable DMA configuration\n");
- goto out_unmap_bar0;
- }
- }
- pci_enable_pcie_error_reporting(pdev);
- enable_pcie_relaxed_ordering(pdev);
- pci_set_master(pdev);
- pci_save_state(pdev);
- adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
- if (!adapter) {
- err = -ENOMEM;
- goto out_unmap_bar0;
- }
- adapter->workq = create_singlethread_workqueue("cxgb4");
- if (!adapter->workq) {
- err = -ENOMEM;
- goto out_free_adapter;
- }
- /* PCI device has been enabled */
- adapter->flags |= DEV_ENABLED;
- adapter->regs = regs;
- adapter->pdev = pdev;
- adapter->pdev_dev = &pdev->dev;
- adapter->mbox = func;
- adapter->fn = func;
- adapter->msg_enable = dflt_msg_enable;
- memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
- spin_lock_init(&adapter->stats_lock);
- spin_lock_init(&adapter->tid_release_lock);
- spin_lock_init(&adapter->win0_lock);
- INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
- INIT_WORK(&adapter->db_full_task, process_db_full);
- INIT_WORK(&adapter->db_drop_task, process_db_drop);
- err = t4_prep_adapter(adapter);
- if (err)
- goto out_free_adapter;
- if (!is_t4(adapter->params.chip)) {
- s_qpp = (QUEUESPERPAGEPF0_S +
- (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
- adapter->fn);
- qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
- SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
- num_seg = PAGE_SIZE / SEGMENT_SIZE;
- /* Each segment is 128 bytes. Write coalescing is enabled only
- * when the number of egress queues per page for this PF (read from
- * the SGE_EGRESS_QUEUES_PER_PAGE_PF register) does not exceed the
- * number of segments that fit in a page.
- */
- if (qpp > num_seg) {
- dev_err(&pdev->dev,
- "Incorrect number of egress queues per page\n");
- err = -EINVAL;
- goto out_free_adapter;
- }
- adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
- pci_resource_len(pdev, 2));
- if (!adapter->bar2) {
- dev_err(&pdev->dev, "cannot map device bar2 region\n");
- err = -ENOMEM;
- goto out_free_adapter;
- }
- }
- setup_memwin(adapter);
- err = adap_init0(adapter);
- setup_memwin_rdma(adapter);
- if (err)
- goto out_unmap_bar;
- for_each_port(adapter, i) {
- struct net_device *netdev;
- netdev = alloc_etherdev_mq(sizeof(struct port_info),
- MAX_ETH_QSETS);
- if (!netdev) {
- err = -ENOMEM;
- goto out_free_dev;
- }
- SET_NETDEV_DEV(netdev, &pdev->dev);
- adapter->port[i] = netdev;
- pi = netdev_priv(netdev);
- pi->adapter = adapter;
- pi->xact_addr_filt = -1;
- pi->port_id = i;
- netdev->irq = pdev->irq;
- netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_RXCSUM | NETIF_F_RXHASH |
- NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
- if (highdma)
- netdev->hw_features |= NETIF_F_HIGHDMA;
- netdev->features |= netdev->hw_features;
- netdev->vlan_features = netdev->features & VLAN_FEAT;
- netdev->priv_flags |= IFF_UNICAST_FLT;
- netdev->netdev_ops = &cxgb4_netdev_ops;
- #ifdef CONFIG_CHELSIO_T4_DCB
- netdev->dcbnl_ops = &cxgb4_dcb_ops;
- cxgb4_dcb_state_init(netdev);
- #endif
- cxgb4_set_ethtool_ops(netdev);
- }
- pci_set_drvdata(pdev, adapter);
- if (adapter->flags & FW_OK) {
- err = t4_port_init(adapter, func, func, 0);
- if (err)
- goto out_free_dev;
- }
- /*
- * Configure queues and allocate tables now, they can be needed as
- * soon as the first register_netdev completes.
- */
- cfg_queues(adapter);
- adapter->l2t = t4_init_l2t();
- if (!adapter->l2t) {
- /* We tolerate a lack of L2T, giving up some functionality */
- dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
- adapter->params.offload = 0;
- }
- #if IS_ENABLED(CONFIG_IPV6)
- adapter->clipt = t4_init_clip_tbl(adapter->clipt_start,
- adapter->clipt_end);
- if (!adapter->clipt) {
- /* We tolerate a lack of clip_table, giving up
- * some functionality
- */
- dev_warn(&pdev->dev,
- "could not allocate Clip table, continuing\n");
- adapter->params.offload = 0;
- }
- #endif
- if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
- dev_warn(&pdev->dev, "could not allocate TID table, "
- "continuing\n");
- adapter->params.offload = 0;
- }
- /* See what interrupts we'll be using */
- if (msi > 1 && enable_msix(adapter) == 0)
- adapter->flags |= USING_MSIX;
- else if (msi > 0 && pci_enable_msi(pdev) == 0)
- adapter->flags |= USING_MSI;
- err = init_rss(adapter);
- if (err)
- goto out_free_dev;
- /*
- * The card is now ready to go. If any errors occur during device
- * registration we do not fail the whole card but rather proceed only
- * with the ports we manage to register successfully. However we must
- * register at least one net device.
- */
- for_each_port(adapter, i) {
- pi = adap2pinfo(adapter, i);
- netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
- netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
- err = register_netdev(adapter->port[i]);
- if (err)
- break;
- adapter->chan_map[pi->tx_chan] = i;
- print_port_info(adapter->port[i]);
- }
- if (i == 0) {
- dev_err(&pdev->dev, "could not register any net devices\n");
- goto out_free_dev;
- }
- if (err) {
- dev_warn(&pdev->dev, "only %d net devices registered\n", i);
- err = 0;
- }
- if (cxgb4_debugfs_root) {
- adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
- cxgb4_debugfs_root);
- setup_debugfs(adapter);
- }
- /* PCIe EEH recovery on powerpc platforms needs fundamental reset */
- pdev->needs_freset = 1;
- if (is_offload(adapter))
- attach_ulds(adapter);
- sriov:
- #ifdef CONFIG_PCI_IOV
- if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
- if (pci_enable_sriov(pdev, num_vf[func]) == 0)
- dev_info(&pdev->dev,
- "instantiated %u virtual functions\n",
- num_vf[func]);
- #endif
- return 0;
- out_free_dev:
- free_some_resources(adapter);
- out_unmap_bar:
- if (!is_t4(adapter->params.chip))
- iounmap(adapter->bar2);
- out_free_adapter:
- if (adapter->workq)
- destroy_workqueue(adapter->workq);
- kfree(adapter);
- out_unmap_bar0:
- iounmap(regs);
- out_disable_device:
- pci_disable_pcie_error_reporting(pdev);
- pci_disable_device(pdev);
- out_release_regions:
- pci_release_regions(pdev);
- return err;
- }
- static void remove_one(struct pci_dev *pdev)
- {
- struct adapter *adapter = pci_get_drvdata(pdev);
- #ifdef CONFIG_PCI_IOV
- pci_disable_sriov(pdev);
- #endif
- if (adapter) {
- int i;
- /* Tear down per-adapter Work Queue first since it can contain
- * references to our adapter data structure.
- */
- destroy_workqueue(adapter->workq);
- if (is_offload(adapter))
- detach_ulds(adapter);
- disable_interrupts(adapter);
- for_each_port(adapter, i)
- if (adapter->port[i]->reg_state == NETREG_REGISTERED)
- unregister_netdev(adapter->port[i]);
- debugfs_remove_recursive(adapter->debugfs_root);
- /* If we allocated filters, free up state associated with any
- * valid filters ...
- */
- if (adapter->tids.ftid_tab) {
- struct filter_entry *f = &adapter->tids.ftid_tab[0];
- for (i = 0; i < (adapter->tids.nftids +
- adapter->tids.nsftids); i++, f++)
- if (f->valid)
- clear_filter(adapter, f);
- }
- if (adapter->flags & FULL_INIT_DONE)
- cxgb_down(adapter);
- free_some_resources(adapter);
- #if IS_ENABLED(CONFIG_IPV6)
- t4_cleanup_clip_tbl(adapter);
- #endif
- iounmap(adapter->regs);
- if (!is_t4(adapter->params.chip))
- iounmap(adapter->bar2);
- pci_disable_pcie_error_reporting(pdev);
- if ((adapter->flags & DEV_ENABLED)) {
- pci_disable_device(pdev);
- adapter->flags &= ~DEV_ENABLED;
- }
- pci_release_regions(pdev);
- synchronize_rcu();
- kfree(adapter);
- } else
- pci_release_regions(pdev);
- }
- static struct pci_driver cxgb4_driver = {
- .name = KBUILD_MODNAME,
- .id_table = cxgb4_pci_tbl,
- .probe = init_one,
- .remove = remove_one,
- .shutdown = remove_one,
- .err_handler = &cxgb4_eeh,
- };
- static int __init cxgb4_init_module(void)
- {
- int ret;
- /* Debugfs support is optional, just warn if this fails */
- cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
- if (!cxgb4_debugfs_root)
- pr_warn("could not create debugfs entry, continuing\n");
- ret = pci_register_driver(&cxgb4_driver);
- if (ret < 0)
- debugfs_remove(cxgb4_debugfs_root);
- #if IS_ENABLED(CONFIG_IPV6)
- if (!inet6addr_registered) {
- register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
- inet6addr_registered = true;
- }
- #endif
- return ret;
- }
- static void __exit cxgb4_cleanup_module(void)
- {
- #if IS_ENABLED(CONFIG_IPV6)
- if (inet6addr_registered) {
- unregister_inet6addr_notifier(&cxgb4_inet6addr_notifier);
- inet6addr_registered = false;
- }
- #endif
- pci_unregister_driver(&cxgb4_driver);
- debugfs_remove(cxgb4_debugfs_root); /* NULL ok */
- }
- module_init(cxgb4_init_module);
- module_exit(cxgb4_cleanup_module);
|