/*
 * QLogic qlge NIC HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 * See LICENSE.qlge for copyright and licensing details.
 * Author: Linux qlge network device driver by
 *         Ron Mercer <ron.mercer@qlogic.com>
 */
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>

#include "qlge.h"

char qlge_driver_name[] = DRV_NAME;
const char qlge_driver_version[] = DRV_VERSION;

MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
MODULE_DESCRIPTION(DRV_STRING " ");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
static const u32 default_msg =
	NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
	/* NETIF_MSG_TIMER | */
	NETIF_MSG_IFDOWN |
	NETIF_MSG_IFUP |
	NETIF_MSG_RX_ERR |
	NETIF_MSG_TX_ERR |
	/* NETIF_MSG_TX_QUEUED | */
	/* NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
	/* NETIF_MSG_PKTDATA | */
	NETIF_MSG_HW | NETIF_MSG_WOL | 0;

static int debug = -1;	/* defaults above */
module_param(debug, int, 0664);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
#define MSIX_IRQ 0
#define MSI_IRQ 1
#define LEG_IRQ 2
static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, 0664);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");

static int qlge_mpi_coredump;
module_param(qlge_mpi_coredump, int, 0);
MODULE_PARM_DESC(qlge_mpi_coredump,
		"Option to enable MPI firmware dump. "
		"Default is OFF - Do Not allocate memory. ");

static int qlge_force_coredump;
module_param(qlge_force_coredump, int, 0);
MODULE_PARM_DESC(qlge_force_coredump,
		"Option to allow force of firmware core dump. "
		"Default is OFF - Do not allow.");
static const struct pci_device_id qlge_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
	/* required last entry */
	{0,}
};

MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);

static int ql_wol(struct ql_adapter *);
static void qlge_set_multicast_list(struct net_device *);
static int ql_adapter_down(struct ql_adapter *);
static int ql_adapter_up(struct ql_adapter *);
/* This hardware semaphore causes exclusive access to
 * resources shared between the NIC driver, MPI firmware,
 * FCOE firmware and the FC driver.
 */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
{
	u32 sem_bits = 0;

	switch (sem_mask) {
	case SEM_XGMAC0_MASK:
		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
		break;
	case SEM_XGMAC1_MASK:
		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
		break;
	case SEM_ICB_MASK:
		sem_bits = SEM_SET << SEM_ICB_SHIFT;
		break;
	case SEM_MAC_ADDR_MASK:
		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
		break;
	case SEM_FLASH_MASK:
		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
		break;
	case SEM_PROBE_MASK:
		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
		break;
	case SEM_RT_IDX_MASK:
		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
		break;
	case SEM_PROC_REG_MASK:
		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
		break;
	default:
		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
		return -EINVAL;
	}

	ql_write32(qdev, SEM, sem_bits | sem_mask);
	return !(ql_read32(qdev, SEM) & sem_bits);
}
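/* Busy-wait for the hardware semaphore, retrying every 100 us for up
 * to 30 attempts (about 3 ms) before giving up.  Returns 0 once the
 * semaphore is held, -ETIMEDOUT otherwise.
 */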
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
{
	unsigned int wait_count = 30;

	do {
		if (!ql_sem_trylock(qdev, sem_mask))
			return 0;
		udelay(100);
	} while (--wait_count);
	return -ETIMEDOUT;
}
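/* Release a previously acquired hardware semaphore.  The read back of
 * the SEM register flushes the posted write.
 */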
void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
{
	ql_write32(qdev, SEM, sem_mask);
	ql_read32(qdev, SEM);	/* flush */
}
/* This function waits for a specific bit to come ready
 * in a given register.  It is used mostly by the initialization
 * process, but is also used in kernel thread APIs such as
 * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
 */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
{
	u32 temp;
	int count = UDELAY_COUNT;

	while (count) {
		temp = ql_read32(qdev, reg);

		/* check for errors */
		if (temp & err_bit) {
			netif_alert(qdev, probe, qdev->ndev,
				    "register 0x%.08x access error, value = 0x%.08x!.\n",
				    reg, temp);
			return -EIO;
		} else if (temp & bit)
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	netif_alert(qdev, probe, qdev->ndev,
		    "Timed out waiting for reg %x to come ready.\n", reg);
	return -ETIMEDOUT;
}
/* The CFG register is used to download TX and RX control blocks
 * to the chip. This function waits for an operation to complete.
 */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
{
	int count = UDELAY_COUNT;
	u32 temp;

	while (count) {
		temp = ql_read32(qdev, CFG);
		if (temp & CFG_LE)
			return -EIO;
		if (!(temp & bit))
			return 0;
		udelay(UDELAY_DELAY);
		count--;
	}
	return -ETIMEDOUT;
}
/* Used to issue init control blocks to hw. Maps control block,
 * sets address, triggers download, waits for completion.
 */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
		 u16 q_id)
{
	u64 map;
	int status = 0;
	int direction;
	u32 mask;
	u32 value;

	direction =
	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
	    PCI_DMA_FROMDEVICE;

	map = pci_map_single(qdev->pdev, ptr, size, direction);
	if (pci_dma_mapping_error(qdev->pdev, map)) {
		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
		return -ENOMEM;
	}

	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
	if (status)
		return status;

	status = ql_wait_cfg(qdev, bit);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Timed out waiting for CFG to come ready.\n");
		goto exit;
	}

	ql_write32(qdev, ICB_L, (u32) map);
	ql_write32(qdev, ICB_H, (u32) (map >> 32));

	mask = CFG_Q_MASK | (bit << 16);
	value = bit | (q_id << CFG_Q_SHIFT);
	ql_write32(qdev, CFG, (mask | value));

	/*
	 * Wait for the bit to clear after signaling hw.
	 */
	status = ql_wait_cfg(qdev, bit);
exit:
	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
	pci_unmap_single(qdev->pdev, map, size, direction);
	return status;
}
/* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
			u32 *value)
{
	u32 offset = 0;
	int status;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	case MAC_ADDR_TYPE_CAM_MAC:
	{
		status =
		    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
			   (index << MAC_ADDR_IDX_SHIFT) | /* index */
			   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
		status =
		    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
		if (status)
			goto exit;
		*value++ = ql_read32(qdev, MAC_ADDR_DATA);
		status =
		    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
			   (index << MAC_ADDR_IDX_SHIFT) | /* index */
			   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
		status =
		    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MR, 0);
		if (status)
			goto exit;
		*value++ = ql_read32(qdev, MAC_ADDR_DATA);
		if (type == MAC_ADDR_TYPE_CAM_MAC) {
			status =
			    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					    MAC_ADDR_MW, 0);
			if (status)
				goto exit;
			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
			status =
			    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
					    MAC_ADDR_MR, 0);
			if (status)
				goto exit;
			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
		}
		break;
	}
	case MAC_ADDR_TYPE_VLAN:
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}
/* Set up a MAC, multicast or VLAN address for the
 * inbound frame matching.
 */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
			       u16 index)
{
	u32 offset = 0;
	int status = 0;

	switch (type) {
	case MAC_ADDR_TYPE_MULTI_MAC:
	{
		u32 upper = (addr[0] << 8) | addr[1];
		u32 lower = (addr[2] << 24) | (addr[3] << 16) |
			    (addr[4] << 8) | (addr[5]);

		status =
		    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
			   (index << MAC_ADDR_IDX_SHIFT) |
			   type | MAC_ADDR_E);
		ql_write32(qdev, MAC_ADDR_DATA, lower);
		status =
		    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
			   (index << MAC_ADDR_IDX_SHIFT) |
			   type | MAC_ADDR_E);
		ql_write32(qdev, MAC_ADDR_DATA, upper);
		status =
		    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		break;
	}
	case MAC_ADDR_TYPE_CAM_MAC:
	{
		u32 cam_output;
		u32 upper = (addr[0] << 8) | addr[1];
		u32 lower = (addr[2] << 24) | (addr[3] << 16) |
			    (addr[4] << 8) | (addr[5]);

		status =
		    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
			   (index << MAC_ADDR_IDX_SHIFT) | /* index */
			   type); /* type */
		ql_write32(qdev, MAC_ADDR_DATA, lower);
		status =
		    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
			   (index << MAC_ADDR_IDX_SHIFT) | /* index */
			   type); /* type */
		ql_write32(qdev, MAC_ADDR_DATA, upper);
		status =
		    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, (offset) | /* offset */
			   (index << MAC_ADDR_IDX_SHIFT) | /* index */
			   type); /* type */
		/* This field should also include the queue id
		 * and possibly the function id.  Right now we hardcode
		 * the route field to NIC core.
		 */
		cam_output = (CAM_OUT_ROUTE_NIC |
			      (qdev->func << CAM_OUT_FUNC_SHIFT) |
			      (0 << CAM_OUT_CQ_ID_SHIFT));
		if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
			cam_output |= CAM_OUT_RV;
		/* route to NIC core */
		ql_write32(qdev, MAC_ADDR_DATA, cam_output);
		break;
	}
	case MAC_ADDR_TYPE_VLAN:
	{
		u32 enable_bit = *((u32 *) &addr[0]);
		/* For VLAN, the addr actually holds a bit that
		 * either enables or disables the vlan id we are
		 * addressing. It's either MAC_ADDR_E on or off.
		 * That's bit-27 we're talking about.
		 */
		status =
		    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX, MAC_ADDR_MW, 0);
		if (status)
			goto exit;
		ql_write32(qdev, MAC_ADDR_IDX, offset | /* offset */
			   (index << MAC_ADDR_IDX_SHIFT) | /* index */
			   type |	/* type */
			   enable_bit);	/* enable/disable */
		break;
	}
	case MAC_ADDR_TYPE_MULTI_FLTR:
	default:
		netif_crit(qdev, ifup, qdev->ndev,
			   "Address type %d not yet supported.\n", type);
		status = -EPERM;
	}
exit:
	return status;
}
/* Set or clear MAC address in hardware. We sometimes
 * have to clear it to prevent wrong frame routing
 * especially in a bonding environment.
 */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
{
	int status;
	char zero_mac_addr[ETH_ALEN];
	char *addr;

	if (set) {
		addr = &qdev->current_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Set Mac addr %pM\n", addr);
	} else {
		eth_zero_addr(zero_mac_addr);
		addr = &zero_mac_addr[0];
		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
			     "Clearing MAC address\n");
	}
	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		return status;
	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
				     MAC_ADDR_TYPE_CAM_MAC,
				     qdev->func * MAX_CQ);
	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
	if (status)
		netif_err(qdev, ifup, qdev->ndev,
			  "Failed to init mac address.\n");
	return status;
}
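/* Carrier state transitions: the station MAC address is programmed
 * into the CAM when the link comes up and cleared again when it goes
 * down, so frames are not routed to a port whose link is down (see
 * the bonding note on ql_set_mac_addr() above).
 */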
void ql_link_on(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
	netif_carrier_on(qdev->ndev);
	ql_set_mac_addr(qdev, 1);
}

void ql_link_off(struct ql_adapter *qdev)
{
	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
	netif_carrier_off(qdev->ndev);
	ql_set_mac_addr(qdev, 0);
}
/* Get a specific frame routing value from the CAM.
 * Used for debug and reg dump.
 */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
{
	int status = 0;

	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
	if (status)
		goto exit;

	ql_write32(qdev, RT_IDX,
		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
	if (status)
		goto exit;
	*value = ql_read32(qdev, RT_DATA);
exit:
	return status;
}
/* The NIC function for this chip has 16 routing indexes.  Each one can be used
 * to route different frame types to various inbound queues.  We send broadcast/
 * multicast/error frames to the default queue for slow handling,
 * and CAM hit/RSS frames to the fast handling queues.
 */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
			      int enable)
{
	int status = -EINVAL; /* Return error if no mask match. */
	u32 value = 0;

	switch (mask) {
	case RT_IDX_CAM_HIT:
	{
		value = RT_IDX_DST_CAM_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	}
	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	}
	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	}
	case RT_IDX_IP_CSUM_ERR:	/* Pass up IP CSUM error frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_IP_CSUM_ERR_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	}
	case RT_IDX_TU_CSUM_ERR:	/* Pass up TCP/UDP CSUM error frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_TCP_UDP_CSUM_ERR_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	}
	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	}
	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	}
	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	}
	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
	{
		value = RT_IDX_DST_RSS |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT); /* index */
		break;
	}
	case 0:		/* Clear the E-bit on an entry. */
	{
		value = RT_IDX_DST_DFLT_Q |	/* dest */
			RT_IDX_TYPE_NICQ |	/* type */
			(index << RT_IDX_IDX_SHIFT); /* index */
		break;
	}
	default:
		netif_err(qdev, ifup, qdev->ndev,
			  "Mask type %d not yet supported.\n", mask);
		status = -EPERM;
		goto exit;
	}

	if (value) {
		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
		if (status)
			goto exit;
		value |= (enable ? RT_IDX_E : 0);
		ql_write32(qdev, RT_IDX, value);
		ql_write32(qdev, RT_DATA, enable ? mask : 0);
	}
exit:
	return status;
}
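/* Global interrupt enable/disable.  The upper 16 bits of INTR_EN act as
 * a write mask for the corresponding low bits, so writing the mask with
 * the bit set enables and writing the mask alone disables (assumed from
 * the register usage below).
 */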
static void ql_enable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
}

static void ql_disable_interrupts(struct ql_adapter *qdev)
{
	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
}
/* If we're running with multiple MSI-X vectors then we enable on the fly.
 * Otherwise, we may have multiple outstanding workers and don't want to
 * enable until the last one finishes. In this case, the irq_cnt gets
 * incremented every time we queue a worker and decremented every time
 * a worker finishes.  Once it hits zero we enable the interrupt.
 */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	unsigned long hw_flags = 0;
	struct intr_context *ctx = qdev->intr_context + intr;

	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
		/* Always enable if we're MSIX multi interrupts and
		 * it's not the default (zeroeth) interrupt.
		 */
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
		return var;
	}

	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
	if (atomic_dec_and_test(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_en_mask);
		var = ql_read32(qdev, STS);
	}
	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
	return var;
}
static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
{
	u32 var = 0;
	struct intr_context *ctx;

	/* HW disables for us if we're MSIX multi interrupts and
	 * it's not the default (zeroeth) interrupt.
	 */
	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
		return 0;

	ctx = qdev->intr_context + intr;
	spin_lock(&qdev->hw_lock);
	if (!atomic_read(&ctx->irq_cnt)) {
		ql_write32(qdev, INTR_EN,
			   ctx->intr_dis_mask);
		var = ql_read32(qdev, STS);
	}
	atomic_inc(&ctx->irq_cnt);
	spin_unlock(&qdev->hw_lock);
	return var;
}
static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
{
	int i;

	for (i = 0; i < qdev->intr_count; i++) {
		/* The enable call does an atomic_dec_and_test
		 * and enables only if the result is zero.
		 * So we precharge it here.
		 */
		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
			     i == 0))
			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
		ql_enable_completion_interrupt(qdev, i);
	}
}
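/* Validate a flash image that has been read into qdev->flash: the first
 * four bytes must match the expected signature string and the 16-bit
 * words of the image must sum to zero.  Returns 0 when both checks pass.
 */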
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
{
	int status, i;
	u16 csum = 0;
	__le16 *flash = (__le16 *)&qdev->flash;

	status = strncmp((char *)&qdev->flash, str, 4);
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
		return status;
	}

	for (i = 0; i < size; i++)
		csum += le16_to_cpu(*flash++);

	if (csum)
		netif_err(qdev, ifup, qdev->ndev,
			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);

	return csum;
}
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
{
	int status = 0;

	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
	if (status)
		goto exit;
	/* This data is stored on flash as an array of
	 * __le32.  Since ql_read32() returns cpu endian
	 * we need to swap it back.
	 */
	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
exit:
	return status;
}
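/* Read this function's flash parameter block on 8000-series adapters,
 * validate it, and copy the (possibly BOFM-overridden) MAC address into
 * the net_device.
 */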
static int ql_get_8000_flash_params(struct ql_adapter *qdev)
{
	u32 i, size;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset;
	u8 mac_addr[6];

	/* Get flash offset for function and adjust
	 * for dword access.
	 */
	if (!qdev->port)
		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
	else
		offset = FUNC1_FLASH_OFFSET / sizeof(u32);

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	size = sizeof(struct flash_params_8000) / sizeof(u32);
	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
				   sizeof(struct flash_params_8000) / sizeof(u16),
				   "8000");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	/* Extract either manufacturer or BOFM modified
	 * MAC address.
	 */
	if (qdev->flash.flash_params_8000.data_type1 == 2)
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr1,
		       qdev->ndev->addr_len);
	else
		memcpy(mac_addr,
		       qdev->flash.flash_params_8000.mac_addr,
		       qdev->ndev->addr_len);

	if (!is_valid_ether_addr(mac_addr)) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}
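/* Same as above for 8012-series adapters: read and validate this
 * function's flash parameter block and copy its MAC address into the
 * net_device.
 */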
static int ql_get_8012_flash_params(struct ql_adapter *qdev)
{
	int i;
	int status;
	__le32 *p = (__le32 *)&qdev->flash;
	u32 offset = 0;
	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);

	/* Second function's parameters follow the first
	 * function's.
	 */
	if (qdev->port)
		offset = size;

	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
		return -ETIMEDOUT;

	for (i = 0; i < size; i++, p++) {
		status = ql_read_flash_word(qdev, i+offset, p);
		if (status) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Error reading flash.\n");
			goto exit;
		}
	}

	status = ql_validate_flash(qdev,
				   sizeof(struct flash_params_8012) / sizeof(u16),
				   "8012");
	if (status) {
		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
		status = -EINVAL;
		goto exit;
	}

	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
		status = -EINVAL;
		goto exit;
	}

	memcpy(qdev->ndev->dev_addr,
	       qdev->flash.flash_params_8012.mac_addr,
	       qdev->ndev->addr_len);

exit:
	ql_sem_unlock(qdev, SEM_FLASH_MASK);
	return status;
}
/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
{
	int status;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		return status;
	/* write the data to the data reg */
	ql_write32(qdev, XGMAC_DATA, data);
	/* trigger the write */
	ql_write32(qdev, XGMAC_ADDR, reg);
	return status;
}
/* xgmac registers are located behind the xgmac_addr and xgmac_data
 * register pair.  Each read/write requires us to wait for the ready
 * bit before reading/writing the data.
 */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
	int status = 0;
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* set up for reg read */
	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
	/* wait for reg to come ready */
	status = ql_wait_reg_rdy(qdev,
				 XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
	if (status)
		goto exit;
	/* get the data */
	*data = ql_read32(qdev, XGMAC_DATA);
exit:
	return status;
}
/* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
{
	int status = 0;
	u32 hi = 0;
	u32 lo = 0;

	status = ql_read_xgmac_reg(qdev, reg, &lo);
	if (status)
		goto exit;

	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
	if (status)
		goto exit;

	*data = (u64) lo | ((u64) hi << 32);

exit:
	return status;
}
static int ql_8000_port_initialize(struct ql_adapter *qdev)
{
	int status;
	/*
	 * Get MPI firmware version for driver banner
	 * and ethtool info.
	 */
	status = ql_mb_about_fw(qdev);
	if (status)
		goto exit;
	status = ql_mb_get_fw_state(qdev);
	if (status)
		goto exit;
	/* Wake up a worker to get/set the TX/RX frame sizes. */
	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
exit:
	return status;
}
/* Take the MAC Core out of reset.
 * Enable statistics counting.
 * Take the transmitter/receiver out of reset.
 * This functionality may be done in the MPI firmware at a
 * later date.
 */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
{
	int status = 0;
	u32 data;

	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
		/* Another function has the semaphore, so
		 * wait for the port init bit to come ready.
		 */
		netif_info(qdev, link, qdev->ndev,
			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
		if (status) {
			netif_crit(qdev, link, qdev->ndev,
				   "Port initialize timed out.\n");
		}
		return status;
	}

	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
	/* Set the core reset. */
	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
	if (status)
		goto end;
	data |= GLOBAL_CFG_RESET;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Clear the core reset and turn on jumbo for receiver. */
	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
	data |= GLOBAL_CFG_TX_STAT_EN;
	data |= GLOBAL_CFG_RX_STAT_EN;
	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
	if (status)
		goto end;

	/* Enable transmitter, and clear its reset. */
	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
	if (status)
		goto end;
	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
	data |= TX_CFG_EN;	/* Enable the transmitter. */
	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
	if (status)
		goto end;

	/* Enable receiver and clear its reset. */
	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
	if (status)
		goto end;
	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
	data |= RX_CFG_EN;	/* Enable the receiver. */
	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
	if (status)
		goto end;

	/* Turn on jumbo. */
	status =
	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
	if (status)
		goto end;
	status =
	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
	if (status)
		goto end;

	/* Signal to the world that the port is enabled. */
	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
end:
	ql_sem_unlock(qdev, qdev->xg_sem_mask);
	return status;
}
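/* Size in bytes of one large-buffer "master" page block: a single page
 * scaled by the configured allocation order.
 */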
static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
{
	return PAGE_SIZE << qdev->lbq_buf_order;
}
/* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];

	rx_ring->lbq_curr_idx++;
	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
		rx_ring->lbq_curr_idx = 0;
	rx_ring->lbq_free_cnt++;
	return lbq_desc;
}
static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
					  struct rx_ring *rx_ring)
{
	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);

	pci_dma_sync_single_for_cpu(qdev->pdev,
				    dma_unmap_addr(lbq_desc, mapaddr),
				    rx_ring->lbq_buf_size,
				    PCI_DMA_FROMDEVICE);

	/* If it's the last chunk of our master page then
	 * we unmap it.
	 */
	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
	    == ql_lbq_block_size(qdev))
		pci_unmap_page(qdev->pdev,
			       lbq_desc->p.pg_chunk.map,
			       ql_lbq_block_size(qdev),
			       PCI_DMA_FROMDEVICE);
	return lbq_desc;
}
/* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
{
	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];

	rx_ring->sbq_curr_idx++;
	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
		rx_ring->sbq_curr_idx = 0;
	rx_ring->sbq_free_cnt++;
	return sbq_desc;
}
/* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
{
	rx_ring->cnsmr_idx++;
	rx_ring->curr_entry++;
	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
		rx_ring->cnsmr_idx = 0;
		rx_ring->curr_entry = rx_ring->cq_base;
	}
}

static void ql_write_cq_idx(struct rx_ring *rx_ring)
{
	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
}
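/* Hand out the next lbq_buf_size chunk of the ring's current master
 * page, allocating and mapping a fresh higher-order page when the
 * previous one has been fully consumed.  An extra page reference is
 * taken for every chunk except the last one carved from the page.
 */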
static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
			     struct bq_desc *lbq_desc)
{
	if (!rx_ring->pg_chunk.page) {
		u64 map;

		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
						     GFP_ATOMIC,
						     qdev->lbq_buf_order);
		if (unlikely(!rx_ring->pg_chunk.page)) {
			netif_err(qdev, drv, qdev->ndev,
				  "page allocation failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.offset = 0;
		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
				   0, ql_lbq_block_size(qdev),
				   PCI_DMA_FROMDEVICE);
		if (pci_dma_mapping_error(qdev->pdev, map)) {
			__free_pages(rx_ring->pg_chunk.page,
				     qdev->lbq_buf_order);
			rx_ring->pg_chunk.page = NULL;
			netif_err(qdev, drv, qdev->ndev,
				  "PCI mapping failed.\n");
			return -ENOMEM;
		}
		rx_ring->pg_chunk.map = map;
		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
	}

	/* Copy the current master pg_chunk info
	 * to the current descriptor.
	 */
	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;

	/* Adjust the master page chunk for next
	 * buffer get.
	 */
	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
		rx_ring->pg_chunk.page = NULL;
		lbq_desc->p.pg_chunk.last_flag = 1;
	} else {
		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
		get_page(rx_ring->pg_chunk.page);
		lbq_desc->p.pg_chunk.last_flag = 0;
	}
	return 0;
}
/* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->lbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *lbq_desc;
	u64 map;
	int i;

	while (rx_ring->lbq_free_cnt > 32) {
		for (i = (rx_ring->lbq_clean_idx % 16); i < 16; i++) {
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "lbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			lbq_desc = &rx_ring->lbq[clean_idx];
			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
				rx_ring->lbq_clean_idx = clean_idx;
				netif_err(qdev, ifup, qdev->ndev,
					  "Could not get a page chunk, i=%d, clean_idx =%d .\n",
					  i, clean_idx);
				return;
			}

			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					  rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);

			pci_dma_sync_single_for_device(qdev->pdev, map,
						       rx_ring->lbq_buf_size,
						       PCI_DMA_FROMDEVICE);
			clean_idx++;
			if (clean_idx == rx_ring->lbq_len)
				clean_idx = 0;
		}

		rx_ring->lbq_clean_idx = clean_idx;
		rx_ring->lbq_prod_idx += 16;
		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
			rx_ring->lbq_prod_idx = 0;
		rx_ring->lbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "lbq: updating prod idx = %d.\n",
			     rx_ring->lbq_prod_idx);
		ql_write_db_reg(rx_ring->lbq_prod_idx,
				rx_ring->lbq_prod_idx_db_reg);
	}
}
/* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
{
	u32 clean_idx = rx_ring->sbq_clean_idx;
	u32 start_idx = clean_idx;
	struct bq_desc *sbq_desc;
	u64 map;
	int i;

	while (rx_ring->sbq_free_cnt > 16) {
		for (i = (rx_ring->sbq_clean_idx % 16); i < 16; i++) {
			sbq_desc = &rx_ring->sbq[clean_idx];
			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
				     "sbq: try cleaning clean_idx = %d.\n",
				     clean_idx);
			if (sbq_desc->p.skb == NULL) {
				netif_printk(qdev, rx_status, KERN_DEBUG,
					     qdev->ndev,
					     "sbq: getting new skb for index %d.\n",
					     sbq_desc->index);
				sbq_desc->p.skb =
				    netdev_alloc_skb(qdev->ndev,
						     SMALL_BUFFER_SIZE);
				if (sbq_desc->p.skb == NULL) {
					rx_ring->sbq_clean_idx = clean_idx;
					return;
				}
				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
				map = pci_map_single(qdev->pdev,
						     sbq_desc->p.skb->data,
						     rx_ring->sbq_buf_size,
						     PCI_DMA_FROMDEVICE);
				if (pci_dma_mapping_error(qdev->pdev, map)) {
					netif_err(qdev, ifup, qdev->ndev,
						  "PCI mapping failed.\n");
					rx_ring->sbq_clean_idx = clean_idx;
					dev_kfree_skb_any(sbq_desc->p.skb);
					sbq_desc->p.skb = NULL;
					return;
				}
				dma_unmap_addr_set(sbq_desc, mapaddr, map);
				dma_unmap_len_set(sbq_desc, maplen,
						  rx_ring->sbq_buf_size);
				*sbq_desc->addr = cpu_to_le64(map);
			}

			clean_idx++;
			if (clean_idx == rx_ring->sbq_len)
				clean_idx = 0;
		}
		rx_ring->sbq_clean_idx = clean_idx;
		rx_ring->sbq_prod_idx += 16;
		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
			rx_ring->sbq_prod_idx = 0;
		rx_ring->sbq_free_cnt -= 16;
	}

	if (start_idx != clean_idx) {
		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
			     "sbq: updating prod idx = %d.\n",
			     rx_ring->sbq_prod_idx);
		ql_write_db_reg(rx_ring->sbq_prod_idx,
				rx_ring->sbq_prod_idx_db_reg);
	}
}
  1141. static void ql_update_buffer_queues(struct ql_adapter *qdev,
  1142. struct rx_ring *rx_ring)
  1143. {
  1144. ql_update_sbq(qdev, rx_ring);
  1145. ql_update_lbq(qdev, rx_ring);
  1146. }
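/* Called after each pass over an inbound completion ring (see
 * ql_clean_inbound_rx_ring() below) and from ql_alloc_rx_buffers() to
 * prime both buffer queues.
 */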
  1147. /* Unmaps tx buffers. Can be called from send() if a pci mapping
  1148. * fails at some stage, or from the interrupt when a tx completes.
  1149. */
  1150. static void ql_unmap_send(struct ql_adapter *qdev,
  1151. struct tx_ring_desc *tx_ring_desc, int mapped)
  1152. {
  1153. int i;
  1154. for (i = 0; i < mapped; i++) {
  1155. if (i == 0 || (i == 7 && mapped > 7)) {

  1156. /*
  1157. * Unmap the skb->data area, or the
  1158. * external sglist (AKA the Outbound
  1159. * Address List (OAL)).
1160. * If it's the zeroth element, then it's
1161. * the skb->data area. If it's the 7th
1162. * element and more than 7 segments were
1163. * mapped, then it's the OAL.
  1164. */
  1165. if (i == 7) {
  1166. netif_printk(qdev, tx_done, KERN_DEBUG,
  1167. qdev->ndev,
  1168. "unmapping OAL area.\n");
  1169. }
  1170. pci_unmap_single(qdev->pdev,
  1171. dma_unmap_addr(&tx_ring_desc->map[i],
  1172. mapaddr),
  1173. dma_unmap_len(&tx_ring_desc->map[i],
  1174. maplen),
  1175. PCI_DMA_TODEVICE);
  1176. } else {
  1177. netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
  1178. "unmapping frag %d.\n", i);
  1179. pci_unmap_page(qdev->pdev,
  1180. dma_unmap_addr(&tx_ring_desc->map[i],
  1181. mapaddr),
  1182. dma_unmap_len(&tx_ring_desc->map[i],
  1183. maplen), PCI_DMA_TODEVICE);
  1184. }
  1185. }
  1186. }
  1187. /* Map the buffers for this transmit. This will return
  1188. * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
  1189. */
  1190. static int ql_map_send(struct ql_adapter *qdev,
  1191. struct ob_mac_iocb_req *mac_iocb_ptr,
  1192. struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
  1193. {
  1194. int len = skb_headlen(skb);
  1195. dma_addr_t map;
  1196. int frag_idx, err, map_idx = 0;
  1197. struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
  1198. int frag_cnt = skb_shinfo(skb)->nr_frags;
  1199. if (frag_cnt) {
  1200. netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
  1201. "frag_cnt = %d.\n", frag_cnt);
  1202. }
  1203. /*
  1204. * Map the skb buffer first.
  1205. */
  1206. map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
  1207. err = pci_dma_mapping_error(qdev->pdev, map);
  1208. if (err) {
  1209. netif_err(qdev, tx_queued, qdev->ndev,
  1210. "PCI mapping failed with error: %d\n", err);
  1211. return NETDEV_TX_BUSY;
  1212. }
  1213. tbd->len = cpu_to_le32(len);
  1214. tbd->addr = cpu_to_le64(map);
  1215. dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
  1216. dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
  1217. map_idx++;
  1218. /*
  1219. * This loop fills the remainder of the 8 address descriptors
  1220. * in the IOCB. If there are more than 7 fragments, then the
  1221. * eighth address desc will point to an external list (OAL).
  1222. * When this happens, the remainder of the frags will be stored
  1223. * in this list.
  1224. */
  1225. for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
  1226. skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
  1227. tbd++;
  1228. if (frag_idx == 6 && frag_cnt > 7) {
  1229. /* Let's tack on an sglist.
  1230. * Our control block will now
  1231. * look like this:
  1232. * iocb->seg[0] = skb->data
  1233. * iocb->seg[1] = frag[0]
  1234. * iocb->seg[2] = frag[1]
  1235. * iocb->seg[3] = frag[2]
  1236. * iocb->seg[4] = frag[3]
  1237. * iocb->seg[5] = frag[4]
  1238. * iocb->seg[6] = frag[5]
  1239. * iocb->seg[7] = ptr to OAL (external sglist)
  1240. * oal->seg[0] = frag[6]
  1241. * oal->seg[1] = frag[7]
  1242. * oal->seg[2] = frag[8]
  1243. * oal->seg[3] = frag[9]
  1244. * oal->seg[4] = frag[10]
  1245. * etc...
  1246. */
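/* Worked example: with frag_cnt = 10 the OAL is attached while
 * frag_idx = 6, so its tbd->len encodes the 4 remaining entries
 * (frags 6-9) times sizeof(struct tx_buf_desc), OR'd with the
 * TX_DESC_C continuation bit.
 */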
  1247. /* Tack on the OAL in the eighth segment of IOCB. */
  1248. map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
  1249. sizeof(struct oal),
  1250. PCI_DMA_TODEVICE);
  1251. err = pci_dma_mapping_error(qdev->pdev, map);
  1252. if (err) {
  1253. netif_err(qdev, tx_queued, qdev->ndev,
  1254. "PCI mapping outbound address list with error: %d\n",
  1255. err);
  1256. goto map_error;
  1257. }
  1258. tbd->addr = cpu_to_le64(map);
  1259. /*
  1260. * The length is the number of fragments
  1261. * that remain to be mapped times the length
  1262. * of our sglist (OAL).
  1263. */
  1264. tbd->len =
  1265. cpu_to_le32((sizeof(struct tx_buf_desc) *
  1266. (frag_cnt - frag_idx)) | TX_DESC_C);
  1267. dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
  1268. map);
  1269. dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
  1270. sizeof(struct oal));
  1271. tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
  1272. map_idx++;
  1273. }
  1274. map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
  1275. DMA_TO_DEVICE);
  1276. err = dma_mapping_error(&qdev->pdev->dev, map);
  1277. if (err) {
  1278. netif_err(qdev, tx_queued, qdev->ndev,
  1279. "PCI mapping frags failed with error: %d.\n",
  1280. err);
  1281. goto map_error;
  1282. }
  1283. tbd->addr = cpu_to_le64(map);
  1284. tbd->len = cpu_to_le32(skb_frag_size(frag));
  1285. dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
  1286. dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
  1287. skb_frag_size(frag));
  1288. }
  1289. /* Save the number of segments we've mapped. */
  1290. tx_ring_desc->map_cnt = map_idx;
  1291. /* Terminate the last segment. */
  1292. tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
  1293. return NETDEV_TX_OK;
  1294. map_error:
  1295. /*
1296. * map_idx counts the segments (skb->data, the optional
1297. * OAL, and any frags) that were mapped successfully
1298. * before the failure, so ql_unmap_send() unmaps exactly
1299. * those and nothing more.
  1300. */
  1301. ql_unmap_send(qdev, tx_ring_desc, map_idx);
  1302. return NETDEV_TX_BUSY;
  1303. }
  1304. /* Categorizing receive firmware frame errors */
  1305. static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
  1306. struct rx_ring *rx_ring)
  1307. {
  1308. struct nic_stats *stats = &qdev->nic_stats;
  1309. stats->rx_err_count++;
  1310. rx_ring->rx_errors++;
  1311. switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
  1312. case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
  1313. stats->rx_code_err++;
  1314. break;
  1315. case IB_MAC_IOCB_RSP_ERR_OVERSIZE:
  1316. stats->rx_oversize_err++;
  1317. break;
  1318. case IB_MAC_IOCB_RSP_ERR_UNDERSIZE:
  1319. stats->rx_undersize_err++;
  1320. break;
  1321. case IB_MAC_IOCB_RSP_ERR_PREAMBLE:
  1322. stats->rx_preamble_err++;
  1323. break;
  1324. case IB_MAC_IOCB_RSP_ERR_FRAME_LEN:
  1325. stats->rx_frame_len_err++;
  1326. break;
  1327. case IB_MAC_IOCB_RSP_ERR_CRC:
  1328. stats->rx_crc_err++;
  1329. default:
  1330. break;
  1331. }
  1332. }
  1333. /**
  1334. * ql_update_mac_hdr_len - helper routine to update the mac header length
  1335. * based on vlan tags if present
  1336. */
  1337. static void ql_update_mac_hdr_len(struct ql_adapter *qdev,
  1338. struct ib_mac_iocb_rsp *ib_mac_rsp,
  1339. void *page, size_t *len)
  1340. {
  1341. u16 *tags;
  1342. if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
  1343. return;
  1344. if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) {
  1345. tags = (u16 *)page;
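/* tags[] is a 16-bit view of the frame: tags[6] is the EtherType at
 * byte offset 12 and tags[8] is the next EtherType at offset 16, i.e.
 * the one that follows a single VLAN header.
 */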
  1346. /* Look for stacked vlan tags in ethertype field */
  1347. if (tags[6] == ETH_P_8021Q &&
  1348. tags[8] == ETH_P_8021Q)
  1349. *len += 2 * VLAN_HLEN;
  1350. else
  1351. *len += VLAN_HLEN;
  1352. }
  1353. }
  1354. /* Process an inbound completion from an rx ring. */
  1355. static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
  1356. struct rx_ring *rx_ring,
  1357. struct ib_mac_iocb_rsp *ib_mac_rsp,
  1358. u32 length,
  1359. u16 vlan_id)
  1360. {
  1361. struct sk_buff *skb;
  1362. struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
  1363. struct napi_struct *napi = &rx_ring->napi;
  1364. /* Frame error, so drop the packet. */
  1365. if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
  1366. ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
  1367. put_page(lbq_desc->p.pg_chunk.page);
  1368. return;
  1369. }
  1370. napi->dev = qdev->ndev;
  1371. skb = napi_get_frags(napi);
  1372. if (!skb) {
  1373. netif_err(qdev, drv, qdev->ndev,
  1374. "Couldn't get an skb, exiting.\n");
  1375. rx_ring->rx_dropped++;
  1376. put_page(lbq_desc->p.pg_chunk.page);
  1377. return;
  1378. }
  1379. prefetch(lbq_desc->p.pg_chunk.va);
  1380. __skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
  1381. lbq_desc->p.pg_chunk.page,
  1382. lbq_desc->p.pg_chunk.offset,
  1383. length);
  1384. skb->len += length;
  1385. skb->data_len += length;
  1386. skb->truesize += length;
  1387. skb_shinfo(skb)->nr_frags++;
  1388. rx_ring->rx_packets++;
  1389. rx_ring->rx_bytes += length;
  1390. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1391. skb_record_rx_queue(skb, rx_ring->cq_id);
  1392. if (vlan_id != 0xffff)
  1393. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
  1394. napi_gro_frags(napi);
  1395. }
  1396. /* Process an inbound completion from an rx ring. */
  1397. static void ql_process_mac_rx_page(struct ql_adapter *qdev,
  1398. struct rx_ring *rx_ring,
  1399. struct ib_mac_iocb_rsp *ib_mac_rsp,
  1400. u32 length,
  1401. u16 vlan_id)
  1402. {
  1403. struct net_device *ndev = qdev->ndev;
  1404. struct sk_buff *skb = NULL;
  1405. void *addr;
  1406. struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
  1407. struct napi_struct *napi = &rx_ring->napi;
  1408. size_t hlen = ETH_HLEN;
  1409. skb = netdev_alloc_skb(ndev, length);
  1410. if (!skb) {
  1411. rx_ring->rx_dropped++;
  1412. put_page(lbq_desc->p.pg_chunk.page);
  1413. return;
  1414. }
  1415. addr = lbq_desc->p.pg_chunk.va;
  1416. prefetch(addr);
  1417. /* Frame error, so drop the packet. */
  1418. if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
  1419. ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
  1420. goto err_out;
  1421. }
  1422. /* Update the MAC header length*/
  1423. ql_update_mac_hdr_len(qdev, ib_mac_rsp, addr, &hlen);
  1424. /* The max framesize filter on this chip is set higher than
  1425. * MTU since FCoE uses 2k frames.
  1426. */
1427. if (length > ndev->mtu + hlen) {
1428. netif_err(qdev, drv, qdev->ndev,
1429. "Frame too long, dropping.\n");
  1430. rx_ring->rx_dropped++;
  1431. goto err_out;
  1432. }
  1433. memcpy(skb_put(skb, hlen), addr, hlen);
  1434. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
  1435. "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
  1436. length);
  1437. skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
  1438. lbq_desc->p.pg_chunk.offset + hlen,
  1439. length - hlen);
  1440. skb->len += length - hlen;
  1441. skb->data_len += length - hlen;
  1442. skb->truesize += length - hlen;
  1443. rx_ring->rx_packets++;
  1444. rx_ring->rx_bytes += skb->len;
  1445. skb->protocol = eth_type_trans(skb, ndev);
  1446. skb_checksum_none_assert(skb);
  1447. if ((ndev->features & NETIF_F_RXCSUM) &&
  1448. !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
  1449. /* TCP frame. */
  1450. if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
  1451. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
  1452. "TCP checksum done!\n");
  1453. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1454. } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
  1455. (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
  1456. /* Unfragmented ipv4 UDP frame. */
  1457. struct iphdr *iph =
  1458. (struct iphdr *)((u8 *)addr + hlen);
  1459. if (!(iph->frag_off &
  1460. htons(IP_MF|IP_OFFSET))) {
  1461. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1462. netif_printk(qdev, rx_status, KERN_DEBUG,
  1463. qdev->ndev,
  1464. "UDP checksum done!\n");
  1465. }
  1466. }
  1467. }
  1468. skb_record_rx_queue(skb, rx_ring->cq_id);
  1469. if (vlan_id != 0xffff)
  1470. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
  1471. if (skb->ip_summed == CHECKSUM_UNNECESSARY)
  1472. napi_gro_receive(napi, skb);
  1473. else
  1474. netif_receive_skb(skb);
  1475. return;
  1476. err_out:
  1477. dev_kfree_skb_any(skb);
  1478. put_page(lbq_desc->p.pg_chunk.page);
  1479. }
  1480. /* Process an inbound completion from an rx ring. */
  1481. static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
  1482. struct rx_ring *rx_ring,
  1483. struct ib_mac_iocb_rsp *ib_mac_rsp,
  1484. u32 length,
  1485. u16 vlan_id)
  1486. {
  1487. struct net_device *ndev = qdev->ndev;
  1488. struct sk_buff *skb = NULL;
  1489. struct sk_buff *new_skb = NULL;
  1490. struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
  1491. skb = sbq_desc->p.skb;
  1492. /* Allocate new_skb and copy */
  1493. new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
  1494. if (new_skb == NULL) {
  1495. rx_ring->rx_dropped++;
  1496. return;
  1497. }
  1498. skb_reserve(new_skb, NET_IP_ALIGN);
  1499. memcpy(skb_put(new_skb, length), skb->data, length);
  1500. skb = new_skb;
  1501. /* Frame error, so drop the packet. */
  1502. if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
  1503. ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
  1504. dev_kfree_skb_any(skb);
  1505. return;
  1506. }
  1507. /* loopback self test for ethtool */
  1508. if (test_bit(QL_SELFTEST, &qdev->flags)) {
  1509. ql_check_lb_frame(qdev, skb);
  1510. dev_kfree_skb_any(skb);
  1511. return;
  1512. }
  1513. /* The max framesize filter on this chip is set higher than
  1514. * MTU since FCoE uses 2k frames.
  1515. */
  1516. if (skb->len > ndev->mtu + ETH_HLEN) {
  1517. dev_kfree_skb_any(skb);
  1518. rx_ring->rx_dropped++;
  1519. return;
  1520. }
  1521. prefetch(skb->data);
  1522. if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
  1523. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
  1524. "%s Multicast.\n",
  1525. (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
  1526. IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
  1527. (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
  1528. IB_MAC_IOCB_RSP_M_REG ? "Registered" :
  1529. (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
  1530. IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
  1531. }
  1532. if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
  1533. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
  1534. "Promiscuous Packet.\n");
  1535. rx_ring->rx_packets++;
  1536. rx_ring->rx_bytes += skb->len;
  1537. skb->protocol = eth_type_trans(skb, ndev);
  1538. skb_checksum_none_assert(skb);
  1539. /* If rx checksum is on, and there are no
  1540. * csum or frame errors.
  1541. */
  1542. if ((ndev->features & NETIF_F_RXCSUM) &&
  1543. !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
  1544. /* TCP frame. */
  1545. if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
  1546. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
  1547. "TCP checksum done!\n");
  1548. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1549. } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
  1550. (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
  1551. /* Unfragmented ipv4 UDP frame. */
  1552. struct iphdr *iph = (struct iphdr *) skb->data;
  1553. if (!(iph->frag_off &
  1554. htons(IP_MF|IP_OFFSET))) {
  1555. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1556. netif_printk(qdev, rx_status, KERN_DEBUG,
  1557. qdev->ndev,
  1558. "UDP checksum done!\n");
  1559. }
  1560. }
  1561. }
  1562. skb_record_rx_queue(skb, rx_ring->cq_id);
  1563. if (vlan_id != 0xffff)
  1564. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
  1565. if (skb->ip_summed == CHECKSUM_UNNECESSARY)
  1566. napi_gro_receive(&rx_ring->napi, skb);
  1567. else
  1568. netif_receive_skb(skb);
  1569. }
  1570. static void ql_realign_skb(struct sk_buff *skb, int len)
  1571. {
  1572. void *temp_addr = skb->data;
  1573. /* Undo the skb_reserve(skb,32) we did before
  1574. * giving to hardware, and realign data on
  1575. * a 2-byte boundary.
  1576. */
  1577. skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
  1578. skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
  1579. skb_copy_to_linear_data(skb, temp_addr,
  1580. (unsigned int)len);
  1581. }
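/* For example, with QLGE_SB_PAD = 32 and the usual NET_IP_ALIGN = 2, the
 * data pointer moves back by 30 bytes, leaving a 2-byte pad so the IP
 * header that follows the 14-byte Ethernet header is 4-byte aligned.
 */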
  1582. /*
  1583. * This function builds an skb for the given inbound
1584. * completion. It will be rewritten for readability in the
1585. * near future, but for now it works well.
  1586. */
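/* Rough map of the cases handled below, based on the response flags:
 *   HV+HS: the header alone arrives in a small buffer.
 *   DS:    the data also fits in a small buffer (appended or used directly).
 *   DL:    the data sits in a single large-buffer page chunk.
 *   else:  the data spans several large chunks listed by a small-buffer sglist.
 */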
  1587. static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
  1588. struct rx_ring *rx_ring,
  1589. struct ib_mac_iocb_rsp *ib_mac_rsp)
  1590. {
  1591. struct bq_desc *lbq_desc;
  1592. struct bq_desc *sbq_desc;
  1593. struct sk_buff *skb = NULL;
  1594. u32 length = le32_to_cpu(ib_mac_rsp->data_len);
  1595. u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
  1596. size_t hlen = ETH_HLEN;
  1597. /*
  1598. * Handle the header buffer if present.
  1599. */
  1600. if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
  1601. ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
  1602. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
  1603. "Header of %d bytes in small buffer.\n", hdr_len);
  1604. /*
  1605. * Headers fit nicely into a small buffer.
  1606. */
  1607. sbq_desc = ql_get_curr_sbuf(rx_ring);
  1608. pci_unmap_single(qdev->pdev,
  1609. dma_unmap_addr(sbq_desc, mapaddr),
  1610. dma_unmap_len(sbq_desc, maplen),
  1611. PCI_DMA_FROMDEVICE);
  1612. skb = sbq_desc->p.skb;
  1613. ql_realign_skb(skb, hdr_len);
  1614. skb_put(skb, hdr_len);
  1615. sbq_desc->p.skb = NULL;
  1616. }
  1617. /*
  1618. * Handle the data buffer(s).
  1619. */
  1620. if (unlikely(!length)) { /* Is there data too? */
  1621. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
  1622. "No Data buffer in this packet.\n");
  1623. return skb;
  1624. }
  1625. if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
  1626. if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
  1627. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
  1628. "Headers in small, data of %d bytes in small, combine them.\n",
  1629. length);
  1630. /*
  1631. * Data is less than small buffer size so it's
  1632. * stuffed in a small buffer.
  1633. * For this case we append the data
  1634. * from the "data" small buffer to the "header" small
  1635. * buffer.
  1636. */
  1637. sbq_desc = ql_get_curr_sbuf(rx_ring);
  1638. pci_dma_sync_single_for_cpu(qdev->pdev,
  1639. dma_unmap_addr
  1640. (sbq_desc, mapaddr),
  1641. dma_unmap_len
  1642. (sbq_desc, maplen),
  1643. PCI_DMA_FROMDEVICE);
  1644. memcpy(skb_put(skb, length),
  1645. sbq_desc->p.skb->data, length);
  1646. pci_dma_sync_single_for_device(qdev->pdev,
  1647. dma_unmap_addr
  1648. (sbq_desc,
  1649. mapaddr),
  1650. dma_unmap_len
  1651. (sbq_desc,
  1652. maplen),
  1653. PCI_DMA_FROMDEVICE);
  1654. } else {
  1655. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
  1656. "%d bytes in a single small buffer.\n",
  1657. length);
  1658. sbq_desc = ql_get_curr_sbuf(rx_ring);
  1659. skb = sbq_desc->p.skb;
  1660. ql_realign_skb(skb, length);
  1661. skb_put(skb, length);
  1662. pci_unmap_single(qdev->pdev,
  1663. dma_unmap_addr(sbq_desc,
  1664. mapaddr),
  1665. dma_unmap_len(sbq_desc,
  1666. maplen),
  1667. PCI_DMA_FROMDEVICE);
  1668. sbq_desc->p.skb = NULL;
  1669. }
  1670. } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
  1671. if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
  1672. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
  1673. "Header in small, %d bytes in large. Chain large to small!\n",
  1674. length);
  1675. /*
  1676. * The data is in a single large buffer. We
  1677. * chain it to the header buffer's skb and let
  1678. * it rip.
  1679. */
  1680. lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
  1681. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
  1682. "Chaining page at offset = %d, for %d bytes to skb.\n",
  1683. lbq_desc->p.pg_chunk.offset, length);
  1684. skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
  1685. lbq_desc->p.pg_chunk.offset,
  1686. length);
  1687. skb->len += length;
  1688. skb->data_len += length;
  1689. skb->truesize += length;
  1690. } else {
  1691. /*
  1692. * The headers and data are in a single large buffer. We
  1693. * copy it to a new skb and let it go. This can happen with
  1694. * jumbo mtu on a non-TCP/UDP frame.
  1695. */
  1696. lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
  1697. skb = netdev_alloc_skb(qdev->ndev, length);
  1698. if (skb == NULL) {
  1699. netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
  1700. "No skb available, drop the packet.\n");
  1701. return NULL;
  1702. }
  1703. pci_unmap_page(qdev->pdev,
  1704. dma_unmap_addr(lbq_desc,
  1705. mapaddr),
  1706. dma_unmap_len(lbq_desc, maplen),
  1707. PCI_DMA_FROMDEVICE);
  1708. skb_reserve(skb, NET_IP_ALIGN);
  1709. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
  1710. "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
  1711. length);
  1712. skb_fill_page_desc(skb, 0,
  1713. lbq_desc->p.pg_chunk.page,
  1714. lbq_desc->p.pg_chunk.offset,
  1715. length);
  1716. skb->len += length;
  1717. skb->data_len += length;
  1718. skb->truesize += length;
  1719. length -= length;
  1720. ql_update_mac_hdr_len(qdev, ib_mac_rsp,
  1721. lbq_desc->p.pg_chunk.va,
  1722. &hlen);
  1723. __pskb_pull_tail(skb, hlen);
  1724. }
  1725. } else {
  1726. /*
  1727. * The data is in a chain of large buffers
  1728. * pointed to by a small buffer. We loop
1729. * through and chain them to our small header
  1730. * buffer's skb.
  1731. * frags: There are 18 max frags and our small
  1732. * buffer will hold 32 of them. The thing is,
  1733. * we'll use 3 max for our 9000 byte jumbo
  1734. * frames. If the MTU goes up we could
  1735. * eventually be in trouble.
  1736. */
  1737. int size, i = 0;
  1738. sbq_desc = ql_get_curr_sbuf(rx_ring);
  1739. pci_unmap_single(qdev->pdev,
  1740. dma_unmap_addr(sbq_desc, mapaddr),
  1741. dma_unmap_len(sbq_desc, maplen),
  1742. PCI_DMA_FROMDEVICE);
  1743. if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
  1744. /*
1745. * This is a non-TCP/UDP IP frame, so
  1746. * the headers aren't split into a small
  1747. * buffer. We have to use the small buffer
  1748. * that contains our sg list as our skb to
  1749. * send upstairs. Copy the sg list here to
  1750. * a local buffer and use it to find the
  1751. * pages to chain.
  1752. */
  1753. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
  1754. "%d bytes of headers & data in chain of large.\n",
  1755. length);
  1756. skb = sbq_desc->p.skb;
  1757. sbq_desc->p.skb = NULL;
  1758. skb_reserve(skb, NET_IP_ALIGN);
  1759. }
  1760. do {
  1761. lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
  1762. size = (length < rx_ring->lbq_buf_size) ? length :
  1763. rx_ring->lbq_buf_size;
  1764. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
  1765. "Adding page %d to skb for %d bytes.\n",
  1766. i, size);
  1767. skb_fill_page_desc(skb, i,
  1768. lbq_desc->p.pg_chunk.page,
  1769. lbq_desc->p.pg_chunk.offset,
  1770. size);
  1771. skb->len += size;
  1772. skb->data_len += size;
  1773. skb->truesize += size;
  1774. length -= size;
  1775. i++;
  1776. } while (length > 0);
  1777. ql_update_mac_hdr_len(qdev, ib_mac_rsp, lbq_desc->p.pg_chunk.va,
  1778. &hlen);
  1779. __pskb_pull_tail(skb, hlen);
  1780. }
  1781. return skb;
  1782. }
  1783. /* Process an inbound completion from an rx ring. */
  1784. static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
  1785. struct rx_ring *rx_ring,
  1786. struct ib_mac_iocb_rsp *ib_mac_rsp,
  1787. u16 vlan_id)
  1788. {
  1789. struct net_device *ndev = qdev->ndev;
  1790. struct sk_buff *skb = NULL;
  1791. QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
  1792. skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
  1793. if (unlikely(!skb)) {
  1794. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
  1795. "No skb available, drop packet.\n");
  1796. rx_ring->rx_dropped++;
  1797. return;
  1798. }
  1799. /* Frame error, so drop the packet. */
  1800. if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
  1801. ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
  1802. dev_kfree_skb_any(skb);
  1803. return;
  1804. }
  1805. /* The max framesize filter on this chip is set higher than
  1806. * MTU since FCoE uses 2k frames.
  1807. */
  1808. if (skb->len > ndev->mtu + ETH_HLEN) {
  1809. dev_kfree_skb_any(skb);
  1810. rx_ring->rx_dropped++;
  1811. return;
  1812. }
  1813. /* loopback self test for ethtool */
  1814. if (test_bit(QL_SELFTEST, &qdev->flags)) {
  1815. ql_check_lb_frame(qdev, skb);
  1816. dev_kfree_skb_any(skb);
  1817. return;
  1818. }
  1819. prefetch(skb->data);
  1820. if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
  1821. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
  1822. (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
  1823. IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
  1824. (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
  1825. IB_MAC_IOCB_RSP_M_REG ? "Registered" :
  1826. (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
  1827. IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
  1828. rx_ring->rx_multicast++;
  1829. }
  1830. if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
  1831. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
  1832. "Promiscuous Packet.\n");
  1833. }
  1834. skb->protocol = eth_type_trans(skb, ndev);
  1835. skb_checksum_none_assert(skb);
  1836. /* If rx checksum is on, and there are no
  1837. * csum or frame errors.
  1838. */
  1839. if ((ndev->features & NETIF_F_RXCSUM) &&
  1840. !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
  1841. /* TCP frame. */
  1842. if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
  1843. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
  1844. "TCP checksum done!\n");
  1845. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1846. } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
  1847. (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
  1848. /* Unfragmented ipv4 UDP frame. */
  1849. struct iphdr *iph = (struct iphdr *) skb->data;
  1850. if (!(iph->frag_off &
  1851. htons(IP_MF|IP_OFFSET))) {
  1852. skb->ip_summed = CHECKSUM_UNNECESSARY;
  1853. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
  1854. "TCP checksum done!\n");
  1855. }
  1856. }
  1857. }
  1858. rx_ring->rx_packets++;
  1859. rx_ring->rx_bytes += skb->len;
  1860. skb_record_rx_queue(skb, rx_ring->cq_id);
  1861. if (vlan_id != 0xffff)
  1862. __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_id);
  1863. if (skb->ip_summed == CHECKSUM_UNNECESSARY)
  1864. napi_gro_receive(&rx_ring->napi, skb);
  1865. else
  1866. netif_receive_skb(skb);
  1867. }
  1868. /* Process an inbound completion from an rx ring. */
  1869. static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
  1870. struct rx_ring *rx_ring,
  1871. struct ib_mac_iocb_rsp *ib_mac_rsp)
  1872. {
  1873. u32 length = le32_to_cpu(ib_mac_rsp->data_len);
  1874. u16 vlan_id = ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) &&
  1875. (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)) ?
  1876. ((le16_to_cpu(ib_mac_rsp->vlan_id) &
  1877. IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
  1878. QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
  1879. if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
  1880. /* The data and headers are split into
  1881. * separate buffers.
  1882. */
  1883. ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
  1884. vlan_id);
  1885. } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
  1886. /* The data fit in a single small buffer.
  1887. * Allocate a new skb, copy the data and
  1888. * return the buffer to the free pool.
  1889. */
  1890. ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
  1891. length, vlan_id);
  1892. } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
  1893. !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
  1894. (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
  1895. /* TCP packet in a page chunk that's been checksummed.
  1896. * Tack it on to our GRO skb and let it go.
  1897. */
  1898. ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
  1899. length, vlan_id);
  1900. } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
  1901. /* Non-TCP packet in a page chunk. Allocate an
  1902. * skb, tack it on frags, and send it up.
  1903. */
  1904. ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
  1905. length, vlan_id);
  1906. } else {
  1907. /* Non-TCP/UDP large frames that span multiple buffers
1908. * can be processed correctly by the split frame logic.
  1909. */
  1910. ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
  1911. vlan_id);
  1912. }
  1913. return (unsigned long)length;
  1914. }
  1915. /* Process an outbound completion from an rx ring. */
  1916. static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
  1917. struct ob_mac_iocb_rsp *mac_rsp)
  1918. {
  1919. struct tx_ring *tx_ring;
  1920. struct tx_ring_desc *tx_ring_desc;
  1921. QL_DUMP_OB_MAC_RSP(mac_rsp);
  1922. tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
  1923. tx_ring_desc = &tx_ring->q[mac_rsp->tid];
  1924. ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
  1925. tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
  1926. tx_ring->tx_packets++;
  1927. dev_kfree_skb(tx_ring_desc->skb);
  1928. tx_ring_desc->skb = NULL;
  1929. if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
  1930. OB_MAC_IOCB_RSP_S |
  1931. OB_MAC_IOCB_RSP_L |
  1932. OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
  1933. if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
  1934. netif_warn(qdev, tx_done, qdev->ndev,
  1935. "Total descriptor length did not match transfer length.\n");
  1936. }
  1937. if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
  1938. netif_warn(qdev, tx_done, qdev->ndev,
  1939. "Frame too short to be valid, not sent.\n");
  1940. }
  1941. if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
  1942. netif_warn(qdev, tx_done, qdev->ndev,
  1943. "Frame too long, but sent anyway.\n");
  1944. }
  1945. if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
  1946. netif_warn(qdev, tx_done, qdev->ndev,
  1947. "PCI backplane error. Frame not sent.\n");
  1948. }
  1949. }
  1950. atomic_inc(&tx_ring->tx_count);
  1951. }
  1952. /* Fire up a handler to reset the MPI processor. */
  1953. void ql_queue_fw_error(struct ql_adapter *qdev)
  1954. {
  1955. ql_link_off(qdev);
  1956. queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
  1957. }
  1958. void ql_queue_asic_error(struct ql_adapter *qdev)
  1959. {
  1960. ql_link_off(qdev);
  1961. ql_disable_interrupts(qdev);
  1962. /* Clear adapter up bit to signal the recovery
  1963. * process that it shouldn't kill the reset worker
  1964. * thread
  1965. */
  1966. clear_bit(QL_ADAPTER_UP, &qdev->flags);
  1967. /* Set asic recovery bit to indicate reset process that we are
  1968. * in fatal error recovery process rather than normal close
  1969. */
  1970. set_bit(QL_ASIC_RECOVERY, &qdev->flags);
  1971. queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
  1972. }
  1973. static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
  1974. struct ib_ae_iocb_rsp *ib_ae_rsp)
  1975. {
  1976. switch (ib_ae_rsp->event) {
  1977. case MGMT_ERR_EVENT:
  1978. netif_err(qdev, rx_err, qdev->ndev,
  1979. "Management Processor Fatal Error.\n");
  1980. ql_queue_fw_error(qdev);
  1981. return;
  1982. case CAM_LOOKUP_ERR_EVENT:
  1983. netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
  1984. netdev_err(qdev->ndev, "This event shouldn't occur.\n");
  1985. ql_queue_asic_error(qdev);
  1986. return;
  1987. case SOFT_ECC_ERROR_EVENT:
  1988. netdev_err(qdev->ndev, "Soft ECC error detected.\n");
  1989. ql_queue_asic_error(qdev);
  1990. break;
  1991. case PCI_ERR_ANON_BUF_RD:
  1992. netdev_err(qdev->ndev, "PCI error occurred when reading "
  1993. "anonymous buffers from rx_ring %d.\n",
  1994. ib_ae_rsp->q_id);
  1995. ql_queue_asic_error(qdev);
  1996. break;
  1997. default:
  1998. netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
  1999. ib_ae_rsp->event);
  2000. ql_queue_asic_error(qdev);
  2001. break;
  2002. }
  2003. }
  2004. static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
  2005. {
  2006. struct ql_adapter *qdev = rx_ring->qdev;
  2007. u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
  2008. struct ob_mac_iocb_rsp *net_rsp = NULL;
  2009. int count = 0;
  2010. struct tx_ring *tx_ring;
  2011. /* While there are entries in the completion queue. */
  2012. while (prod != rx_ring->cnsmr_idx) {
  2013. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
  2014. "cq_id = %d, prod = %d, cnsmr = %d.\n.",
  2015. rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
  2016. net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
  2017. rmb();
  2018. switch (net_rsp->opcode) {
  2019. case OPCODE_OB_MAC_TSO_IOCB:
  2020. case OPCODE_OB_MAC_IOCB:
  2021. ql_process_mac_tx_intr(qdev, net_rsp);
  2022. break;
  2023. default:
  2024. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
  2025. "Hit default case, not handled! dropping the packet, opcode = %x.\n",
  2026. net_rsp->opcode);
  2027. }
  2028. count++;
  2029. ql_update_cq(rx_ring);
  2030. prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
  2031. }
  2032. if (!net_rsp)
  2033. return 0;
  2034. ql_write_cq_idx(rx_ring);
  2035. tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
  2036. if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
  2037. if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
  2038. /*
  2039. * The queue got stopped because the tx_ring was full.
  2040. * Wake it up, because it's now at least 25% empty.
  2041. */
  2042. netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
  2043. }
  2044. return count;
  2045. }
  2046. static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
  2047. {
  2048. struct ql_adapter *qdev = rx_ring->qdev;
  2049. u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
  2050. struct ql_net_rsp_iocb *net_rsp;
  2051. int count = 0;
  2052. /* While there are entries in the completion queue. */
  2053. while (prod != rx_ring->cnsmr_idx) {
  2054. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
  2055. "cq_id = %d, prod = %d, cnsmr = %d.\n.",
  2056. rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
  2057. net_rsp = rx_ring->curr_entry;
  2058. rmb();
  2059. switch (net_rsp->opcode) {
  2060. case OPCODE_IB_MAC_IOCB:
  2061. ql_process_mac_rx_intr(qdev, rx_ring,
  2062. (struct ib_mac_iocb_rsp *)
  2063. net_rsp);
  2064. break;
  2065. case OPCODE_IB_AE_IOCB:
  2066. ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
  2067. net_rsp);
  2068. break;
  2069. default:
  2070. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
  2071. "Hit default case, not handled! dropping the packet, opcode = %x.\n",
  2072. net_rsp->opcode);
  2073. break;
  2074. }
  2075. count++;
  2076. ql_update_cq(rx_ring);
  2077. prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
  2078. if (count == budget)
  2079. break;
  2080. }
  2081. ql_update_buffer_queues(qdev, rx_ring);
  2082. ql_write_cq_idx(rx_ring);
  2083. return count;
  2084. }
  2085. static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
  2086. {
  2087. struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
  2088. struct ql_adapter *qdev = rx_ring->qdev;
  2089. struct rx_ring *trx_ring;
  2090. int i, work_done = 0;
  2091. struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
  2092. netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
  2093. "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
  2094. /* Service the TX rings first. They start
  2095. * right after the RSS rings. */
  2096. for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
  2097. trx_ring = &qdev->rx_ring[i];
  2098. /* If this TX completion ring belongs to this vector and
  2099. * it's not empty then service it.
  2100. */
  2101. if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
  2102. (ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
  2103. trx_ring->cnsmr_idx)) {
  2104. netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
  2105. "%s: Servicing TX completion ring %d.\n",
  2106. __func__, trx_ring->cq_id);
  2107. ql_clean_outbound_rx_ring(trx_ring);
  2108. }
  2109. }
  2110. /*
  2111. * Now service the RSS ring if it's active.
  2112. */
  2113. if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
  2114. rx_ring->cnsmr_idx) {
  2115. netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
  2116. "%s: Servicing RX completion ring %d.\n",
  2117. __func__, rx_ring->cq_id);
  2118. work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
  2119. }
  2120. if (work_done < budget) {
  2121. napi_complete(napi);
  2122. ql_enable_completion_interrupt(qdev, rx_ring->irq);
  2123. }
  2124. return work_done;
  2125. }
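/* Note that only the inbound (RSS) ring is charged against the NAPI budget;
 * TX completion rings are drained in full by ql_clean_outbound_rx_ring(),
 * which takes no budget argument.
 */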
  2126. static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
  2127. {
  2128. struct ql_adapter *qdev = netdev_priv(ndev);
  2129. if (features & NETIF_F_HW_VLAN_CTAG_RX) {
  2130. ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
  2131. NIC_RCV_CFG_VLAN_MATCH_AND_NON);
  2132. } else {
  2133. ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
  2134. }
  2135. }
  2136. /**
  2137. * qlge_update_hw_vlan_features - helper routine to reinitialize the adapter
  2138. * based on the features to enable/disable hardware vlan accel
  2139. */
  2140. static int qlge_update_hw_vlan_features(struct net_device *ndev,
  2141. netdev_features_t features)
  2142. {
  2143. struct ql_adapter *qdev = netdev_priv(ndev);
  2144. int status = 0;
  2145. bool need_restart = netif_running(ndev);
  2146. if (need_restart) {
  2147. status = ql_adapter_down(qdev);
  2148. if (status) {
  2149. netif_err(qdev, link, qdev->ndev,
  2150. "Failed to bring down the adapter\n");
  2151. return status;
  2152. }
  2153. }
2154. /* Update the features with the recent change. */
  2155. ndev->features = features;
  2156. if (need_restart) {
  2157. status = ql_adapter_up(qdev);
  2158. if (status) {
  2159. netif_err(qdev, link, qdev->ndev,
  2160. "Failed to bring up the adapter\n");
  2161. return status;
  2162. }
  2163. }
  2164. return status;
  2165. }
  2166. static netdev_features_t qlge_fix_features(struct net_device *ndev,
  2167. netdev_features_t features)
  2168. {
  2169. int err;
  2170. /* Update the behavior of vlan accel in the adapter */
  2171. err = qlge_update_hw_vlan_features(ndev, features);
  2172. if (err)
  2173. return err;
  2174. return features;
  2175. }
  2176. static int qlge_set_features(struct net_device *ndev,
  2177. netdev_features_t features)
  2178. {
  2179. netdev_features_t changed = ndev->features ^ features;
  2180. if (changed & NETIF_F_HW_VLAN_CTAG_RX)
  2181. qlge_vlan_mode(ndev, features);
  2182. return 0;
  2183. }
  2184. static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
  2185. {
  2186. u32 enable_bit = MAC_ADDR_E;
  2187. int err;
  2188. err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
  2189. MAC_ADDR_TYPE_VLAN, vid);
  2190. if (err)
  2191. netif_err(qdev, ifup, qdev->ndev,
  2192. "Failed to init vlan address.\n");
  2193. return err;
  2194. }
  2195. static int qlge_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
  2196. {
  2197. struct ql_adapter *qdev = netdev_priv(ndev);
  2198. int status;
  2199. int err;
  2200. status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
  2201. if (status)
  2202. return status;
  2203. err = __qlge_vlan_rx_add_vid(qdev, vid);
  2204. set_bit(vid, qdev->active_vlans);
  2205. ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
  2206. return err;
  2207. }
  2208. static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
  2209. {
  2210. u32 enable_bit = 0;
  2211. int err;
  2212. err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
  2213. MAC_ADDR_TYPE_VLAN, vid);
  2214. if (err)
  2215. netif_err(qdev, ifup, qdev->ndev,
  2216. "Failed to clear vlan address.\n");
  2217. return err;
  2218. }
  2219. static int qlge_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
  2220. {
  2221. struct ql_adapter *qdev = netdev_priv(ndev);
  2222. int status;
  2223. int err;
  2224. status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
  2225. if (status)
  2226. return status;
  2227. err = __qlge_vlan_rx_kill_vid(qdev, vid);
  2228. clear_bit(vid, qdev->active_vlans);
  2229. ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
  2230. return err;
  2231. }
  2232. static void qlge_restore_vlan(struct ql_adapter *qdev)
  2233. {
  2234. int status;
  2235. u16 vid;
  2236. status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
  2237. if (status)
  2238. return;
  2239. for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
  2240. __qlge_vlan_rx_add_vid(qdev, vid);
  2241. ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
  2242. }
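/* Replays the active_vlans bitmap into the hardware VLAN filter. The
 * __qlge_vlan_rx_add_vid() variant is used so the MAC address semaphore
 * is taken and released once around the whole loop.
 */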
  2243. /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
  2244. static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
  2245. {
  2246. struct rx_ring *rx_ring = dev_id;
  2247. napi_schedule(&rx_ring->napi);
  2248. return IRQ_HANDLED;
  2249. }
  2250. /* This handles a fatal error, MPI activity, and the default
  2251. * rx_ring in an MSI-X multiple vector environment.
2252. * In an MSI/Legacy environment it also processes the rest of
  2253. * the rx_rings.
  2254. */
  2255. static irqreturn_t qlge_isr(int irq, void *dev_id)
  2256. {
  2257. struct rx_ring *rx_ring = dev_id;
  2258. struct ql_adapter *qdev = rx_ring->qdev;
  2259. struct intr_context *intr_context = &qdev->intr_context[0];
  2260. u32 var;
  2261. int work_done = 0;
  2262. spin_lock(&qdev->hw_lock);
  2263. if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
  2264. netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
  2265. "Shared Interrupt, Not ours!\n");
  2266. spin_unlock(&qdev->hw_lock);
  2267. return IRQ_NONE;
  2268. }
  2269. spin_unlock(&qdev->hw_lock);
  2270. var = ql_disable_completion_interrupt(qdev, intr_context->intr);
  2271. /*
  2272. * Check for fatal error.
  2273. */
  2274. if (var & STS_FE) {
  2275. ql_queue_asic_error(qdev);
  2276. netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
  2277. var = ql_read32(qdev, ERR_STS);
  2278. netdev_err(qdev->ndev, "Resetting chip. "
  2279. "Error Status Register = 0x%x\n", var);
  2280. return IRQ_HANDLED;
  2281. }
  2282. /*
  2283. * Check MPI processor activity.
  2284. */
  2285. if ((var & STS_PI) &&
  2286. (ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
  2287. /*
  2288. * We've got an async event or mailbox completion.
  2289. * Handle it and clear the source of the interrupt.
  2290. */
  2291. netif_err(qdev, intr, qdev->ndev,
  2292. "Got MPI processor interrupt.\n");
  2293. ql_disable_completion_interrupt(qdev, intr_context->intr);
  2294. ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
  2295. queue_delayed_work_on(smp_processor_id(),
  2296. qdev->workqueue, &qdev->mpi_work, 0);
  2297. work_done++;
  2298. }
  2299. /*
  2300. * Get the bit-mask that shows the active queues for this
  2301. * pass. Compare it to the queues that this irq services
  2302. * and call napi if there's a match.
  2303. */
  2304. var = ql_read32(qdev, ISR1);
  2305. if (var & intr_context->irq_mask) {
  2306. netif_info(qdev, intr, qdev->ndev,
  2307. "Waking handler for rx_ring[0].\n");
  2308. ql_disable_completion_interrupt(qdev, intr_context->intr);
  2309. napi_schedule(&rx_ring->napi);
  2310. work_done++;
  2311. }
  2312. ql_enable_completion_interrupt(qdev, intr_context->intr);
  2313. return work_done ? IRQ_HANDLED : IRQ_NONE;
  2314. }
  2315. static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
  2316. {
  2317. if (skb_is_gso(skb)) {
  2318. int err;
  2319. __be16 l3_proto = vlan_get_protocol(skb);
  2320. err = skb_cow_head(skb, 0);
  2321. if (err < 0)
  2322. return err;
  2323. mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
  2324. mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
  2325. mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
  2326. mac_iocb_ptr->total_hdrs_len =
  2327. cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
  2328. mac_iocb_ptr->net_trans_offset =
  2329. cpu_to_le16(skb_network_offset(skb) |
  2330. skb_transport_offset(skb)
  2331. << OB_MAC_TRANSPORT_HDR_SHIFT);
  2332. mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
  2333. mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
  2334. if (likely(l3_proto == htons(ETH_P_IP))) {
  2335. struct iphdr *iph = ip_hdr(skb);
  2336. iph->check = 0;
  2337. mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
  2338. tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
  2339. iph->daddr, 0,
  2340. IPPROTO_TCP,
  2341. 0);
  2342. } else if (l3_proto == htons(ETH_P_IPV6)) {
  2343. mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
  2344. tcp_hdr(skb)->check =
  2345. ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
  2346. &ipv6_hdr(skb)->daddr,
  2347. 0, IPPROTO_TCP, 0);
  2348. }
  2349. return 1;
  2350. }
  2351. return 0;
  2352. }
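/* The TCP checksum seeded above is the pseudo-header checksum computed with
 * a zero length, the usual convention for LSO offload: the NIC then fills in
 * the final checksum for each MSS-sized segment it produces.
 */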
  2353. static void ql_hw_csum_setup(struct sk_buff *skb,
  2354. struct ob_mac_tso_iocb_req *mac_iocb_ptr)
  2355. {
  2356. int len;
  2357. struct iphdr *iph = ip_hdr(skb);
  2358. __sum16 *check;
  2359. mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
  2360. mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
  2361. mac_iocb_ptr->net_trans_offset =
  2362. cpu_to_le16(skb_network_offset(skb) |
  2363. skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
  2364. mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
  2365. len = (ntohs(iph->tot_len) - (iph->ihl << 2));
  2366. if (likely(iph->protocol == IPPROTO_TCP)) {
  2367. check = &(tcp_hdr(skb)->check);
  2368. mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
  2369. mac_iocb_ptr->total_hdrs_len =
  2370. cpu_to_le16(skb_transport_offset(skb) +
  2371. (tcp_hdr(skb)->doff << 2));
  2372. } else {
  2373. check = &(udp_hdr(skb)->check);
  2374. mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
  2375. mac_iocb_ptr->total_hdrs_len =
  2376. cpu_to_le16(skb_transport_offset(skb) +
  2377. sizeof(struct udphdr));
  2378. }
  2379. *check = ~csum_tcpudp_magic(iph->saddr,
  2380. iph->daddr, len, iph->protocol, 0);
  2381. }
  2382. static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
  2383. {
  2384. struct tx_ring_desc *tx_ring_desc;
  2385. struct ob_mac_iocb_req *mac_iocb_ptr;
  2386. struct ql_adapter *qdev = netdev_priv(ndev);
  2387. int tso;
  2388. struct tx_ring *tx_ring;
  2389. u32 tx_ring_idx = (u32) skb->queue_mapping;
  2390. tx_ring = &qdev->tx_ring[tx_ring_idx];
  2391. if (skb_padto(skb, ETH_ZLEN))
  2392. return NETDEV_TX_OK;
  2393. if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
  2394. netif_info(qdev, tx_queued, qdev->ndev,
  2395. "%s: BUG! shutting down tx queue %d due to lack of resources.\n",
  2396. __func__, tx_ring_idx);
  2397. netif_stop_subqueue(ndev, tx_ring->wq_id);
  2398. tx_ring->tx_errors++;
  2399. return NETDEV_TX_BUSY;
  2400. }
  2401. tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
  2402. mac_iocb_ptr = tx_ring_desc->queue_entry;
  2403. memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
  2404. mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
  2405. mac_iocb_ptr->tid = tx_ring_desc->index;
  2406. /* We use the upper 32-bits to store the tx queue for this IO.
  2407. * When we get the completion we can use it to establish the context.
  2408. */
  2409. mac_iocb_ptr->txq_idx = tx_ring_idx;
  2410. tx_ring_desc->skb = skb;
  2411. mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
  2412. if (skb_vlan_tag_present(skb)) {
  2413. netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
  2414. "Adding a vlan tag %d.\n", skb_vlan_tag_get(skb));
  2415. mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
  2416. mac_iocb_ptr->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
  2417. }
  2418. tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
  2419. if (tso < 0) {
  2420. dev_kfree_skb_any(skb);
  2421. return NETDEV_TX_OK;
  2422. } else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
  2423. ql_hw_csum_setup(skb,
  2424. (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
  2425. }
  2426. if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
  2427. NETDEV_TX_OK) {
  2428. netif_err(qdev, tx_queued, qdev->ndev,
  2429. "Could not map the segments.\n");
  2430. tx_ring->tx_errors++;
  2431. return NETDEV_TX_BUSY;
  2432. }
  2433. QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
  2434. tx_ring->prod_idx++;
  2435. if (tx_ring->prod_idx == tx_ring->wq_len)
  2436. tx_ring->prod_idx = 0;
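/* Make sure the IOCB contents written above are visible in memory before
 * the doorbell write below hands the descriptor to the hardware.
 */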
  2437. wmb();
  2438. ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
  2439. netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
  2440. "tx queued, slot %d, len %d\n",
  2441. tx_ring->prod_idx, skb->len);
  2442. atomic_dec(&tx_ring->tx_count);
  2443. if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
  2444. netif_stop_subqueue(ndev, tx_ring->wq_id);
  2445. if ((atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
  2446. /*
  2447. * The queue got stopped because the tx_ring was full.
  2448. * Wake it up, because it's now at least 25% empty.
  2449. */
  2450. netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
  2451. }
  2452. return NETDEV_TX_OK;
  2453. }
  2454. static void ql_free_shadow_space(struct ql_adapter *qdev)
  2455. {
  2456. if (qdev->rx_ring_shadow_reg_area) {
  2457. pci_free_consistent(qdev->pdev,
  2458. PAGE_SIZE,
  2459. qdev->rx_ring_shadow_reg_area,
  2460. qdev->rx_ring_shadow_reg_dma);
  2461. qdev->rx_ring_shadow_reg_area = NULL;
  2462. }
  2463. if (qdev->tx_ring_shadow_reg_area) {
  2464. pci_free_consistent(qdev->pdev,
  2465. PAGE_SIZE,
  2466. qdev->tx_ring_shadow_reg_area,
  2467. qdev->tx_ring_shadow_reg_dma);
  2468. qdev->tx_ring_shadow_reg_area = NULL;
  2469. }
  2470. }
  2471. static int ql_alloc_shadow_space(struct ql_adapter *qdev)
  2472. {
  2473. qdev->rx_ring_shadow_reg_area =
  2474. pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
  2475. &qdev->rx_ring_shadow_reg_dma);
  2476. if (qdev->rx_ring_shadow_reg_area == NULL) {
  2477. netif_err(qdev, ifup, qdev->ndev,
  2478. "Allocation of RX shadow space failed.\n");
  2479. return -ENOMEM;
  2480. }
  2481. qdev->tx_ring_shadow_reg_area =
  2482. pci_zalloc_consistent(qdev->pdev, PAGE_SIZE,
  2483. &qdev->tx_ring_shadow_reg_dma);
  2484. if (qdev->tx_ring_shadow_reg_area == NULL) {
  2485. netif_err(qdev, ifup, qdev->ndev,
  2486. "Allocation of TX shadow space failed.\n");
  2487. goto err_wqp_sh_area;
  2488. }
  2489. return 0;
  2490. err_wqp_sh_area:
  2491. pci_free_consistent(qdev->pdev,
  2492. PAGE_SIZE,
  2493. qdev->rx_ring_shadow_reg_area,
  2494. qdev->rx_ring_shadow_reg_dma);
  2495. return -ENOMEM;
  2496. }
  2497. static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
  2498. {
  2499. struct tx_ring_desc *tx_ring_desc;
  2500. int i;
  2501. struct ob_mac_iocb_req *mac_iocb_ptr;
  2502. mac_iocb_ptr = tx_ring->wq_base;
  2503. tx_ring_desc = tx_ring->q;
  2504. for (i = 0; i < tx_ring->wq_len; i++) {
  2505. tx_ring_desc->index = i;
  2506. tx_ring_desc->skb = NULL;
  2507. tx_ring_desc->queue_entry = mac_iocb_ptr;
  2508. mac_iocb_ptr++;
  2509. tx_ring_desc++;
  2510. }
  2511. atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
  2512. }
  2513. static void ql_free_tx_resources(struct ql_adapter *qdev,
  2514. struct tx_ring *tx_ring)
  2515. {
  2516. if (tx_ring->wq_base) {
  2517. pci_free_consistent(qdev->pdev, tx_ring->wq_size,
  2518. tx_ring->wq_base, tx_ring->wq_base_dma);
  2519. tx_ring->wq_base = NULL;
  2520. }
  2521. kfree(tx_ring->q);
  2522. tx_ring->q = NULL;
  2523. }
  2524. static int ql_alloc_tx_resources(struct ql_adapter *qdev,
  2525. struct tx_ring *tx_ring)
  2526. {
  2527. tx_ring->wq_base =
  2528. pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
  2529. &tx_ring->wq_base_dma);
  2530. if ((tx_ring->wq_base == NULL) ||
  2531. tx_ring->wq_base_dma & WQ_ADDR_ALIGN)
  2532. goto pci_alloc_err;
  2533. tx_ring->q =
  2534. kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
  2535. if (tx_ring->q == NULL)
  2536. goto err;
  2537. return 0;
  2538. err:
  2539. pci_free_consistent(qdev->pdev, tx_ring->wq_size,
  2540. tx_ring->wq_base, tx_ring->wq_base_dma);
  2541. tx_ring->wq_base = NULL;
  2542. pci_alloc_err:
  2543. netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
  2544. return -ENOMEM;
  2545. }
  2546. static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
  2547. {
  2548. struct bq_desc *lbq_desc;
  2549. uint32_t curr_idx, clean_idx;
  2550. curr_idx = rx_ring->lbq_curr_idx;
  2551. clean_idx = rx_ring->lbq_clean_idx;
  2552. while (curr_idx != clean_idx) {
  2553. lbq_desc = &rx_ring->lbq[curr_idx];
  2554. if (lbq_desc->p.pg_chunk.last_flag) {
  2555. pci_unmap_page(qdev->pdev,
  2556. lbq_desc->p.pg_chunk.map,
  2557. ql_lbq_block_size(qdev),
  2558. PCI_DMA_FROMDEVICE);
  2559. lbq_desc->p.pg_chunk.last_flag = 0;
  2560. }
  2561. put_page(lbq_desc->p.pg_chunk.page);
  2562. lbq_desc->p.pg_chunk.page = NULL;
  2563. if (++curr_idx == rx_ring->lbq_len)
  2564. curr_idx = 0;
  2565. }
  2566. if (rx_ring->pg_chunk.page) {
  2567. pci_unmap_page(qdev->pdev, rx_ring->pg_chunk.map,
  2568. ql_lbq_block_size(qdev), PCI_DMA_FROMDEVICE);
  2569. put_page(rx_ring->pg_chunk.page);
  2570. rx_ring->pg_chunk.page = NULL;
  2571. }
  2572. }
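/* Teardown counterpart of ql_get_next_chunk(): every outstanding chunk drops
 * the page reference it holds, and only the descriptor flagged with last_flag
 * also releases the DMA mapping, so each block is unmapped exactly once.
 */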
  2573. static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
  2574. {
  2575. int i;
  2576. struct bq_desc *sbq_desc;
  2577. for (i = 0; i < rx_ring->sbq_len; i++) {
  2578. sbq_desc = &rx_ring->sbq[i];
  2579. if (sbq_desc == NULL) {
  2580. netif_err(qdev, ifup, qdev->ndev,
  2581. "sbq_desc %d is NULL.\n", i);
  2582. return;
  2583. }
  2584. if (sbq_desc->p.skb) {
  2585. pci_unmap_single(qdev->pdev,
  2586. dma_unmap_addr(sbq_desc, mapaddr),
  2587. dma_unmap_len(sbq_desc, maplen),
  2588. PCI_DMA_FROMDEVICE);
  2589. dev_kfree_skb(sbq_desc->p.skb);
  2590. sbq_desc->p.skb = NULL;
  2591. }
  2592. }
  2593. }
  2594. /* Free all large and small rx buffers associated
  2595. * with the completion queues for this device.
  2596. */
  2597. static void ql_free_rx_buffers(struct ql_adapter *qdev)
  2598. {
  2599. int i;
  2600. struct rx_ring *rx_ring;
  2601. for (i = 0; i < qdev->rx_ring_count; i++) {
  2602. rx_ring = &qdev->rx_ring[i];
  2603. if (rx_ring->lbq)
  2604. ql_free_lbq_buffers(qdev, rx_ring);
  2605. if (rx_ring->sbq)
  2606. ql_free_sbq_buffers(qdev, rx_ring);
  2607. }
  2608. }
  2609. static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
  2610. {
  2611. struct rx_ring *rx_ring;
  2612. int i;
  2613. for (i = 0; i < qdev->rx_ring_count; i++) {
  2614. rx_ring = &qdev->rx_ring[i];
  2615. if (rx_ring->type != TX_Q)
  2616. ql_update_buffer_queues(qdev, rx_ring);
  2617. }
  2618. }
  2619. static void ql_init_lbq_ring(struct ql_adapter *qdev,
  2620. struct rx_ring *rx_ring)
  2621. {
  2622. int i;
  2623. struct bq_desc *lbq_desc;
  2624. __le64 *bq = rx_ring->lbq_base;
  2625. memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
  2626. for (i = 0; i < rx_ring->lbq_len; i++) {
  2627. lbq_desc = &rx_ring->lbq[i];
  2628. memset(lbq_desc, 0, sizeof(*lbq_desc));
  2629. lbq_desc->index = i;
  2630. lbq_desc->addr = bq;
  2631. bq++;
  2632. }
  2633. }
  2634. static void ql_init_sbq_ring(struct ql_adapter *qdev,
  2635. struct rx_ring *rx_ring)
  2636. {
  2637. int i;
  2638. struct bq_desc *sbq_desc;
  2639. __le64 *bq = rx_ring->sbq_base;
  2640. memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
  2641. for (i = 0; i < rx_ring->sbq_len; i++) {
  2642. sbq_desc = &rx_ring->sbq[i];
  2643. memset(sbq_desc, 0, sizeof(*sbq_desc));
  2644. sbq_desc->index = i;
  2645. sbq_desc->addr = bq;
  2646. bq++;
  2647. }
  2648. }
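/* Free the completion queue, the small and large buffer queues,
* and their control block arrays for this rx_ring.
*/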
  2649. static void ql_free_rx_resources(struct ql_adapter *qdev,
  2650. struct rx_ring *rx_ring)
  2651. {
  2652. /* Free the small buffer queue. */
  2653. if (rx_ring->sbq_base) {
  2654. pci_free_consistent(qdev->pdev,
  2655. rx_ring->sbq_size,
  2656. rx_ring->sbq_base, rx_ring->sbq_base_dma);
  2657. rx_ring->sbq_base = NULL;
  2658. }
  2659. /* Free the small buffer queue control blocks. */
  2660. kfree(rx_ring->sbq);
  2661. rx_ring->sbq = NULL;
  2662. /* Free the large buffer queue. */
  2663. if (rx_ring->lbq_base) {
  2664. pci_free_consistent(qdev->pdev,
  2665. rx_ring->lbq_size,
  2666. rx_ring->lbq_base, rx_ring->lbq_base_dma);
  2667. rx_ring->lbq_base = NULL;
  2668. }
  2669. /* Free the large buffer queue control blocks. */
  2670. kfree(rx_ring->lbq);
  2671. rx_ring->lbq = NULL;
  2672. /* Free the rx queue. */
  2673. if (rx_ring->cq_base) {
  2674. pci_free_consistent(qdev->pdev,
  2675. rx_ring->cq_size,
  2676. rx_ring->cq_base, rx_ring->cq_base_dma);
  2677. rx_ring->cq_base = NULL;
  2678. }
  2679. }
/* Allocate queues and buffers for this completion queue based
* on the values in the parameter structure. */
  2682. static int ql_alloc_rx_resources(struct ql_adapter *qdev,
  2683. struct rx_ring *rx_ring)
  2684. {
  2685. /*
  2686. * Allocate the completion queue for this rx_ring.
  2687. */
  2688. rx_ring->cq_base =
  2689. pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
  2690. &rx_ring->cq_base_dma);
  2691. if (rx_ring->cq_base == NULL) {
  2692. netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
  2693. return -ENOMEM;
  2694. }
  2695. if (rx_ring->sbq_len) {
  2696. /*
  2697. * Allocate small buffer queue.
  2698. */
  2699. rx_ring->sbq_base =
  2700. pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
  2701. &rx_ring->sbq_base_dma);
  2702. if (rx_ring->sbq_base == NULL) {
  2703. netif_err(qdev, ifup, qdev->ndev,
  2704. "Small buffer queue allocation failed.\n");
  2705. goto err_mem;
  2706. }
  2707. /*
  2708. * Allocate small buffer queue control blocks.
  2709. */
  2710. rx_ring->sbq = kmalloc_array(rx_ring->sbq_len,
  2711. sizeof(struct bq_desc),
  2712. GFP_KERNEL);
  2713. if (rx_ring->sbq == NULL)
  2714. goto err_mem;
  2715. ql_init_sbq_ring(qdev, rx_ring);
  2716. }
  2717. if (rx_ring->lbq_len) {
  2718. /*
  2719. * Allocate large buffer queue.
  2720. */
  2721. rx_ring->lbq_base =
  2722. pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
  2723. &rx_ring->lbq_base_dma);
  2724. if (rx_ring->lbq_base == NULL) {
  2725. netif_err(qdev, ifup, qdev->ndev,
  2726. "Large buffer queue allocation failed.\n");
  2727. goto err_mem;
  2728. }
  2729. /*
  2730. * Allocate large buffer queue control blocks.
  2731. */
  2732. rx_ring->lbq = kmalloc_array(rx_ring->lbq_len,
  2733. sizeof(struct bq_desc),
  2734. GFP_KERNEL);
  2735. if (rx_ring->lbq == NULL)
  2736. goto err_mem;
  2737. ql_init_lbq_ring(qdev, rx_ring);
  2738. }
  2739. return 0;
  2740. err_mem:
  2741. ql_free_rx_resources(qdev, rx_ring);
  2742. return -ENOMEM;
  2743. }
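/* Reclaim any skbs (and their DMA mappings) still sitting on the
* TX rings, e.g. after the interface is taken down with transmits
* outstanding.
*/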
  2744. static void ql_tx_ring_clean(struct ql_adapter *qdev)
  2745. {
  2746. struct tx_ring *tx_ring;
  2747. struct tx_ring_desc *tx_ring_desc;
  2748. int i, j;
  2749. /*
  2750. * Loop through all queues and free
  2751. * any resources.
  2752. */
  2753. for (j = 0; j < qdev->tx_ring_count; j++) {
  2754. tx_ring = &qdev->tx_ring[j];
  2755. for (i = 0; i < tx_ring->wq_len; i++) {
  2756. tx_ring_desc = &tx_ring->q[i];
  2757. if (tx_ring_desc && tx_ring_desc->skb) {
  2758. netif_err(qdev, ifdown, qdev->ndev,
  2759. "Freeing lost SKB %p, from queue %d, index %d.\n",
  2760. tx_ring_desc->skb, j,
  2761. tx_ring_desc->index);
  2762. ql_unmap_send(qdev, tx_ring_desc,
  2763. tx_ring_desc->map_cnt);
  2764. dev_kfree_skb(tx_ring_desc->skb);
  2765. tx_ring_desc->skb = NULL;
  2766. }
  2767. }
  2768. }
  2769. }
  2770. static void ql_free_mem_resources(struct ql_adapter *qdev)
  2771. {
  2772. int i;
  2773. for (i = 0; i < qdev->tx_ring_count; i++)
  2774. ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
  2775. for (i = 0; i < qdev->rx_ring_count; i++)
  2776. ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
  2777. ql_free_shadow_space(qdev);
  2778. }
  2779. static int ql_alloc_mem_resources(struct ql_adapter *qdev)
  2780. {
  2781. int i;
  2782. /* Allocate space for our shadow registers and such. */
  2783. if (ql_alloc_shadow_space(qdev))
  2784. return -ENOMEM;
  2785. for (i = 0; i < qdev->rx_ring_count; i++) {
  2786. if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
  2787. netif_err(qdev, ifup, qdev->ndev,
  2788. "RX resource allocation failed.\n");
  2789. goto err_mem;
  2790. }
  2791. }
  2792. /* Allocate tx queue resources */
  2793. for (i = 0; i < qdev->tx_ring_count; i++) {
  2794. if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
  2795. netif_err(qdev, ifup, qdev->ndev,
  2796. "TX resource allocation failed.\n");
  2797. goto err_mem;
  2798. }
  2799. }
  2800. return 0;
  2801. err_mem:
  2802. ql_free_mem_resources(qdev);
  2803. return -ENOMEM;
  2804. }
  2805. /* Set up the rx ring control block and pass it to the chip.
  2806. * The control block is defined as
  2807. * "Completion Queue Initialization Control Block", or cqicb.
  2808. */
  2809. static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
  2810. {
  2811. struct cqicb *cqicb = &rx_ring->cqicb;
  2812. void *shadow_reg = qdev->rx_ring_shadow_reg_area +
  2813. (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
  2814. u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
  2815. (rx_ring->cq_id * RX_RING_SHADOW_SPACE);
  2816. void __iomem *doorbell_area =
  2817. qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
  2818. int err = 0;
  2819. u16 bq_len;
  2820. u64 tmp;
  2821. __le64 *base_indirect_ptr;
  2822. int page_entries;
  2823. /* Set up the shadow registers for this ring. */
  2824. rx_ring->prod_idx_sh_reg = shadow_reg;
  2825. rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
  2826. *rx_ring->prod_idx_sh_reg = 0;
  2827. shadow_reg += sizeof(u64);
  2828. shadow_reg_dma += sizeof(u64);
  2829. rx_ring->lbq_base_indirect = shadow_reg;
  2830. rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
  2831. shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
  2832. shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
  2833. rx_ring->sbq_base_indirect = shadow_reg;
  2834. rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
  2835. /* PCI doorbell mem area + 0x00 for consumer index register */
  2836. rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
  2837. rx_ring->cnsmr_idx = 0;
  2838. rx_ring->curr_entry = rx_ring->cq_base;
  2839. /* PCI doorbell mem area + 0x04 for valid register */
  2840. rx_ring->valid_db_reg = doorbell_area + 0x04;
  2841. /* PCI doorbell mem area + 0x18 for large buffer consumer */
  2842. rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
  2843. /* PCI doorbell mem area + 0x1c */
  2844. rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
  2845. memset((void *)cqicb, 0, sizeof(struct cqicb));
  2846. cqicb->msix_vect = rx_ring->irq;
  2847. bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
  2848. cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
  2849. cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
  2850. cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
  2851. /*
  2852. * Set up the control block load flags.
  2853. */
  2854. cqicb->flags = FLAGS_LC | /* Load queue base address */
  2855. FLAGS_LV | /* Load MSI-X vector */
  2856. FLAGS_LI; /* Load irq delay values */
  2857. if (rx_ring->lbq_len) {
  2858. cqicb->flags |= FLAGS_LL; /* Load lbq values */
  2859. tmp = (u64)rx_ring->lbq_base_dma;
  2860. base_indirect_ptr = rx_ring->lbq_base_indirect;
  2861. page_entries = 0;
  2862. do {
  2863. *base_indirect_ptr = cpu_to_le64(tmp);
  2864. tmp += DB_PAGE_SIZE;
  2865. base_indirect_ptr++;
  2866. page_entries++;
  2867. } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
  2868. cqicb->lbq_addr =
  2869. cpu_to_le64(rx_ring->lbq_base_indirect_dma);
  2870. bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
  2871. (u16) rx_ring->lbq_buf_size;
  2872. cqicb->lbq_buf_size = cpu_to_le16(bq_len);
  2873. bq_len = (rx_ring->lbq_len == 65536) ? 0 :
  2874. (u16) rx_ring->lbq_len;
  2875. cqicb->lbq_len = cpu_to_le16(bq_len);
  2876. rx_ring->lbq_prod_idx = 0;
  2877. rx_ring->lbq_curr_idx = 0;
  2878. rx_ring->lbq_clean_idx = 0;
  2879. rx_ring->lbq_free_cnt = rx_ring->lbq_len;
  2880. }
  2881. if (rx_ring->sbq_len) {
  2882. cqicb->flags |= FLAGS_LS; /* Load sbq values */
  2883. tmp = (u64)rx_ring->sbq_base_dma;
  2884. base_indirect_ptr = rx_ring->sbq_base_indirect;
  2885. page_entries = 0;
  2886. do {
  2887. *base_indirect_ptr = cpu_to_le64(tmp);
  2888. tmp += DB_PAGE_SIZE;
  2889. base_indirect_ptr++;
  2890. page_entries++;
  2891. } while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
  2892. cqicb->sbq_addr =
  2893. cpu_to_le64(rx_ring->sbq_base_indirect_dma);
  2894. cqicb->sbq_buf_size =
  2895. cpu_to_le16((u16)(rx_ring->sbq_buf_size));
  2896. bq_len = (rx_ring->sbq_len == 65536) ? 0 :
  2897. (u16) rx_ring->sbq_len;
  2898. cqicb->sbq_len = cpu_to_le16(bq_len);
  2899. rx_ring->sbq_prod_idx = 0;
  2900. rx_ring->sbq_curr_idx = 0;
  2901. rx_ring->sbq_clean_idx = 0;
  2902. rx_ring->sbq_free_cnt = rx_ring->sbq_len;
  2903. }
  2904. switch (rx_ring->type) {
  2905. case TX_Q:
  2906. cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
  2907. cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
  2908. break;
  2909. case RX_Q:
  2910. /* Inbound completion handling rx_rings run in
  2911. * separate NAPI contexts.
  2912. */
  2913. netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
  2914. 64);
  2915. cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
  2916. cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
  2917. break;
  2918. default:
  2919. netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
  2920. "Invalid rx_ring->type = %d.\n", rx_ring->type);
  2921. }
  2922. err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
  2923. CFG_LCQ, rx_ring->cq_id);
  2924. if (err) {
  2925. netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
  2926. return err;
  2927. }
  2928. return err;
  2929. }
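/* Set up the tx ring control block (wqicb) along with its doorbell
* and shadow registers, then pass it to the chip.
*/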
  2930. static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
  2931. {
  2932. struct wqicb *wqicb = (struct wqicb *)tx_ring;
  2933. void __iomem *doorbell_area =
  2934. qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
  2935. void *shadow_reg = qdev->tx_ring_shadow_reg_area +
  2936. (tx_ring->wq_id * sizeof(u64));
  2937. u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
  2938. (tx_ring->wq_id * sizeof(u64));
  2939. int err = 0;
  2940. /*
  2941. * Assign doorbell registers for this tx_ring.
  2942. */
  2943. /* TX PCI doorbell mem area for tx producer index */
  2944. tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
  2945. tx_ring->prod_idx = 0;
  2946. /* TX PCI doorbell mem area + 0x04 */
  2947. tx_ring->valid_db_reg = doorbell_area + 0x04;
  2948. /*
  2949. * Assign shadow registers for this tx_ring.
  2950. */
  2951. tx_ring->cnsmr_idx_sh_reg = shadow_reg;
  2952. tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
  2953. wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
  2954. wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
  2955. Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
  2956. wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
  2957. wqicb->rid = 0;
  2958. wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
  2959. wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
  2960. ql_init_tx_ring(qdev, tx_ring);
  2961. err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
  2962. (u16) tx_ring->wq_id);
  2963. if (err) {
  2964. netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
  2965. return err;
  2966. }
  2967. return err;
  2968. }
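/* Undo whichever interrupt mode was enabled: MSI-X or MSI. */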
  2969. static void ql_disable_msix(struct ql_adapter *qdev)
  2970. {
  2971. if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
  2972. pci_disable_msix(qdev->pdev);
  2973. clear_bit(QL_MSIX_ENABLED, &qdev->flags);
  2974. kfree(qdev->msi_x_entry);
  2975. qdev->msi_x_entry = NULL;
  2976. } else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
  2977. pci_disable_msi(qdev->pdev);
  2978. clear_bit(QL_MSI_ENABLED, &qdev->flags);
  2979. }
  2980. }
/* We start by trying to get the number of vectors
* stored in qdev->intr_count. If we can't get that
* many, pci_enable_msix_range() returns however many
* it could allocate, and we fall back to MSI or
* legacy interrupts if MSI-X fails entirely.
*/
  2985. static void ql_enable_msix(struct ql_adapter *qdev)
  2986. {
  2987. int i, err;
  2988. /* Get the MSIX vectors. */
  2989. if (qlge_irq_type == MSIX_IRQ) {
  2990. /* Try to alloc space for the msix struct,
  2991. * if it fails then go to MSI/legacy.
  2992. */
  2993. qdev->msi_x_entry = kcalloc(qdev->intr_count,
  2994. sizeof(struct msix_entry),
  2995. GFP_KERNEL);
  2996. if (!qdev->msi_x_entry) {
  2997. qlge_irq_type = MSI_IRQ;
  2998. goto msi;
  2999. }
  3000. for (i = 0; i < qdev->intr_count; i++)
  3001. qdev->msi_x_entry[i].entry = i;
  3002. err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
  3003. 1, qdev->intr_count);
  3004. if (err < 0) {
  3005. kfree(qdev->msi_x_entry);
  3006. qdev->msi_x_entry = NULL;
  3007. netif_warn(qdev, ifup, qdev->ndev,
  3008. "MSI-X Enable failed, trying MSI.\n");
  3009. qlge_irq_type = MSI_IRQ;
  3010. } else {
  3011. qdev->intr_count = err;
  3012. set_bit(QL_MSIX_ENABLED, &qdev->flags);
  3013. netif_info(qdev, ifup, qdev->ndev,
  3014. "MSI-X Enabled, got %d vectors.\n",
  3015. qdev->intr_count);
  3016. return;
  3017. }
  3018. }
  3019. msi:
  3020. qdev->intr_count = 1;
  3021. if (qlge_irq_type == MSI_IRQ) {
  3022. if (!pci_enable_msi(qdev->pdev)) {
  3023. set_bit(QL_MSI_ENABLED, &qdev->flags);
  3024. netif_info(qdev, ifup, qdev->ndev,
  3025. "Running with MSI interrupts.\n");
  3026. return;
  3027. }
  3028. }
  3029. qlge_irq_type = LEG_IRQ;
  3030. netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
  3031. "Running with legacy interrupts.\n");
  3032. }
/* Each vector services 1 RSS ring and 1 or more
* TX completion rings. This function loops through
* the TX completion rings and assigns the vector that
* will service it. An example would be if there are
* 2 vectors (so 2 RSS rings) and 8 TX completion rings.
* This would mean that vector 0 would service RSS ring 0
* and TX completion rings 0,1,2 and 3. Vector 1 would
* service RSS ring 1 and TX completion rings 4,5,6 and 7.
*/
  3042. static void ql_set_tx_vect(struct ql_adapter *qdev)
  3043. {
  3044. int i, j, vect;
  3045. u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
  3046. if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
  3047. /* Assign irq vectors to TX rx_rings.*/
  3048. for (vect = 0, j = 0, i = qdev->rss_ring_count;
  3049. i < qdev->rx_ring_count; i++) {
  3050. if (j == tx_rings_per_vector) {
  3051. vect++;
  3052. j = 0;
  3053. }
  3054. qdev->rx_ring[i].irq = vect;
  3055. j++;
  3056. }
  3057. } else {
  3058. /* For single vector all rings have an irq
  3059. * of zero.
  3060. */
  3061. for (i = 0; i < qdev->rx_ring_count; i++)
  3062. qdev->rx_ring[i].irq = 0;
  3063. }
  3064. }
  3065. /* Set the interrupt mask for this vector. Each vector
  3066. * will service 1 RSS ring and 1 or more TX completion
  3067. * rings. This function sets up a bit mask per vector
  3068. * that indicates which rings it services.
  3069. */
  3070. static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
  3071. {
  3072. int j, vect = ctx->intr;
  3073. u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
  3074. if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
  3075. /* Add the RSS ring serviced by this vector
  3076. * to the mask.
  3077. */
  3078. ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
  3079. /* Add the TX ring(s) serviced by this vector
  3080. * to the mask. */
  3081. for (j = 0; j < tx_rings_per_vector; j++) {
  3082. ctx->irq_mask |=
  3083. (1 << qdev->rx_ring[qdev->rss_ring_count +
  3084. (vect * tx_rings_per_vector) + j].cq_id);
  3085. }
  3086. } else {
  3087. /* For single vector we just shift each queue's
  3088. * ID into the mask.
  3089. */
  3090. for (j = 0; j < qdev->rx_ring_count; j++)
  3091. ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
  3092. }
  3093. }
  3094. /*
  3095. * Here we build the intr_context structures based on
  3096. * our rx_ring count and intr vector count.
  3097. * The intr_context structure is used to hook each vector
  3098. * to possibly different handlers.
  3099. */
  3100. static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
  3101. {
  3102. int i = 0;
  3103. struct intr_context *intr_context = &qdev->intr_context[0];
  3104. if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
/* Each rx_ring has its
* own intr_context since we have separate
* vectors for each queue.
*/
  3109. for (i = 0; i < qdev->intr_count; i++, intr_context++) {
  3110. qdev->rx_ring[i].irq = i;
  3111. intr_context->intr = i;
  3112. intr_context->qdev = qdev;
  3113. /* Set up this vector's bit-mask that indicates
  3114. * which queues it services.
  3115. */
  3116. ql_set_irq_mask(qdev, intr_context);
/*
* We set up each vector's enable/disable/read bits so
* there are no bit/mask calculations in the critical path.
*/
  3121. intr_context->intr_en_mask =
  3122. INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
  3123. INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
  3124. | i;
  3125. intr_context->intr_dis_mask =
  3126. INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
  3127. INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
  3128. INTR_EN_IHD | i;
  3129. intr_context->intr_read_mask =
  3130. INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
  3131. INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
  3132. i;
  3133. if (i == 0) {
  3134. /* The first vector/queue handles
  3135. * broadcast/multicast, fatal errors,
  3136. * and firmware events. This in addition
  3137. * to normal inbound NAPI processing.
  3138. */
  3139. intr_context->handler = qlge_isr;
  3140. sprintf(intr_context->name, "%s-rx-%d",
  3141. qdev->ndev->name, i);
  3142. } else {
  3143. /*
  3144. * Inbound queues handle unicast frames only.
  3145. */
  3146. intr_context->handler = qlge_msix_rx_isr;
  3147. sprintf(intr_context->name, "%s-rx-%d",
  3148. qdev->ndev->name, i);
  3149. }
  3150. }
  3151. } else {
  3152. /*
  3153. * All rx_rings use the same intr_context since
  3154. * there is only one vector.
  3155. */
  3156. intr_context->intr = 0;
  3157. intr_context->qdev = qdev;
/*
* We set up each vector's enable/disable/read bits so
* there are no bit/mask calculations in the critical path.
*/
  3162. intr_context->intr_en_mask =
  3163. INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
  3164. intr_context->intr_dis_mask =
  3165. INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
  3166. INTR_EN_TYPE_DISABLE;
  3167. intr_context->intr_read_mask =
  3168. INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
  3169. /*
  3170. * Single interrupt means one handler for all rings.
  3171. */
  3172. intr_context->handler = qlge_isr;
  3173. sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
  3174. /* Set up this vector's bit-mask that indicates
  3175. * which queues it services. In this case there is
  3176. * a single vector so it will service all RSS and
  3177. * TX completion rings.
  3178. */
  3179. ql_set_irq_mask(qdev, intr_context);
  3180. }
  3181. /* Tell the TX completion rings which MSIx vector
  3182. * they will be using.
  3183. */
  3184. ql_set_tx_vect(qdev);
  3185. }
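/* Free every hooked irq vector, then disable MSI-X/MSI. */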
  3186. static void ql_free_irq(struct ql_adapter *qdev)
  3187. {
  3188. int i;
  3189. struct intr_context *intr_context = &qdev->intr_context[0];
  3190. for (i = 0; i < qdev->intr_count; i++, intr_context++) {
  3191. if (intr_context->hooked) {
  3192. if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
  3193. free_irq(qdev->msi_x_entry[i].vector,
  3194. &qdev->rx_ring[i]);
  3195. } else {
  3196. free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
  3197. }
  3198. }
  3199. }
  3200. ql_disable_msix(qdev);
  3201. }
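/* Map the queues to vectors, then hook a handler for each MSI-X
* vector or a single (possibly shared) handler for MSI/legacy.
*/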
  3202. static int ql_request_irq(struct ql_adapter *qdev)
  3203. {
  3204. int i;
  3205. int status = 0;
  3206. struct pci_dev *pdev = qdev->pdev;
  3207. struct intr_context *intr_context = &qdev->intr_context[0];
  3208. ql_resolve_queues_to_irqs(qdev);
  3209. for (i = 0; i < qdev->intr_count; i++, intr_context++) {
  3210. atomic_set(&intr_context->irq_cnt, 0);
  3211. if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
  3212. status = request_irq(qdev->msi_x_entry[i].vector,
  3213. intr_context->handler,
  3214. 0,
  3215. intr_context->name,
  3216. &qdev->rx_ring[i]);
  3217. if (status) {
  3218. netif_err(qdev, ifup, qdev->ndev,
  3219. "Failed request for MSIX interrupt %d.\n",
  3220. i);
  3221. goto err_irq;
  3222. }
  3223. } else {
  3224. netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
  3225. "trying msi or legacy interrupts.\n");
  3226. netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
  3227. "%s: irq = %d.\n", __func__, pdev->irq);
  3228. netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
  3229. "%s: context->name = %s.\n", __func__,
  3230. intr_context->name);
  3231. netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
  3232. "%s: dev_id = 0x%p.\n", __func__,
  3233. &qdev->rx_ring[0]);
status = request_irq(pdev->irq, qlge_isr,
		     test_bit(QL_MSI_ENABLED, &qdev->flags) ?
		     0 : IRQF_SHARED,
		     intr_context->name, &qdev->rx_ring[0]);
  3240. if (status)
  3241. goto err_irq;
  3242. netif_err(qdev, ifup, qdev->ndev,
  3243. "Hooked intr %d, queue type %s, with name %s.\n",
  3244. i,
  3245. qdev->rx_ring[0].type == DEFAULT_Q ?
  3246. "DEFAULT_Q" :
  3247. qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
  3248. qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
  3249. intr_context->name);
  3250. }
  3251. intr_context->hooked = 1;
  3252. }
  3253. return status;
  3254. err_irq:
  3255. netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
  3256. ql_free_irq(qdev);
  3257. return status;
  3258. }
  3259. static int ql_start_rss(struct ql_adapter *qdev)
  3260. {
  3261. static const u8 init_hash_seed[] = {
  3262. 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
  3263. 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
  3264. 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
  3265. 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
  3266. 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
  3267. };
  3268. struct ricb *ricb = &qdev->ricb;
  3269. int status = 0;
  3270. int i;
  3271. u8 *hash_id = (u8 *) ricb->hash_cq_id;
  3272. memset((void *)ricb, 0, sizeof(*ricb));
  3273. ricb->base_cq = RSS_L4K;
  3274. ricb->flags =
  3275. (RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
  3276. ricb->mask = cpu_to_le16((u16)(0x3ff));
  3277. /*
  3278. * Fill out the Indirection Table.
  3279. */
  3280. for (i = 0; i < 1024; i++)
  3281. hash_id[i] = (i & (qdev->rss_ring_count - 1));
  3282. memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
  3283. memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
  3284. status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
  3285. if (status) {
  3286. netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
  3287. return status;
  3288. }
  3289. return status;
  3290. }
  3291. static int ql_clear_routing_entries(struct ql_adapter *qdev)
  3292. {
  3293. int i, status = 0;
  3294. status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
  3295. if (status)
  3296. return status;
  3297. /* Clear all the entries in the routing table. */
  3298. for (i = 0; i < 16; i++) {
  3299. status = ql_set_routing_reg(qdev, i, 0, 0);
  3300. if (status) {
  3301. netif_err(qdev, ifup, qdev->ndev,
  3302. "Failed to init routing register for CAM packets.\n");
  3303. break;
  3304. }
  3305. }
  3306. ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
  3307. return status;
  3308. }
  3309. /* Initialize the frame-to-queue routing. */
  3310. static int ql_route_initialize(struct ql_adapter *qdev)
  3311. {
  3312. int status = 0;
  3313. /* Clear all the entries in the routing table. */
  3314. status = ql_clear_routing_entries(qdev);
  3315. if (status)
  3316. return status;
  3317. status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
  3318. if (status)
  3319. return status;
  3320. status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
  3321. RT_IDX_IP_CSUM_ERR, 1);
  3322. if (status) {
  3323. netif_err(qdev, ifup, qdev->ndev,
  3324. "Failed to init routing register "
  3325. "for IP CSUM error packets.\n");
  3326. goto exit;
  3327. }
  3328. status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
  3329. RT_IDX_TU_CSUM_ERR, 1);
  3330. if (status) {
  3331. netif_err(qdev, ifup, qdev->ndev,
  3332. "Failed to init routing register "
  3333. "for TCP/UDP CSUM error packets.\n");
  3334. goto exit;
  3335. }
  3336. status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
  3337. if (status) {
  3338. netif_err(qdev, ifup, qdev->ndev,
  3339. "Failed to init routing register for broadcast packets.\n");
  3340. goto exit;
  3341. }
  3342. /* If we have more than one inbound queue, then turn on RSS in the
  3343. * routing block.
  3344. */
  3345. if (qdev->rss_ring_count > 1) {
  3346. status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
  3347. RT_IDX_RSS_MATCH, 1);
  3348. if (status) {
  3349. netif_err(qdev, ifup, qdev->ndev,
  3350. "Failed to init routing register for MATCH RSS packets.\n");
  3351. goto exit;
  3352. }
  3353. }
  3354. status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
  3355. RT_IDX_CAM_HIT, 1);
  3356. if (status)
  3357. netif_err(qdev, ifup, qdev->ndev,
  3358. "Failed to init routing register for CAM packets.\n");
  3359. exit:
  3360. ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
  3361. return status;
  3362. }
  3363. int ql_cam_route_initialize(struct ql_adapter *qdev)
  3364. {
  3365. int status, set;
/* Check if the link is up and use that to determine
* whether we are setting or clearing the MAC address
* in the CAM.
*/
  3370. set = ql_read32(qdev, STS);
  3371. set &= qdev->port_link_up;
  3372. status = ql_set_mac_addr(qdev, set);
  3373. if (status) {
  3374. netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
  3375. return status;
  3376. }
  3377. status = ql_route_initialize(qdev);
  3378. if (status)
  3379. netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
  3380. return status;
  3381. }
  3382. static int ql_adapter_initialize(struct ql_adapter *qdev)
  3383. {
  3384. u32 value, mask;
  3385. int i;
  3386. int status = 0;
  3387. /*
  3388. * Set up the System register to halt on errors.
  3389. */
  3390. value = SYS_EFE | SYS_FAE;
  3391. mask = value << 16;
  3392. ql_write32(qdev, SYS, mask | value);
  3393. /* Set the default queue, and VLAN behavior. */
  3394. value = NIC_RCV_CFG_DFQ;
  3395. mask = NIC_RCV_CFG_DFQ_MASK;
  3396. if (qdev->ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
  3397. value |= NIC_RCV_CFG_RV;
  3398. mask |= (NIC_RCV_CFG_RV << 16);
  3399. }
  3400. ql_write32(qdev, NIC_RCV_CFG, (mask | value));
  3401. /* Set the MPI interrupt to enabled. */
  3402. ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
  3403. /* Enable the function, set pagesize, enable error checking. */
  3404. value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
  3405. FSC_EC | FSC_VM_PAGE_4K;
  3406. value |= SPLT_SETTING;
  3407. /* Set/clear header splitting. */
  3408. mask = FSC_VM_PAGESIZE_MASK |
  3409. FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
  3410. ql_write32(qdev, FSC, mask | value);
  3411. ql_write32(qdev, SPLT_HDR, SPLT_LEN);
/* Set RX packet routing to use the port/PCI function on which the
* packet arrived, in addition to the usual frame routing.
* This is helpful when bonding, where both interfaces can have
* the same MAC address.
*/
  3417. ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
  3418. /* Reroute all packets to our Interface.
  3419. * They may have been routed to MPI firmware
  3420. * due to WOL.
  3421. */
  3422. value = ql_read32(qdev, MGMT_RCV_CFG);
  3423. value &= ~MGMT_RCV_CFG_RM;
  3424. mask = 0xffff0000;
  3425. /* Sticky reg needs clearing due to WOL. */
  3426. ql_write32(qdev, MGMT_RCV_CFG, mask);
  3427. ql_write32(qdev, MGMT_RCV_CFG, mask | value);
/* Default WOL is enabled on Mezz cards */
  3429. if (qdev->pdev->subsystem_device == 0x0068 ||
  3430. qdev->pdev->subsystem_device == 0x0180)
  3431. qdev->wol = WAKE_MAGIC;
  3432. /* Start up the rx queues. */
  3433. for (i = 0; i < qdev->rx_ring_count; i++) {
  3434. status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
  3435. if (status) {
  3436. netif_err(qdev, ifup, qdev->ndev,
  3437. "Failed to start rx ring[%d].\n", i);
  3438. return status;
  3439. }
  3440. }
  3441. /* If there is more than one inbound completion queue
  3442. * then download a RICB to configure RSS.
  3443. */
  3444. if (qdev->rss_ring_count > 1) {
  3445. status = ql_start_rss(qdev);
  3446. if (status) {
  3447. netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
  3448. return status;
  3449. }
  3450. }
  3451. /* Start up the tx queues. */
  3452. for (i = 0; i < qdev->tx_ring_count; i++) {
  3453. status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
  3454. if (status) {
  3455. netif_err(qdev, ifup, qdev->ndev,
  3456. "Failed to start tx ring[%d].\n", i);
  3457. return status;
  3458. }
  3459. }
  3460. /* Initialize the port and set the max framesize. */
  3461. status = qdev->nic_ops->port_initialize(qdev);
  3462. if (status)
  3463. netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
  3464. /* Set up the MAC address and frame routing filter. */
  3465. status = ql_cam_route_initialize(qdev);
  3466. if (status) {
  3467. netif_err(qdev, ifup, qdev->ndev,
  3468. "Failed to init CAM/Routing tables.\n");
  3469. return status;
  3470. }
  3471. /* Start NAPI for the RSS queues. */
  3472. for (i = 0; i < qdev->rss_ring_count; i++)
  3473. napi_enable(&qdev->rx_ring[i].napi);
  3474. return status;
  3475. }
  3476. /* Issue soft reset to chip. */
  3477. static int ql_adapter_reset(struct ql_adapter *qdev)
  3478. {
  3479. u32 value;
  3480. int status = 0;
  3481. unsigned long end_jiffies;
  3482. /* Clear all the entries in the routing table. */
  3483. status = ql_clear_routing_entries(qdev);
  3484. if (status) {
  3485. netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
  3486. return status;
  3487. }
  3488. end_jiffies = jiffies +
  3489. max((unsigned long)1, usecs_to_jiffies(30));
/* If the recovery bit is set, skip the mailbox command and
* clear the bit; otherwise we are in the normal reset process.
*/
  3493. if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
  3494. /* Stop management traffic. */
  3495. ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
  3496. /* Wait for the NIC and MGMNT FIFOs to empty. */
  3497. ql_wait_fifo_empty(qdev);
  3498. } else
  3499. clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
  3500. ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
  3501. do {
  3502. value = ql_read32(qdev, RST_FO);
  3503. if ((value & RST_FO_FR) == 0)
  3504. break;
  3505. cpu_relax();
  3506. } while (time_before(jiffies, end_jiffies));
  3507. if (value & RST_FO_FR) {
  3508. netif_err(qdev, ifdown, qdev->ndev,
  3509. "ETIMEDOUT!!! errored out of resetting the chip!\n");
  3510. status = -ETIMEDOUT;
  3511. }
  3512. /* Resume management traffic. */
  3513. ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
  3514. return status;
  3515. }
  3516. static void ql_display_dev_info(struct net_device *ndev)
  3517. {
  3518. struct ql_adapter *qdev = netdev_priv(ndev);
  3519. netif_info(qdev, probe, qdev->ndev,
  3520. "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
  3521. "XG Roll = %d, XG Rev = %d.\n",
  3522. qdev->func,
  3523. qdev->port,
  3524. qdev->chip_rev_id & 0x0000000f,
  3525. qdev->chip_rev_id >> 4 & 0x0000000f,
  3526. qdev->chip_rev_id >> 8 & 0x0000000f,
  3527. qdev->chip_rev_id >> 12 & 0x0000000f);
  3528. netif_info(qdev, probe, qdev->ndev,
  3529. "MAC address %pM\n", ndev->dev_addr);
  3530. }
  3531. static int ql_wol(struct ql_adapter *qdev)
  3532. {
  3533. int status = 0;
  3534. u32 wol = MB_WOL_DISABLE;
  3535. /* The CAM is still intact after a reset, but if we
  3536. * are doing WOL, then we may need to program the
  3537. * routing regs. We would also need to issue the mailbox
  3538. * commands to instruct the MPI what to do per the ethtool
  3539. * settings.
  3540. */
  3541. if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
  3542. WAKE_MCAST | WAKE_BCAST)) {
  3543. netif_err(qdev, ifdown, qdev->ndev,
  3544. "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
  3545. qdev->wol);
  3546. return -EINVAL;
  3547. }
  3548. if (qdev->wol & WAKE_MAGIC) {
  3549. status = ql_mb_wol_set_magic(qdev, 1);
  3550. if (status) {
  3551. netif_err(qdev, ifdown, qdev->ndev,
  3552. "Failed to set magic packet on %s.\n",
  3553. qdev->ndev->name);
  3554. return status;
  3555. } else
  3556. netif_info(qdev, drv, qdev->ndev,
  3557. "Enabled magic packet successfully on %s.\n",
  3558. qdev->ndev->name);
  3559. wol |= MB_WOL_MAGIC_PKT;
  3560. }
  3561. if (qdev->wol) {
  3562. wol |= MB_WOL_MODE_ON;
  3563. status = ql_mb_wol_mode(qdev, wol);
  3564. netif_err(qdev, drv, qdev->ndev,
  3565. "WOL %s (wol code 0x%x) on %s\n",
  3566. (status == 0) ? "Successfully set" : "Failed",
  3567. wol, qdev->ndev->name);
  3568. }
  3569. return status;
  3570. }
  3571. static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
  3572. {
  3573. /* Don't kill the reset worker thread if we
  3574. * are in the process of recovery.
  3575. */
  3576. if (test_bit(QL_ADAPTER_UP, &qdev->flags))
  3577. cancel_delayed_work_sync(&qdev->asic_reset_work);
  3578. cancel_delayed_work_sync(&qdev->mpi_reset_work);
  3579. cancel_delayed_work_sync(&qdev->mpi_work);
  3580. cancel_delayed_work_sync(&qdev->mpi_idc_work);
  3581. cancel_delayed_work_sync(&qdev->mpi_core_to_log);
  3582. cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
  3583. }
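/* Bring the adapter down: turn the link off, cancel the workers,
* stop NAPI and interrupts, clean the TX rings, reset the chip and
* free the rx buffers.
*/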
  3584. static int ql_adapter_down(struct ql_adapter *qdev)
  3585. {
  3586. int i, status = 0;
  3587. ql_link_off(qdev);
  3588. ql_cancel_all_work_sync(qdev);
  3589. for (i = 0; i < qdev->rss_ring_count; i++)
  3590. napi_disable(&qdev->rx_ring[i].napi);
  3591. clear_bit(QL_ADAPTER_UP, &qdev->flags);
  3592. ql_disable_interrupts(qdev);
  3593. ql_tx_ring_clean(qdev);
  3594. /* Call netif_napi_del() from common point.
  3595. */
  3596. for (i = 0; i < qdev->rss_ring_count; i++)
  3597. netif_napi_del(&qdev->rx_ring[i].napi);
  3598. status = ql_adapter_reset(qdev);
  3599. if (status)
  3600. netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
  3601. qdev->func);
  3602. ql_free_rx_buffers(qdev);
  3603. return status;
  3604. }
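/* Bring the adapter up: initialize the chip, post rx buffers,
* restore the rx mode and vlan settings, and enable interrupts.
*/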
  3605. static int ql_adapter_up(struct ql_adapter *qdev)
  3606. {
  3607. int err = 0;
  3608. err = ql_adapter_initialize(qdev);
  3609. if (err) {
  3610. netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
  3611. goto err_init;
  3612. }
  3613. set_bit(QL_ADAPTER_UP, &qdev->flags);
  3614. ql_alloc_rx_buffers(qdev);
/* If the port is initialized and the
* link is up, then turn on the carrier.
*/
  3618. if ((ql_read32(qdev, STS) & qdev->port_init) &&
  3619. (ql_read32(qdev, STS) & qdev->port_link_up))
  3620. ql_link_on(qdev);
  3621. /* Restore rx mode. */
  3622. clear_bit(QL_ALLMULTI, &qdev->flags);
  3623. clear_bit(QL_PROMISCUOUS, &qdev->flags);
  3624. qlge_set_multicast_list(qdev->ndev);
  3625. /* Restore vlan setting. */
  3626. qlge_restore_vlan(qdev);
  3627. ql_enable_interrupts(qdev);
  3628. ql_enable_all_completion_interrupts(qdev);
  3629. netif_tx_start_all_queues(qdev->ndev);
  3630. return 0;
  3631. err_init:
  3632. ql_adapter_reset(qdev);
  3633. return err;
  3634. }
  3635. static void ql_release_adapter_resources(struct ql_adapter *qdev)
  3636. {
  3637. ql_free_mem_resources(qdev);
  3638. ql_free_irq(qdev);
  3639. }
  3640. static int ql_get_adapter_resources(struct ql_adapter *qdev)
  3641. {
  3642. int status = 0;
  3643. if (ql_alloc_mem_resources(qdev)) {
  3644. netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
  3645. return -ENOMEM;
  3646. }
  3647. status = ql_request_irq(qdev);
  3648. return status;
  3649. }
  3650. static int qlge_close(struct net_device *ndev)
  3651. {
  3652. struct ql_adapter *qdev = netdev_priv(ndev);
/* If we hit the pci_channel_io_perm_failure
* condition, then we have already
* brought the adapter down.
*/
  3657. if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
  3658. netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
  3659. clear_bit(QL_EEH_FATAL, &qdev->flags);
  3660. return 0;
  3661. }
  3662. /*
  3663. * Wait for device to recover from a reset.
  3664. * (Rarely happens, but possible.)
  3665. */
  3666. while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
  3667. msleep(1);
  3668. ql_adapter_down(qdev);
  3669. ql_release_adapter_resources(qdev);
  3670. return 0;
  3671. }
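/* Size the TX/RX rings and pick the RSS ring and vector counts
* based on the number of online CPUs and the MSI-X vectors we
* actually get.
*/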
  3672. static int ql_configure_rings(struct ql_adapter *qdev)
  3673. {
  3674. int i;
  3675. struct rx_ring *rx_ring;
  3676. struct tx_ring *tx_ring;
  3677. int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
  3678. unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
  3679. LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
  3680. qdev->lbq_buf_order = get_order(lbq_buf_len);
/* In a perfect world we have one RSS ring for each CPU
* and each has its own vector. To do that we ask for
* cpu_cnt vectors. ql_enable_msix() will adjust the
* vector count to what we actually get. We then
* allocate an RSS ring for each.
* Essentially, we are doing min(cpu_count, msix_vector_count).
*/
  3688. qdev->intr_count = cpu_cnt;
  3689. ql_enable_msix(qdev);
  3690. /* Adjust the RSS ring count to the actual vector count. */
  3691. qdev->rss_ring_count = qdev->intr_count;
  3692. qdev->tx_ring_count = cpu_cnt;
  3693. qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
  3694. for (i = 0; i < qdev->tx_ring_count; i++) {
  3695. tx_ring = &qdev->tx_ring[i];
  3696. memset((void *)tx_ring, 0, sizeof(*tx_ring));
  3697. tx_ring->qdev = qdev;
  3698. tx_ring->wq_id = i;
  3699. tx_ring->wq_len = qdev->tx_ring_size;
  3700. tx_ring->wq_size =
  3701. tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
/*
* The completion queue IDs for the tx rings start
* immediately after the rss rings.
*/
  3706. tx_ring->cq_id = qdev->rss_ring_count + i;
  3707. }
  3708. for (i = 0; i < qdev->rx_ring_count; i++) {
  3709. rx_ring = &qdev->rx_ring[i];
  3710. memset((void *)rx_ring, 0, sizeof(*rx_ring));
  3711. rx_ring->qdev = qdev;
  3712. rx_ring->cq_id = i;
  3713. rx_ring->cpu = i % cpu_cnt; /* CPU to run handler on. */
  3714. if (i < qdev->rss_ring_count) {
  3715. /*
  3716. * Inbound (RSS) queues.
  3717. */
  3718. rx_ring->cq_len = qdev->rx_ring_size;
  3719. rx_ring->cq_size =
  3720. rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
  3721. rx_ring->lbq_len = NUM_LARGE_BUFFERS;
  3722. rx_ring->lbq_size =
  3723. rx_ring->lbq_len * sizeof(__le64);
  3724. rx_ring->lbq_buf_size = (u16)lbq_buf_len;
  3725. rx_ring->sbq_len = NUM_SMALL_BUFFERS;
  3726. rx_ring->sbq_size =
  3727. rx_ring->sbq_len * sizeof(__le64);
  3728. rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
  3729. rx_ring->type = RX_Q;
  3730. } else {
  3731. /*
  3732. * Outbound queue handles outbound completions only.
  3733. */
  3734. /* outbound cq is same size as tx_ring it services. */
  3735. rx_ring->cq_len = qdev->tx_ring_size;
  3736. rx_ring->cq_size =
  3737. rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
  3738. rx_ring->lbq_len = 0;
  3739. rx_ring->lbq_size = 0;
  3740. rx_ring->lbq_buf_size = 0;
  3741. rx_ring->sbq_len = 0;
  3742. rx_ring->sbq_size = 0;
  3743. rx_ring->sbq_buf_size = 0;
  3744. rx_ring->type = TX_Q;
  3745. }
  3746. }
  3747. return 0;
  3748. }
  3749. static int qlge_open(struct net_device *ndev)
  3750. {
  3751. int err = 0;
  3752. struct ql_adapter *qdev = netdev_priv(ndev);
  3753. err = ql_adapter_reset(qdev);
  3754. if (err)
  3755. return err;
  3756. err = ql_configure_rings(qdev);
  3757. if (err)
  3758. return err;
  3759. err = ql_get_adapter_resources(qdev);
  3760. if (err)
  3761. goto error_up;
  3762. err = ql_adapter_up(qdev);
  3763. if (err)
  3764. goto error_up;
  3765. return err;
  3766. error_up:
  3767. ql_release_adapter_resources(qdev);
  3768. return err;
  3769. }
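/* Cycle the adapter down and back up so a new large buffer size,
* derived from the MTU, takes effect.
*/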
  3770. static int ql_change_rx_buffers(struct ql_adapter *qdev)
  3771. {
  3772. struct rx_ring *rx_ring;
  3773. int i, status;
  3774. u32 lbq_buf_len;
  3775. /* Wait for an outstanding reset to complete. */
  3776. if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
  3777. int i = 3;
  3778. while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
  3779. netif_err(qdev, ifup, qdev->ndev,
  3780. "Waiting for adapter UP...\n");
  3781. ssleep(1);
  3782. }
  3783. if (!i) {
  3784. netif_err(qdev, ifup, qdev->ndev,
  3785. "Timed out waiting for adapter UP\n");
  3786. return -ETIMEDOUT;
  3787. }
  3788. }
  3789. status = ql_adapter_down(qdev);
  3790. if (status)
  3791. goto error;
  3792. /* Get the new rx buffer size. */
  3793. lbq_buf_len = (qdev->ndev->mtu > 1500) ?
  3794. LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
  3795. qdev->lbq_buf_order = get_order(lbq_buf_len);
  3796. for (i = 0; i < qdev->rss_ring_count; i++) {
  3797. rx_ring = &qdev->rx_ring[i];
  3798. /* Set the new size. */
  3799. rx_ring->lbq_buf_size = lbq_buf_len;
  3800. }
  3801. status = ql_adapter_up(qdev);
  3802. if (status)
  3803. goto error;
  3804. return status;
  3805. error:
  3806. netif_alert(qdev, ifup, qdev->ndev,
  3807. "Driver up/down cycle failed, closing device.\n");
  3808. set_bit(QL_ADAPTER_UP, &qdev->flags);
  3809. dev_close(qdev->ndev);
  3810. return status;
  3811. }
  3812. static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
  3813. {
  3814. struct ql_adapter *qdev = netdev_priv(ndev);
  3815. int status;
  3816. if (ndev->mtu == 1500 && new_mtu == 9000) {
  3817. netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
  3818. } else if (ndev->mtu == 9000 && new_mtu == 1500) {
  3819. netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
  3820. } else
  3821. return -EINVAL;
  3822. queue_delayed_work(qdev->workqueue,
  3823. &qdev->mpi_port_cfg_work, 3*HZ);
  3824. ndev->mtu = new_mtu;
  3825. if (!netif_running(qdev->ndev)) {
  3826. return 0;
  3827. }
  3828. status = ql_change_rx_buffers(qdev);
  3829. if (status) {
  3830. netif_err(qdev, ifup, qdev->ndev,
  3831. "Changing MTU failed.\n");
  3832. }
  3833. return status;
  3834. }
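/* Sum the per-ring RX and TX counters into the netdev stats. */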
static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
  3837. {
  3838. struct ql_adapter *qdev = netdev_priv(ndev);
  3839. struct rx_ring *rx_ring = &qdev->rx_ring[0];
  3840. struct tx_ring *tx_ring = &qdev->tx_ring[0];
  3841. unsigned long pkts, mcast, dropped, errors, bytes;
  3842. int i;
  3843. /* Get RX stats. */
  3844. pkts = mcast = dropped = errors = bytes = 0;
  3845. for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
  3846. pkts += rx_ring->rx_packets;
  3847. bytes += rx_ring->rx_bytes;
  3848. dropped += rx_ring->rx_dropped;
  3849. errors += rx_ring->rx_errors;
  3850. mcast += rx_ring->rx_multicast;
  3851. }
  3852. ndev->stats.rx_packets = pkts;
  3853. ndev->stats.rx_bytes = bytes;
  3854. ndev->stats.rx_dropped = dropped;
  3855. ndev->stats.rx_errors = errors;
  3856. ndev->stats.multicast = mcast;
  3857. /* Get TX stats. */
  3858. pkts = errors = bytes = 0;
  3859. for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
  3860. pkts += tx_ring->tx_packets;
  3861. bytes += tx_ring->tx_bytes;
  3862. errors += tx_ring->tx_errors;
  3863. }
  3864. ndev->stats.tx_packets = pkts;
  3865. ndev->stats.tx_bytes = bytes;
  3866. ndev->stats.tx_errors = errors;
  3867. return &ndev->stats;
  3868. }
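/* Program the promiscuous, all-multi and multicast-match routing
* registers to reflect the current netdev flags and multicast list.
*/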
  3869. static void qlge_set_multicast_list(struct net_device *ndev)
  3870. {
  3871. struct ql_adapter *qdev = netdev_priv(ndev);
  3872. struct netdev_hw_addr *ha;
  3873. int i, status;
  3874. status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
  3875. if (status)
  3876. return;
  3877. /*
  3878. * Set or clear promiscuous mode if a
  3879. * transition is taking place.
  3880. */
  3881. if (ndev->flags & IFF_PROMISC) {
  3882. if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
  3883. if (ql_set_routing_reg
  3884. (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
  3885. netif_err(qdev, hw, qdev->ndev,
  3886. "Failed to set promiscuous mode.\n");
  3887. } else {
  3888. set_bit(QL_PROMISCUOUS, &qdev->flags);
  3889. }
  3890. }
  3891. } else {
  3892. if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
  3893. if (ql_set_routing_reg
  3894. (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
  3895. netif_err(qdev, hw, qdev->ndev,
  3896. "Failed to clear promiscuous mode.\n");
  3897. } else {
  3898. clear_bit(QL_PROMISCUOUS, &qdev->flags);
  3899. }
  3900. }
  3901. }
  3902. /*
  3903. * Set or clear all multicast mode if a
  3904. * transition is taking place.
  3905. */
  3906. if ((ndev->flags & IFF_ALLMULTI) ||
  3907. (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
  3908. if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
  3909. if (ql_set_routing_reg
  3910. (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
  3911. netif_err(qdev, hw, qdev->ndev,
  3912. "Failed to set all-multi mode.\n");
  3913. } else {
  3914. set_bit(QL_ALLMULTI, &qdev->flags);
  3915. }
  3916. }
  3917. } else {
  3918. if (test_bit(QL_ALLMULTI, &qdev->flags)) {
  3919. if (ql_set_routing_reg
  3920. (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
  3921. netif_err(qdev, hw, qdev->ndev,
  3922. "Failed to clear all-multi mode.\n");
  3923. } else {
  3924. clear_bit(QL_ALLMULTI, &qdev->flags);
  3925. }
  3926. }
  3927. }
  3928. if (!netdev_mc_empty(ndev)) {
  3929. status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
  3930. if (status)
  3931. goto exit;
  3932. i = 0;
  3933. netdev_for_each_mc_addr(ha, ndev) {
  3934. if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
  3935. MAC_ADDR_TYPE_MULTI_MAC, i)) {
netif_err(qdev, hw, qdev->ndev,
	  "Failed to load multicast address.\n");
  3938. ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
  3939. goto exit;
  3940. }
  3941. i++;
  3942. }
  3943. ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
  3944. if (ql_set_routing_reg
  3945. (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
  3946. netif_err(qdev, hw, qdev->ndev,
  3947. "Failed to set multicast match mode.\n");
  3948. } else {
  3949. set_bit(QL_ALLMULTI, &qdev->flags);
  3950. }
  3951. }
  3952. exit:
  3953. ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
  3954. }
  3955. static int qlge_set_mac_address(struct net_device *ndev, void *p)
  3956. {
  3957. struct ql_adapter *qdev = netdev_priv(ndev);
  3958. struct sockaddr *addr = p;
  3959. int status;
  3960. if (!is_valid_ether_addr(addr->sa_data))
  3961. return -EADDRNOTAVAIL;
  3962. memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
  3963. /* Update local copy of current mac address. */
  3964. memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
  3965. status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
  3966. if (status)
  3967. return status;
  3968. status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
  3969. MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
  3970. if (status)
  3971. netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
  3972. ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
  3973. return status;
  3974. }
  3975. static void qlge_tx_timeout(struct net_device *ndev)
  3976. {
  3977. struct ql_adapter *qdev = netdev_priv(ndev);
  3978. ql_queue_asic_error(qdev);
  3979. }
  3980. static void ql_asic_reset_work(struct work_struct *work)
  3981. {
  3982. struct ql_adapter *qdev =
  3983. container_of(work, struct ql_adapter, asic_reset_work.work);
  3984. int status;
  3985. rtnl_lock();
  3986. status = ql_adapter_down(qdev);
  3987. if (status)
  3988. goto error;
  3989. status = ql_adapter_up(qdev);
  3990. if (status)
  3991. goto error;
  3992. /* Restore rx mode. */
  3993. clear_bit(QL_ALLMULTI, &qdev->flags);
  3994. clear_bit(QL_PROMISCUOUS, &qdev->flags);
  3995. qlge_set_multicast_list(qdev->ndev);
  3996. rtnl_unlock();
  3997. return;
  3998. error:
  3999. netif_alert(qdev, ifup, qdev->ndev,
  4000. "Driver up/down cycle failed, closing device\n");
  4001. set_bit(QL_ADAPTER_UP, &qdev->flags);
  4002. dev_close(qdev->ndev);
  4003. rtnl_unlock();
  4004. }
  4005. static const struct nic_operations qla8012_nic_ops = {
  4006. .get_flash = ql_get_8012_flash_params,
  4007. .port_initialize = ql_8012_port_initialize,
  4008. };
  4009. static const struct nic_operations qla8000_nic_ops = {
  4010. .get_flash = ql_get_8000_flash_params,
  4011. .port_initialize = ql_8000_port_initialize,
  4012. };
  4013. /* Find the pcie function number for the other NIC
  4014. * on this chip. Since both NIC functions share a
  4015. * common firmware we have the lowest enabled function
  4016. * do any common work. Examples would be resetting
  4017. * after a fatal firmware error, or doing a firmware
  4018. * coredump.
  4019. */
  4020. static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
  4021. {
  4022. int status = 0;
  4023. u32 temp;
  4024. u32 nic_func1, nic_func2;
  4025. status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
  4026. &temp);
  4027. if (status)
  4028. return status;
  4029. nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
  4030. MPI_TEST_NIC_FUNC_MASK);
  4031. nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
  4032. MPI_TEST_NIC_FUNC_MASK);
  4033. if (qdev->func == nic_func1)
  4034. qdev->alt_func = nic_func2;
  4035. else if (qdev->func == nic_func2)
  4036. qdev->alt_func = nic_func1;
  4037. else
  4038. status = -EIO;
  4039. return status;
  4040. }
  4041. static int ql_get_board_info(struct ql_adapter *qdev)
  4042. {
  4043. int status;
  4044. qdev->func =
  4045. (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
  4046. if (qdev->func > 3)
  4047. return -EIO;
  4048. status = ql_get_alt_pcie_func(qdev);
  4049. if (status)
  4050. return status;
  4051. qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
  4052. if (qdev->port) {
  4053. qdev->xg_sem_mask = SEM_XGMAC1_MASK;
  4054. qdev->port_link_up = STS_PL1;
  4055. qdev->port_init = STS_PI1;
  4056. qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
  4057. qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
  4058. } else {
  4059. qdev->xg_sem_mask = SEM_XGMAC0_MASK;
  4060. qdev->port_link_up = STS_PL0;
  4061. qdev->port_init = STS_PI0;
  4062. qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
  4063. qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
  4064. }
  4065. qdev->chip_rev_id = ql_read32(qdev, REV_ID);
  4066. qdev->device_id = qdev->pdev->device;
  4067. if (qdev->device_id == QLGE_DEVICE_ID_8012)
  4068. qdev->nic_ops = &qla8012_nic_ops;
  4069. else if (qdev->device_id == QLGE_DEVICE_ID_8000)
  4070. qdev->nic_ops = &qla8000_nic_ops;
  4071. return status;
  4072. }
  4073. static void ql_release_all(struct pci_dev *pdev)
  4074. {
  4075. struct net_device *ndev = pci_get_drvdata(pdev);
  4076. struct ql_adapter *qdev = netdev_priv(ndev);
  4077. if (qdev->workqueue) {
  4078. destroy_workqueue(qdev->workqueue);
  4079. qdev->workqueue = NULL;
  4080. }
  4081. if (qdev->reg_base)
  4082. iounmap(qdev->reg_base);
  4083. if (qdev->doorbell_area)
  4084. iounmap(qdev->doorbell_area);
  4085. vfree(qdev->mpi_coredump);
  4086. pci_release_regions(pdev);
  4087. }
  4088. static int ql_init_device(struct pci_dev *pdev, struct net_device *ndev,
  4089. int cards_found)
  4090. {
  4091. struct ql_adapter *qdev = netdev_priv(ndev);
  4092. int err = 0;
  4093. memset((void *)qdev, 0, sizeof(*qdev));
  4094. err = pci_enable_device(pdev);
  4095. if (err) {
  4096. dev_err(&pdev->dev, "PCI device enable failed.\n");
  4097. return err;
  4098. }
  4099. qdev->ndev = ndev;
  4100. qdev->pdev = pdev;
  4101. pci_set_drvdata(pdev, ndev);
  4102. /* Set PCIe read request size */
  4103. err = pcie_set_readrq(pdev, 4096);
  4104. if (err) {
  4105. dev_err(&pdev->dev, "Set readrq failed.\n");
  4106. goto err_out1;
  4107. }
  4108. err = pci_request_regions(pdev, DRV_NAME);
  4109. if (err) {
  4110. dev_err(&pdev->dev, "PCI region request failed.\n");
  4111. return err;
  4112. }
  4113. pci_set_master(pdev);
  4114. if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
  4115. set_bit(QL_DMA64, &qdev->flags);
  4116. err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
  4117. } else {
  4118. err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
  4119. if (!err)
  4120. err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
  4121. }
  4122. if (err) {
  4123. dev_err(&pdev->dev, "No usable DMA configuration.\n");
  4124. goto err_out2;
  4125. }
  4126. /* Set PCIe reset type for EEH to fundamental. */
  4127. pdev->needs_freset = 1;
  4128. pci_save_state(pdev);
  4129. qdev->reg_base =
  4130. ioremap_nocache(pci_resource_start(pdev, 1),
  4131. pci_resource_len(pdev, 1));
  4132. if (!qdev->reg_base) {
  4133. dev_err(&pdev->dev, "Register mapping failed.\n");
  4134. err = -ENOMEM;
  4135. goto err_out2;
  4136. }
  4137. qdev->doorbell_area_size = pci_resource_len(pdev, 3);
  4138. qdev->doorbell_area =
  4139. ioremap_nocache(pci_resource_start(pdev, 3),
  4140. pci_resource_len(pdev, 3));
  4141. if (!qdev->doorbell_area) {
  4142. dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
  4143. err = -ENOMEM;
  4144. goto err_out2;
  4145. }
  4146. err = ql_get_board_info(qdev);
  4147. if (err) {
  4148. dev_err(&pdev->dev, "Register access failed.\n");
  4149. err = -EIO;
  4150. goto err_out2;
  4151. }
  4152. qdev->msg_enable = netif_msg_init(debug, default_msg);
  4153. spin_lock_init(&qdev->hw_lock);
  4154. spin_lock_init(&qdev->stats_lock);
	if (qlge_mpi_coredump) {
		qdev->mpi_coredump =
			vmalloc(sizeof(struct ql_mpi_coredump));
		if (qdev->mpi_coredump == NULL) {
			err = -ENOMEM;
			goto err_out2;
		}
		if (qlge_force_coredump)
			set_bit(QL_FRC_COREDUMP, &qdev->flags);
	}

	/* make sure the EEPROM is good */
	err = qdev->nic_ops->get_flash(qdev);
	if (err) {
		dev_err(&pdev->dev, "Invalid FLASH.\n");
		goto err_out2;
	}

	/* Keep local copy of current mac address. */
	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);

	/* Set up the default ring sizes. */
	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;

	/* Set up the coalescing parameters. */
	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;

	/*
	 * Set up the operating parameters.
	 */
	qdev->workqueue = create_singlethread_workqueue(ndev->name);
	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
	init_completion(&qdev->ide_completion);
	mutex_init(&qdev->mpi_mutex);

	if (!cards_found) {
		dev_info(&pdev->dev, "%s\n", DRV_STRING);
		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
			 DRV_NAME, DRV_VERSION);
	}
	return 0;
err_out2:
	ql_release_all(pdev);
err_out1:
	pci_disable_device(pdev);
	return err;
}
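
/* Entry points the network stack uses to drive this device. */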
static const struct net_device_ops qlge_netdev_ops = {
	.ndo_open		= qlge_open,
	.ndo_stop		= qlge_close,
	.ndo_start_xmit		= qlge_send,
	.ndo_change_mtu		= qlge_change_mtu,
	.ndo_get_stats		= qlge_get_stats,
	.ndo_set_rx_mode	= qlge_set_multicast_list,
	.ndo_set_mac_address	= qlge_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= qlge_tx_timeout,
	.ndo_fix_features	= qlge_fix_features,
	.ndo_set_features	= qlge_set_features,
	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
};
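
/*
 * Poll the chip's status register every five seconds so a dead PCI
 * bus is noticed (and EEH triggered) even while the link is idle.
 * The timer is not rearmed once the channel has gone offline.
 */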
static void ql_timer(unsigned long data)
{
	struct ql_adapter *qdev = (struct ql_adapter *)data;
	u32 var;

	var = ql_read32(qdev, STS);
	if (pci_channel_offline(qdev->pdev)) {
		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
		return;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
}
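
/*
 * Probe one adapter: allocate the net_device, bring up the PCI/ASIC
 * side via ql_init_device(), advertise feature flags, and register
 * with the network stack.
 */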
static int qlge_probe(struct pci_dev *pdev,
		      const struct pci_device_id *pci_entry)
{
	struct net_device *ndev = NULL;
	struct ql_adapter *qdev = NULL;
	static int cards_found;
	int err = 0;

	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
				 min(MAX_CPUS,
				     netif_get_num_default_rss_queues()));
	if (!ndev)
		return -ENOMEM;

	err = ql_init_device(pdev, ndev, cards_found);
	if (err < 0) {
		free_netdev(ndev);
		return err;
	}

	qdev = netdev_priv(ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->hw_features = NETIF_F_SG |
			    NETIF_F_IP_CSUM |
			    NETIF_F_TSO |
			    NETIF_F_TSO_ECN |
			    NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_FILTER |
			    NETIF_F_RXCSUM;
	ndev->features = ndev->hw_features;
	ndev->vlan_features = ndev->hw_features;
	/* vlan gets same features (except vlan filter) */
	ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
				 NETIF_F_HW_VLAN_CTAG_TX |
				 NETIF_F_HW_VLAN_CTAG_RX);

	if (test_bit(QL_DMA64, &qdev->flags))
		ndev->features |= NETIF_F_HIGHDMA;

	/*
	 * Set up net_device structure.
	 */
	ndev->tx_queue_len = qdev->tx_ring_size;
	ndev->irq = pdev->irq;

	ndev->netdev_ops = &qlge_netdev_ops;
	ndev->ethtool_ops = &qlge_ethtool_ops;
	ndev->watchdog_timeo = 10 * HZ;

	err = register_netdev(ndev);
	if (err) {
		dev_err(&pdev->dev, "net device registration failed.\n");
		ql_release_all(pdev);
		pci_disable_device(pdev);
		free_netdev(ndev);
		return err;
	}
	/* Start up the timer to trigger EEH if
	 * the bus goes dead
	 */
	init_timer_deferrable(&qdev->timer);
	qdev->timer.data = (unsigned long)qdev;
	qdev->timer.function = ql_timer;
	qdev->timer.expires = jiffies + (5*HZ);
	add_timer(&qdev->timer);
	ql_link_off(qdev);
	ql_display_dev_info(ndev);
	atomic_set(&qdev->lb_count, 0);
	cards_found++;
	return 0;
}
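
/* Thin wrappers so the ethtool loopback self-test can transmit and
 * reap frames without reaching into the driver internals.
 */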
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
{
	return qlge_send(skb, ndev);
}

int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
{
	return ql_clean_inbound_rx_ring(rx_ring, budget);
}
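
/* Tear-down path: quiesce the timer and work items, then unwind
 * everything qlge_probe() set up.
 */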
static void qlge_remove(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);
	unregister_netdev(ndev);
	ql_release_all(pdev);
	pci_disable_device(pdev);
	free_netdev(ndev);
}

/* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
{
	int i;
	struct ql_adapter *qdev = netdev_priv(ndev);

	if (netif_carrier_ok(ndev)) {
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
	}

	/* Disabling the timer */
	del_timer_sync(&qdev->timer);
	ql_cancel_all_work_sync(qdev);

	for (i = 0; i < qdev->rss_ring_count; i++)
		netif_napi_del(&qdev->rx_ring[i].napi);

	clear_bit(QL_ADAPTER_UP, &qdev->flags);
	ql_tx_ring_clean(qdev);
	ql_free_rx_buffers(qdev);
	ql_release_adapter_resources(qdev);
}

/*
 * This callback is called by the PCI subsystem whenever
 * a PCI bus error is detected.
 */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	switch (state) {
	case pci_channel_io_normal:
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		netif_device_detach(ndev);
		if (netif_running(ndev))
			ql_eeh_close(ndev);
		pci_disable_device(pdev);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		dev_err(&pdev->dev,
			"%s: pci_channel_io_perm_failure.\n", __func__);
		ql_eeh_close(ndev);
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}

/*
 * This callback is called after the PCI bus has been reset.
 * Basically, this tries to restart the card from scratch.
 * This is a shortened version of the device probe/discovery code;
 * it resembles the first half of the qlge_probe() routine.
 */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);

	pdev->error_state = pci_channel_io_normal;

	pci_restore_state(pdev);
	if (pci_enable_device(pdev)) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot re-enable PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);

	if (ql_adapter_reset(qdev)) {
		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
		set_bit(QL_EEH_FATAL, &qdev->flags);
		return PCI_ERS_RESULT_DISCONNECT;
	}
	return PCI_ERS_RESULT_RECOVERED;
}
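
/* Final stage of EEH recovery: reopen the interface if it was running
 * when the error hit, and restart the bus-watch timer.
 */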
static void qlge_io_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err = 0;

	if (netif_running(ndev)) {
		err = qlge_open(ndev);
		if (err) {
			netif_err(qdev, ifup, qdev->ndev,
				  "Device initialization failed after reset.\n");
			return;
		}
	} else {
		netif_err(qdev, ifup, qdev->ndev,
			  "Device was not running prior to EEH.\n");
	}
	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);
}
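
/* Hooks the PCI core invokes as it walks through EEH recovery. */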
static const struct pci_error_handlers qlge_err_handler = {
	.error_detected = qlge_io_error_detected,
	.slot_reset = qlge_io_slot_reset,
	.resume = qlge_io_resume,
};
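
/* Quiesce the adapter, configure wake-on-LAN via ql_wol(), and drop
 * into the requested low-power state.  Defined outside the CONFIG_PM
 * block because qlge_shutdown() reuses it.
 */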
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	netif_device_detach(ndev);
	del_timer_sync(&qdev->timer);

	if (netif_running(ndev)) {
		err = ql_adapter_down(qdev);
		/* Bail out only on failure; a successful shutdown must
		 * still fall through to the WoL and power-state setup.
		 */
		if (err)
			return err;
	}

	ql_wol(qdev);
	err = pci_save_state(pdev);
	if (err)
		return err;

	pci_disable_device(pdev);

	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}

#ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
{
	struct net_device *ndev = pci_get_drvdata(pdev);
	struct ql_adapter *qdev = netdev_priv(ndev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		netif_err(qdev, ifup, qdev->ndev,
			  "Cannot enable PCI device from suspend\n");
		return err;
	}
	pci_set_master(pdev);

	pci_enable_wake(pdev, PCI_D3hot, 0);
	pci_enable_wake(pdev, PCI_D3cold, 0);

	if (netif_running(ndev)) {
		err = ql_adapter_up(qdev);
		if (err)
			return err;
	}

	mod_timer(&qdev->timer, jiffies + (5*HZ));
	netif_device_attach(ndev);

	return 0;
}
#endif /* CONFIG_PM */
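
/* Reuse the suspend path to quiesce the device at shutdown/reboot. */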
static void qlge_shutdown(struct pci_dev *pdev)
{
	qlge_suspend(pdev, PMSG_SUSPEND);
}

static struct pci_driver qlge_driver = {
	.name = DRV_NAME,
	.id_table = qlge_pci_tbl,
	.probe = qlge_probe,
	.remove = qlge_remove,
#ifdef CONFIG_PM
	.suspend = qlge_suspend,
	.resume = qlge_resume,
#endif
	.shutdown = qlge_shutdown,
	.err_handler = &qlge_err_handler
};
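
/* Expands to the module init/exit boilerplate that registers and
 * unregisters qlge_driver with the PCI core.
 */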
module_pci_driver(qlge_driver);