/*
 * Copyright (C) 2005 - 2015 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"

char *be_misconfig_evt_port_state[] = {
	"Physical Link is functional",
	"Optics faulted/incorrectly installed/not installed - Reseat optics. If issue not resolved, replace.",
	"Optics of two types installed – Remove one optic or install matching pair of optics.",
	"Incompatible optics – Replace with compatible optics for card to function.",
	"Unqualified optics – Replace with Avago optics for Warranty and Technical Support.",
	"Uncertified optics – Replace with Avago-certified optics to enable link operation."
};

static char *be_port_misconfig_evt_severity[] = {
	"KERN_WARN",
	"KERN_INFO",
	"KERN_ERR",
	"KERN_WARN"
};

static char *phy_state_oper_desc[] = {
	"Link is non-operational",
	"Link is operational",
	""
};

static struct be_cmd_priv_map cmd_priv_map[] = {
	{
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_SET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_ETH_GET_PPORT_STATS,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_PHY_DETAILS,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_LOWLEVEL_HOST_DDR_DMA,
		CMD_SUBSYSTEM_LOWLEVEL,
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_LOWLEVEL_LOOPBACK_TEST,
		CMD_SUBSYSTEM_LOWLEVEL,
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
		CMD_SUBSYSTEM_LOWLEVEL,
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
};

static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
{
	int i;
	int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
	u32 cmd_privileges = adapter->cmd_privileges;

	for (i = 0; i < num_entries; i++)
		if (opcode == cmd_priv_map[i].opcode &&
		    subsystem == cmd_priv_map[i].subsystem)
			if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
				return false;

	return true;
}
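
/* Commands that do not appear in cmd_priv_map[] are always permitted; a
 * mapped command is refused only when the function holds none of the
 * privilege bits listed for it (for example, a VF without BE_PRIV_DEVCFG or
 * BE_PRIV_DEVSEC cannot issue the LOWLEVEL loopback commands above).
 */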

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

static int be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (be_check_error(adapter, BE_ERROR_ANY))
		return -EIO;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);

	return 0;
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian)
 */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	u32 flags;

	if (compl->flags != 0) {
		flags = le32_to_cpu(compl->flags);
		if (flags & CQE_FLAGS_VALID_MASK) {
			compl->flags = flags;
			return true;
		}
	}
	return false;
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
{
	unsigned long addr;

	addr = tag1;
	addr = ((addr << 16) << 16) | tag0;
	return (void *)addr;
}
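
/* The WRB tags carry the virtual address of a command's response header:
 * fill_wrb_tags() below stores the low and high 32 bits in tag0/tag1, and
 * be_decode_resp_hdr() reassembles them here.  The high half is shifted in
 * two 16-bit steps presumably so the expression stays well-defined when
 * unsigned long is only 32 bits wide (a single shift by 32 would be
 * undefined behaviour); on such platforms the result is simply tag0.
 */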

static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
{
	if (base_status == MCC_STATUS_NOT_SUPPORTED ||
	    base_status == MCC_STATUS_ILLEGAL_REQUEST ||
	    addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
	    addl_status == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
	    (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
	     (base_status == MCC_STATUS_ILLEGAL_FIELD ||
	      addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
		return true;
	else
		return false;
}

/* Place holder for all the async MCC cmds wherein the caller is not in a busy
 * loop (has not issued be_mcc_notify_wait())
 */
static void be_async_cmd_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl,
				 struct be_cmd_resp_hdr *resp_hdr)
{
	enum mcc_base_status base_status = base_status(compl->status);
	u8 opcode = 0, subsystem = 0;

	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);
		return;
	}

	if (opcode == OPCODE_LOWLEVEL_SET_LOOPBACK_MODE &&
	    subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
		complete(&adapter->et_cmd_compl);
		return;
	}

	if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
	     opcode == OPCODE_COMMON_WRITE_OBJECT) &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		adapter->flash_status = compl->status;
		complete(&adapter->et_cmd_compl);
		return;
	}

	if ((opcode == OPCODE_ETH_GET_STATISTICS ||
	     opcode == OPCODE_ETH_GET_PPORT_STATS) &&
	    subsystem == CMD_SUBSYSTEM_ETH &&
	    base_status == MCC_STATUS_SUCCESS) {
		be_parse_stats(adapter);
		adapter->stats_cmd_sent = false;
		return;
	}

	if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
	    subsystem == CMD_SUBSYSTEM_COMMON) {
		if (base_status == MCC_STATUS_SUCCESS) {
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
							(void *)resp_hdr;

			adapter->hwmon_info.be_on_die_temp =
						resp->on_die_temperature;
		} else {
			adapter->be_get_temp_freq = 0;
			adapter->hwmon_info.be_on_die_temp =
						BE_INVALID_DIE_TEMP;
		}
		return;
	}
}

static int be_mcc_compl_process(struct be_adapter *adapter,
				struct be_mcc_compl *compl)
{
	enum mcc_base_status base_status;
	enum mcc_addl_status addl_status;
	struct be_cmd_resp_hdr *resp_hdr;
	u8 opcode = 0, subsystem = 0;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	base_status = base_status(compl->status);
	addl_status = addl_status(compl->status);

	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	be_async_cmd_process(adapter, compl, resp_hdr);

	if (base_status != MCC_STATUS_SUCCESS &&
	    !be_skip_err_log(opcode, base_status, addl_status)) {
		if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST ||
		    addl_status == MCC_ADDL_STATUS_INSUFFICIENT_PRIVILEGES) {
			dev_warn(&adapter->pdev->dev,
				 "VF is not privileged to issue opcode %d-%d\n",
				 opcode, subsystem);
		} else {
			dev_err(&adapter->pdev->dev,
				"opcode %d-%d failed:status %d-%d\n",
				opcode, subsystem, base_status, addl_status);
		}
	}
	return compl->status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
					struct be_mcc_compl *compl)
{
	struct be_async_event_link_state *evt =
			(struct be_async_event_link_state *)compl;

	/* When link status changes, link speed must be re-queried from FW */
	adapter->phy.link_speed = -1;

	/* On BEx the FW does not send a separate link status
	 * notification for physical and logical link.
	 * On other chips just process the logical link
	 * status notification
	 */
	if (!BEx_chip(adapter) &&
	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
		return;

	/* For the initial link status do not rely on the ASYNC event as
	 * it may not be received in some cases.
	 */
	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
		be_link_status_update(adapter,
				      evt->port_link_status & LINK_STATUS_MASK);
}

static void be_async_port_misconfig_event_process(struct be_adapter *adapter,
						  struct be_mcc_compl *compl)
{
	struct be_async_event_misconfig_port *evt =
			(struct be_async_event_misconfig_port *)compl;
	u32 sfp_misconfig_evt_word1 = le32_to_cpu(evt->event_data_word1);
	u32 sfp_misconfig_evt_word2 = le32_to_cpu(evt->event_data_word2);
	u8 phy_oper_state = PHY_STATE_OPER_MSG_NONE;
	struct device *dev = &adapter->pdev->dev;
	u8 msg_severity = DEFAULT_MSG_SEVERITY;
	u8 phy_state_info;
	u8 new_phy_state;

	new_phy_state =
		(sfp_misconfig_evt_word1 >> (adapter->hba_port_num * 8)) & 0xff;

	if (new_phy_state == adapter->phy_state)
		return;

	adapter->phy_state = new_phy_state;

	/* for older fw that doesn't populate link effect data */
	if (!sfp_misconfig_evt_word2)
		goto log_message;

	phy_state_info =
		(sfp_misconfig_evt_word2 >> (adapter->hba_port_num * 8)) & 0xff;

	if (phy_state_info & PHY_STATE_INFO_VALID) {
		msg_severity = (phy_state_info & PHY_STATE_MSG_SEVERITY) >> 1;

		if (be_phy_unqualified(new_phy_state))
			phy_oper_state = (phy_state_info & PHY_STATE_OPER);
	}

log_message:
	/* Log an error message that would allow a user to determine
	 * whether the SFPs have an issue
	 */
	if (be_phy_state_unknown(new_phy_state))
		dev_printk(be_port_misconfig_evt_severity[msg_severity], dev,
			   "Port %c: Unrecognized Optics state: 0x%x. %s",
			   adapter->port_name,
			   new_phy_state,
			   phy_state_oper_desc[phy_oper_state]);
	else
		dev_printk(be_port_misconfig_evt_severity[msg_severity], dev,
			   "Port %c: %s %s",
			   adapter->port_name,
			   be_misconfig_evt_port_state[new_phy_state],
			   phy_state_oper_desc[phy_oper_state]);

	/* Log Vendor name and part no. if a misconfigured SFP is detected */
	if (be_phy_misconfigured(new_phy_state))
		adapter->flags |= BE_FLAGS_PHY_MISCONFIGURED;
}

/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
					       struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_cos_priority *evt =
			(struct be_async_event_grp5_cos_priority *)compl;

	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
		adapter->recommended_prio_bits =
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
					    struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_qos_link_speed *evt =
			(struct be_async_event_grp5_qos_link_speed *)compl;

	if (adapter->phy.link_speed >= 0 &&
	    evt->physical_port == adapter->port_num)
		adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
}

/* Grp5 PVID evt */
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
					     struct be_mcc_compl *compl)
{
	struct be_async_event_grp5_pvid_state *evt =
			(struct be_async_event_grp5_pvid_state *)compl;

	if (evt->enabled) {
		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
		dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
	} else {
		adapter->pvid = 0;
	}
}

#define MGMT_ENABLE_MASK	0x4
static void be_async_grp5_fw_control_process(struct be_adapter *adapter,
					     struct be_mcc_compl *compl)
{
	struct be_async_fw_control *evt = (struct be_async_fw_control *)compl;
	u32 evt_dw1 = le32_to_cpu(evt->event_data_word1);

	if (evt_dw1 & MGMT_ENABLE_MASK) {
		adapter->flags |= BE_FLAGS_OS2BMC;
		adapter->bmc_filt_mask = le32_to_cpu(evt->event_data_word2);
	} else {
		adapter->flags &= ~BE_FLAGS_OS2BMC;
	}
}

static void be_async_grp5_evt_process(struct be_adapter *adapter,
				      struct be_mcc_compl *compl)
{
	u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
				ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter, compl);
		break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter, compl);
		break;
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter, compl);
		break;
	/* Async event to disable/enable os2bmc and/or mac-learning */
	case ASYNC_EVENT_FW_CONTROL:
		be_async_grp5_fw_control_process(adapter, compl);
		break;
	default:
		break;
	}
}

static void be_async_dbg_evt_process(struct be_adapter *adapter,
				     struct be_mcc_compl *cmp)
{
	u8 event_type = 0;
	struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp;

	event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_DEBUG_EVENT_TYPE_QNQ:
		if (evt->valid)
			adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
		adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
			 event_type);
		break;
	}
}

static void be_async_sliport_evt_process(struct be_adapter *adapter,
					 struct be_mcc_compl *cmp)
{
	u8 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
			ASYNC_EVENT_TYPE_MASK;

	if (event_type == ASYNC_EVENT_PORT_MISCONFIG)
		be_async_port_misconfig_event_process(adapter, cmp);
}

static inline bool is_link_state_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_GRP_5;
}

static inline bool is_dbg_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_QNQ;
}

static inline bool is_sliport_evt(u32 flags)
{
	return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
			ASYNC_EVENT_CODE_SLIPORT;
}

static void be_mcc_event_process(struct be_adapter *adapter,
				 struct be_mcc_compl *compl)
{
	if (is_link_state_evt(compl->flags))
		be_async_link_state_process(adapter, compl);
	else if (is_grp5_evt(compl->flags))
		be_async_grp5_evt_process(adapter, compl);
	else if (is_dbg_evt(compl->flags))
		be_async_dbg_evt_process(adapter, compl);
	else if (is_sliport_evt(compl->flags))
		be_async_sliport_evt_process(adapter, compl);
}

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	adapter->mcc_obj.rearm_cq = false;
	be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock(&adapter->mcc_cq_lock);

	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			be_mcc_event_process(adapter, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock(&adapter->mcc_cq_lock);
	return status;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout	120000	/* 12s timeout */
	int i, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		if (be_check_error(adapter, BE_ERROR_ANY))
			return -EIO;

		local_bh_disable();
		status = be_process_mcc(adapter);
		local_bh_enable();

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "FW not responding\n");
		be_set_error(adapter, BE_ERROR_FW);
		return -EIO;
	}
	return status;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	int status;
	struct be_mcc_wrb *wrb;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	u32 index = mcc_obj->q.head;
	struct be_cmd_resp_hdr *resp;

	index_dec(&index, mcc_obj->q.len);
	wrb = queue_index_node(&mcc_obj->q, index);

	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

	status = be_mcc_notify(adapter);
	if (status)
		goto out;

	status = be_mcc_wait_compl(adapter);
	if (status == -EIO)
		goto out;

	status = (resp->base_status |
		  ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
		   CQE_ADDL_STATUS_SHIFT));
out:
	return status;
}
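
/* The value returned above packs the firmware's two status fields into one
 * int: base_status in the low bits and addl_status shifted up by
 * CQE_ADDL_STATUS_SHIFT.  Zero therefore still means complete success, and
 * callers can recover either field with the base_status()/addl_status()
 * helpers used earlier in this file.
 */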

static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		if (be_check_error(adapter, BE_ERROR_ANY))
			return -EIO;

		ready = ioread32(db);
		if (ready == 0xffffffff)
			return -1;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "FW not responding\n");
			be_set_error(adapter, BE_ERROR_FW);
			be_detect_error(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}
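
/* The two doorbell writes above hand the mailbox's DMA address to the MPU in
 * halves: the first write (with MPU_MAILBOX_DB_HI_MASK set) carries address
 * bits 63:34 and the second carries bits 33:4, each placed at bits 31:2 of
 * the doorbell register.  Bits 3:0 of the address are never conveyed, so the
 * mailbox buffer is presumably required to be at least 16-byte aligned.
 */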

static u16 be_POST_stage_get(struct be_adapter *adapter)
{
	u32 sem;

	if (BEx_chip(adapter))
		sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
	else
		pci_read_config_dword(adapter->pdev,
				      SLIPORT_SEMAPHORE_OFFSET_SH, &sem);

	return sem & POST_STAGE_MASK;
}

static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
	u32 sliport_status;
	int i;

	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
			return 0;

		if (sliport_status & SLIPORT_STATUS_ERR_MASK &&
		    !(sliport_status & SLIPORT_STATUS_RN_MASK))
			return -EIO;

		msleep(1000);
	}

	return sliport_status ? : -1;
}

int be_fw_wait_ready(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		if (status) {
			stage = status;
			goto err;
		}
		return 0;
	}

	do {
		/* There's no means to poll POST state on BE2/3 VFs */
		if (BEx_chip(adapter) && be_virtfn(adapter))
			return 0;

		stage = be_POST_stage_get(adapter);
		if (stage == POST_STAGE_ARMFW_RDY)
			return 0;

		dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
		if (msleep_interruptible(2000)) {
			dev_err(dev, "Waiting for POST aborted\n");
			return -EINTR;
		}
		timeout += 2;
	} while (timeout < 60);

err:
	dev_err(dev, "POST timeout; stage=%#x\n", stage);
	return -ETIMEDOUT;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
{
	wrb->tag0 = addr & 0xFFFFFFFF;
	wrb->tag1 = upper_32_bits(addr);
}

/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				   u8 subsystem, u8 opcode, int cmd_len,
				   struct be_mcc_wrb *wrb,
				   struct be_dma_mem *mem)
{
	struct be_sge *sge;

	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;
	fill_wrb_tags(wrb, (ulong) req_hdr);
	wrb->payload_length = cmd_len;
	if (mem) {
		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
			MCC_WRB_SGE_CNT_SHIFT;
		sge = nonembedded_sgl(wrb);
		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
		sge->len = cpu_to_le32(mem->size);
	} else
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	be_dws_cpu_to_le(wrb, 8);
}
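
/* Every command in this file is built through be_wrb_cmd_hdr_prepare().
 * Embedded commands keep the request (and later the response) inside the WRB
 * itself, which is why callers fetch it with embedded_payload(); passing a
 * non-NULL mem instead turns the WRB into a single-SGE descriptor pointing at
 * an external DMA buffer for payloads too large to embed.  A typical embedded
 * command (mirroring be_cmd_eq_create() below) looks like:
 *
 *	req = embedded_payload(wrb);
 *	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
 *			       OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
 *			       NULL);
 *	status = be_mbox_notify_wait(adapter);
 */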

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
				      struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}
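
/* Example: a 16 KB, 4 KB-aligned queue buffer fills four entries of pages[]
 * (dma, dma + 0x1000, dma + 0x2000, dma + 0x3000), each split into
 * little-endian lo/hi halves.  Pages beyond max_pages are silently dropped,
 * so callers size pages[] to what the command accepts, typically via
 * ARRAY_SIZE(req->pages).
 */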

static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;

	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (!mccq->created)
		return NULL;

	if (atomic_read(&mccq->used) >= mccq->len)
		return NULL;

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

static bool use_mcc(struct be_adapter *adapter)
{
	return adapter->mcc_obj.q.created;
}

/* Must be used only in process context */
static int be_cmd_lock(struct be_adapter *adapter)
{
	if (use_mcc(adapter)) {
		spin_lock_bh(&adapter->mcc_lock);
		return 0;
	} else {
		return mutex_lock_interruptible(&adapter->mbox_lock);
	}
}

/* Must be used only in process context */
static void be_cmd_unlock(struct be_adapter *adapter)
{
	if (use_mcc(adapter))
		spin_unlock_bh(&adapter->mcc_lock);
	else
		return mutex_unlock(&adapter->mbox_lock);
}

static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
				      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;

	if (use_mcc(adapter)) {
		dest_wrb = wrb_from_mccq(adapter);
		if (!dest_wrb)
			return NULL;
	} else {
		dest_wrb = wrb_from_mbox(adapter);
	}

	memcpy(dest_wrb, wrb, sizeof(*wrb));
	if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
		fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));

	return dest_wrb;
}

/* Must be used only in process context */
static int be_cmd_notify_wait(struct be_adapter *adapter,
			      struct be_mcc_wrb *wrb)
{
	struct be_mcc_wrb *dest_wrb;
	int status;

	status = be_cmd_lock(adapter);
	if (status)
		return status;

	dest_wrb = be_cmd_copy(adapter, wrb);
	if (!dest_wrb) {
		status = -EBUSY;
		goto unlock;
	}

	if (use_mcc(adapter))
		status = be_mcc_notify_wait(adapter);
	else
		status = be_mbox_notify_wait(adapter);

	if (!status)
		memcpy(wrb, dest_wrb, sizeof(*wrb));

unlock:
	be_cmd_unlock(adapter);
	return status;
}
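
/* be_cmd_notify_wait() is the copy-in/copy-out path used by commands that
 * build their WRB on the caller's stack (see be_cmd_txq_create() below): the
 * local WRB is copied into the mailbox or MCC queue, the hardware is
 * notified, and on success the completed WRB (response included, for
 * embedded commands) is copied back so the caller can read the result
 * without holding any command lock.
 */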

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (lancer_chip(adapter))
		return 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eqo->q.dma_mem;
	int status, ver = 0;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
			       NULL);

	/* Support for EQ_CREATEv2 available only SH-R onwards */
	if (!(BEx_chip(adapter) || lancer_chip(adapter)))
		ver = 2;

	req->hdr.version = ver;
	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
		      __ilog2_u32(eqo->q.len / 256));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);

		eqo->q.id = le16_to_cpu(resp->eq_id);
		eqo->msix_idx =
			(ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
		eqo->q.created = true;
	}
	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Use MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			  bool permanent, u32 if_handle, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
			       NULL);
	req->type = MAC_ADDRESS_TYPE_NETWORK;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16)if_handle);
		req->pmac_id = cpu_to_le32(pmac_id);
		req->permanent = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);

		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		    u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
			       NULL);

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);

		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
		status = -EPERM;

	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	if (pmac_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req),
			       wrb, NULL);

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
		     struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
			      coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
			      ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
	} else {
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */

		/* coalesce-wm field in this cmd is not relevant to Lancer.
		 * Lancer uses COMMON_MODIFY_CQ to set this field
		 */
		if (!lancer_chip(adapter))
			AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
				      ctxt, coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
			      no_delay);
		AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
			      __ilog2_u32(cq->len / 256));
		AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);

		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */

	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}
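
/* Ring lengths here are powers of two, so fls() yields log2(len) + 1: a
 * 256-entry ring encodes as fls(256) = 9 and a 1024-entry ring as 11.  The
 * special case maps fls() == 16 (a 32768-entry ring) to 0, presumably the
 * encoding the queue-context format uses for its largest supported size.
 */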

static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
				  struct be_queue_info *mccq,
				  struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_ext_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (BEx_chip(adapter)) {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	} else {
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt,
			      be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id,
			      ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid,
			      ctxt, 1);
	}

	/* Subscribe to Link State, Sliport Event and Group 5 Events
	 * (bits 1, 5 and 17 set)
	 */
	req->async_event_bitmap[0] =
			cpu_to_le32(BIT(ASYNC_EVENT_CODE_LINK_STATE) |
				    BIT(ASYNC_EVENT_CODE_GRP_5) |
				    BIT(ASYNC_EVENT_CODE_QNQ) |
				    BIT(ASYNC_EVENT_CODE_SLIPORT));

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}

static int be_cmd_mccq_org_create(struct be_adapter *adapter,
				  struct be_queue_info *mccq,
				  struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
			       NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
		      be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);

		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_mccq_create(struct be_adapter *adapter,
		       struct be_queue_info *mccq, struct be_queue_info *cq)
{
	int status;

	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
	if (status && BEx_chip(adapter)) {
		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
			 "or newer to avoid conflicting priorities between NIC "
			 "and FCoE traffic");
		status = be_cmd_mccq_org_create(adapter, mccq, cq);
	}

	return status;
}
  1121. int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
  1122. {
  1123. struct be_mcc_wrb wrb = {0};
  1124. struct be_cmd_req_eth_tx_create *req;
  1125. struct be_queue_info *txq = &txo->q;
  1126. struct be_queue_info *cq = &txo->cq;
  1127. struct be_dma_mem *q_mem = &txq->dma_mem;
  1128. int status, ver = 0;
  1129. req = embedded_payload(&wrb);
  1130. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
  1131. OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
  1132. if (lancer_chip(adapter)) {
  1133. req->hdr.version = 1;
  1134. } else if (BEx_chip(adapter)) {
  1135. if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
  1136. req->hdr.version = 2;
  1137. } else { /* For SH */
  1138. req->hdr.version = 2;
  1139. }
  1140. if (req->hdr.version > 0)
  1141. req->if_id = cpu_to_le16(adapter->if_handle);
  1142. req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
  1143. req->ulp_num = BE_ULP1_NUM;
  1144. req->type = BE_ETH_TX_RING_TYPE_STANDARD;
  1145. req->cq_id = cpu_to_le16(cq->id);
  1146. req->queue_size = be_encoded_q_len(txq->len);
  1147. be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
  1148. ver = req->hdr.version;
  1149. status = be_cmd_notify_wait(adapter, &wrb);
  1150. if (!status) {
  1151. struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);
  1152. txq->id = le16_to_cpu(resp->cid);
  1153. if (ver == 2)
  1154. txo->db_offset = le32_to_cpu(resp->db_offset);
  1155. else
  1156. txo->db_offset = DB_TXULP1_OFFSET;
  1157. txq->created = true;
  1158. }
  1159. return status;
  1160. }
  1161. /* Uses MCC */
  1162. int be_cmd_rxq_create(struct be_adapter *adapter,
  1163. struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
  1164. u32 if_id, u32 rss, u8 *rss_id)
  1165. {
  1166. struct be_mcc_wrb *wrb;
  1167. struct be_cmd_req_eth_rx_create *req;
  1168. struct be_dma_mem *q_mem = &rxq->dma_mem;
  1169. int status;
  1170. spin_lock_bh(&adapter->mcc_lock);
  1171. wrb = wrb_from_mccq(adapter);
  1172. if (!wrb) {
  1173. status = -EBUSY;
  1174. goto err;
  1175. }
  1176. req = embedded_payload(wrb);
  1177. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
  1178. OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
  1179. req->cq_id = cpu_to_le16(cq_id);
  1180. req->frag_size = fls(frag_size) - 1;
  1181. req->num_pages = 2;
  1182. be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
  1183. req->interface_id = cpu_to_le32(if_id);
  1184. req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
  1185. req->rss_queue = cpu_to_le32(rss);
  1186. status = be_mcc_notify_wait(adapter);
  1187. if (!status) {
  1188. struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
  1189. rxq->id = le16_to_cpu(resp->id);
  1190. rxq->created = true;
  1191. *rss_id = resp->rss_id;
  1192. }
  1193. err:
  1194. spin_unlock_bh(&adapter->mcc_lock);
  1195. return status;
  1196. }
  1197. /* Generic destroyer function for all types of queues
  1198. * Uses Mbox
  1199. */
  1200. int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
  1201. int queue_type)
  1202. {
  1203. struct be_mcc_wrb *wrb;
  1204. struct be_cmd_req_q_destroy *req;
  1205. u8 subsys = 0, opcode = 0;
  1206. int status;
  1207. if (mutex_lock_interruptible(&adapter->mbox_lock))
  1208. return -1;
  1209. wrb = wrb_from_mbox(adapter);
  1210. req = embedded_payload(wrb);
  1211. switch (queue_type) {
  1212. case QTYPE_EQ:
  1213. subsys = CMD_SUBSYSTEM_COMMON;
  1214. opcode = OPCODE_COMMON_EQ_DESTROY;
  1215. break;
  1216. case QTYPE_CQ:
  1217. subsys = CMD_SUBSYSTEM_COMMON;
  1218. opcode = OPCODE_COMMON_CQ_DESTROY;
  1219. break;
  1220. case QTYPE_TXQ:
  1221. subsys = CMD_SUBSYSTEM_ETH;
  1222. opcode = OPCODE_ETH_TX_DESTROY;
  1223. break;
  1224. case QTYPE_RXQ:
  1225. subsys = CMD_SUBSYSTEM_ETH;
  1226. opcode = OPCODE_ETH_RX_DESTROY;
  1227. break;
  1228. case QTYPE_MCCQ:
  1229. subsys = CMD_SUBSYSTEM_COMMON;
  1230. opcode = OPCODE_COMMON_MCC_DESTROY;
  1231. break;
  1232. default:
  1233. BUG();
  1234. }
  1235. be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
  1236. NULL);
  1237. req->id = cpu_to_le16(q->id);
  1238. status = be_mbox_notify_wait(adapter);
  1239. q->created = false;
  1240. mutex_unlock(&adapter->mbox_lock);
  1241. return status;
  1242. }
  1243. /* Uses MCC */
  1244. int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
  1245. {
  1246. struct be_mcc_wrb *wrb;
  1247. struct be_cmd_req_q_destroy *req;
  1248. int status;
  1249. spin_lock_bh(&adapter->mcc_lock);
  1250. wrb = wrb_from_mccq(adapter);
  1251. if (!wrb) {
  1252. status = -EBUSY;
  1253. goto err;
  1254. }
  1255. req = embedded_payload(wrb);
  1256. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
  1257. OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
  1258. req->id = cpu_to_le16(q->id);
  1259. status = be_mcc_notify_wait(adapter);
  1260. q->created = false;
  1261. err:
  1262. spin_unlock_bh(&adapter->mcc_lock);
  1263. return status;
  1264. }
  1265. /* Create an rx filtering policy configuration on an i/f
  1266. * Will use MBOX only if MCCQ has not been created.
  1267. */
  1268. int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
  1269. u32 *if_handle, u32 domain)
  1270. {
  1271. struct be_mcc_wrb wrb = {0};
  1272. struct be_cmd_req_if_create *req;
  1273. int status;
  1274. req = embedded_payload(&wrb);
  1275. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  1276. OPCODE_COMMON_NTWK_INTERFACE_CREATE,
  1277. sizeof(*req), &wrb, NULL);
  1278. req->hdr.domain = domain;
  1279. req->capability_flags = cpu_to_le32(cap_flags);
  1280. req->enable_flags = cpu_to_le32(en_flags);
  1281. req->pmac_invalid = true;
  1282. status = be_cmd_notify_wait(adapter, &wrb);
  1283. if (!status) {
  1284. struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);
  1285. *if_handle = le32_to_cpu(resp->interface_id);
  1286. /* Hack to retrieve VF's pmac-id on BE3 */
  1287. if (BE3_chip(adapter) && be_virtfn(adapter))
  1288. adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
  1289. }
  1290. return status;
  1291. }
  1292. /* Uses MCCQ if available else MBOX */
  1293. int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
  1294. {
  1295. struct be_mcc_wrb wrb = {0};
  1296. struct be_cmd_req_if_destroy *req;
  1297. int status;
  1298. if (interface_id == -1)
  1299. return 0;
  1300. req = embedded_payload(&wrb);
  1301. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  1302. OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
  1303. sizeof(*req), &wrb, NULL);
  1304. req->hdr.domain = domain;
  1305. req->interface_id = cpu_to_le32(interface_id);
  1306. status = be_cmd_notify_wait(adapter, &wrb);
  1307. return status;
  1308. }
  1309. /* Get stats is a non embedded command: the request is not embedded inside
  1310. * WRB but is a separate dma memory block
  1311. * Uses asynchronous MCC
  1312. */
  1313. int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
  1314. {
  1315. struct be_mcc_wrb *wrb;
  1316. struct be_cmd_req_hdr *hdr;
  1317. int status = 0;
  1318. spin_lock_bh(&adapter->mcc_lock);
  1319. wrb = wrb_from_mccq(adapter);
  1320. if (!wrb) {
  1321. status = -EBUSY;
  1322. goto err;
  1323. }
  1324. hdr = nonemb_cmd->va;
  1325. be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
  1326. OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
  1327. nonemb_cmd);
  1328. /* version 1 of the cmd is not supported only by BE2 */
  1329. if (BE2_chip(adapter))
  1330. hdr->version = 0;
  1331. if (BE3_chip(adapter) || lancer_chip(adapter))
  1332. hdr->version = 1;
  1333. else
  1334. hdr->version = 2;
  1335. status = be_mcc_notify(adapter);
  1336. if (status)
  1337. goto err;
  1338. adapter->stats_cmd_sent = true;
  1339. err:
  1340. spin_unlock_bh(&adapter->mcc_lock);
  1341. return status;
  1342. }
  1343. /* Lancer Stats */
  1344. int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
  1345. struct be_dma_mem *nonemb_cmd)
  1346. {
  1347. struct be_mcc_wrb *wrb;
  1348. struct lancer_cmd_req_pport_stats *req;
  1349. int status = 0;
  1350. if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
  1351. CMD_SUBSYSTEM_ETH))
  1352. return -EPERM;
  1353. spin_lock_bh(&adapter->mcc_lock);
  1354. wrb = wrb_from_mccq(adapter);
  1355. if (!wrb) {
  1356. status = -EBUSY;
  1357. goto err;
  1358. }
  1359. req = nonemb_cmd->va;
  1360. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
  1361. OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
  1362. wrb, nonemb_cmd);
  1363. req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
  1364. req->cmd_params.params.reset_stats = 0;
  1365. status = be_mcc_notify(adapter);
  1366. if (status)
  1367. goto err;
  1368. adapter->stats_cmd_sent = true;
  1369. err:
  1370. spin_unlock_bh(&adapter->mcc_lock);
  1371. return status;
  1372. }
  1373. static int be_mac_to_link_speed(int mac_speed)
  1374. {
  1375. switch (mac_speed) {
  1376. case PHY_LINK_SPEED_ZERO:
  1377. return 0;
  1378. case PHY_LINK_SPEED_10MBPS:
  1379. return 10;
  1380. case PHY_LINK_SPEED_100MBPS:
  1381. return 100;
  1382. case PHY_LINK_SPEED_1GBPS:
  1383. return 1000;
  1384. case PHY_LINK_SPEED_10GBPS:
  1385. return 10000;
  1386. case PHY_LINK_SPEED_20GBPS:
  1387. return 20000;
  1388. case PHY_LINK_SPEED_25GBPS:
  1389. return 25000;
  1390. case PHY_LINK_SPEED_40GBPS:
  1391. return 40000;
  1392. }
  1393. return 0;
  1394. }
  1395. /* Uses synchronous mcc
  1396. * Returns link_speed in Mbps
  1397. */
  1398. int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
  1399. u8 *link_status, u32 dom)
  1400. {
  1401. struct be_mcc_wrb *wrb;
  1402. struct be_cmd_req_link_status *req;
  1403. int status;
  1404. spin_lock_bh(&adapter->mcc_lock);
  1405. if (link_status)
  1406. *link_status = LINK_DOWN;
  1407. wrb = wrb_from_mccq(adapter);
  1408. if (!wrb) {
  1409. status = -EBUSY;
  1410. goto err;
  1411. }
  1412. req = embedded_payload(wrb);
  1413. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  1414. OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
  1415. sizeof(*req), wrb, NULL);
  1416. /* version 1 of the cmd is not supported only by BE2 */
  1417. if (!BE2_chip(adapter))
  1418. req->hdr.version = 1;
  1419. req->hdr.domain = dom;
  1420. status = be_mcc_notify_wait(adapter);
  1421. if (!status) {
  1422. struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
  1423. if (link_speed) {
  1424. *link_speed = resp->link_speed ?
  1425. le16_to_cpu(resp->link_speed) * 10 :
  1426. be_mac_to_link_speed(resp->mac_speed);
  1427. if (!resp->logical_link_status)
  1428. *link_speed = 0;
  1429. }
  1430. if (link_status)
  1431. *link_status = resp->logical_link_status;
  1432. }
  1433. err:
  1434. spin_unlock_bh(&adapter->mcc_lock);
  1435. return status;
  1436. }
  1437. /* Uses synchronous mcc */
  1438. int be_cmd_get_die_temperature(struct be_adapter *adapter)
  1439. {
  1440. struct be_mcc_wrb *wrb;
  1441. struct be_cmd_req_get_cntl_addnl_attribs *req;
  1442. int status = 0;
  1443. spin_lock_bh(&adapter->mcc_lock);
  1444. wrb = wrb_from_mccq(adapter);
  1445. if (!wrb) {
  1446. status = -EBUSY;
  1447. goto err;
  1448. }
  1449. req = embedded_payload(wrb);
  1450. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  1451. OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
  1452. sizeof(*req), wrb, NULL);
  1453. status = be_mcc_notify(adapter);
  1454. err:
  1455. spin_unlock_bh(&adapter->mcc_lock);
  1456. return status;
  1457. }
  1458. /* Uses synchronous mcc */
  1459. int be_cmd_get_fat_dump_len(struct be_adapter *adapter, u32 *dump_size)
  1460. {
  1461. struct be_mcc_wrb wrb = {0};
  1462. struct be_cmd_req_get_fat *req;
  1463. int status;
  1464. req = embedded_payload(&wrb);
  1465. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  1466. OPCODE_COMMON_MANAGE_FAT, sizeof(*req),
  1467. &wrb, NULL);
  1468. req->fat_operation = cpu_to_le32(QUERY_FAT);
  1469. status = be_cmd_notify_wait(adapter, &wrb);
  1470. if (!status) {
  1471. struct be_cmd_resp_get_fat *resp = embedded_payload(&wrb);
  1472. if (dump_size && resp->log_size)
  1473. *dump_size = le32_to_cpu(resp->log_size) -
  1474. sizeof(u32);
  1475. }
  1476. return status;
  1477. }
  1478. int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf)
  1479. {
  1480. struct be_dma_mem get_fat_cmd;
  1481. struct be_mcc_wrb *wrb;
  1482. struct be_cmd_req_get_fat *req;
  1483. u32 offset = 0, total_size, buf_size,
  1484. log_offset = sizeof(u32), payload_len;
  1485. int status;
  1486. if (buf_len == 0)
  1487. return 0;
  1488. total_size = buf_len;
  1489. get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
  1490. get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
  1491. get_fat_cmd.size,
  1492. &get_fat_cmd.dma, GFP_ATOMIC);
  1493. if (!get_fat_cmd.va)
  1494. return -ENOMEM;
  1495. spin_lock_bh(&adapter->mcc_lock);
  1496. while (total_size) {
  1497. buf_size = min(total_size, (u32)60*1024);
  1498. total_size -= buf_size;
  1499. wrb = wrb_from_mccq(adapter);
  1500. if (!wrb) {
  1501. status = -EBUSY;
  1502. goto err;
  1503. }
  1504. req = get_fat_cmd.va;
  1505. payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
  1506. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  1507. OPCODE_COMMON_MANAGE_FAT, payload_len,
  1508. wrb, &get_fat_cmd);
  1509. req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
  1510. req->read_log_offset = cpu_to_le32(log_offset);
  1511. req->read_log_length = cpu_to_le32(buf_size);
  1512. req->data_buffer_size = cpu_to_le32(buf_size);
  1513. status = be_mcc_notify_wait(adapter);
  1514. if (!status) {
  1515. struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
  1516. memcpy(buf + offset,
  1517. resp->data_buffer,
  1518. le32_to_cpu(resp->read_log_length));
  1519. } else {
  1520. dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
  1521. goto err;
  1522. }
  1523. offset += buf_size;
  1524. log_offset += buf_size;
  1525. }
  1526. err:
  1527. dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
  1528. get_fat_cmd.va, get_fat_cmd.dma);
  1529. spin_unlock_bh(&adapter->mcc_lock);
  1530. return status;
  1531. }
  1532. /* Uses synchronous mcc */
  1533. int be_cmd_get_fw_ver(struct be_adapter *adapter)
  1534. {
  1535. struct be_mcc_wrb *wrb;
  1536. struct be_cmd_req_get_fw_version *req;
  1537. int status;
  1538. spin_lock_bh(&adapter->mcc_lock);
  1539. wrb = wrb_from_mccq(adapter);
  1540. if (!wrb) {
  1541. status = -EBUSY;
  1542. goto err;
  1543. }
  1544. req = embedded_payload(wrb);
  1545. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  1546. OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
  1547. NULL);
  1548. status = be_mcc_notify_wait(adapter);
  1549. if (!status) {
  1550. struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
  1551. strlcpy(adapter->fw_ver, resp->firmware_version_string,
  1552. sizeof(adapter->fw_ver));
  1553. strlcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
  1554. sizeof(adapter->fw_on_flash));
  1555. }
  1556. err:
  1557. spin_unlock_bh(&adapter->mcc_lock);
  1558. return status;
  1559. }
  1560. /* set the EQ delay interval of an EQ to specified value
  1561. * Uses async mcc
  1562. */
  1563. static int __be_cmd_modify_eqd(struct be_adapter *adapter,
  1564. struct be_set_eqd *set_eqd, int num)
  1565. {
  1566. struct be_mcc_wrb *wrb;
  1567. struct be_cmd_req_modify_eq_delay *req;
  1568. int status = 0, i;
  1569. spin_lock_bh(&adapter->mcc_lock);
  1570. wrb = wrb_from_mccq(adapter);
  1571. if (!wrb) {
  1572. status = -EBUSY;
  1573. goto err;
  1574. }
  1575. req = embedded_payload(wrb);
  1576. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  1577. OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
  1578. NULL);
  1579. req->num_eq = cpu_to_le32(num);
  1580. for (i = 0; i < num; i++) {
  1581. req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
  1582. req->set_eqd[i].phase = 0;
  1583. req->set_eqd[i].delay_multiplier =
  1584. cpu_to_le32(set_eqd[i].delay_multiplier);
  1585. }
  1586. status = be_mcc_notify(adapter);
  1587. err:
  1588. spin_unlock_bh(&adapter->mcc_lock);
  1589. return status;
  1590. }
  1591. int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
  1592. int num)
  1593. {
  1594. int num_eqs, i = 0;
  1595. while (num) {
  1596. num_eqs = min(num, 8);
  1597. __be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
  1598. i += num_eqs;
  1599. num -= num_eqs;
  1600. }
  1601. return 0;
  1602. }
  1603. /* Uses sycnhronous mcc */
  1604. int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
  1605. u32 num, u32 domain)
  1606. {
  1607. struct be_mcc_wrb *wrb;
  1608. struct be_cmd_req_vlan_config *req;
  1609. int status;
  1610. spin_lock_bh(&adapter->mcc_lock);
  1611. wrb = wrb_from_mccq(adapter);
  1612. if (!wrb) {
  1613. status = -EBUSY;
  1614. goto err;
  1615. }
  1616. req = embedded_payload(wrb);
  1617. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  1618. OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
  1619. wrb, NULL);
  1620. req->hdr.domain = domain;
  1621. req->interface_id = if_id;
  1622. req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
  1623. req->num_vlan = num;
  1624. memcpy(req->normal_vlan, vtag_array,
  1625. req->num_vlan * sizeof(vtag_array[0]));
  1626. status = be_mcc_notify_wait(adapter);
  1627. err:
  1628. spin_unlock_bh(&adapter->mcc_lock);
  1629. return status;
  1630. }
  1631. static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
  1632. {
  1633. struct be_mcc_wrb *wrb;
  1634. struct be_dma_mem *mem = &adapter->rx_filter;
  1635. struct be_cmd_req_rx_filter *req = mem->va;
  1636. int status;
  1637. spin_lock_bh(&adapter->mcc_lock);
  1638. wrb = wrb_from_mccq(adapter);
  1639. if (!wrb) {
  1640. status = -EBUSY;
  1641. goto err;
  1642. }
  1643. memset(req, 0, sizeof(*req));
  1644. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  1645. OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
  1646. wrb, mem);
  1647. req->if_id = cpu_to_le32(adapter->if_handle);
  1648. req->if_flags_mask = cpu_to_le32(flags);
  1649. req->if_flags = (value == ON) ? req->if_flags_mask : 0;
  1650. if (flags & BE_IF_FLAGS_MULTICAST) {
  1651. struct netdev_hw_addr *ha;
  1652. int i = 0;
  1653. /* Reset mcast promisc mode if already set by setting mask
  1654. * and not setting flags field
  1655. */
  1656. req->if_flags_mask |=
  1657. cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
  1658. be_if_cap_flags(adapter));
  1659. req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
  1660. netdev_for_each_mc_addr(ha, adapter->netdev)
  1661. memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
  1662. }
  1663. status = be_mcc_notify_wait(adapter);
  1664. err:
  1665. spin_unlock_bh(&adapter->mcc_lock);
  1666. return status;
  1667. }
  1668. int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
  1669. {
  1670. struct device *dev = &adapter->pdev->dev;
  1671. if ((flags & be_if_cap_flags(adapter)) != flags) {
  1672. dev_warn(dev, "Cannot set rx filter flags 0x%x\n", flags);
  1673. dev_warn(dev, "Interface is capable of 0x%x flags only\n",
  1674. be_if_cap_flags(adapter));
  1675. }
  1676. flags &= be_if_cap_flags(adapter);
  1677. if (!flags)
  1678. return -ENOTSUPP;
  1679. return __be_cmd_rx_filter(adapter, flags, value);
  1680. }
  1681. /* Uses synchrounous mcc */
  1682. int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
  1683. {
  1684. struct be_mcc_wrb *wrb;
  1685. struct be_cmd_req_set_flow_control *req;
  1686. int status;
  1687. if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
  1688. CMD_SUBSYSTEM_COMMON))
  1689. return -EPERM;
  1690. spin_lock_bh(&adapter->mcc_lock);
  1691. wrb = wrb_from_mccq(adapter);
  1692. if (!wrb) {
  1693. status = -EBUSY;
  1694. goto err;
  1695. }
  1696. req = embedded_payload(wrb);
  1697. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  1698. OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
  1699. wrb, NULL);
  1700. req->hdr.version = 1;
  1701. req->tx_flow_control = cpu_to_le16((u16)tx_fc);
  1702. req->rx_flow_control = cpu_to_le16((u16)rx_fc);
  1703. status = be_mcc_notify_wait(adapter);
  1704. err:
  1705. spin_unlock_bh(&adapter->mcc_lock);
  1706. if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
  1707. return -EOPNOTSUPP;
  1708. return status;
  1709. }
  1710. /* Uses sycn mcc */
  1711. int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
  1712. {
  1713. struct be_mcc_wrb *wrb;
  1714. struct be_cmd_req_get_flow_control *req;
  1715. int status;
  1716. if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
  1717. CMD_SUBSYSTEM_COMMON))
  1718. return -EPERM;
  1719. spin_lock_bh(&adapter->mcc_lock);
  1720. wrb = wrb_from_mccq(adapter);
  1721. if (!wrb) {
  1722. status = -EBUSY;
  1723. goto err;
  1724. }
  1725. req = embedded_payload(wrb);
  1726. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  1727. OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
  1728. wrb, NULL);
  1729. status = be_mcc_notify_wait(adapter);
  1730. if (!status) {
  1731. struct be_cmd_resp_get_flow_control *resp =
  1732. embedded_payload(wrb);
  1733. *tx_fc = le16_to_cpu(resp->tx_flow_control);
  1734. *rx_fc = le16_to_cpu(resp->rx_flow_control);
  1735. }
  1736. err:
  1737. spin_unlock_bh(&adapter->mcc_lock);
  1738. return status;
  1739. }
  1740. /* Uses mbox */
  1741. int be_cmd_query_fw_cfg(struct be_adapter *adapter)
  1742. {
  1743. struct be_mcc_wrb *wrb;
  1744. struct be_cmd_req_query_fw_cfg *req;
  1745. int status;
  1746. if (mutex_lock_interruptible(&adapter->mbox_lock))
  1747. return -1;
  1748. wrb = wrb_from_mbox(adapter);
  1749. req = embedded_payload(wrb);
  1750. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  1751. OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
  1752. sizeof(*req), wrb, NULL);
  1753. status = be_mbox_notify_wait(adapter);
  1754. if (!status) {
  1755. struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
  1756. adapter->port_num = le32_to_cpu(resp->phys_port);
  1757. adapter->function_mode = le32_to_cpu(resp->function_mode);
  1758. adapter->function_caps = le32_to_cpu(resp->function_caps);
  1759. adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
  1760. dev_info(&adapter->pdev->dev,
  1761. "FW config: function_mode=0x%x, function_caps=0x%x\n",
  1762. adapter->function_mode, adapter->function_caps);
  1763. }
  1764. mutex_unlock(&adapter->mbox_lock);
  1765. return status;
  1766. }
  1767. /* Uses mbox */
  1768. int be_cmd_reset_function(struct be_adapter *adapter)
  1769. {
  1770. struct be_mcc_wrb *wrb;
  1771. struct be_cmd_req_hdr *req;
  1772. int status;
  1773. if (lancer_chip(adapter)) {
  1774. iowrite32(SLI_PORT_CONTROL_IP_MASK,
  1775. adapter->db + SLIPORT_CONTROL_OFFSET);
  1776. status = lancer_wait_ready(adapter);
  1777. if (status)
  1778. dev_err(&adapter->pdev->dev,
  1779. "Adapter in non recoverable error\n");
  1780. return status;
  1781. }
  1782. if (mutex_lock_interruptible(&adapter->mbox_lock))
  1783. return -1;
  1784. wrb = wrb_from_mbox(adapter);
  1785. req = embedded_payload(wrb);
  1786. be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
  1787. OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
  1788. NULL);
  1789. status = be_mbox_notify_wait(adapter);
  1790. mutex_unlock(&adapter->mbox_lock);
  1791. return status;
  1792. }
  1793. int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
  1794. u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)
  1795. {
  1796. struct be_mcc_wrb *wrb;
  1797. struct be_cmd_req_rss_config *req;
  1798. int status;
  1799. if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
  1800. return 0;
  1801. spin_lock_bh(&adapter->mcc_lock);
  1802. wrb = wrb_from_mccq(adapter);
  1803. if (!wrb) {
  1804. status = -EBUSY;
  1805. goto err;
  1806. }
  1807. req = embedded_payload(wrb);
  1808. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
  1809. OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
  1810. req->if_id = cpu_to_le32(adapter->if_handle);
  1811. req->enable_rss = cpu_to_le16(rss_hash_opts);
  1812. req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
  1813. if (!BEx_chip(adapter))
  1814. req->hdr.version = 1;
  1815. memcpy(req->cpu_table, rsstable, table_size);
  1816. memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
  1817. be_dws_cpu_to_le(req->hash, sizeof(req->hash));
  1818. status = be_mcc_notify_wait(adapter);
  1819. err:
  1820. spin_unlock_bh(&adapter->mcc_lock);
  1821. return status;
  1822. }
  1823. /* Uses sync mcc */
  1824. int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
  1825. u8 bcn, u8 sts, u8 state)
  1826. {
  1827. struct be_mcc_wrb *wrb;
  1828. struct be_cmd_req_enable_disable_beacon *req;
  1829. int status;
  1830. spin_lock_bh(&adapter->mcc_lock);
  1831. wrb = wrb_from_mccq(adapter);
  1832. if (!wrb) {
  1833. status = -EBUSY;
  1834. goto err;
  1835. }
  1836. req = embedded_payload(wrb);
  1837. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  1838. OPCODE_COMMON_ENABLE_DISABLE_BEACON,
  1839. sizeof(*req), wrb, NULL);
  1840. req->port_num = port_num;
  1841. req->beacon_state = state;
  1842. req->beacon_duration = bcn;
  1843. req->status_duration = sts;
  1844. status = be_mcc_notify_wait(adapter);
  1845. err:
  1846. spin_unlock_bh(&adapter->mcc_lock);
  1847. return status;
  1848. }
  1849. /* Uses sync mcc */
  1850. int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
  1851. {
  1852. struct be_mcc_wrb *wrb;
  1853. struct be_cmd_req_get_beacon_state *req;
  1854. int status;
  1855. spin_lock_bh(&adapter->mcc_lock);
  1856. wrb = wrb_from_mccq(adapter);
  1857. if (!wrb) {
  1858. status = -EBUSY;
  1859. goto err;
  1860. }
  1861. req = embedded_payload(wrb);
  1862. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  1863. OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
  1864. wrb, NULL);
  1865. req->port_num = port_num;
  1866. status = be_mcc_notify_wait(adapter);
  1867. if (!status) {
  1868. struct be_cmd_resp_get_beacon_state *resp =
  1869. embedded_payload(wrb);
  1870. *state = resp->beacon_state;
  1871. }
  1872. err:
  1873. spin_unlock_bh(&adapter->mcc_lock);
  1874. return status;
  1875. }
  1876. /* Uses sync mcc */
  1877. int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
  1878. u8 page_num, u8 *data)
  1879. {
  1880. struct be_dma_mem cmd;
  1881. struct be_mcc_wrb *wrb;
  1882. struct be_cmd_req_port_type *req;
  1883. int status;
  1884. if (page_num > TR_PAGE_A2)
  1885. return -EINVAL;
  1886. cmd.size = sizeof(struct be_cmd_resp_port_type);
  1887. cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
  1888. GFP_ATOMIC);
  1889. if (!cmd.va) {
  1890. dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
  1891. return -ENOMEM;
  1892. }
  1893. spin_lock_bh(&adapter->mcc_lock);
  1894. wrb = wrb_from_mccq(adapter);
  1895. if (!wrb) {
  1896. status = -EBUSY;
  1897. goto err;
  1898. }
  1899. req = cmd.va;
  1900. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  1901. OPCODE_COMMON_READ_TRANSRECV_DATA,
  1902. cmd.size, wrb, &cmd);
  1903. req->port = cpu_to_le32(adapter->hba_port_num);
  1904. req->page_num = cpu_to_le32(page_num);
  1905. status = be_mcc_notify_wait(adapter);
  1906. if (!status) {
  1907. struct be_cmd_resp_port_type *resp = cmd.va;
  1908. memcpy(data, resp->page_data, PAGE_DATA_LEN);
  1909. }
  1910. err:
  1911. spin_unlock_bh(&adapter->mcc_lock);
  1912. dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
  1913. return status;
  1914. }
  1915. static int lancer_cmd_write_object(struct be_adapter *adapter,
  1916. struct be_dma_mem *cmd, u32 data_size,
  1917. u32 data_offset, const char *obj_name,
  1918. u32 *data_written, u8 *change_status,
  1919. u8 *addn_status)
  1920. {
  1921. struct be_mcc_wrb *wrb;
  1922. struct lancer_cmd_req_write_object *req;
  1923. struct lancer_cmd_resp_write_object *resp;
  1924. void *ctxt = NULL;
  1925. int status;
  1926. spin_lock_bh(&adapter->mcc_lock);
  1927. adapter->flash_status = 0;
  1928. wrb = wrb_from_mccq(adapter);
  1929. if (!wrb) {
  1930. status = -EBUSY;
  1931. goto err_unlock;
  1932. }
  1933. req = embedded_payload(wrb);
  1934. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  1935. OPCODE_COMMON_WRITE_OBJECT,
  1936. sizeof(struct lancer_cmd_req_write_object), wrb,
  1937. NULL);
  1938. ctxt = &req->context;
  1939. AMAP_SET_BITS(struct amap_lancer_write_obj_context,
  1940. write_length, ctxt, data_size);
  1941. if (data_size == 0)
  1942. AMAP_SET_BITS(struct amap_lancer_write_obj_context,
  1943. eof, ctxt, 1);
  1944. else
  1945. AMAP_SET_BITS(struct amap_lancer_write_obj_context,
  1946. eof, ctxt, 0);
  1947. be_dws_cpu_to_le(ctxt, sizeof(req->context));
  1948. req->write_offset = cpu_to_le32(data_offset);
  1949. strlcpy(req->object_name, obj_name, sizeof(req->object_name));
  1950. req->descriptor_count = cpu_to_le32(1);
  1951. req->buf_len = cpu_to_le32(data_size);
  1952. req->addr_low = cpu_to_le32((cmd->dma +
  1953. sizeof(struct lancer_cmd_req_write_object))
  1954. & 0xFFFFFFFF);
  1955. req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
  1956. sizeof(struct lancer_cmd_req_write_object)));
  1957. status = be_mcc_notify(adapter);
  1958. if (status)
  1959. goto err_unlock;
  1960. spin_unlock_bh(&adapter->mcc_lock);
  1961. if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
  1962. msecs_to_jiffies(60000)))
  1963. status = -ETIMEDOUT;
  1964. else
  1965. status = adapter->flash_status;
  1966. resp = embedded_payload(wrb);
  1967. if (!status) {
  1968. *data_written = le32_to_cpu(resp->actual_write_len);
  1969. *change_status = resp->change_status;
  1970. } else {
  1971. *addn_status = resp->additional_status;
  1972. }
  1973. return status;
  1974. err_unlock:
  1975. spin_unlock_bh(&adapter->mcc_lock);
  1976. return status;
  1977. }
  1978. int be_cmd_query_cable_type(struct be_adapter *adapter)
  1979. {
  1980. u8 page_data[PAGE_DATA_LEN];
  1981. int status;
  1982. status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
  1983. page_data);
  1984. if (!status) {
  1985. switch (adapter->phy.interface_type) {
  1986. case PHY_TYPE_QSFP:
  1987. adapter->phy.cable_type =
  1988. page_data[QSFP_PLUS_CABLE_TYPE_OFFSET];
  1989. break;
  1990. case PHY_TYPE_SFP_PLUS_10GB:
  1991. adapter->phy.cable_type =
  1992. page_data[SFP_PLUS_CABLE_TYPE_OFFSET];
  1993. break;
  1994. default:
  1995. adapter->phy.cable_type = 0;
  1996. break;
  1997. }
  1998. }
  1999. return status;
  2000. }
  2001. int be_cmd_query_sfp_info(struct be_adapter *adapter)
  2002. {
  2003. u8 page_data[PAGE_DATA_LEN];
  2004. int status;
  2005. status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0,
  2006. page_data);
  2007. if (!status) {
  2008. strlcpy(adapter->phy.vendor_name, page_data +
  2009. SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1);
  2010. strlcpy(adapter->phy.vendor_pn,
  2011. page_data + SFP_VENDOR_PN_OFFSET,
  2012. SFP_VENDOR_NAME_LEN - 1);
  2013. }
  2014. return status;
  2015. }
  2016. static int lancer_cmd_delete_object(struct be_adapter *adapter,
  2017. const char *obj_name)
  2018. {
  2019. struct lancer_cmd_req_delete_object *req;
  2020. struct be_mcc_wrb *wrb;
  2021. int status;
  2022. spin_lock_bh(&adapter->mcc_lock);
  2023. wrb = wrb_from_mccq(adapter);
  2024. if (!wrb) {
  2025. status = -EBUSY;
  2026. goto err;
  2027. }
  2028. req = embedded_payload(wrb);
  2029. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  2030. OPCODE_COMMON_DELETE_OBJECT,
  2031. sizeof(*req), wrb, NULL);
  2032. strlcpy(req->object_name, obj_name, sizeof(req->object_name));
  2033. status = be_mcc_notify_wait(adapter);
  2034. err:
  2035. spin_unlock_bh(&adapter->mcc_lock);
  2036. return status;
  2037. }
  2038. int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
  2039. u32 data_size, u32 data_offset, const char *obj_name,
  2040. u32 *data_read, u32 *eof, u8 *addn_status)
  2041. {
  2042. struct be_mcc_wrb *wrb;
  2043. struct lancer_cmd_req_read_object *req;
  2044. struct lancer_cmd_resp_read_object *resp;
  2045. int status;
  2046. spin_lock_bh(&adapter->mcc_lock);
  2047. wrb = wrb_from_mccq(adapter);
  2048. if (!wrb) {
  2049. status = -EBUSY;
  2050. goto err_unlock;
  2051. }
  2052. req = embedded_payload(wrb);
  2053. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  2054. OPCODE_COMMON_READ_OBJECT,
  2055. sizeof(struct lancer_cmd_req_read_object), wrb,
  2056. NULL);
  2057. req->desired_read_len = cpu_to_le32(data_size);
  2058. req->read_offset = cpu_to_le32(data_offset);
  2059. strcpy(req->object_name, obj_name);
  2060. req->descriptor_count = cpu_to_le32(1);
  2061. req->buf_len = cpu_to_le32(data_size);
  2062. req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
  2063. req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
  2064. status = be_mcc_notify_wait(adapter);
  2065. resp = embedded_payload(wrb);
  2066. if (!status) {
  2067. *data_read = le32_to_cpu(resp->actual_read_len);
  2068. *eof = le32_to_cpu(resp->eof);
  2069. } else {
  2070. *addn_status = resp->additional_status;
  2071. }
  2072. err_unlock:
  2073. spin_unlock_bh(&adapter->mcc_lock);
  2074. return status;
  2075. }
  2076. static int be_cmd_write_flashrom(struct be_adapter *adapter,
  2077. struct be_dma_mem *cmd, u32 flash_type,
  2078. u32 flash_opcode, u32 img_offset, u32 buf_size)
  2079. {
  2080. struct be_mcc_wrb *wrb;
  2081. struct be_cmd_write_flashrom *req;
  2082. int status;
  2083. spin_lock_bh(&adapter->mcc_lock);
  2084. adapter->flash_status = 0;
  2085. wrb = wrb_from_mccq(adapter);
  2086. if (!wrb) {
  2087. status = -EBUSY;
  2088. goto err_unlock;
  2089. }
  2090. req = cmd->va;
  2091. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  2092. OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
  2093. cmd);
  2094. req->params.op_type = cpu_to_le32(flash_type);
  2095. if (flash_type == OPTYPE_OFFSET_SPECIFIED)
  2096. req->params.offset = cpu_to_le32(img_offset);
  2097. req->params.op_code = cpu_to_le32(flash_opcode);
  2098. req->params.data_buf_size = cpu_to_le32(buf_size);
  2099. status = be_mcc_notify(adapter);
  2100. if (status)
  2101. goto err_unlock;
  2102. spin_unlock_bh(&adapter->mcc_lock);
  2103. if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
  2104. msecs_to_jiffies(40000)))
  2105. status = -ETIMEDOUT;
  2106. else
  2107. status = adapter->flash_status;
  2108. return status;
  2109. err_unlock:
  2110. spin_unlock_bh(&adapter->mcc_lock);
  2111. return status;
  2112. }
  2113. static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
  2114. u16 img_optype, u32 img_offset, u32 crc_offset)
  2115. {
  2116. struct be_cmd_read_flash_crc *req;
  2117. struct be_mcc_wrb *wrb;
  2118. int status;
  2119. spin_lock_bh(&adapter->mcc_lock);
  2120. wrb = wrb_from_mccq(adapter);
  2121. if (!wrb) {
  2122. status = -EBUSY;
  2123. goto err;
  2124. }
  2125. req = embedded_payload(wrb);
  2126. be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
  2127. OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
  2128. wrb, NULL);
  2129. req->params.op_type = cpu_to_le32(img_optype);
  2130. if (img_optype == OPTYPE_OFFSET_SPECIFIED)
  2131. req->params.offset = cpu_to_le32(img_offset + crc_offset);
  2132. else
  2133. req->params.offset = cpu_to_le32(crc_offset);
  2134. req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
  2135. req->params.data_buf_size = cpu_to_le32(0x4);
  2136. status = be_mcc_notify_wait(adapter);
  2137. if (!status)
  2138. memcpy(flashed_crc, req->crc, 4);
  2139. err:
  2140. spin_unlock_bh(&adapter->mcc_lock);
  2141. return status;
  2142. }
  2143. static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
  2144. static bool phy_flashing_required(struct be_adapter *adapter)
  2145. {
  2146. return (adapter->phy.phy_type == PHY_TYPE_TN_8022 &&
  2147. adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
  2148. }
  2149. static bool is_comp_in_ufi(struct be_adapter *adapter,
  2150. struct flash_section_info *fsec, int type)
  2151. {
  2152. int i = 0, img_type = 0;
  2153. struct flash_section_info_g2 *fsec_g2 = NULL;
  2154. if (BE2_chip(adapter))
  2155. fsec_g2 = (struct flash_section_info_g2 *)fsec;
  2156. for (i = 0; i < MAX_FLASH_COMP; i++) {
  2157. if (fsec_g2)
  2158. img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
  2159. else
  2160. img_type = le32_to_cpu(fsec->fsec_entry[i].type);
  2161. if (img_type == type)
  2162. return true;
  2163. }
  2164. return false;
  2165. }
  2166. static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
  2167. int header_size,
  2168. const struct firmware *fw)
  2169. {
  2170. struct flash_section_info *fsec = NULL;
  2171. const u8 *p = fw->data;
  2172. p += header_size;
  2173. while (p < (fw->data + fw->size)) {
  2174. fsec = (struct flash_section_info *)p;
  2175. if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
  2176. return fsec;
  2177. p += 32;
  2178. }
  2179. return NULL;
  2180. }
  2181. static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
  2182. u32 img_offset, u32 img_size, int hdr_size,
  2183. u16 img_optype, bool *crc_match)
  2184. {
  2185. u32 crc_offset;
  2186. int status;
  2187. u8 crc[4];
  2188. status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset,
  2189. img_size - 4);
  2190. if (status)
  2191. return status;
  2192. crc_offset = hdr_size + img_offset + img_size - 4;
  2193. /* Skip flashing, if crc of flashed region matches */
  2194. if (!memcmp(crc, p + crc_offset, 4))
  2195. *crc_match = true;
  2196. else
  2197. *crc_match = false;
  2198. return status;
  2199. }
  2200. static int be_flash(struct be_adapter *adapter, const u8 *img,
  2201. struct be_dma_mem *flash_cmd, int optype, int img_size,
  2202. u32 img_offset)
  2203. {
  2204. u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0;
  2205. struct be_cmd_write_flashrom *req = flash_cmd->va;
  2206. int status;
  2207. while (total_bytes) {
  2208. num_bytes = min_t(u32, 32 * 1024, total_bytes);
  2209. total_bytes -= num_bytes;
  2210. if (!total_bytes) {
  2211. if (optype == OPTYPE_PHY_FW)
  2212. flash_op = FLASHROM_OPER_PHY_FLASH;
  2213. else
  2214. flash_op = FLASHROM_OPER_FLASH;
  2215. } else {
  2216. if (optype == OPTYPE_PHY_FW)
  2217. flash_op = FLASHROM_OPER_PHY_SAVE;
  2218. else
  2219. flash_op = FLASHROM_OPER_SAVE;
  2220. }
  2221. memcpy(req->data_buf, img, num_bytes);
  2222. img += num_bytes;
  2223. status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
  2224. flash_op, img_offset +
  2225. bytes_sent, num_bytes);
  2226. if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
  2227. optype == OPTYPE_PHY_FW)
  2228. break;
  2229. else if (status)
  2230. return status;
  2231. bytes_sent += num_bytes;
  2232. }
  2233. return 0;
  2234. }
  2235. /* For BE2, BE3 and BE3-R */
  2236. static int be_flash_BEx(struct be_adapter *adapter,
  2237. const struct firmware *fw,
  2238. struct be_dma_mem *flash_cmd, int num_of_images)
  2239. {
  2240. int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
  2241. struct device *dev = &adapter->pdev->dev;
  2242. struct flash_section_info *fsec = NULL;
  2243. int status, i, filehdr_size, num_comp;
  2244. const struct flash_comp *pflashcomp;
  2245. bool crc_match;
  2246. const u8 *p;
  2247. struct flash_comp gen3_flash_types[] = {
  2248. { BE3_ISCSI_PRIMARY_IMAGE_START, OPTYPE_ISCSI_ACTIVE,
  2249. BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_ISCSI},
  2250. { BE3_REDBOOT_START, OPTYPE_REDBOOT,
  2251. BE3_REDBOOT_COMP_MAX_SIZE, IMAGE_BOOT_CODE},
  2252. { BE3_ISCSI_BIOS_START, OPTYPE_BIOS,
  2253. BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_ISCSI},
  2254. { BE3_PXE_BIOS_START, OPTYPE_PXE_BIOS,
  2255. BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_PXE},
  2256. { BE3_FCOE_BIOS_START, OPTYPE_FCOE_BIOS,
  2257. BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_FCOE},
  2258. { BE3_ISCSI_BACKUP_IMAGE_START, OPTYPE_ISCSI_BACKUP,
  2259. BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_ISCSI},
  2260. { BE3_FCOE_PRIMARY_IMAGE_START, OPTYPE_FCOE_FW_ACTIVE,
  2261. BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_FCOE},
  2262. { BE3_FCOE_BACKUP_IMAGE_START, OPTYPE_FCOE_FW_BACKUP,
  2263. BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_FCOE},
  2264. { BE3_NCSI_START, OPTYPE_NCSI_FW,
  2265. BE3_NCSI_COMP_MAX_SIZE, IMAGE_NCSI},
  2266. { BE3_PHY_FW_START, OPTYPE_PHY_FW,
  2267. BE3_PHY_FW_COMP_MAX_SIZE, IMAGE_FIRMWARE_PHY}
  2268. };
  2269. struct flash_comp gen2_flash_types[] = {
  2270. { BE2_ISCSI_PRIMARY_IMAGE_START, OPTYPE_ISCSI_ACTIVE,
  2271. BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_ISCSI},
  2272. { BE2_REDBOOT_START, OPTYPE_REDBOOT,
  2273. BE2_REDBOOT_COMP_MAX_SIZE, IMAGE_BOOT_CODE},
  2274. { BE2_ISCSI_BIOS_START, OPTYPE_BIOS,
  2275. BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_ISCSI},
  2276. { BE2_PXE_BIOS_START, OPTYPE_PXE_BIOS,
  2277. BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_PXE},
  2278. { BE2_FCOE_BIOS_START, OPTYPE_FCOE_BIOS,
  2279. BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_FCOE},
  2280. { BE2_ISCSI_BACKUP_IMAGE_START, OPTYPE_ISCSI_BACKUP,
  2281. BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_ISCSI},
  2282. { BE2_FCOE_PRIMARY_IMAGE_START, OPTYPE_FCOE_FW_ACTIVE,
  2283. BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_FCOE},
  2284. { BE2_FCOE_BACKUP_IMAGE_START, OPTYPE_FCOE_FW_BACKUP,
  2285. BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_FCOE}
  2286. };
  2287. if (BE3_chip(adapter)) {
  2288. pflashcomp = gen3_flash_types;
  2289. filehdr_size = sizeof(struct flash_file_hdr_g3);
  2290. num_comp = ARRAY_SIZE(gen3_flash_types);
  2291. } else {
  2292. pflashcomp = gen2_flash_types;
  2293. filehdr_size = sizeof(struct flash_file_hdr_g2);
  2294. num_comp = ARRAY_SIZE(gen2_flash_types);
  2295. img_hdrs_size = 0;
  2296. }
  2297. /* Get flash section info*/
  2298. fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
  2299. if (!fsec) {
  2300. dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
  2301. return -1;
  2302. }
  2303. for (i = 0; i < num_comp; i++) {
  2304. if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
  2305. continue;
  2306. if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
  2307. memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
  2308. continue;
  2309. if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
  2310. !phy_flashing_required(adapter))
  2311. continue;
  2312. if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
  2313. status = be_check_flash_crc(adapter, fw->data,
  2314. pflashcomp[i].offset,
  2315. pflashcomp[i].size,
  2316. filehdr_size +
  2317. img_hdrs_size,
  2318. OPTYPE_REDBOOT, &crc_match);
  2319. if (status) {
  2320. dev_err(dev,
  2321. "Could not get CRC for 0x%x region\n",
  2322. pflashcomp[i].optype);
  2323. continue;
  2324. }
  2325. if (crc_match)
  2326. continue;
  2327. }
  2328. p = fw->data + filehdr_size + pflashcomp[i].offset +
  2329. img_hdrs_size;
  2330. if (p + pflashcomp[i].size > fw->data + fw->size)
  2331. return -1;
  2332. status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
  2333. pflashcomp[i].size, 0);
  2334. if (status) {
  2335. dev_err(dev, "Flashing section type 0x%x failed\n",
  2336. pflashcomp[i].img_type);
  2337. return status;
  2338. }
  2339. }
  2340. return 0;
  2341. }
  2342. static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
  2343. {
  2344. u32 img_type = le32_to_cpu(fsec_entry.type);
  2345. u16 img_optype = le16_to_cpu(fsec_entry.optype);
  2346. if (img_optype != 0xFFFF)
  2347. return img_optype;
  2348. switch (img_type) {
  2349. case IMAGE_FIRMWARE_ISCSI:
  2350. img_optype = OPTYPE_ISCSI_ACTIVE;
  2351. break;
  2352. case IMAGE_BOOT_CODE:
  2353. img_optype = OPTYPE_REDBOOT;
  2354. break;
  2355. case IMAGE_OPTION_ROM_ISCSI:
  2356. img_optype = OPTYPE_BIOS;
  2357. break;
  2358. case IMAGE_OPTION_ROM_PXE:
  2359. img_optype = OPTYPE_PXE_BIOS;
  2360. break;
  2361. case IMAGE_OPTION_ROM_FCOE:
  2362. img_optype = OPTYPE_FCOE_BIOS;
  2363. break;
  2364. case IMAGE_FIRMWARE_BACKUP_ISCSI:
  2365. img_optype = OPTYPE_ISCSI_BACKUP;
  2366. break;
  2367. case IMAGE_NCSI:
  2368. img_optype = OPTYPE_NCSI_FW;
  2369. break;
  2370. case IMAGE_FLASHISM_JUMPVECTOR:
  2371. img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
  2372. break;
  2373. case IMAGE_FIRMWARE_PHY:
  2374. img_optype = OPTYPE_SH_PHY_FW;
  2375. break;
  2376. case IMAGE_REDBOOT_DIR:
  2377. img_optype = OPTYPE_REDBOOT_DIR;
  2378. break;
  2379. case IMAGE_REDBOOT_CONFIG:
  2380. img_optype = OPTYPE_REDBOOT_CONFIG;
  2381. break;
  2382. case IMAGE_UFI_DIR:
  2383. img_optype = OPTYPE_UFI_DIR;
  2384. break;
  2385. default:
  2386. break;
  2387. }
  2388. return img_optype;
  2389. }
  2390. static int be_flash_skyhawk(struct be_adapter *adapter,
  2391. const struct firmware *fw,
  2392. struct be_dma_mem *flash_cmd, int num_of_images)
  2393. {
  2394. int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
  2395. bool crc_match, old_fw_img, flash_offset_support = true;
  2396. struct device *dev = &adapter->pdev->dev;
  2397. struct flash_section_info *fsec = NULL;
  2398. u32 img_offset, img_size, img_type;
  2399. u16 img_optype, flash_optype;
  2400. int status, i, filehdr_size;
  2401. const u8 *p;
  2402. filehdr_size = sizeof(struct flash_file_hdr_g3);
  2403. fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
  2404. if (!fsec) {
  2405. dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
  2406. return -EINVAL;
  2407. }
  2408. retry_flash:
  2409. for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
  2410. img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
  2411. img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
  2412. img_type = le32_to_cpu(fsec->fsec_entry[i].type);
  2413. img_optype = be_get_img_optype(fsec->fsec_entry[i]);
  2414. old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
  2415. if (img_optype == 0xFFFF)
  2416. continue;
  2417. if (flash_offset_support)
  2418. flash_optype = OPTYPE_OFFSET_SPECIFIED;
  2419. else
  2420. flash_optype = img_optype;
  2421. /* Don't bother verifying CRC if an old FW image is being
  2422. * flashed
  2423. */
  2424. if (old_fw_img)
  2425. goto flash;
  2426. status = be_check_flash_crc(adapter, fw->data, img_offset,
  2427. img_size, filehdr_size +
  2428. img_hdrs_size, flash_optype,
  2429. &crc_match);
  2430. if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
  2431. base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
  2432. /* The current FW image on the card does not support
  2433. * OFFSET based flashing. Retry using older mechanism
  2434. * of OPTYPE based flashing
  2435. */
  2436. if (flash_optype == OPTYPE_OFFSET_SPECIFIED) {
  2437. flash_offset_support = false;
  2438. goto retry_flash;
  2439. }
  2440. /* The current FW image on the card does not recognize
  2441. * the new FLASH op_type. The FW download is partially
  2442. * complete. Reboot the server now to enable FW image
  2443. * to recognize the new FLASH op_type. To complete the
  2444. * remaining process, download the same FW again after
  2445. * the reboot.
  2446. */
  2447. dev_err(dev, "Flash incomplete. Reset the server\n");
  2448. dev_err(dev, "Download FW image again after reset\n");
  2449. return -EAGAIN;
  2450. } else if (status) {
  2451. dev_err(dev, "Could not get CRC for 0x%x region\n",
  2452. img_optype);
  2453. return -EFAULT;
  2454. }
  2455. if (crc_match)
  2456. continue;
  2457. flash:
  2458. p = fw->data + filehdr_size + img_offset + img_hdrs_size;
  2459. if (p + img_size > fw->data + fw->size)
  2460. return -1;
  2461. status = be_flash(adapter, p, flash_cmd, flash_optype, img_size,
  2462. img_offset);
  2463. /* The current FW image on the card does not support OFFSET
  2464. * based flashing. Retry using older mechanism of OPTYPE based
  2465. * flashing
  2466. */
  2467. if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD &&
  2468. flash_optype == OPTYPE_OFFSET_SPECIFIED) {
  2469. flash_offset_support = false;
  2470. goto retry_flash;
  2471. }
  2472. /* For old FW images ignore ILLEGAL_FIELD error or errors on
  2473. * UFI_DIR region
  2474. */
  2475. if (old_fw_img &&
  2476. (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
  2477. (img_optype == OPTYPE_UFI_DIR &&
  2478. base_status(status) == MCC_STATUS_FAILED))) {
  2479. continue;
  2480. } else if (status) {
  2481. dev_err(dev, "Flashing section type 0x%x failed\n",
  2482. img_type);
  2483. switch (addl_status(status)) {
  2484. case MCC_ADDL_STATUS_MISSING_SIGNATURE:
  2485. dev_err(dev,
  2486. "Digital signature missing in FW\n");
  2487. return -EINVAL;
  2488. case MCC_ADDL_STATUS_INVALID_SIGNATURE:
  2489. dev_err(dev,
  2490. "Invalid digital signature in FW\n");
  2491. return -EINVAL;
  2492. default:
  2493. return -EFAULT;
  2494. }
  2495. }
  2496. }
  2497. return 0;
  2498. }
  2499. int lancer_fw_download(struct be_adapter *adapter,
  2500. const struct firmware *fw)
  2501. {
  2502. struct device *dev = &adapter->pdev->dev;
  2503. struct be_dma_mem flash_cmd;
  2504. const u8 *data_ptr = NULL;
  2505. u8 *dest_image_ptr = NULL;
  2506. size_t image_size = 0;
  2507. u32 chunk_size = 0;
  2508. u32 data_written = 0;
  2509. u32 offset = 0;
  2510. int status = 0;
  2511. u8 add_status = 0;
  2512. u8 change_status;
  2513. if (!IS_ALIGNED(fw->size, sizeof(u32))) {
  2514. dev_err(dev, "FW image size should be multiple of 4\n");
  2515. return -EINVAL;
  2516. }
  2517. flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
  2518. + LANCER_FW_DOWNLOAD_CHUNK;
  2519. flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
  2520. &flash_cmd.dma, GFP_KERNEL);
  2521. if (!flash_cmd.va)
  2522. return -ENOMEM;
  2523. dest_image_ptr = flash_cmd.va +
  2524. sizeof(struct lancer_cmd_req_write_object);
  2525. image_size = fw->size;
  2526. data_ptr = fw->data;
  2527. while (image_size) {
  2528. chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
  2529. /* Copy the image chunk content. */
  2530. memcpy(dest_image_ptr, data_ptr, chunk_size);
  2531. status = lancer_cmd_write_object(adapter, &flash_cmd,
  2532. chunk_size, offset,
  2533. LANCER_FW_DOWNLOAD_LOCATION,
  2534. &data_written, &change_status,
  2535. &add_status);
  2536. if (status)
  2537. break;
  2538. offset += data_written;
  2539. data_ptr += data_written;
  2540. image_size -= data_written;
  2541. }
  2542. if (!status) {
  2543. /* Commit the FW written */
  2544. status = lancer_cmd_write_object(adapter, &flash_cmd,
  2545. 0, offset,
  2546. LANCER_FW_DOWNLOAD_LOCATION,
  2547. &data_written, &change_status,
  2548. &add_status);
  2549. }
  2550. dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
  2551. if (status) {
  2552. dev_err(dev, "Firmware load error\n");
  2553. return be_cmd_status(status);
  2554. }
  2555. dev_info(dev, "Firmware flashed successfully\n");
  2556. if (change_status == LANCER_FW_RESET_NEEDED) {
  2557. dev_info(dev, "Resetting adapter to activate new FW\n");
  2558. status = lancer_physdev_ctrl(adapter,
  2559. PHYSDEV_CONTROL_FW_RESET_MASK);
  2560. if (status) {
  2561. dev_err(dev, "Adapter busy, could not reset FW\n");
  2562. dev_err(dev, "Reboot server to activate new FW\n");
  2563. }
  2564. } else if (change_status != LANCER_NO_RESET_NEEDED) {
  2565. dev_info(dev, "Reboot server to activate new FW\n");
  2566. }
  2567. return 0;
  2568. }
  2569. /* Check if the flash image file is compatible with the adapter that
  2570. * is being flashed.
  2571. */
  2572. static bool be_check_ufi_compatibility(struct be_adapter *adapter,
  2573. struct flash_file_hdr_g3 *fhdr)
  2574. {
  2575. if (!fhdr) {
  2576. dev_err(&adapter->pdev->dev, "Invalid FW UFI file");
  2577. return false;
  2578. }
  2579. /* First letter of the build version is used to identify
  2580. * which chip this image file is meant for.
  2581. */
  2582. switch (fhdr->build[0]) {
  2583. case BLD_STR_UFI_TYPE_SH:
  2584. if (!skyhawk_chip(adapter))
  2585. return false;
  2586. break;
  2587. case BLD_STR_UFI_TYPE_BE3:
  2588. if (!BE3_chip(adapter))
  2589. return false;
  2590. break;
  2591. case BLD_STR_UFI_TYPE_BE2:
  2592. if (!BE2_chip(adapter))
  2593. return false;
  2594. break;
  2595. default:
  2596. return false;
  2597. }
  2598. /* In BE3 FW images the "asic_type_rev" field doesn't track the
  2599. * asic_rev of the chips it is compatible with.
  2600. * When asic_type_rev is 0 the image is compatible only with
  2601. * pre-BE3-R chips (asic_rev < 0x10)
  2602. */
  2603. if (BEx_chip(adapter) && fhdr->asic_type_rev == 0)
  2604. return adapter->asic_rev < 0x10;
  2605. else
  2606. return (fhdr->asic_type_rev >= adapter->asic_rev);
  2607. }
int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
        struct device *dev = &adapter->pdev->dev;
        struct flash_file_hdr_g3 *fhdr3;
        struct image_hdr *img_hdr_ptr;
        int status = 0, i, num_imgs;
        struct be_dma_mem flash_cmd;

        fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
        if (!be_check_ufi_compatibility(adapter, fhdr3)) {
                dev_err(dev, "Flash image is not compatible with adapter\n");
                return -EINVAL;
        }

        flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
        flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
                                           GFP_KERNEL);
        if (!flash_cmd.va)
                return -ENOMEM;

        num_imgs = le32_to_cpu(fhdr3->num_imgs);
        for (i = 0; i < num_imgs; i++) {
                img_hdr_ptr = (struct image_hdr *)(fw->data +
                                (sizeof(struct flash_file_hdr_g3) +
                                 i * sizeof(struct image_hdr)));
                if (!BE2_chip(adapter) &&
                    le32_to_cpu(img_hdr_ptr->imageid) != 1)
                        continue;

                if (skyhawk_chip(adapter))
                        status = be_flash_skyhawk(adapter, fw, &flash_cmd,
                                                  num_imgs);
                else
                        status = be_flash_BEx(adapter, fw, &flash_cmd,
                                              num_imgs);
        }

        dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma);
        if (!status)
                dev_info(dev, "Firmware flashed successfully\n");

        return status;
}

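/* Uses synchronous MCCQ */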
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
                            struct be_dma_mem *nonemb_cmd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_acpi_wol_magic_config *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = nonemb_cmd->va;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                               OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
                               wrb, nonemb_cmd);
        memcpy(req->magic_mac, mac, ETH_ALEN);

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

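/* Posted on the MCCQ; the loopback-mode completion is signalled
 * asynchronously on et_cmd_compl.
 */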
int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
                        u8 loopback_type, u8 enable)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_set_lmode *req;
        int status;

        if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
                            CMD_SUBSYSTEM_LOWLEVEL))
                return -EPERM;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err_unlock;
        }

        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
                               OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
                               wrb, NULL);

        req->src_port = port_num;
        req->dest_port = port_num;
        req->loopback_type = loopback_type;
        req->loopback_state = enable;

        status = be_mcc_notify(adapter);
        if (status)
                goto err_unlock;

        spin_unlock_bh(&adapter->mcc_lock);

        if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
                                         msecs_to_jiffies(SET_LB_MODE_TIMEOUT)))
                status = -ETIMEDOUT;

        return status;

err_unlock:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

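/* Posted on the MCCQ; waits for the test completion on et_cmd_compl */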
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
                         u32 loopback_type, u32 pkt_size, u32 num_pkts,
                         u64 pattern)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_loopback_test *req;
        struct be_cmd_resp_loopback_test *resp;
        int status;

        if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_LOOPBACK_TEST,
                            CMD_SUBSYSTEM_LOWLEVEL))
                return -EPERM;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
                               OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req),
                               wrb, NULL);

        req->hdr.timeout = cpu_to_le32(15);
        req->pattern = cpu_to_le64(pattern);
        req->src_port = cpu_to_le32(port_num);
        req->dest_port = cpu_to_le32(port_num);
        req->pkt_size = cpu_to_le32(pkt_size);
        req->num_pkts = cpu_to_le32(num_pkts);
        req->loopback_type = cpu_to_le32(loopback_type);

        status = be_mcc_notify(adapter);
        if (status)
                goto err;

        spin_unlock_bh(&adapter->mcc_lock);

        wait_for_completion(&adapter->et_cmd_compl);
        resp = embedded_payload(wrb);
        status = le32_to_cpu(resp->status);

        return status;
err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

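/* Uses synchronous MCCQ */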
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
                        u32 byte_cnt, struct be_dma_mem *cmd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_ddrdma_test *req;
        int status;
        int i, j = 0;

        if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_HOST_DDR_DMA,
                            CMD_SUBSYSTEM_LOWLEVEL))
                return -EPERM;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = cmd->va;
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
                               OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
                               cmd);

        req->pattern = cpu_to_le64(pattern);
        req->byte_count = cpu_to_le32(byte_cnt);
        for (i = 0; i < byte_cnt; i++) {
                req->snd_buff[i] = (u8)(pattern >> (j * 8));
                j++;
                if (j > 7)
                        j = 0;
        }

        status = be_mcc_notify_wait(adapter);

        if (!status) {
                struct be_cmd_resp_ddrdma_test *resp;

                resp = cmd->va;
                if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
                    resp->snd_err) {
                        status = -1;
                }
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

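/* Uses synchronous MCCQ */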
int be_cmd_get_seeprom_data(struct be_adapter *adapter,
                            struct be_dma_mem *nonemb_cmd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_seeprom_read *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = nonemb_cmd->va;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
                               nonemb_cmd);

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

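/* Uses synchronous MCCQ */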
int be_cmd_get_phy_info(struct be_adapter *adapter)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_phy_info *req;
        struct be_dma_mem cmd;
        int status;

        if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
                            CMD_SUBSYSTEM_COMMON))
                return -EPERM;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        cmd.size = sizeof(struct be_cmd_req_get_phy_info);
        cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
                                     GFP_ATOMIC);
        if (!cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
                status = -ENOMEM;
                goto err;
        }

        req = cmd.va;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
                               wrb, &cmd);

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_phy_info *resp_phy_info =
                                cmd.va + sizeof(struct be_cmd_req_hdr);

                adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
                adapter->phy.interface_type =
                        le16_to_cpu(resp_phy_info->interface_type);
                adapter->phy.auto_speeds_supported =
                        le16_to_cpu(resp_phy_info->auto_speeds_supported);
                adapter->phy.fixed_speeds_supported =
                        le16_to_cpu(resp_phy_info->fixed_speeds_supported);
                adapter->phy.misc_params =
                        le32_to_cpu(resp_phy_info->misc_params);

                if (BE2_chip(adapter)) {
                        adapter->phy.fixed_speeds_supported =
                                BE_SUPPORTED_SPEED_10GBPS |
                                BE_SUPPORTED_SPEED_1GBPS;
                }
        }
        dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

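/* Uses synchronous MCCQ */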
static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_set_qos *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);

        req->hdr.domain = domain;
        req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
        req->max_bps_nic = cpu_to_le32(bps);

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

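/* Uses mbox */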
int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_cntl_attribs *req;
        struct be_cmd_resp_cntl_attribs *resp;
        int status, i;
        int payload_len = max(sizeof(*req), sizeof(*resp));
        struct mgmt_controller_attrib *attribs;
        struct be_dma_mem attribs_cmd;
        u32 *serial_num;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
        attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
        attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
                                             attribs_cmd.size,
                                             &attribs_cmd.dma, GFP_ATOMIC);
        if (!attribs_cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
                status = -ENOMEM;
                goto err;
        }

        wrb = wrb_from_mbox(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = attribs_cmd.va;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
                               wrb, &attribs_cmd);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
                adapter->hba_port_num = attribs->hba_attribs.phy_port;
                serial_num = attribs->hba_attribs.controller_serial_number;
                for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++)
                        adapter->serial_num[i] = le32_to_cpu(serial_num[i]) &
                                (BIT_MASK(16) - 1);
        }

err:
        mutex_unlock(&adapter->mbox_lock);
        if (attribs_cmd.va)
                dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size,
                                  attribs_cmd.va, attribs_cmd.dma);
        return status;
}

/* Uses mbox */
int be_cmd_req_native_mode(struct be_adapter *adapter)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_set_func_cap *req;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
                               sizeof(*req), wrb, NULL);

        req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
                                           CAPABILITY_BE3_NATIVE_ERX_API);
        req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);

                adapter->be3_native = le32_to_cpu(resp->cap_flags) &
                                      CAPABILITY_BE3_NATIVE_ERX_API;
                if (!adapter->be3_native)
                        dev_warn(&adapter->pdev->dev,
                                 "adapter not in advanced mode\n");
        }
err:
        mutex_unlock(&adapter->mbox_lock);
        return status;
}

/* Get privilege(s) for a function */
int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
                             u32 domain)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_fn_privileges *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
                               wrb, NULL);

        req->hdr.domain = domain;

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_fn_privileges *resp =
                                                embedded_payload(wrb);

                *privilege = le32_to_cpu(resp->privilege_mask);

                /* In UMC mode the FW does not return the right privileges.
                 * Override with the correct privileges, equivalent to a PF.
                 */
                if (BEx_chip(adapter) && be_is_mc(adapter) &&
                    be_physfn(adapter))
                        *privilege = MAX_PRIVILEGES;
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Set privilege(s) for a function */
int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
                             u32 domain)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_set_fn_privileges *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = embedded_payload(wrb);
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
                               wrb, NULL);
        req->hdr.domain = domain;
        if (lancer_chip(adapter))
                req->privileges_lancer = cpu_to_le32(privileges);
        else
                req->privileges = cpu_to_le32(privileges);

        status = be_mcc_notify_wait(adapter);
err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* pmac_id_valid: true => pmac_id is supplied and MAC address is requested.
 * pmac_id_valid: false => pmac_id or MAC address is requested.
 *                If pmac_id is returned, pmac_id_valid is returned as true
 */
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
                             bool *pmac_id_valid, u32 *pmac_id, u32 if_handle,
                             u8 domain)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_mac_list *req;
        int status;
        int mac_count;
        struct be_dma_mem get_mac_list_cmd;
        int i;

        memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
        get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
        get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
                                                  get_mac_list_cmd.size,
                                                  &get_mac_list_cmd.dma,
                                                  GFP_ATOMIC);

        if (!get_mac_list_cmd.va) {
                dev_err(&adapter->pdev->dev,
                        "Memory allocation failure during GET_MAC_LIST\n");
                return -ENOMEM;
        }

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto out;
        }

        req = get_mac_list_cmd.va;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_GET_MAC_LIST,
                               get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
        req->hdr.domain = domain;
        req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
        if (*pmac_id_valid) {
                req->mac_id = cpu_to_le32(*pmac_id);
                req->iface_id = cpu_to_le16(if_handle);
                req->perm_override = 0;
        } else {
                req->perm_override = 1;
        }

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_mac_list *resp =
                                                get_mac_list_cmd.va;

                if (*pmac_id_valid) {
                        memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
                               ETH_ALEN);
                        goto out;
                }

                mac_count = resp->true_mac_count + resp->pseudo_mac_count;
                /* Mac list returned could contain one or more active mac_ids
                 * or one or more true or pseudo permanent mac addresses.
                 * If an active mac_id is present, return first active mac_id
                 * found.
                 */
                for (i = 0; i < mac_count; i++) {
                        struct get_list_macaddr *mac_entry;
                        u16 mac_addr_size;
                        u32 mac_id;

                        mac_entry = &resp->macaddr_list[i];
                        mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
                        /* mac_id is a 32 bit value and mac_addr size
                         * is 6 bytes
                         */
                        if (mac_addr_size == sizeof(u32)) {
                                *pmac_id_valid = true;
                                mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
                                *pmac_id = le32_to_cpu(mac_id);
                                goto out;
                        }
                }
                /* If no active mac_id found, return first mac addr */
                *pmac_id_valid = false;
                memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
                       ETH_ALEN);
        }

out:
        spin_unlock_bh(&adapter->mcc_lock);
        dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
                          get_mac_list_cmd.va, get_mac_list_cmd.dma);
        return status;
}

int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
                          u8 *mac, u32 if_handle, bool active, u32 domain)
{
        if (!active)
                be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id,
                                         if_handle, domain);
        if (BEx_chip(adapter))
                return be_cmd_mac_addr_query(adapter, mac, false,
                                             if_handle, curr_pmac_id);
        else
                /* Fetch the MAC address using pmac_id */
                return be_cmd_get_mac_from_list(adapter, mac, &active,
                                                &curr_pmac_id,
                                                if_handle, domain);
}

int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
{
        int status;
        bool pmac_valid = false;

        eth_zero_addr(mac);

        if (BEx_chip(adapter)) {
                if (be_physfn(adapter))
                        status = be_cmd_mac_addr_query(adapter, mac, true, 0,
                                                       0);
                else
                        status = be_cmd_mac_addr_query(adapter, mac, false,
                                                       adapter->if_handle, 0);
        } else {
                status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
                                                  NULL, adapter->if_handle, 0);
        }

        return status;
}

/* Uses synchronous MCCQ */
int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
                        u8 mac_count, u32 domain)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_set_mac_list *req;
        int status;
        struct be_dma_mem cmd;

        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_req_set_mac_list);
        cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
                                     GFP_KERNEL);
        if (!cmd.va)
                return -ENOMEM;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = cmd.va;
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
                               wrb, &cmd);

        req->hdr.domain = domain;
        req->mac_count = mac_count;
        if (mac_count)
                memcpy(req->mac, mac_array, ETH_ALEN * mac_count);

        status = be_mcc_notify_wait(adapter);

err:
        dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Wrapper to delete any active MACs and provision the new mac.
 * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
 * current list are active.
 */
int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
{
        bool active_mac = false;
        u8 old_mac[ETH_ALEN];
        u32 pmac_id;
        int status;

        status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
                                          &pmac_id, if_id, dom);

        if (!status && active_mac)
                be_cmd_pmac_del(adapter, if_id, pmac_id, dom);

        return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
}

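/* Uses synchronous MCCQ */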
int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
                          u32 domain, u16 intf_id, u16 hsw_mode, u8 spoofchk)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_set_hsw_config *req;
        void *ctxt;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
                               NULL);

        req->hdr.domain = domain;
        AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
        if (pvid) {
                AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
                AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
        }
        if (!BEx_chip(adapter) && hsw_mode) {
                AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
                              ctxt, adapter->hba_port_num);
                AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
                AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
                              ctxt, hsw_mode);
        }

        /* Enable/disable both mac and vlan spoof checking */
        if (!BEx_chip(adapter) && spoofchk) {
                AMAP_SET_BITS(struct amap_set_hsw_context, mac_spoofchk,
                              ctxt, spoofchk);
                AMAP_SET_BITS(struct amap_set_hsw_context, vlan_spoofchk,
                              ctxt, spoofchk);
        }

        be_dws_cpu_to_le(req->context, sizeof(req->context));
        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

/* Get Hyper switch config */
int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
                          u32 domain, u16 intf_id, u8 *mode, bool *spoofchk)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_hsw_config *req;
        void *ctxt;
        int status;
        u16 vid;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
                               NULL);

        req->hdr.domain = domain;
        AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
                      ctxt, intf_id);
        AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);

        if (!BEx_chip(adapter) && mode) {
                AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
                              ctxt, adapter->hba_port_num);
                AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
        }
        be_dws_cpu_to_le(req->context, sizeof(req->context));

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_hsw_config *resp =
                                                embedded_payload(wrb);

                be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
                vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
                                    pvid, &resp->context);
                if (pvid)
                        *pvid = le16_to_cpu(vid);
                if (mode)
                        *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
                                              port_fwd_type, &resp->context);
                if (spoofchk)
                        *spoofchk =
                                AMAP_GET_BITS(struct amap_get_hsw_resp_context,
                                              spoofchk, &resp->context);
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

static bool be_is_wol_excluded(struct be_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;

        if (be_virtfn(adapter))
                return true;

        switch (pdev->subsystem_device) {
        case OC_SUBSYS_DEVICE_ID1:
        case OC_SUBSYS_DEVICE_ID2:
        case OC_SUBSYS_DEVICE_ID3:
        case OC_SUBSYS_DEVICE_ID4:
                return true;
        default:
                return false;
        }
}

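/* Uses mbox */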
int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_acpi_wol_magic_config_v1 *req;
        int status = 0;
        struct be_dma_mem cmd;

        if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
                            CMD_SUBSYSTEM_ETH))
                return -EPERM;

        if (be_is_wol_excluded(adapter))
                return status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
        cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
                                     GFP_ATOMIC);
        if (!cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
                status = -ENOMEM;
                goto err;
        }

        wrb = wrb_from_mbox(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = cmd.va;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                               OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
                               sizeof(*req), wrb, &cmd);

        req->hdr.version = 1;
        req->query_options = BE_GET_WOL_CAP;

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;

                resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va;

                adapter->wol_cap = resp->wol_settings;
                if (adapter->wol_cap & BE_WOL_CAP)
                        adapter->wol_en = true;
        }
err:
        mutex_unlock(&adapter->mbox_lock);
        if (cmd.va)
                dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
                                  cmd.dma);
        return status;
}

int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
{
        struct be_dma_mem extfat_cmd;
        struct be_fat_conf_params *cfgs;
        int status;
        int i, j;

        memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
        extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
        extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
                                            extfat_cmd.size, &extfat_cmd.dma,
                                            GFP_ATOMIC);
        if (!extfat_cmd.va)
                return -ENOMEM;

        status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
        if (status)
                goto err;

        cfgs = (struct be_fat_conf_params *)
                        (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr));
        for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
                u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);

                for (j = 0; j < num_modes; j++) {
                        if (cfgs->module[i].trace_lvl[j].mode == MODE_UART)
                                cfgs->module[i].trace_lvl[j].dbg_lvl =
                                        cpu_to_le32(level);
                }
        }

        status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
err:
        dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
                          extfat_cmd.dma);
        return status;
}

int be_cmd_get_fw_log_level(struct be_adapter *adapter)
{
        struct be_dma_mem extfat_cmd;
        struct be_fat_conf_params *cfgs;
        int status, j;
        int level = 0;

        memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
        extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
        extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
                                            extfat_cmd.size, &extfat_cmd.dma,
                                            GFP_ATOMIC);

        if (!extfat_cmd.va) {
                dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
                        __func__);
                goto err;
        }

        status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
        if (!status) {
                cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
                                        sizeof(struct be_cmd_resp_hdr));

                for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
                        if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
                                level = cfgs->module[0].trace_lvl[j].dbg_lvl;
                }
        }
        dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
                          extfat_cmd.dma);
err:
        return level;
}

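/* Uses mbox */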
int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
                                   struct be_dma_mem *cmd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_ext_fat_caps *req;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = cmd->va;
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
                               cmd->size, wrb, cmd);
        req->parameter_type = cpu_to_le32(1);

        status = be_mbox_notify_wait(adapter);
err:
        mutex_unlock(&adapter->mbox_lock);
        return status;
}

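/* Uses synchronous MCCQ */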
int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
                                   struct be_dma_mem *cmd,
                                   struct be_fat_conf_params *configs)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_set_ext_fat_caps *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = cmd->va;
        memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
                               cmd->size, wrb, cmd);

        status = be_mcc_notify_wait(adapter);
err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

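/* Uses mbox */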
int be_cmd_query_port_name(struct be_adapter *adapter)
{
        struct be_cmd_req_get_port_name *req;
        struct be_mcc_wrb *wrb;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
                               NULL);
        if (!BEx_chip(adapter))
                req->hdr.version = 1;

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);

                adapter->port_name = resp->port_name[adapter->hba_port_num];
        } else {
                adapter->port_name = adapter->hba_port_num + '0';
        }

        mutex_unlock(&adapter->mbox_lock);
        return status;
}

/* When more than 1 NIC descriptor is present in the descriptor list,
 * the caller must specify the pf_num to obtain the NIC descriptor
 * corresponding to its pci function.
 * get_vft must be true when the caller wants the VF-template desc of the
 * PF-pool.
 * The pf_num should be set to PF_NUM_IGNORE when the caller knows
 * that only its own NIC descriptor is present in the descriptor list.
 */
static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
                                               bool get_vft, u8 pf_num)
{
        struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
        struct be_nic_res_desc *nic;
        int i;

        for (i = 0; i < desc_count; i++) {
                if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
                    hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) {
                        nic = (struct be_nic_res_desc *)hdr;

                        if ((pf_num == PF_NUM_IGNORE ||
                             nic->pf_num == pf_num) &&
                            (!get_vft || nic->flags & BIT(VFT_SHIFT)))
                                return nic;
                }
                hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
                hdr = (void *)hdr + hdr->desc_len;
        }
        return NULL;
}

static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count,
                                               u8 pf_num)
{
        return be_get_nic_desc(buf, desc_count, true, pf_num);
}

static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count,
                                                    u8 pf_num)
{
        return be_get_nic_desc(buf, desc_count, false, pf_num);
}

static struct be_pcie_res_desc *be_get_pcie_desc(u8 *buf, u32 desc_count,
                                                 u8 pf_num)
{
        struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
        struct be_pcie_res_desc *pcie;
        int i;

        for (i = 0; i < desc_count; i++) {
                if (hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
                    hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1) {
                        pcie = (struct be_pcie_res_desc *)hdr;
                        if (pcie->pf_num == pf_num)
                                return pcie;
                }
                hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
                hdr = (void *)hdr + hdr->desc_len;
        }
        return NULL;
}

static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
{
        struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
        int i;

        for (i = 0; i < desc_count; i++) {
                if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
                        return (struct be_port_res_desc *)hdr;

                hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
                hdr = (void *)hdr + hdr->desc_len;
        }
        return NULL;
}

static void be_copy_nic_desc(struct be_resources *res,
                             struct be_nic_res_desc *desc)
{
        res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
        res->max_vlans = le16_to_cpu(desc->vlan_count);
        res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
        res->max_tx_qs = le16_to_cpu(desc->txq_count);
        res->max_rss_qs = le16_to_cpu(desc->rssq_count);
        res->max_rx_qs = le16_to_cpu(desc->rq_count);
        res->max_evt_qs = le16_to_cpu(desc->eq_count);
        res->max_cq_count = le16_to_cpu(desc->cq_count);
        res->max_iface_count = le16_to_cpu(desc->iface_count);
        res->max_mcc_count = le16_to_cpu(desc->mcc_count);
        /* Clear flags that driver is not interested in */
        res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
                                BE_IF_CAP_FLAGS_WANT;
}

/* Uses Mbox */
int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_func_config *req;
        int status;
        struct be_dma_mem cmd;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_resp_get_func_config);
        cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
                                     GFP_ATOMIC);
        if (!cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
                status = -ENOMEM;
                goto err;
        }

        wrb = wrb_from_mbox(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = cmd.va;

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_GET_FUNC_CONFIG,
                               cmd.size, wrb, &cmd);

        if (skyhawk_chip(adapter))
                req->hdr.version = 1;

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_func_config *resp = cmd.va;
                u32 desc_count = le32_to_cpu(resp->desc_count);
                struct be_nic_res_desc *desc;

                /* GET_FUNC_CONFIG returns resource descriptors of the
                 * current function only. So, pf_num should be set to
                 * PF_NUM_IGNORE.
                 */
                desc = be_get_func_nic_desc(resp->func_param, desc_count,
                                            PF_NUM_IGNORE);
                if (!desc) {
                        status = -EINVAL;
                        goto err;
                }

                /* Store pf_num & vf_num for later use in GET_PROFILE_CONFIG */
                adapter->pf_num = desc->pf_num;
                adapter->vf_num = desc->vf_num;

                if (res)
                        be_copy_nic_desc(res, desc);
        }
err:
        mutex_unlock(&adapter->mbox_lock);
        if (cmd.va)
                dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
                                  cmd.dma);
        return status;
}

/* Will use MBOX only if MCCQ has not been created */
int be_cmd_get_profile_config(struct be_adapter *adapter,
                              struct be_resources *res, u8 query, u8 domain)
{
        struct be_cmd_resp_get_profile_config *resp;
        struct be_cmd_req_get_profile_config *req;
        struct be_nic_res_desc *vf_res;
        struct be_pcie_res_desc *pcie;
        struct be_port_res_desc *port;
        struct be_nic_res_desc *nic;
        struct be_mcc_wrb wrb = {0};
        struct be_dma_mem cmd;
        u16 desc_count;
        int status;

        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
        cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
                                     GFP_ATOMIC);
        if (!cmd.va)
                return -ENOMEM;

        req = cmd.va;
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_GET_PROFILE_CONFIG,
                               cmd.size, &wrb, &cmd);

        if (!lancer_chip(adapter))
                req->hdr.version = 1;
        req->type = ACTIVE_PROFILE_TYPE;
        req->hdr.domain = domain;

        /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the
         * descriptors with all bits set to "1" for the fields which can be
         * modified using SET_PROFILE_CONFIG cmd.
         */
        if (query == RESOURCE_MODIFIABLE)
                req->type |= QUERY_MODIFIABLE_FIELDS_TYPE;

        status = be_cmd_notify_wait(adapter, &wrb);
        if (status)
                goto err;

        resp = cmd.va;
        desc_count = le16_to_cpu(resp->desc_count);

        pcie = be_get_pcie_desc(resp->func_param, desc_count,
                                adapter->pf_num);
        if (pcie)
                res->max_vfs = le16_to_cpu(pcie->num_vfs);

        port = be_get_port_desc(resp->func_param, desc_count);
        if (port)
                adapter->mc_type = port->mc_type;

        nic = be_get_func_nic_desc(resp->func_param, desc_count,
                                   adapter->pf_num);
        if (nic)
                be_copy_nic_desc(res, nic);

        vf_res = be_get_vft_desc(resp->func_param, desc_count,
                                 adapter->pf_num);
        if (vf_res)
                res->vf_if_cap_flags = vf_res->cap_flags;
err:
        if (cmd.va)
                dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
                                  cmd.dma);
        return status;
}

/* Will use MBOX only if MCCQ has not been created */
static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
                                     int size, int count, u8 version, u8 domain)
{
        struct be_cmd_req_set_profile_config *req;
        struct be_mcc_wrb wrb = {0};
        struct be_dma_mem cmd;
        int status;

        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_req_set_profile_config);
        cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
                                     GFP_ATOMIC);
        if (!cmd.va)
                return -ENOMEM;

        req = cmd.va;
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size,
                               &wrb, &cmd);
        req->hdr.version = version;
        req->hdr.domain = domain;
        req->desc_count = cpu_to_le32(count);
        memcpy(req->desc, desc, size);

        status = be_cmd_notify_wait(adapter, &wrb);

        if (cmd.va)
                dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
                                  cmd.dma);
        return status;
}

/* Mark all fields invalid */
static void be_reset_nic_desc(struct be_nic_res_desc *nic)
{
        memset(nic, 0, sizeof(*nic));
        nic->unicast_mac_count = 0xFFFF;
        nic->mcc_count = 0xFFFF;
        nic->vlan_count = 0xFFFF;
        nic->mcast_mac_count = 0xFFFF;
        nic->txq_count = 0xFFFF;
        nic->rq_count = 0xFFFF;
        nic->rssq_count = 0xFFFF;
        nic->lro_count = 0xFFFF;
        nic->cq_count = 0xFFFF;
        nic->toe_conn_count = 0xFFFF;
        nic->eq_count = 0xFFFF;
        nic->iface_count = 0xFFFF;
        nic->link_param = 0xFF;
        nic->channel_id_param = cpu_to_le16(0xF000);
        nic->acpi_params = 0xFF;
        nic->wol_param = 0x0F;
        nic->tunnel_iface_count = 0xFFFF;
        nic->direct_tenant_iface_count = 0xFFFF;
        nic->bw_min = 0xFFFFFFFF;
        nic->bw_max = 0xFFFFFFFF;
}

/* Mark all fields invalid */
static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie)
{
        memset(pcie, 0, sizeof(*pcie));
        pcie->sriov_state = 0xFF;
        pcie->pf_state = 0xFF;
        pcie->pf_type = 0xFF;
        pcie->num_vfs = 0xFFFF;
}

int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
                      u8 domain)
{
        struct be_nic_res_desc nic_desc;
        u32 bw_percent;
        u16 version = 0;

        if (BE3_chip(adapter))
                return be_cmd_set_qos(adapter, max_rate / 10, domain);

        be_reset_nic_desc(&nic_desc);
        nic_desc.pf_num = adapter->pf_num;
        nic_desc.vf_num = domain;
        nic_desc.bw_min = 0;
        if (lancer_chip(adapter)) {
                nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
                nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
                nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
                                 (1 << NOSV_SHIFT);
                nic_desc.bw_max = cpu_to_le32(max_rate / 10);
        } else {
                version = 1;
                nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
                nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
                nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
                bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
                nic_desc.bw_max = cpu_to_le32(bw_percent);
        }

        return be_cmd_set_profile_config(adapter, &nic_desc,
                                         nic_desc.hdr.desc_len,
                                         1, version, domain);
}

static void be_fill_vf_res_template(struct be_adapter *adapter,
                                    struct be_resources pool_res,
                                    u16 num_vfs, u16 num_vf_qs,
                                    struct be_nic_res_desc *nic_vft)
{
        u32 vf_if_cap_flags = pool_res.vf_if_cap_flags;
        struct be_resources res_mod = {0};

        /* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
         * which are modifiable using SET_PROFILE_CONFIG cmd.
         */
        be_cmd_get_profile_config(adapter, &res_mod, RESOURCE_MODIFIABLE, 0);

        /* If RSS IFACE capability flags are modifiable for a VF, set the
         * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
         * more than 1 RSSQ is available for a VF.
         * Otherwise, provision only 1 queue pair for VF.
         */
        if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
                nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
                if (num_vf_qs > 1) {
                        vf_if_cap_flags |= BE_IF_FLAGS_RSS;
                        if (pool_res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
                                vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
                } else {
                        vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
                                             BE_IF_FLAGS_DEFQ_RSS);
                }
        } else {
                num_vf_qs = 1;
        }

        if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
                nic_vft->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
                vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
        }

        nic_vft->cap_flags = cpu_to_le32(vf_if_cap_flags);
        nic_vft->rq_count = cpu_to_le16(num_vf_qs);
        nic_vft->txq_count = cpu_to_le16(num_vf_qs);
        nic_vft->rssq_count = cpu_to_le16(num_vf_qs);
        nic_vft->cq_count = cpu_to_le16(pool_res.max_cq_count /
                                        (num_vfs + 1));

        /* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
         * among the PF and its VFs, if the fields are changeable
         */
        if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
                nic_vft->unicast_mac_count = cpu_to_le16(pool_res.max_uc_mac /
                                                         (num_vfs + 1));

        if (res_mod.max_vlans == FIELD_MODIFIABLE)
                nic_vft->vlan_count = cpu_to_le16(pool_res.max_vlans /
                                                  (num_vfs + 1));

        if (res_mod.max_iface_count == FIELD_MODIFIABLE)
                nic_vft->iface_count = cpu_to_le16(pool_res.max_iface_count /
                                                   (num_vfs + 1));

        if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
                nic_vft->mcc_count = cpu_to_le16(pool_res.max_mcc_count /
                                                 (num_vfs + 1));
}

int be_cmd_set_sriov_config(struct be_adapter *adapter,
                            struct be_resources pool_res, u16 num_vfs,
                            u16 num_vf_qs)
{
        struct {
                struct be_pcie_res_desc pcie;
                struct be_nic_res_desc nic_vft;
        } __packed desc;

        /* PF PCIE descriptor */
        be_reset_pcie_desc(&desc.pcie);
        desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
        desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
        desc.pcie.flags = BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
        desc.pcie.pf_num = adapter->pdev->devfn;
        desc.pcie.sriov_state = num_vfs ? 1 : 0;
        desc.pcie.num_vfs = cpu_to_le16(num_vfs);

        /* VF NIC Template descriptor */
        be_reset_nic_desc(&desc.nic_vft);
        desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
        desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
        desc.nic_vft.flags = BIT(VFT_SHIFT) | BIT(IMM_SHIFT) | BIT(NOSV_SHIFT);
        desc.nic_vft.pf_num = adapter->pdev->devfn;
        desc.nic_vft.vf_num = 0;

        be_fill_vf_res_template(adapter, pool_res, num_vfs, num_vf_qs,
                                &desc.nic_vft);

        return be_cmd_set_profile_config(adapter, &desc,
                                         2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
}

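/* Uses synchronous MCCQ */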
int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_manage_iface_filters *req;
        int status;

        if (iface == 0xFFFFFFFF)
                return -1;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_MANAGE_IFACE_FILTERS,
                               sizeof(*req), wrb, NULL);
        req->op = op;
        req->target_iface_id = cpu_to_le32(iface);

        status = be_mcc_notify_wait(adapter);
err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
{
        struct be_port_res_desc port_desc;

        memset(&port_desc, 0, sizeof(port_desc));
        port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1;
        port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
        port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
        port_desc.link_num = adapter->hba_port_num;
        if (port) {
                port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) |
                                     (1 << RCVID_SHIFT);
                port_desc.nv_port = swab16(port);
        } else {
                port_desc.nv_flags = NV_TYPE_DISABLED;
                port_desc.nv_port = 0;
        }

        return be_cmd_set_profile_config(adapter, &port_desc,
                                         RESOURCE_DESC_SIZE_V1, 1, 1, 0);
}

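/* Uses synchronous MCCQ */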
int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
                     int vf_num)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_iface_list *req;
        struct be_cmd_resp_get_iface_list *resp;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
                               wrb, NULL);
        req->hdr.domain = vf_num + 1;

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                resp = (struct be_cmd_resp_get_iface_list *)req;
                vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

static int lancer_wait_idle(struct be_adapter *adapter)
{
#define SLIPORT_IDLE_TIMEOUT 30
        u32 reg_val;
        int status = 0, i;

        for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
                reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
                if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
                        break;

                ssleep(1);
        }

        if (i == SLIPORT_IDLE_TIMEOUT)
                status = -1;

        return status;
}

int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
{
        int status = 0;

        status = lancer_wait_idle(adapter);
        if (status)
                return status;

        iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);

        return status;
}

/* Routine to check whether dump image is present or not */
bool dump_present(struct be_adapter *adapter)
{
        u32 sliport_status = 0;

        sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
        return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
}

int lancer_initiate_dump(struct be_adapter *adapter)
{
        struct device *dev = &adapter->pdev->dev;
        int status;

        if (dump_present(adapter)) {
                dev_info(dev, "Previous dump not cleared, not forcing dump\n");
                return -EEXIST;
        }

        /* give firmware reset and diagnostic dump */
        status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
                                     PHYSDEV_CONTROL_DD_MASK);
        if (status < 0) {
                dev_err(dev, "FW reset failed\n");
                return status;
        }

        status = lancer_wait_idle(adapter);
        if (status)
                return status;

        if (!dump_present(adapter)) {
                dev_err(dev, "FW dump not generated\n");
                return -EIO;
        }

        return 0;
}

int lancer_delete_dump(struct be_adapter *adapter)
{
        int status;

        status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE);
        return be_cmd_status(status);
}

/* Uses sync mcc */
int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_enable_disable_vf *req;
        int status;

        if (BEx_chip(adapter))
                return 0;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
                               wrb, NULL);

        req->hdr.domain = domain;
        req->enable = 1;
        status = be_mcc_notify_wait(adapter);
err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

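/* Uses mbox */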
int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_intr_set *req;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);

        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_SET_INTERRUPT_ENABLE,
                               sizeof(*req), wrb, NULL);

        req->intr_enabled = intr_enable;

        status = be_mbox_notify_wait(adapter);

        mutex_unlock(&adapter->mbox_lock);
        return status;
}

/* Uses MBOX */
int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id)
{
        struct be_cmd_req_get_active_profile *req;
        struct be_mcc_wrb *wrb;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req),
                               wrb, NULL);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_active_profile *resp =
                                                embedded_payload(wrb);

                *profile_id = le16_to_cpu(resp->active_profile_id);
        }

err:
        mutex_unlock(&adapter->mbox_lock);
        return status;
}

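/* Uses synchronous MCCQ */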
int __be_cmd_set_logical_link_config(struct be_adapter *adapter,
                                     int link_state, int version, u8 domain)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_set_ll_link *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                               OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG,
                               sizeof(*req), wrb, NULL);

        req->hdr.version = version;
        req->hdr.domain = domain;

        if (link_state == IFLA_VF_LINK_STATE_ENABLE ||
            link_state == IFLA_VF_LINK_STATE_AUTO)
                req->link_config |= PLINK_ENABLE;

        if (link_state == IFLA_VF_LINK_STATE_AUTO)
                req->link_config |= PLINK_TRACK;

        status = be_mcc_notify_wait(adapter);
err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}

int be_cmd_set_logical_link_config(struct be_adapter *adapter,
                                   int link_state, u8 domain)
{
        int status;

        if (BEx_chip(adapter))
                return -EOPNOTSUPP;

        status = __be_cmd_set_logical_link_config(adapter, link_state,
                                                  2, domain);

        /* Version 2 of the command will not be recognized by older FW.
         * On such a failure issue version 1 of the command.
         */
        if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST)
                status = __be_cmd_set_logical_link_config(adapter, link_state,
                                                          1, domain);
        return status;
}

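/* Uses synchronous MCCQ */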
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
                    int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
        struct be_adapter *adapter = netdev_priv(netdev_handle);
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload;
        struct be_cmd_req_hdr *req;
        struct be_cmd_resp_hdr *resp;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);
        resp = embedded_payload(wrb);

        be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
                               hdr->opcode, wrb_payload_size, wrb, NULL);
        memcpy(req, wrb_payload, wrb_payload_size);
        be_dws_cpu_to_le(req, wrb_payload_size);

        status = be_mcc_notify_wait(adapter);
        if (cmd_status)
                *cmd_status = (status & 0xffff);
        if (ext_status)
                *ext_status = 0;
        memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
        be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);