lpfc_init.c
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017 Broadcom. All Rights Reserved. The term      *
 * "Broadcom" refers to Broadcom Limited and/or its subsidiaries.  *
 * Copyright (C) 2004-2016 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for   *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/ctype.h>
#include <linux/aer.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
#include <linux/msi.h>
#include <linux/bitops.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_fs.h>

#include <linux/nvme-fc-driver.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_nvmet.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"
#include "lpfc_ids.h"
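
/*
 * Scratch capture buffers, believed (from their use elsewhere in the
 * driver) to hold copies of the data and DIF (protection) payloads when
 * BlockGuard errors are being analyzed; _dump_buf_lock serializes
 * access to them.
 */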
char *_dump_buf_data;
unsigned long _dump_buf_data_order;
char *_dump_buf_dif;
unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;

/* Used when mapping IRQ vectors in a driver centric manner */
uint16_t *lpfc_used_cpu;
uint32_t lpfc_present_cpu;
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
static int lpfc_sli4_queue_verify(struct lpfc_hba *);
static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);

static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
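/*
 * IDR believed to assign each HBA a unique instance (board) number
 * during probe.
 */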
static DEFINE_IDR(lpfc_hba_index);
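/*
 * Batch size used when posting NVMET receive buffers -- an assumption
 * drawn from how the constant is used later in this file.
 */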
#define LPFC_NVMET_BUF_POST 254
/**
 * lpfc_config_port_prep - Perform lpfc initialization prior to config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization prior to issuing the CONFIG_PORT
 * mailbox command. It retrieves the revision information from the HBA and
 * collects the Vital Product Data (VPD) about the HBA for preparing the
 * configuration of the HBA.
 *
 * Return codes:
 *   0 - success.
 *   -ERESTART - requests the SLI layer to reset the HBA and try again.
 *   Any other value - indicates an error.
 **/
int
lpfc_config_port_prep(struct lpfc_hba *phba)
{
	lpfc_vpd_t *vp = &phba->vpd;
	int i = 0, rc;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	char *lpfc_vpd_data = NULL;
	uint16_t offset = 0;
	static char licensed[56] =
		    "key unlock for use with gnu public licensed code only\0";
	static int init_key = 1;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	mb = &pmb->u.mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;

	if (lpfc_is_LC_HBA(phba->pcidev->device)) {
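		/*
		 * Convert the license key text to big-endian 32-bit words
		 * once, on first use, since the mailbox payload is carried
		 * in big-endian byte order.
		 */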
		if (init_key) {
			uint32_t *ptext = (uint32_t *) licensed;

			for (i = 0; i < 56; i += sizeof(uint32_t), ptext++)
				*ptext = cpu_to_be32(*ptext);
			init_key = 0;
		}

		lpfc_read_nv(phba, pmb);
		memset((char *)mb->un.varRDnvp.rsvd3, 0,
		       sizeof(mb->un.varRDnvp.rsvd3));
		memcpy((char *)mb->un.varRDnvp.rsvd3, licensed,
		       sizeof(licensed));

		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0324 Config Port initialization "
					"error, mbxCmd x%x READ_NVPARM, "
					"mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -ERESTART;
		}
		memcpy(phba->wwnn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(phba->wwnn));
		memcpy(phba->wwpn, (char *)mb->un.varRDnvp.portname,
		       sizeof(phba->wwpn));
	}
	phba->sli3_options = 0x0;

	/* Setup and issue mailbox READ REV command */
	lpfc_read_rev(phba, pmb);
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0439 Adapter failed to init, mbxCmd x%x "
				"READ_REV, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	/*
	 * The value of rr must be 1 since the driver set the cv field to 1.
	 * This setting requires the FW to set all revision fields.
	 */
	if (mb->un.varRdRev.rr == 0) {
		vp->rev.rBit = 0;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0440 Adapter failed to init, READ_REV has "
				"missing revision information.\n");
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ERESTART;
	}

	if (phba->sli_rev == 3 && !mb->un.varRdRev.v3rsp) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EINVAL;
	}

	/* Save information as VPD data */
	vp->rev.rBit = 1;
	memcpy(&vp->sli3Feat, &mb->un.varRdRev.sli3Feat, sizeof(uint32_t));
	vp->rev.sli1FwRev = mb->un.varRdRev.sli1FwRev;
	memcpy(vp->rev.sli1FwName, (char*) mb->un.varRdRev.sli1FwName, 16);
	vp->rev.sli2FwRev = mb->un.varRdRev.sli2FwRev;
	memcpy(vp->rev.sli2FwName, (char *) mb->un.varRdRev.sli2FwName, 16);
	vp->rev.biuRev = mb->un.varRdRev.biuRev;
	vp->rev.smRev = mb->un.varRdRev.smRev;
	vp->rev.smFwRev = mb->un.varRdRev.un.smFwRev;
	vp->rev.endecRev = mb->un.varRdRev.endecRev;
	vp->rev.fcphHigh = mb->un.varRdRev.fcphHigh;
	vp->rev.fcphLow = mb->un.varRdRev.fcphLow;
	vp->rev.feaLevelHigh = mb->un.varRdRev.feaLevelHigh;
	vp->rev.feaLevelLow = mb->un.varRdRev.feaLevelLow;
	vp->rev.postKernRev = mb->un.varRdRev.postKernRev;
	vp->rev.opFwRev = mb->un.varRdRev.opFwRev;
	/* If the sli feature level is less than 9, we must
	 * tear down all RPIs and VPIs on link down if NPIV
	 * is enabled.
	 */
	if (vp->rev.feaLevelHigh < 9)
		phba->sli3_options |= LPFC_SLI3_VPORT_TEARDOWN;

	if (lpfc_is_LC_HBA(phba->pcidev->device))
		memcpy(phba->RandomData, (char *)&mb->un.varWords[24],
			sizeof (phba->RandomData));
	/* Get adapter VPD information */
	lpfc_vpd_data = kmalloc(DMP_VPD_SIZE, GFP_KERNEL);
	if (!lpfc_vpd_data)
		goto out_free_mbox;

	do {
		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);

		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0441 VPD not present on adapter, "
					"mbxCmd x%x DUMP VPD, mbxStatus x%x\n",
					mb->mbxCommand, mb->mbxStatus);
			mb->un.varDmp.word_cnt = 0;
		}
		/* dump mem may return a zero when finished or we got a
		 * mailbox error, either way we are done.
		 */
		if (mb->un.varDmp.word_cnt == 0)
			break;
		if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
			mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
				      lpfc_vpd_data + offset,
				      mb->un.varDmp.word_cnt);
		offset += mb->un.varDmp.word_cnt;
	} while (mb->un.varDmp.word_cnt && offset < DMP_VPD_SIZE);
	lpfc_parse_vpd(phba, lpfc_vpd_data, offset);

	kfree(lpfc_vpd_data);
out_free_mbox:
	mempool_free(pmb, phba->mbox_mem_pool);
	return 0;
}
/**
 * lpfc_config_async_cmpl - Completion handler for config async event mbox cmd
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the driver's mailbox command that
 * configures asynchronous event support on the device. If the mailbox
 * command returns successfully, it sets the internal temperature sensor
 * support flag to 1; otherwise, it sets the flag to 0.
 **/
static void
lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
		phba->temp_sensor_support = 1;
	else
		phba->temp_sensor_support = 0;
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_dump_wakeup_param_cmpl - dump memory mailbox command completion handler
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the completion handler for the dump mailbox command used to get
 * wake up parameters. When this command completes, the response contains
 * the Option ROM version of the HBA. This function translates the version
 * number into a human readable string and stores it in OptionROMVersion.
 **/
static void
lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct prog_id *prg;
	uint32_t prog_id_word;
	char dist = ' ';
	/* character array used for decoding dist type. */
	char dist_char[] = "nabx";
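	/* prg->dist indexes dist_char: 0='n', 1='a', 2='b', 3='x'.
	 * As the snprintf calls below show, a dist of 3 with num 0 is
	 * treated as "no distribution suffix" and the version string is
	 * printed without the dist/num fields.
	 */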
	if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
		mempool_free(pmboxq, phba->mbox_mem_pool);
		return;
	}

	prg = (struct prog_id *) &prog_id_word;

	/* word 7 contains the option rom version */
	prog_id_word = pmboxq->u.mb.un.varWords[7];

	/* Decode the Option rom version word to a readable string */
	if (prg->dist < 4)
		dist = dist_char[prg->dist];

	if ((prg->dist == 3) && (prg->num == 0))
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
			 prg->ver, prg->rev, prg->lev);
	else
		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
			 prg->ver, prg->rev, prg->lev,
			 dist, prg->num);
	mempool_free(pmboxq, phba->mbox_mem_pool);
	return;
}
/**
 * lpfc_update_vport_wwn - Updates the fc_nodename, fc_portname,
 *	cfg_soft_wwnn, cfg_soft_wwpn
 * @vport: pointer to lpfc vport data structure.
 *
 * Return codes
 *	None.
 **/
void
lpfc_update_vport_wwn(struct lpfc_vport *vport)
{
	uint8_t vvvl = vport->fc_sparam.cmn.valid_vendor_ver_level;
	u32 *fawwpn_key = (u32 *)&vport->fc_sparam.un.vendorVersion[0];

	/* If the soft name exists then update it using the service params */
	if (vport->phba->cfg_soft_wwnn)
		u64_to_wwn(vport->phba->cfg_soft_wwnn,
			   vport->fc_sparam.nodeName.u.wwn);
	if (vport->phba->cfg_soft_wwpn)
		u64_to_wwn(vport->phba->cfg_soft_wwpn,
			   vport->fc_sparam.portName.u.wwn);

	/*
	 * If the name is empty or there exists a soft name
	 * then copy the service params name, otherwise use the fc name
	 */
	if (vport->fc_nodename.u.wwn[0] == 0 || vport->phba->cfg_soft_wwnn)
		memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
			sizeof(struct lpfc_name));
	else
		memcpy(&vport->fc_sparam.nodeName, &vport->fc_nodename,
			sizeof(struct lpfc_name));

	/*
	 * If the port name has changed, then set the Param changes flag
	 * to unreg the login
	 */
	if (vport->fc_portname.u.wwn[0] != 0 &&
		memcmp(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name)))
		vport->vport_flag |= FAWWPN_PARAM_CHG;

	if (vport->fc_portname.u.wwn[0] == 0 ||
	    vport->phba->cfg_soft_wwpn ||
	    (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR) ||
	    vport->vport_flag & FAWWPN_SET) {
		memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
			sizeof(struct lpfc_name));
		vport->vport_flag &= ~FAWWPN_SET;
		if (vvvl == 1 && cpu_to_be32(*fawwpn_key) == FAPWWN_KEY_VENDOR)
			vport->vport_flag |= FAWWPN_SET;
	}
	else
		memcpy(&vport->fc_sparam.portName, &vport->fc_portname,
			sizeof(struct lpfc_name));
}
/**
 * lpfc_config_port_post - Perform lpfc initialization after config port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine will do LPFC initialization after the CONFIG_PORT mailbox
 * command call. It performs all internal resource and state setups on the
 * port: post IOCB buffers, enable appropriate host interrupt attentions,
 * ELS ring timers, etc.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_config_port_post(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *mp;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t status, timeout;
	int i, j;
	int rc;

	spin_lock_irq(&phba->hbalock);
	/*
	 * If the Config port completed correctly the HBA is no longer
	 * overheated.
	 */
	if (phba->over_temp_state == HBA_OVER_TEMP)
		phba->over_temp_state = HBA_NORMAL_TEMP;
	spin_unlock_irq(&phba->hbalock);

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;

	/* Get login parameters for NID. */
	rc = lpfc_read_sparam(phba, pmb, 0);
	if (rc) {
		mempool_free(pmb, phba->mbox_mem_pool);
		return -ENOMEM;
	}

	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0448 Adapter failed init, mbxCmd x%x "
				"READ_SPARM mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mp = (struct lpfc_dmabuf *) pmb->context1;
		mempool_free(pmb, phba->mbox_mem_pool);
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
		return -EIO;
	}

	mp = (struct lpfc_dmabuf *) pmb->context1;

	memcpy(&vport->fc_sparam, mp->virt, sizeof (struct serv_parm));
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	pmb->context1 = NULL;
	lpfc_update_vport_wwn(vport);
	/* Update the fc_host data structures with new wwn. */
	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_max_npiv_vports(shost) = phba->max_vpi;

	/* If no serial number in VPD data, use low 6 bytes of WWNN */
	/* This should be consolidated into parse_vpd ? - mr */
	if (phba->SerialNumber[0] == 0) {
		uint8_t *outptr;

		outptr = &vport->fc_nodename.u.s.IEEE[0];
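		/* Each WWNN byte yields two serial-number characters:
		 * nibbles 0-9 map to '0'-'9' (base 0x30) and nibbles
		 * 10-15 map to 'a'-'f' (base 0x61).
		 */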
		for (i = 0; i < 12; i++) {
			status = *outptr++;
			j = ((status & 0xf0) >> 4);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
			i++;
			j = (status & 0xf);
			if (j <= 9)
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x30 + (uint8_t) j);
			else
				phba->SerialNumber[i] =
				    (char)((uint8_t) 0x61 + (uint8_t) (j - 10));
		}
	}
	lpfc_read_config(phba, pmb);
	pmb->vport = vport;
	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0453 Adapter failed to init, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		phba->link_state = LPFC_HBA_ERROR;
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}

	/* Check if the port is disabled */
	lpfc_sli_read_link_ste(phba);

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	i = (mb->un.varRdConfig.max_xri + 1);
	if (phba->cfg_hba_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3359 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, i);
		phba->cfg_hba_queue_depth = i;
	}

	/* Reset the DFT_LUN_Q_DEPTH to (max xri >> 3) */
	i = (mb->un.varRdConfig.max_xri >> 3);
	if (phba->pport->cfg_lun_queue_depth > i) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3360 LUN queue depth changed from %d to %d\n",
				phba->pport->cfg_lun_queue_depth, i);
		phba->pport->cfg_lun_queue_depth = i;
	}

	phba->lmt = mb->un.varRdConfig.lmt;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	phba->link_state = LPFC_LINK_DOWN;

	/* Only process IOCBs on ELS ring till hba_state is READY */
	if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
	if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
		psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

	/* Post receive buffers for desired rings */
	if (phba->sli_rev != 3)
		lpfc_post_rcv_buf(phba);
	/*
	 * Configure HBA MSI-X attention conditions to messages if MSI-X mode
	 */
	if (phba->intr_type == MSIX) {
		rc = lpfc_config_msi(phba, pmb);
		if (rc) {
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
					"0352 Config MSI mailbox command "
					"failed, mbxCmd x%x, mbxStatus x%x\n",
					pmb->u.mb.mbxCommand,
					pmb->u.mb.mbxStatus);
			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	}
	spin_lock_irq(&phba->hbalock);
	/* Initialize ERATT handling flag */
	phba->hba_flag &= ~HBA_ERATT_HANDLED;

	/* Enable appropriate host interrupts */
	if (lpfc_readl(phba->HCregaddr, &status)) {
		spin_unlock_irq(&phba->hbalock);
		return -EIO;
	}
	status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
	if (psli->num_rings > 0)
		status |= HC_R0INT_ENA;
	if (psli->num_rings > 1)
		status |= HC_R1INT_ENA;
	if (psli->num_rings > 2)
		status |= HC_R2INT_ENA;
	if (psli->num_rings > 3)
		status |= HC_R3INT_ENA;

	if ((phba->cfg_poll & ENABLE_FCP_RING_POLLING) &&
	    (phba->cfg_poll & DISABLE_FCP_RING_INT))
		status &= ~(HC_R0INT_ENA);

	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(&phba->hbalock);

	/* Set up ring-0 (ELS) timer */
	timeout = phba->fc_ratov * 2;
	mod_timer(&vport->els_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * timeout));
	/* Set up heart beat (HB) timer */
	mod_timer(&phba->hb_tmofunc,
		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	phba->hb_outstanding = 0;
	phba->last_completion_time = jiffies;
	/* Set up error attention (ERATT) polling timer */
	mod_timer(&phba->eratt_poll,
		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
	if (phba->hba_flag & LINK_DISABLED) {
		lpfc_printf_log(phba,
				KERN_ERR, LOG_INIT,
				"2598 Adapter Link is disabled.\n");
		lpfc_down_link(phba, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
			lpfc_printf_log(phba,
					KERN_ERR, LOG_INIT,
					"2599 Adapter failed to issue DOWN_LINK"
					" mbox command rc 0x%x\n", rc);

			mempool_free(pmb, phba->mbox_mem_pool);
			return -EIO;
		}
	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
		mempool_free(pmb, phba->mbox_mem_pool);
		rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
		if (rc)
			return rc;
	}
	/* MBOX buffer will be freed in mbox compl */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
	pmb->mbox_cmpl = lpfc_config_async_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"0456 Adapter failed to issue "
				"ASYNCEVT_ENABLE mbox status x%x\n",
				rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	/* Get Option rom version */
	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_dump_wakeup_param(phba, pmb);
	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
	pmb->vport = phba->pport;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
				"to get Option ROM version status x%x\n", rc);
		mempool_free(pmb, phba->mbox_mem_pool);
	}

	return 0;
}
/**
 * lpfc_hba_init_link - Initialize the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *	0 - success
 *	Any other value - error
 **/
static int
lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
{
	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
}
/**
 * lpfc_hba_init_link_fc_topology - Initialize FC link with desired topology
 * @phba: pointer to lpfc hba data structure.
 * @fc_topology: desired fc topology.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the INIT_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use as a delayed link up mechanism with the
 * module parameter lpfc_suppress_link_up.
 *
 * Return code
 *	0 - success
 *	Any other value - error
 **/
int
lpfc_hba_init_link_fc_topology(struct lpfc_hba *phba, uint32_t fc_topology,
			       uint32_t flag)
{
	struct lpfc_vport *vport = phba->pport;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}
	mb = &pmb->u.mb;
	pmb->vport = vport;
	if ((phba->cfg_link_speed > LPFC_USER_LINK_SPEED_MAX) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_1G) &&
	     !(phba->lmt & LMT_1Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_2G) &&
	     !(phba->lmt & LMT_2Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_4G) &&
	     !(phba->lmt & LMT_4Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_8G) &&
	     !(phba->lmt & LMT_8Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_10G) &&
	     !(phba->lmt & LMT_10Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_16G) &&
	     !(phba->lmt & LMT_16Gb)) ||
	    ((phba->cfg_link_speed == LPFC_USER_LINK_SPEED_32G) &&
	     !(phba->lmt & LMT_32Gb))) {
		/* Reset link speed to auto */
		lpfc_printf_log(phba, KERN_ERR, LOG_LINK_EVENT,
				"1302 Invalid speed for this board:%d "
				"Reset link speed to auto.\n",
				phba->cfg_link_speed);
		phba->cfg_link_speed = LPFC_USER_LINK_SPEED_AUTO;
	}
	lpfc_init_link(phba, pmb, fc_topology, phba->cfg_link_speed);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	if (phba->sli_rev < LPFC_SLI_REV4)
		lpfc_set_loopback_flag(phba);
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0498 Adapter failed to init, mbxCmd x%x "
				"INIT_LINK, mbxStatus x%x\n",
				mb->mbxCommand, mb->mbxStatus);
		if (phba->sli_rev <= LPFC_SLI_REV3) {
			/* Clear all interrupt enable conditions */
			writel(0, phba->HCregaddr);
			readl(phba->HCregaddr); /* flush */
			/* Clear all pending interrupts */
			writel(0xffffffff, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
		}
		phba->link_state = LPFC_HBA_ERROR;
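		/* If the command was queued (MBX_BUSY) in no-wait mode,
		 * its completion handler owns the mailbox buffer;
		 * otherwise it must be freed here.
		 */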
		if (rc != MBX_BUSY || flag == MBX_POLL)
			mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
/**
 * lpfc_hba_down_link - this routine downs the FC link
 * @phba: pointer to lpfc hba data structure.
 * @flag: mailbox command issue mode - either MBX_POLL or MBX_NOWAIT
 *
 * This routine will issue the DOWN_LINK mailbox command call.
 * It is available to other drivers through the lpfc_hba data
 * structure for use to stop the link.
 *
 * Return code
 *	0 - success
 *	Any other value - error
 **/
static int
lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
{
	LPFC_MBOXQ_t *pmb;
	int rc;

	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->link_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	lpfc_printf_log(phba,
			KERN_ERR, LOG_INIT,
			"0491 Adapter Link is disabled.\n");
	lpfc_down_link(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, pmb, flag);
	if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
		lpfc_printf_log(phba,
				KERN_ERR, LOG_INIT,
				"2522 Adapter failed to issue DOWN_LINK"
				" mbox command rc 0x%x\n", rc);

		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	if (flag == MBX_POLL)
		mempool_free(pmb, phba->mbox_mem_pool);

	return 0;
}
/**
 * lpfc_hba_down_prep - Perform lpfc uninitialization prior to HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do LPFC uninitialization before the HBA is reset when
 * bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_prep(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		/* Disable interrupts */
		writel(0, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	if (phba->pport->load_flag & FC_UNLOADING)
		lpfc_cleanup_discovery_resources(phba->pport);
	else {
		vports = lpfc_create_vport_work_array(phba);
		if (vports != NULL)
			for (i = 0; i <= phba->max_vports &&
				vports[i] != NULL; i++)
				lpfc_cleanup_discovery_resources(vports[i]);
		lpfc_destroy_vport_work_array(phba, vports);
	}
	return 0;
}
/**
 * lpfc_sli4_free_sp_events - Cleanup sp_queue_events to free
 *	rspiocb which got deferred
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup completed slow path events after HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *	void.
 **/
static void
lpfc_sli4_free_sp_events(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *rspiocbq;
	struct hbq_dmabuf *dmabuf;
	struct lpfc_cq_event *cq_event;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~HBA_SP_QUEUE_EVT;
	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
		/* Get the response iocb from the head of work queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_queue_event,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);

		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
		case CQE_CODE_COMPL_WQE:
			rspiocbq = container_of(cq_event, struct lpfc_iocbq,
						cq_event);
			lpfc_sli_release_iocbq(phba, rspiocbq);
			break;
		case CQE_CODE_RECEIVE:
		case CQE_CODE_RECEIVE_V1:
			dmabuf = container_of(cq_event, struct hbq_dmabuf,
					      cq_event);
			lpfc_in_buf_free(phba, &dmabuf->dbuf);
		}
	}
}
/**
 * lpfc_hba_free_post_buf - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup posted ELS buffers after the HBA is reset
 * when bringing down the SLI Layer.
 *
 * Return codes
 *	void.
 **/
static void
lpfc_hba_free_post_buf(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;
	struct lpfc_dmabuf *mp, *next_mp;
	LIST_HEAD(buflist);
	int count;

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		lpfc_sli_hbqbuf_free_all(phba);
	else {
		/* Cleanup preposted buffers on the ELS ring */
		pring = &psli->sli3_ring[LPFC_ELS_RING];
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&pring->postbufq, &buflist);
		spin_unlock_irq(&phba->hbalock);

		count = 0;
		list_for_each_entry_safe(mp, next_mp, &buflist, list) {
			list_del(&mp->list);
			count++;
			lpfc_mbuf_free(phba, mp->virt, mp->phys);
			kfree(mp);
		}

		spin_lock_irq(&phba->hbalock);
		pring->postbufq_cnt -= count;
		spin_unlock_irq(&phba->hbalock);
	}
}
/**
 * lpfc_hba_clean_txcmplq - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will cleanup the txcmplq after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *	void
 **/
static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_queue *qp = NULL;
	struct lpfc_sli_ring *pring;
	LIST_HEAD(completions);
	int i;

	if (phba->sli_rev != LPFC_SLI_REV4) {
		for (i = 0; i < psli->num_rings; i++) {
			pring = &psli->sli3_ring[i];
			spin_lock_irq(&phba->hbalock);
			/* At this point in time the HBA is either reset or
			 * DOA. Nothing should be on txcmplq as it will
			 * NEVER complete.
			 */
			list_splice_init(&pring->txcmplq, &completions);
			pring->txcmplq_cnt = 0;
			spin_unlock_irq(&phba->hbalock);

			lpfc_sli_abort_iocb_ring(phba, pring);
		}
		/* Cancel all the IOCBs from the completions list */
		lpfc_sli_cancel_iocbs(phba, &completions,
				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
		return;
	}
	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
		pring = qp->pring;
		if (!pring)
			continue;
		spin_lock_irq(&pring->ring_lock);
		list_splice_init(&pring->txcmplq, &completions);
		pring->txcmplq_cnt = 0;
		spin_unlock_irq(&pring->ring_lock);
		lpfc_sli_abort_iocb_ring(phba, pring);
	}
	/* Cancel all the IOCBs from the completions list */
	lpfc_sli_cancel_iocbs(phba, &completions,
			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}
/**
 * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s3(struct lpfc_hba *phba)
{
	lpfc_hba_free_post_buf(phba);
	lpfc_hba_clean_txcmplq(phba);
	return 0;
}
/**
 * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine will do uninitialization after the HBA is reset when bringing
 * down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
static int
lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	struct lpfc_nvmet_rcv_ctx *ctxp, *ctxp_next;
	LIST_HEAD(aborts);
	LIST_HEAD(nvme_aborts);
	LIST_HEAD(nvmet_aborts);
	unsigned long iflag = 0;
	struct lpfc_sglq *sglq_entry = NULL;

	lpfc_sli_hbqbuf_free_all(phba);
	lpfc_hba_clean_txcmplq(phba);

	/* At this point in time the HBA is either reset or DOA. Either
	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
	 * on the lpfc_els_sgl_list so that it can either be freed if the
	 * driver is unloading or reposted if the driver is restarting
	 * the port.
	 */
	spin_lock_irq(&phba->hbalock);	/* required for lpfc_els_sgl_list and */
					/* scsl_buf_list */
	/* sgl_list_lock required because worker thread uses this
	 * list.
	 */
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_for_each_entry(sglq_entry,
		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
		sglq_entry->state = SGL_FREED;
	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
			&phba->sli4_hba.lpfc_els_sgl_list);

	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	/* abts_scsi_buf_list_lock required because worker thread uses this
	 * list.
	 */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
		spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
				 &aborts);
		spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
	}

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list,
				 &nvme_aborts);
		list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list,
				 &nvmet_aborts);
		spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
	}

	spin_unlock_irq(&phba->hbalock);

	list_for_each_entry_safe(psb, psb_next, &aborts, list) {
		psb->pCmd = NULL;
		psb->status = IOSTAT_SUCCESS;
	}
	spin_lock_irqsave(&phba->scsi_buf_list_put_lock, iflag);
	list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
	spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
			psb->pCmd = NULL;
			psb->status = IOSTAT_SUCCESS;
		}
		spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
		list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
		spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);

		list_for_each_entry_safe(ctxp, ctxp_next, &nvmet_aborts, list) {
			ctxp->flag &= ~(LPFC_NVMET_XBUSY | LPFC_NVMET_ABORT_OP);
			lpfc_nvmet_ctxbuf_post(phba, ctxp->ctxbuf);
		}
	}

	lpfc_sli4_free_sp_events(phba);
	return 0;
}
/**
 * lpfc_hba_down_post - Wrapper func for hba down post routine
 * @phba: pointer to lpfc HBA data structure.
 *
 * This routine wraps the actual SLI3 or SLI4 routine for performing
 * uninitialization after the HBA is reset when bringing down the SLI Layer.
 *
 * Return codes
 *   0 - success.
 *   Any other value - error.
 **/
int
lpfc_hba_down_post(struct lpfc_hba *phba)
{
	return (*phba->lpfc_hba_down_post)(phba);
}
/**
 * lpfc_hb_timeout - The HBA-timer timeout handler
 * @t: pointer to the timer_list embedded in the lpfc hba data structure.
 *
 * This is the HBA-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a HBA timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_hb_timeout_handler. Any periodical operations will
 * be performed in the timeout handler and the HBA timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_hb_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	uint32_t tmo_posted;
	unsigned long iflag;

	phba = from_timer(phba, t, hb_tmofunc);

	/* Check for heart beat timeout conditions */
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	tmo_posted = phba->pport->work_port_events & WORKER_HB_TMO;
	if (!tmo_posted)
		phba->pport->work_port_events |= WORKER_HB_TMO;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	/* Tell the worker thread there is work to do */
	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}
/**
 * lpfc_rrq_timeout - The RRQ-timer timeout handler
 * @t: pointer to the timer_list embedded in the lpfc hba data structure.
 *
 * This is the RRQ-timer timeout handler registered to the lpfc driver. When
 * this timer fires, a RRQ timeout event shall be posted to the lpfc driver
 * work-port-events bitmap and the worker thread is notified. This timeout
 * event will be used by the worker thread to invoke the actual timeout
 * handler routine, lpfc_rrq_handler. Any periodical operations will
 * be performed in the timeout handler and the RRQ timeout event bit shall
 * be cleared by the worker thread after it has taken the event bitmap out.
 **/
static void
lpfc_rrq_timeout(struct timer_list *t)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = from_timer(phba, t, rrq_tmr);
	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
	if (!(phba->pport->load_flag & FC_UNLOADING))
		phba->hba_flag |= HBA_RRQ_ACTIVE;
	else
		phba->hba_flag &= ~HBA_RRQ_ACTIVE;
	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);

	if (!(phba->pport->load_flag & FC_UNLOADING))
		lpfc_worker_wake_up(phba);
}
/**
 * lpfc_hb_mbox_cmpl - The lpfc heart-beat mailbox command callback function
 * @phba: pointer to lpfc hba data structure.
 * @pmboxq: pointer to the driver internal queue element for mailbox command.
 *
 * This is the callback function to the lpfc heart-beat mailbox command.
 * If configured, the lpfc driver issues the heart-beat mailbox command to
 * the HBA every LPFC_HB_MBOX_INTERVAL (current 5) seconds. At the time the
 * heart-beat mailbox command is issued, the driver shall set up heart-beat
 * timeout timer to LPFC_HB_MBOX_TIMEOUT (current 30) seconds and marks
 * heart-beat outstanding state. Once the mailbox command comes back and
 * no error conditions detected, the heart-beat mailbox command timer is
 * reset to LPFC_HB_MBOX_INTERVAL seconds and the heart-beat outstanding
 * state is cleared for the next heart-beat. If the timer expired with the
 * heart-beat outstanding state set, the driver will put the HBA offline.
 **/
static void
lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	unsigned long drvr_flag;

	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	phba->hb_outstanding = 0;
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);

	/* Check and reset heart-beat timer if necessary */
	mempool_free(pmboxq, phba->mbox_mem_pool);
	if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
		!(phba->link_state == LPFC_HBA_ERROR) &&
		!(phba->pport->load_flag & FC_UNLOADING))
		mod_timer(&phba->hb_tmofunc,
			  jiffies +
			  msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	return;
}
/**
 * lpfc_hb_timeout_handler - The HBA-timer timeout handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This is the actual HBA-timer timeout handler to be invoked by the worker
 * thread whenever the HBA timer fired and HBA-timeout event posted. This
 * handler performs any periodic operations needed for the device. If such
 * periodic event has already been attended to either in the interrupt handler
 * or by processing slow-ring or fast-ring events within the HBA-timer
 * timeout window (LPFC_HB_MBOX_INTERVAL), this handler just simply resets
 * the timer for the next timeout period. If lpfc heart-beat mailbox command
 * is configured and there is no heart-beat mailbox command outstanding, a
 * heart-beat mailbox is issued and timer set properly. Otherwise, if there
 * has been a heart-beat mailbox command outstanding, the HBA shall be put
 * to offline.
 **/
void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	struct lpfc_dmabuf *buf_ptr;
	int retval, i;
	struct lpfc_sli *psli = &phba->sli;
	LIST_HEAD(completions);
	struct lpfc_queue *qp;
	unsigned long time_elapsed;
	uint32_t tick_cqe, max_cqe, val;
	uint64_t tot, data1, data2, data3;
	struct lpfc_nvmet_tgtport *tgtp;
	struct lpfc_register reg_data;
	void __iomem *eqdreg = phba->sli4_hba.u.if_type2.EQDregaddr;

	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			lpfc_rcv_seq_check_edtov(vports[i]);
			lpfc_fdmi_num_disc_check(vports[i]);
		}
	lpfc_destroy_vport_work_array(phba, vports);
	if ((phba->link_state == LPFC_HBA_ERROR) ||
		(phba->pport->load_flag & FC_UNLOADING) ||
		(phba->pport->fc_flag & FC_OFFLINE_MODE))
		return;

	if (phba->cfg_auto_imax) {
		if (!phba->last_eqdelay_time) {
			phba->last_eqdelay_time = jiffies;
			goto skip_eqdelay;
		}
		time_elapsed = jiffies - phba->last_eqdelay_time;
		phba->last_eqdelay_time = jiffies;

		tot = 0xffff;
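		/* Assume a large outstanding IO count by default; the NVME
		 * counters below may lower it. (A reading of the code: FCP-only
		 * counts are not sampled here, so the default keeps the EQ
		 * delay path active for FCP-only configurations.)
		 */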
		/* Check outstanding IO count */
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			if (phba->nvmet_support) {
				tgtp = phba->targetport->private;
				/* Calculate outstanding IOs */
				tot = atomic_read(&tgtp->rcv_fcp_cmd_drop);
				tot += atomic_read(&tgtp->xmt_fcp_release);
				tot = atomic_read(&tgtp->rcv_fcp_cmd_in) - tot;
			} else {
				tot = atomic_read(&phba->fc4NvmeIoCmpls);
				data1 = atomic_read(
					&phba->fc4NvmeInputRequests);
				data2 = atomic_read(
					&phba->fc4NvmeOutputRequests);
				data3 = atomic_read(
					&phba->fc4NvmeControlRequests);
				tot = (data1 + data2 + data3) - tot;
			}
		}

		/* Interrupts per sec per EQ */
		val = phba->cfg_fcp_imax / phba->io_channel_irqs;
		tick_cqe = val / CONFIG_HZ; /* Per tick per EQ */

		/* Assume 1 CQE/ISR, calc max CQEs allowed for time duration */
		max_cqe = time_elapsed * tick_cqe;
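		/* Illustrative numbers (not taken from the driver): with
		 * cfg_fcp_imax = 150000, 8 io_channel_irqs and HZ = 1000,
		 * val = 18750 interrupts/sec per EQ and tick_cqe = 18 CQEs
		 * per tick, so max_cqe scales with the ticks elapsed.
		 */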
		for (i = 0; i < phba->io_channel_irqs; i++) {
			/* Fast-path EQ */
			qp = phba->sli4_hba.hba_eq[i];
			if (!qp)
				continue;

			/* Use no EQ delay if we don't have many outstanding
			 * IOs, or if we are only processing 1 CQE/ISR or less.
			 * Otherwise, assume we can process up to lpfc_fcp_imax
			 * interrupts per HBA.
			 */
			if (tot < LPFC_NODELAY_MAX_IO ||
			    qp->EQ_cqe_cnt <= max_cqe)
				val = 0;
			else
				val = phba->cfg_fcp_imax;

			if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
				/* Use EQ Delay Register method */

				/* Convert for EQ Delay register */
				if (val) {
					/* First, interrupts per sec per EQ */
					val = phba->cfg_fcp_imax /
						phba->io_channel_irqs;

					/* us delay between each interrupt */
					val = LPFC_SEC_TO_USEC / val;
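					/* e.g. 1000000 / 18750 is roughly a
					 * 53 us gap between interrupts
					 * (illustrative; assumes
					 * LPFC_SEC_TO_USEC is 1000000).
					 */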
				}
				if (val != qp->q_mode) {
					reg_data.word0 = 0;
					bf_set(lpfc_sliport_eqdelay_id,
					       &reg_data, qp->queue_id);
					bf_set(lpfc_sliport_eqdelay_delay,
					       &reg_data, val);
					writel(reg_data.word0, eqdreg);
				}
			} else {
				/* Use mbox command method */
				if (val != qp->q_mode)
					lpfc_modify_hba_eq_delay(phba, i,
								 1, val);
			}

			/*
			 * val is cfg_fcp_imax or 0 for mbox delay or us delay
			 * between interrupts for EQDR.
			 */
			qp->q_mode = val;
			qp->EQ_cqe_cnt = 0;
		}
	}
skip_eqdelay:
	spin_lock_irq(&phba->pport->work_port_lock);

	if (time_after(phba->last_completion_time +
			msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
			jiffies)) {
		spin_unlock_irq(&phba->pport->work_port_lock);
		if (!phba->hb_outstanding)
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
		else
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		return;
	}
	spin_unlock_irq(&phba->pport->work_port_lock);

	if (phba->elsbuf_cnt &&
		(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
		spin_lock_irq(&phba->hbalock);
		list_splice_init(&phba->elsbuf, &completions);
		phba->elsbuf_cnt = 0;
		phba->elsbuf_prev_cnt = 0;
		spin_unlock_irq(&phba->hbalock);

		while (!list_empty(&completions)) {
			list_remove_head(&completions, buf_ptr,
				struct lpfc_dmabuf, list);
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
	}
	phba->elsbuf_prev_cnt = phba->elsbuf_cnt;
	/* If there is no heart beat outstanding, issue a heartbeat command */
	if (phba->cfg_enable_hba_heartbeat) {
		if (!phba->hb_outstanding) {
			if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
				(list_empty(&psli->mboxq))) {
				pmboxq = mempool_alloc(phba->mbox_mem_pool,
							GFP_KERNEL);
				if (!pmboxq) {
					mod_timer(&phba->hb_tmofunc,
						 jiffies +
						 msecs_to_jiffies(1000 *
						 LPFC_HB_MBOX_INTERVAL));
					return;
				}

				lpfc_heart_beat(phba, pmboxq);
				pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
				pmboxq->vport = phba->pport;
				retval = lpfc_sli_issue_mbox(phba, pmboxq,
						MBX_NOWAIT);

				if (retval != MBX_BUSY &&
					retval != MBX_SUCCESS) {
					mempool_free(pmboxq,
							phba->mbox_mem_pool);
					mod_timer(&phba->hb_tmofunc,
						jiffies +
						msecs_to_jiffies(1000 *
						LPFC_HB_MBOX_INTERVAL));
					return;
				}
				phba->skipped_hb = 0;
				phba->hb_outstanding = 1;
			} else if (time_before_eq(phba->last_completion_time,
					phba->skipped_hb)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"2857 Last completion time not "
					"updated in %d ms\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			} else
				phba->skipped_hb = jiffies;

			mod_timer(&phba->hb_tmofunc,
				 jiffies +
				 msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
			return;
		} else {
			/*
			 * If heart beat timeout called with hb_outstanding set
			 * we need to give the hb mailbox cmd a chance to
			 * complete or TMO.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0459 Adapter heartbeat still "
					"outstanding: last compl time was "
					"%d ms.\n",
					jiffies_to_msecs(jiffies
						 - phba->last_completion_time));
			mod_timer(&phba->hb_tmofunc,
				jiffies +
				msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
		}
	} else {
		mod_timer(&phba->hb_tmofunc,
			jiffies +
			msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
	}
}
/**
 * lpfc_offline_eratt - Bring lpfc offline on hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring the HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
static void
lpfc_offline_eratt(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);
	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);

	lpfc_offline(phba);
	lpfc_reset_barrier(phba);
	spin_lock_irq(&phba->hbalock);
	lpfc_sli_brdreset(phba);
	spin_unlock_irq(&phba->hbalock);
	lpfc_hba_down_post(phba);
	lpfc_sli_brdready(phba, HS_MBRDY);
	lpfc_unblock_mgmt_io(phba);
	phba->link_state = LPFC_HBA_ERROR;
	return;
}
/**
 * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to bring a SLI4 HBA offline when HBA hardware error
 * other than Port Error 6 has been detected.
 **/
void
lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
{
	spin_lock_irq(&phba->hbalock);
	phba->link_state = LPFC_HBA_ERROR;
	spin_unlock_irq(&phba->hbalock);

	lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_hba_down_post(phba);
	lpfc_unblock_mgmt_io(phba);
}
/**
 * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the deferred HBA hardware error
 * conditions. This type of error is indicated by HBA by setting ER1
 * and another ER bit in the host status register. The driver will
 * wait until the ER1 bit clears before handling the error condition.
 **/
static void
lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
{
	uint32_t old_host_status = phba->work_hs;
	struct lpfc_sli *psli = &phba->sli;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
		"0479 Deferred Adapter Hardware Error "
		"Data: x%x x%x x%x\n",
		phba->work_hs,
		phba->work_status[0], phba->work_status[1]);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/*
	 * Firmware stops when it triggers erratt. That could cause I/Os to
	 * be dropped by the firmware. Error the iocbs (I/Os) on the txcmplq
	 * and let the SCSI layer retry them after re-establishing link.
	 */
	lpfc_sli_abort_fcp_rings(phba);

	/*
	 * There was a firmware error. Take the hba offline and then
	 * attempt to restart it.
	 */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);

	/* Wait for the ER1 bit to clear. */
	while (phba->work_hs & HS_FFER1) {
		msleep(100);
		if (lpfc_readl(phba->HSregaddr, &phba->work_hs)) {
			phba->work_hs = UNPLUG_ERR;
			break;
		}
		/* If driver is unloading let the worker thread continue */
		if (phba->pport->load_flag & FC_UNLOADING) {
			phba->work_hs = 0;
			break;
		}
	}

	/*
	 * This is to protect against a race condition in which
	 * the first write to the host attention register clears
	 * the host status register.
	 */
	if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
		phba->work_hs = old_host_status & ~HS_FFER1;

	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~DEFER_ERATT;
	spin_unlock_irq(&phba->hbalock);
	phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
	phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
}
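
/**
 * lpfc_board_errevt_to_mgmt - Send a board error event to the mgmt application
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine posts an FC_REG_BOARD_EVENT vendor-unique event, with
 * subcategory LPFC_EVENT_PORTINTERR, to user space through the fc transport.
 **/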
static void
lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
{
	struct lpfc_board_event_header board_event;
	struct Scsi_Host *shost;

	board_event.event_type = FC_REG_BOARD_EVENT;
	board_event.subcategory = LPFC_EVENT_PORTINTERR;
	shost = lpfc_shost_from_vport(phba->pport);
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(board_event),
				  (char *) &board_event,
				  LPFC_NL_VENDOR_ID);
}
/**
 * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to handle the following HBA hardware error
 * conditions:
 * 1 - HBA error attention interrupt
 * 2 - DMA ring index out of range
 * 3 - Mailbox command came back as unknown
 **/
static void
lpfc_handle_eratt_s3(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t event_data;
	unsigned long temperature;
	struct temp_event temp_event_data;
	struct Scsi_Host *shost;

	/* If the pci channel is offline, ignore possible errors,
	 * since we cannot communicate with the pci card anyway.
	 */
	if (pci_channel_offline(phba->pcidev)) {
		spin_lock_irq(&phba->hbalock);
		phba->hba_flag &= ~DEFER_ERATT;
		spin_unlock_irq(&phba->hbalock);
		return;
	}

	/* If resets are disabled then leave the HBA alone and return */
	if (!phba->cfg_enable_hba_reset)
		return;

	/* Send an internal error event to mgmt application */
	lpfc_board_errevt_to_mgmt(phba);

	if (phba->hba_flag & DEFER_ERATT)
		lpfc_handle_deferred_eratt(phba);

	if ((phba->work_hs & HS_FFER6) || (phba->work_hs & HS_FFER8)) {
		if (phba->work_hs & HS_FFER6)
			/* Re-establishing Link */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"1301 Re-establishing Link "
					"Data: x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);
		if (phba->work_hs & HS_FFER8)
			/* Device Zeroization */
			lpfc_printf_log(phba, KERN_INFO, LOG_LINK_EVENT,
					"2861 Host Authentication device "
					"zeroization Data:x%x x%x x%x\n",
					phba->work_hs, phba->work_status[0],
					phba->work_status[1]);

		spin_lock_irq(&phba->hbalock);
		psli->sli_flag &= ~LPFC_SLI_ACTIVE;
		spin_unlock_irq(&phba->hbalock);

		/*
		 * Firmware stops when it triggers erratt with HS_FFER6.
		 * That could cause the I/Os dropped by the firmware.
		 * Error iocb (I/O) on txcmplq and let the SCSI layer
		 * retry it after re-establishing link.
		 */
		lpfc_sli_abort_fcp_rings(phba);

		/*
		 * There was a firmware error. Take the hba offline and then
		 * attempt to restart it.
		 */
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		if (lpfc_online(phba) == 0) {	/* Initialize the HBA */
			lpfc_unblock_mgmt_io(phba);
			return;
		}
		lpfc_unblock_mgmt_io(phba);
	} else if (phba->work_hs & HS_CRIT_TEMP) {
		temperature = readl(phba->MBslimaddr + TEMPERATURE_OFFSET);
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_CRIT_TEMP;
		temp_event_data.data = (uint32_t)temperature;

		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0406 Adapter maximum temperature exceeded "
				"(%ld), taking this port offline "
				"Data: x%x x%x x%x\n",
				temperature, phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *) &temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);

		spin_lock_irq(&phba->hbalock);
		phba->over_temp_state = HBA_OVER_TEMP;
		spin_unlock_irq(&phba->hbalock);
		lpfc_offline_eratt(phba);

	} else {
		/* The if clause above forces this code path when the status
		 * failure is a value other than FFER6. Do not call the offline
		 * twice. This is the adapter hardware error path.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0457 Adapter Hardware Error "
				"Data: x%x x%x x%x\n",
				phba->work_hs,
				phba->work_status[0], phba->work_status[1]);

		event_data = FC_REG_DUMP_EVENT;
		shost = lpfc_shost_from_vport(vport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
				sizeof(event_data), (char *) &event_data,
				SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);

		lpfc_offline_eratt(phba);
	}
	return;
}
/**
 * lpfc_sli4_port_sta_fn_reset - The SLI4 function reset due to port status reg
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for mailbox shutdown action.
 * @en_rn_msg: flag controlling whether the reset/port recovery message is
 *	logged.
 *
 * This routine is invoked to perform an SLI4 port PCI function reset in
 * response to port status register polling attention. It waits for port
 * status register (ERR, RDY, RN) bits before proceeding with function reset.
 * During this process, interrupt vectors are freed and later requested
 * for handling possible port resource change.
 **/
  1574. static int
  1575. lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
  1576. bool en_rn_msg)
  1577. {
  1578. int rc;
  1579. uint32_t intr_mode;
  1580. if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
  1581. LPFC_SLI_INTF_IF_TYPE_2) {
  1582. /*
  1583. * On error status condition, driver need to wait for port
  1584. * ready before performing reset.
  1585. */
  1586. rc = lpfc_sli4_pdev_status_reg_wait(phba);
  1587. if (rc)
  1588. return rc;
  1589. }
  1590. /* need reset: attempt for port recovery */
  1591. if (en_rn_msg)
  1592. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1593. "2887 Reset Needed: Attempting Port "
  1594. "Recovery...\n");
  1595. lpfc_offline_prep(phba, mbx_action);
  1596. lpfc_offline(phba);
  1597. /* release interrupt for possible resource change */
  1598. lpfc_sli4_disable_intr(phba);
  1599. lpfc_sli_brdrestart(phba);
  1600. /* request and enable interrupt */
  1601. intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
  1602. if (intr_mode == LPFC_INTR_ERROR) {
  1603. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1604. "3175 Failed to enable interrupt\n");
  1605. return -EIO;
  1606. }
  1607. phba->intr_mode = intr_mode;
  1608. rc = lpfc_online(phba);
  1609. if (rc == 0)
  1610. lpfc_unblock_mgmt_io(phba);
  1611. return rc;
  1612. }
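/*
 * Editorial sketch (not driver code): the contract of the helper above,
 * condensed. lpfc_handle_eratt_s4() below is the real caller.
 */
#if 0
	rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT, true);
	if (rc)
		/* recovery failed; the caller takes the port offline */
		lpfc_sli4_offline_eratt(phba);
#endif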
  1613. /**
  1614. * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
  1615. * @phba: pointer to lpfc hba data structure.
  1616. *
  1617. * This routine is invoked to handle the SLI4 HBA hardware error attention
  1618. * conditions.
  1619. **/
  1620. static void
  1621. lpfc_handle_eratt_s4(struct lpfc_hba *phba)
  1622. {
  1623. struct lpfc_vport *vport = phba->pport;
  1624. uint32_t event_data;
  1625. struct Scsi_Host *shost;
  1626. uint32_t if_type;
  1627. struct lpfc_register portstat_reg = {0};
  1628. uint32_t reg_err1, reg_err2;
  1629. uint32_t uerrlo_reg, uemasklo_reg;
  1630. uint32_t smphr_port_status = 0, pci_rd_rc1, pci_rd_rc2;
  1631. bool en_rn_msg = true;
  1632. struct temp_event temp_event_data;
  1633. struct lpfc_register portsmphr_reg;
  1634. int rc, i;
  1635. /* If the pci channel is offline, ignore possible errors, since
  1636. * we cannot communicate with the pci card anyway.
  1637. */
  1638. if (pci_channel_offline(phba->pcidev))
  1639. return;
  1640. memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
  1641. if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
  1642. switch (if_type) {
  1643. case LPFC_SLI_INTF_IF_TYPE_0:
  1644. pci_rd_rc1 = lpfc_readl(
  1645. phba->sli4_hba.u.if_type0.UERRLOregaddr,
  1646. &uerrlo_reg);
  1647. pci_rd_rc2 = lpfc_readl(
  1648. phba->sli4_hba.u.if_type0.UEMASKLOregaddr,
  1649. &uemasklo_reg);
  1650. /* consider PCI bus read error as pci_channel_offline */
  1651. if (pci_rd_rc1 == -EIO && pci_rd_rc2 == -EIO)
  1652. return;
  1653. if (!(phba->hba_flag & HBA_RECOVERABLE_UE)) {
  1654. lpfc_sli4_offline_eratt(phba);
  1655. return;
  1656. }
  1657. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1658. "7623 Checking UE recoverable");
  1659. for (i = 0; i < phba->sli4_hba.ue_to_sr / 1000; i++) {
  1660. if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
  1661. &portsmphr_reg.word0))
  1662. continue;
  1663. smphr_port_status = bf_get(lpfc_port_smphr_port_status,
  1664. &portsmphr_reg);
  1665. if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
  1666. LPFC_PORT_SEM_UE_RECOVERABLE)
  1667. break;
/* Sleep for 1 second before checking the semaphore again */
  1669. msleep(1000);
  1670. }
  1671. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1672. "4827 smphr_port_status x%x : Waited %dSec",
  1673. smphr_port_status, i);
  1674. /* Recoverable UE, reset the HBA device */
  1675. if ((smphr_port_status & LPFC_PORT_SEM_MASK) ==
  1676. LPFC_PORT_SEM_UE_RECOVERABLE) {
  1677. for (i = 0; i < 20; i++) {
  1678. msleep(1000);
  1679. if (!lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
  1680. &portsmphr_reg.word0) &&
  1681. (LPFC_POST_STAGE_PORT_READY ==
  1682. bf_get(lpfc_port_smphr_port_status,
  1683. &portsmphr_reg))) {
  1684. rc = lpfc_sli4_port_sta_fn_reset(phba,
  1685. LPFC_MBX_NO_WAIT, en_rn_msg);
  1686. if (rc == 0)
  1687. return;
  1688. lpfc_printf_log(phba,
  1689. KERN_ERR, LOG_INIT,
  1690. "4215 Failed to recover UE");
  1691. break;
  1692. }
  1693. }
  1694. }
  1695. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1696. "7624 Firmware not ready: Failing UE recovery,"
  1697. " waited %dSec", i);
  1698. lpfc_sli4_offline_eratt(phba);
  1699. break;
  1700. case LPFC_SLI_INTF_IF_TYPE_2:
  1701. pci_rd_rc1 = lpfc_readl(
  1702. phba->sli4_hba.u.if_type2.STATUSregaddr,
  1703. &portstat_reg.word0);
  1704. /* consider PCI bus read error as pci_channel_offline */
  1705. if (pci_rd_rc1 == -EIO) {
  1706. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1707. "3151 PCI bus read access failure: x%x\n",
  1708. readl(phba->sli4_hba.u.if_type2.STATUSregaddr));
  1709. return;
  1710. }
  1711. reg_err1 = readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
  1712. reg_err2 = readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
  1713. if (bf_get(lpfc_sliport_status_oti, &portstat_reg)) {
  1714. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1715. "2889 Port Overtemperature event, "
  1716. "taking port offline Data: x%x x%x\n",
  1717. reg_err1, reg_err2);
  1718. phba->sfp_alarm |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
  1719. temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
  1720. temp_event_data.event_code = LPFC_CRIT_TEMP;
  1721. temp_event_data.data = 0xFFFFFFFF;
  1722. shost = lpfc_shost_from_vport(phba->pport);
  1723. fc_host_post_vendor_event(shost, fc_get_event_number(),
  1724. sizeof(temp_event_data),
  1725. (char *)&temp_event_data,
  1726. SCSI_NL_VID_TYPE_PCI
  1727. | PCI_VENDOR_ID_EMULEX);
  1728. spin_lock_irq(&phba->hbalock);
  1729. phba->over_temp_state = HBA_OVER_TEMP;
  1730. spin_unlock_irq(&phba->hbalock);
  1731. lpfc_sli4_offline_eratt(phba);
  1732. return;
  1733. }
  1734. if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
  1735. reg_err2 == SLIPORT_ERR2_REG_FW_RESTART) {
  1736. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1737. "3143 Port Down: Firmware Update "
  1738. "Detected\n");
  1739. en_rn_msg = false;
  1740. } else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
  1741. reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
  1742. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1743. "3144 Port Down: Debug Dump\n");
  1744. else if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
  1745. reg_err2 == SLIPORT_ERR2_REG_FUNC_PROVISON)
  1746. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1747. "3145 Port Down: Provisioning\n");
  1748. /* If resets are disabled then leave the HBA alone and return */
  1749. if (!phba->cfg_enable_hba_reset)
  1750. return;
  1751. /* Check port status register for function reset */
  1752. rc = lpfc_sli4_port_sta_fn_reset(phba, LPFC_MBX_NO_WAIT,
  1753. en_rn_msg);
  1754. if (rc == 0) {
  1755. /* don't report event on forced debug dump */
  1756. if (reg_err1 == SLIPORT_ERR1_REG_ERR_CODE_2 &&
  1757. reg_err2 == SLIPORT_ERR2_REG_FORCED_DUMP)
  1758. return;
  1759. else
  1760. break;
  1761. }
/* Fall through when the port could not be recovered */
  1763. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  1764. "3152 Unrecoverable error, bring the port "
  1765. "offline\n");
  1766. lpfc_sli4_offline_eratt(phba);
  1767. break;
  1768. case LPFC_SLI_INTF_IF_TYPE_1:
  1769. default:
  1770. break;
  1771. }
  1772. lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  1773. "3123 Report dump event to upper layer\n");
  1774. /* Send an internal error event to mgmt application */
  1775. lpfc_board_errevt_to_mgmt(phba);
  1776. event_data = FC_REG_DUMP_EVENT;
  1777. shost = lpfc_shost_from_vport(vport);
  1778. fc_host_post_vendor_event(shost, fc_get_event_number(),
  1779. sizeof(event_data), (char *) &event_data,
  1780. SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
  1781. }
  1782. /**
  1783. * lpfc_handle_eratt - Wrapper func for handling hba error attention
  1784. * @phba: pointer to lpfc HBA data structure.
  1785. *
 * This routine wraps the actual SLI3 or SLI4 HBA error attention handling
 * routine, dispatched through the API jump table function pointer in the
 * lpfc_hba struct. The wrapper itself returns nothing.
  1792. **/
  1793. void
  1794. lpfc_handle_eratt(struct lpfc_hba *phba)
  1795. {
  1796. (*phba->lpfc_handle_eratt)(phba);
  1797. }
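/*
 * Illustrative sketch: how the error attention entry of the jump table is
 * expected to be wired per PCI device group, so the wrapper above
 * dispatches to the SLI3 or SLI4 handler. The exact setup site (the
 * driver's API table setup elsewhere in this file) is an assumption here.
 */
#if 0
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
		break;
	}
#endif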
  1798. /**
  1799. * lpfc_handle_latt - The HBA link event handler
  1800. * @phba: pointer to lpfc hba data structure.
  1801. *
  1802. * This routine is invoked from the worker thread to handle a HBA host
  1803. * attention link event. SLI3 only.
  1804. **/
  1805. void
  1806. lpfc_handle_latt(struct lpfc_hba *phba)
  1807. {
  1808. struct lpfc_vport *vport = phba->pport;
  1809. struct lpfc_sli *psli = &phba->sli;
  1810. LPFC_MBOXQ_t *pmb;
  1811. volatile uint32_t control;
  1812. struct lpfc_dmabuf *mp;
  1813. int rc = 0;
  1814. pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  1815. if (!pmb) {
  1816. rc = 1;
  1817. goto lpfc_handle_latt_err_exit;
  1818. }
  1819. mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  1820. if (!mp) {
  1821. rc = 2;
  1822. goto lpfc_handle_latt_free_pmb;
  1823. }
  1824. mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
  1825. if (!mp->virt) {
  1826. rc = 3;
  1827. goto lpfc_handle_latt_free_mp;
  1828. }
  1829. /* Cleanup any outstanding ELS commands */
  1830. lpfc_els_flush_all_cmd(phba);
  1831. psli->slistat.link_event++;
  1832. lpfc_read_topology(phba, pmb, mp);
  1833. pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
  1834. pmb->vport = vport;
  1835. /* Block ELS IOCBs until we have processed this mbox command */
  1836. phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
  1837. rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
  1838. if (rc == MBX_NOT_FINISHED) {
  1839. rc = 4;
  1840. goto lpfc_handle_latt_free_mbuf;
  1841. }
  1842. /* Clear Link Attention in HA REG */
  1843. spin_lock_irq(&phba->hbalock);
  1844. writel(HA_LATT, phba->HAregaddr);
  1845. readl(phba->HAregaddr); /* flush */
  1846. spin_unlock_irq(&phba->hbalock);
  1847. return;
  1848. lpfc_handle_latt_free_mbuf:
  1849. phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
  1850. lpfc_mbuf_free(phba, mp->virt, mp->phys);
  1851. lpfc_handle_latt_free_mp:
  1852. kfree(mp);
  1853. lpfc_handle_latt_free_pmb:
  1854. mempool_free(pmb, phba->mbox_mem_pool);
  1855. lpfc_handle_latt_err_exit:
  1856. /* Enable Link attention interrupts */
  1857. spin_lock_irq(&phba->hbalock);
  1858. psli->sli_flag |= LPFC_PROCESS_LA;
  1859. control = readl(phba->HCregaddr);
  1860. control |= HC_LAINT_ENA;
  1861. writel(control, phba->HCregaddr);
  1862. readl(phba->HCregaddr); /* flush */
  1863. /* Clear Link Attention in HA REG */
  1864. writel(HA_LATT, phba->HAregaddr);
  1865. readl(phba->HAregaddr); /* flush */
  1866. spin_unlock_irq(&phba->hbalock);
  1867. lpfc_linkdown(phba);
  1868. phba->link_state = LPFC_HBA_ERROR;
  1869. lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
  1870. "0300 LATT: Cannot issue READ_LA: Data:%d\n", rc);
  1871. return;
  1872. }
  1873. /**
  1874. * lpfc_parse_vpd - Parse VPD (Vital Product Data)
  1875. * @phba: pointer to lpfc hba data structure.
  1876. * @vpd: pointer to the vital product data.
  1877. * @len: length of the vital product data in bytes.
  1878. *
 * This routine parses the Vital Product Data (VPD). The VPD is treated as
 * an array of characters. It populates the ModelName, ProgramType,
 * ModelDesc, and related fields of the phba data structure.
  1882. *
  1883. * Return codes
  1884. * 0 - pointer to the VPD passed in is NULL
  1885. * 1 - success
  1886. **/
  1887. int
  1888. lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
  1889. {
  1890. uint8_t lenlo, lenhi;
  1891. int Length;
  1892. int i, j;
  1893. int finished = 0;
  1894. int index = 0;
  1895. if (!vpd)
  1896. return 0;
  1897. /* Vital Product */
  1898. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  1899. "0455 Vital Product Data: x%x x%x x%x x%x\n",
  1900. (uint32_t) vpd[0], (uint32_t) vpd[1], (uint32_t) vpd[2],
  1901. (uint32_t) vpd[3]);
  1902. while (!finished && (index < (len - 4))) {
  1903. switch (vpd[index]) {
  1904. case 0x82:
  1905. case 0x91:
  1906. index += 1;
  1907. lenlo = vpd[index];
  1908. index += 1;
  1909. lenhi = vpd[index];
  1910. index += 1;
  1911. i = ((((unsigned short)lenhi) << 8) + lenlo);
  1912. index += i;
  1913. break;
  1914. case 0x90:
  1915. index += 1;
  1916. lenlo = vpd[index];
  1917. index += 1;
  1918. lenhi = vpd[index];
  1919. index += 1;
  1920. Length = ((((unsigned short)lenhi) << 8) + lenlo);
  1921. if (Length > len - index)
  1922. Length = len - index;
  1923. while (Length > 0) {
  1924. /* Look for Serial Number */
  1925. if ((vpd[index] == 'S') && (vpd[index+1] == 'N')) {
  1926. index += 2;
  1927. i = vpd[index];
  1928. index += 1;
  1929. j = 0;
  1930. Length -= (3+i);
while (i--) {
  1932. phba->SerialNumber[j++] = vpd[index++];
  1933. if (j == 31)
  1934. break;
  1935. }
  1936. phba->SerialNumber[j] = 0;
  1937. continue;
  1938. }
  1939. else if ((vpd[index] == 'V') && (vpd[index+1] == '1')) {
  1940. phba->vpd_flag |= VPD_MODEL_DESC;
  1941. index += 2;
  1942. i = vpd[index];
  1943. index += 1;
  1944. j = 0;
  1945. Length -= (3+i);
while (i--) {
  1947. phba->ModelDesc[j++] = vpd[index++];
  1948. if (j == 255)
  1949. break;
  1950. }
  1951. phba->ModelDesc[j] = 0;
  1952. continue;
  1953. }
  1954. else if ((vpd[index] == 'V') && (vpd[index+1] == '2')) {
  1955. phba->vpd_flag |= VPD_MODEL_NAME;
  1956. index += 2;
  1957. i = vpd[index];
  1958. index += 1;
  1959. j = 0;
  1960. Length -= (3+i);
while (i--) {
  1962. phba->ModelName[j++] = vpd[index++];
  1963. if (j == 79)
  1964. break;
  1965. }
  1966. phba->ModelName[j] = 0;
  1967. continue;
  1968. }
  1969. else if ((vpd[index] == 'V') && (vpd[index+1] == '3')) {
  1970. phba->vpd_flag |= VPD_PROGRAM_TYPE;
  1971. index += 2;
  1972. i = vpd[index];
  1973. index += 1;
  1974. j = 0;
  1975. Length -= (3+i);
while (i--) {
  1977. phba->ProgramType[j++] = vpd[index++];
  1978. if (j == 255)
  1979. break;
  1980. }
  1981. phba->ProgramType[j] = 0;
  1982. continue;
  1983. }
  1984. else if ((vpd[index] == 'V') && (vpd[index+1] == '4')) {
  1985. phba->vpd_flag |= VPD_PORT;
  1986. index += 2;
  1987. i = vpd[index];
  1988. index += 1;
  1989. j = 0;
  1990. Length -= (3+i);
while (i--) {
  1992. if ((phba->sli_rev == LPFC_SLI_REV4) &&
  1993. (phba->sli4_hba.pport_name_sta ==
  1994. LPFC_SLI4_PPNAME_GET)) {
  1995. j++;
  1996. index++;
  1997. } else
  1998. phba->Port[j++] = vpd[index++];
  1999. if (j == 19)
  2000. break;
  2001. }
  2002. if ((phba->sli_rev != LPFC_SLI_REV4) ||
  2003. (phba->sli4_hba.pport_name_sta ==
  2004. LPFC_SLI4_PPNAME_NON))
  2005. phba->Port[j] = 0;
  2006. continue;
  2007. }
  2008. else {
  2009. index += 2;
  2010. i = vpd[index];
  2011. index += 1;
  2012. index += i;
  2013. Length -= (3 + i);
  2014. }
  2015. }
  2016. finished = 0;
  2017. break;
  2018. case 0x78:
  2019. finished = 1;
  2020. break;
  2021. default:
index++;
  2023. break;
  2024. }
  2025. }
return 1;
  2027. }
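/*
 * Illustrative sketch: a minimal, hypothetical VPD image accepted by the
 * parser above. A 0x90 (VPD-R) section carries keyword fields such as
 * "SN"; the walk stops once four or fewer bytes remain.
 */
#if 0
	uint8_t vpd[] = {
		0x90, 0x06, 0x00,		/* VPD-R tag, length 6 */
		'S', 'N', 0x03, '1', '2', '3',	/* SN keyword, 3 data bytes */
		0x78				/* end tag */
	};

	lpfc_parse_vpd(phba, vpd, sizeof(vpd));
	/* phba->SerialNumber now holds "123" */
#endif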
  2028. /**
  2029. * lpfc_get_hba_model_desc - Retrieve HBA device model name and description
  2030. * @phba: pointer to lpfc hba data structure.
  2031. * @mdp: pointer to the data structure to hold the derived model name.
  2032. * @descp: pointer to the data structure to hold the derived description.
  2033. *
  2034. * This routine retrieves HBA's description based on its registered PCI device
  2035. * ID. The @descp passed into this function points to an array of 256 chars. It
  2036. * shall be returned with the model name, maximum speed, and the host bus type.
  2037. * The @mdp passed into this function points to an array of 80 chars. When the
  2038. * function returns, the @mdp will be filled with the model name.
  2039. **/
  2040. static void
  2041. lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
  2042. {
  2043. lpfc_vpd_t *vp;
  2044. uint16_t dev_id = phba->pcidev->device;
  2045. int max_speed;
  2046. int GE = 0;
  2047. int oneConnect = 0; /* default is not a oneConnect */
  2048. struct {
  2049. char *name;
  2050. char *bus;
  2051. char *function;
  2052. } m = {"<Unknown>", "", ""};
  2053. if (mdp && mdp[0] != '\0'
  2054. && descp && descp[0] != '\0')
  2055. return;
  2056. if (phba->lmt & LMT_32Gb)
  2057. max_speed = 32;
  2058. else if (phba->lmt & LMT_16Gb)
  2059. max_speed = 16;
  2060. else if (phba->lmt & LMT_10Gb)
  2061. max_speed = 10;
  2062. else if (phba->lmt & LMT_8Gb)
  2063. max_speed = 8;
  2064. else if (phba->lmt & LMT_4Gb)
  2065. max_speed = 4;
  2066. else if (phba->lmt & LMT_2Gb)
  2067. max_speed = 2;
  2068. else if (phba->lmt & LMT_1Gb)
  2069. max_speed = 1;
  2070. else
  2071. max_speed = 0;
  2072. vp = &phba->vpd;
  2073. switch (dev_id) {
  2074. case PCI_DEVICE_ID_FIREFLY:
  2075. m = (typeof(m)){"LP6000", "PCI",
  2076. "Obsolete, Unsupported Fibre Channel Adapter"};
  2077. break;
  2078. case PCI_DEVICE_ID_SUPERFLY:
  2079. if (vp->rev.biuRev >= 1 && vp->rev.biuRev <= 3)
  2080. m = (typeof(m)){"LP7000", "PCI", ""};
  2081. else
  2082. m = (typeof(m)){"LP7000E", "PCI", ""};
  2083. m.function = "Obsolete, Unsupported Fibre Channel Adapter";
  2084. break;
  2085. case PCI_DEVICE_ID_DRAGONFLY:
  2086. m = (typeof(m)){"LP8000", "PCI",
  2087. "Obsolete, Unsupported Fibre Channel Adapter"};
  2088. break;
  2089. case PCI_DEVICE_ID_CENTAUR:
  2090. if (FC_JEDEC_ID(vp->rev.biuRev) == CENTAUR_2G_JEDEC_ID)
  2091. m = (typeof(m)){"LP9002", "PCI", ""};
  2092. else
  2093. m = (typeof(m)){"LP9000", "PCI", ""};
  2094. m.function = "Obsolete, Unsupported Fibre Channel Adapter";
  2095. break;
  2096. case PCI_DEVICE_ID_RFLY:
  2097. m = (typeof(m)){"LP952", "PCI",
  2098. "Obsolete, Unsupported Fibre Channel Adapter"};
  2099. break;
  2100. case PCI_DEVICE_ID_PEGASUS:
  2101. m = (typeof(m)){"LP9802", "PCI-X",
  2102. "Obsolete, Unsupported Fibre Channel Adapter"};
  2103. break;
  2104. case PCI_DEVICE_ID_THOR:
  2105. m = (typeof(m)){"LP10000", "PCI-X",
  2106. "Obsolete, Unsupported Fibre Channel Adapter"};
  2107. break;
  2108. case PCI_DEVICE_ID_VIPER:
  2109. m = (typeof(m)){"LPX1000", "PCI-X",
  2110. "Obsolete, Unsupported Fibre Channel Adapter"};
  2111. break;
  2112. case PCI_DEVICE_ID_PFLY:
  2113. m = (typeof(m)){"LP982", "PCI-X",
  2114. "Obsolete, Unsupported Fibre Channel Adapter"};
  2115. break;
  2116. case PCI_DEVICE_ID_TFLY:
  2117. m = (typeof(m)){"LP1050", "PCI-X",
  2118. "Obsolete, Unsupported Fibre Channel Adapter"};
  2119. break;
  2120. case PCI_DEVICE_ID_HELIOS:
  2121. m = (typeof(m)){"LP11000", "PCI-X2",
  2122. "Obsolete, Unsupported Fibre Channel Adapter"};
  2123. break;
  2124. case PCI_DEVICE_ID_HELIOS_SCSP:
  2125. m = (typeof(m)){"LP11000-SP", "PCI-X2",
  2126. "Obsolete, Unsupported Fibre Channel Adapter"};
  2127. break;
  2128. case PCI_DEVICE_ID_HELIOS_DCSP:
  2129. m = (typeof(m)){"LP11002-SP", "PCI-X2",
  2130. "Obsolete, Unsupported Fibre Channel Adapter"};
  2131. break;
  2132. case PCI_DEVICE_ID_NEPTUNE:
  2133. m = (typeof(m)){"LPe1000", "PCIe",
  2134. "Obsolete, Unsupported Fibre Channel Adapter"};
  2135. break;
  2136. case PCI_DEVICE_ID_NEPTUNE_SCSP:
  2137. m = (typeof(m)){"LPe1000-SP", "PCIe",
  2138. "Obsolete, Unsupported Fibre Channel Adapter"};
  2139. break;
  2140. case PCI_DEVICE_ID_NEPTUNE_DCSP:
  2141. m = (typeof(m)){"LPe1002-SP", "PCIe",
  2142. "Obsolete, Unsupported Fibre Channel Adapter"};
  2143. break;
  2144. case PCI_DEVICE_ID_BMID:
  2145. m = (typeof(m)){"LP1150", "PCI-X2", "Fibre Channel Adapter"};
  2146. break;
  2147. case PCI_DEVICE_ID_BSMB:
  2148. m = (typeof(m)){"LP111", "PCI-X2",
  2149. "Obsolete, Unsupported Fibre Channel Adapter"};
  2150. break;
  2151. case PCI_DEVICE_ID_ZEPHYR:
  2152. m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
  2153. break;
  2154. case PCI_DEVICE_ID_ZEPHYR_SCSP:
  2155. m = (typeof(m)){"LPe11000", "PCIe", "Fibre Channel Adapter"};
  2156. break;
  2157. case PCI_DEVICE_ID_ZEPHYR_DCSP:
  2158. m = (typeof(m)){"LP2105", "PCIe", "FCoE Adapter"};
  2159. GE = 1;
  2160. break;
  2161. case PCI_DEVICE_ID_ZMID:
  2162. m = (typeof(m)){"LPe1150", "PCIe", "Fibre Channel Adapter"};
  2163. break;
  2164. case PCI_DEVICE_ID_ZSMB:
  2165. m = (typeof(m)){"LPe111", "PCIe", "Fibre Channel Adapter"};
  2166. break;
  2167. case PCI_DEVICE_ID_LP101:
  2168. m = (typeof(m)){"LP101", "PCI-X",
  2169. "Obsolete, Unsupported Fibre Channel Adapter"};
  2170. break;
  2171. case PCI_DEVICE_ID_LP10000S:
  2172. m = (typeof(m)){"LP10000-S", "PCI",
  2173. "Obsolete, Unsupported Fibre Channel Adapter"};
  2174. break;
  2175. case PCI_DEVICE_ID_LP11000S:
  2176. m = (typeof(m)){"LP11000-S", "PCI-X2",
  2177. "Obsolete, Unsupported Fibre Channel Adapter"};
  2178. break;
  2179. case PCI_DEVICE_ID_LPE11000S:
  2180. m = (typeof(m)){"LPe11000-S", "PCIe",
  2181. "Obsolete, Unsupported Fibre Channel Adapter"};
  2182. break;
  2183. case PCI_DEVICE_ID_SAT:
  2184. m = (typeof(m)){"LPe12000", "PCIe", "Fibre Channel Adapter"};
  2185. break;
  2186. case PCI_DEVICE_ID_SAT_MID:
  2187. m = (typeof(m)){"LPe1250", "PCIe", "Fibre Channel Adapter"};
  2188. break;
  2189. case PCI_DEVICE_ID_SAT_SMB:
  2190. m = (typeof(m)){"LPe121", "PCIe", "Fibre Channel Adapter"};
  2191. break;
  2192. case PCI_DEVICE_ID_SAT_DCSP:
  2193. m = (typeof(m)){"LPe12002-SP", "PCIe", "Fibre Channel Adapter"};
  2194. break;
  2195. case PCI_DEVICE_ID_SAT_SCSP:
  2196. m = (typeof(m)){"LPe12000-SP", "PCIe", "Fibre Channel Adapter"};
  2197. break;
  2198. case PCI_DEVICE_ID_SAT_S:
  2199. m = (typeof(m)){"LPe12000-S", "PCIe", "Fibre Channel Adapter"};
  2200. break;
  2201. case PCI_DEVICE_ID_HORNET:
  2202. m = (typeof(m)){"LP21000", "PCIe",
  2203. "Obsolete, Unsupported FCoE Adapter"};
  2204. GE = 1;
  2205. break;
  2206. case PCI_DEVICE_ID_PROTEUS_VF:
  2207. m = (typeof(m)){"LPev12000", "PCIe IOV",
  2208. "Obsolete, Unsupported Fibre Channel Adapter"};
  2209. break;
  2210. case PCI_DEVICE_ID_PROTEUS_PF:
  2211. m = (typeof(m)){"LPev12000", "PCIe IOV",
  2212. "Obsolete, Unsupported Fibre Channel Adapter"};
  2213. break;
  2214. case PCI_DEVICE_ID_PROTEUS_S:
  2215. m = (typeof(m)){"LPemv12002-S", "PCIe IOV",
  2216. "Obsolete, Unsupported Fibre Channel Adapter"};
  2217. break;
  2218. case PCI_DEVICE_ID_TIGERSHARK:
  2219. oneConnect = 1;
  2220. m = (typeof(m)){"OCe10100", "PCIe", "FCoE"};
  2221. break;
  2222. case PCI_DEVICE_ID_TOMCAT:
  2223. oneConnect = 1;
  2224. m = (typeof(m)){"OCe11100", "PCIe", "FCoE"};
  2225. break;
  2226. case PCI_DEVICE_ID_FALCON:
  2227. m = (typeof(m)){"LPSe12002-ML1-E", "PCIe",
  2228. "EmulexSecure Fibre"};
  2229. break;
  2230. case PCI_DEVICE_ID_BALIUS:
  2231. m = (typeof(m)){"LPVe12002", "PCIe Shared I/O",
  2232. "Obsolete, Unsupported Fibre Channel Adapter"};
  2233. break;
  2234. case PCI_DEVICE_ID_LANCER_FC:
  2235. m = (typeof(m)){"LPe16000", "PCIe", "Fibre Channel Adapter"};
  2236. break;
  2237. case PCI_DEVICE_ID_LANCER_FC_VF:
  2238. m = (typeof(m)){"LPe16000", "PCIe",
  2239. "Obsolete, Unsupported Fibre Channel Adapter"};
  2240. break;
  2241. case PCI_DEVICE_ID_LANCER_FCOE:
  2242. oneConnect = 1;
  2243. m = (typeof(m)){"OCe15100", "PCIe", "FCoE"};
  2244. break;
  2245. case PCI_DEVICE_ID_LANCER_FCOE_VF:
  2246. oneConnect = 1;
  2247. m = (typeof(m)){"OCe15100", "PCIe",
  2248. "Obsolete, Unsupported FCoE"};
  2249. break;
  2250. case PCI_DEVICE_ID_LANCER_G6_FC:
  2251. m = (typeof(m)){"LPe32000", "PCIe", "Fibre Channel Adapter"};
  2252. break;
  2253. case PCI_DEVICE_ID_SKYHAWK:
  2254. case PCI_DEVICE_ID_SKYHAWK_VF:
  2255. oneConnect = 1;
  2256. m = (typeof(m)){"OCe14000", "PCIe", "FCoE"};
  2257. break;
  2258. default:
  2259. m = (typeof(m)){"Unknown", "", ""};
  2260. break;
  2261. }
  2262. if (mdp && mdp[0] == '\0')
snprintf(mdp, 79, "%s", m.name);
/*
 * OneConnect HBAs require special processing; they are all
 * initiators and the port number is appended to the end.
 */
  2268. if (descp && descp[0] == '\0') {
  2269. if (oneConnect)
  2270. snprintf(descp, 255,
  2271. "Emulex OneConnect %s, %s Initiator %s",
  2272. m.name, m.function,
  2273. phba->Port);
  2274. else if (max_speed == 0)
  2275. snprintf(descp, 255,
  2276. "Emulex %s %s %s",
  2277. m.name, m.bus, m.function);
  2278. else
  2279. snprintf(descp, 255,
  2280. "Emulex %s %d%s %s %s",
  2281. m.name, max_speed, (GE) ? "GE" : "Gb",
  2282. m.bus, m.function);
  2283. }
  2284. }
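/*
 * Usage sketch (hypothetical adapter): for an LPe12000 with LMT_8Gb set
 * in phba->lmt, the routine above yields:
 *   mdp:   "LPe12000"
 *   descp: "Emulex LPe12000 8Gb PCIe Fibre Channel Adapter"
 */
#if 0
	uint8_t model[80] = "";
	uint8_t desc[256] = "";

	lpfc_get_hba_model_desc(phba, model, desc);
#endif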
  2285. /**
  2286. * lpfc_post_buffer - Post IOCB(s) with DMA buffer descriptor(s) to a IOCB ring
  2287. * @phba: pointer to lpfc hba data structure.
  2288. * @pring: pointer to a IOCB ring.
  2289. * @cnt: the number of IOCBs to be posted to the IOCB ring.
  2290. *
  2291. * This routine posts a given number of IOCBs with the associated DMA buffer
  2292. * descriptors specified by the cnt argument to the given IOCB ring.
  2293. *
  2294. * Return codes
  2295. * The number of IOCBs NOT able to be posted to the IOCB ring.
  2296. **/
  2297. int
  2298. lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
  2299. {
  2300. IOCB_t *icmd;
  2301. struct lpfc_iocbq *iocb;
  2302. struct lpfc_dmabuf *mp1, *mp2;
  2303. cnt += pring->missbufcnt;
  2304. /* While there are buffers to post */
  2305. while (cnt > 0) {
  2306. /* Allocate buffer for command iocb */
  2307. iocb = lpfc_sli_get_iocbq(phba);
  2308. if (iocb == NULL) {
  2309. pring->missbufcnt = cnt;
  2310. return cnt;
  2311. }
  2312. icmd = &iocb->iocb;
  2313. /* 2 buffers can be posted per command */
  2314. /* Allocate buffer to post */
  2315. mp1 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
  2316. if (mp1)
  2317. mp1->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp1->phys);
  2318. if (!mp1 || !mp1->virt) {
  2319. kfree(mp1);
  2320. lpfc_sli_release_iocbq(phba, iocb);
  2321. pring->missbufcnt = cnt;
  2322. return cnt;
  2323. }
  2324. INIT_LIST_HEAD(&mp1->list);
  2325. /* Allocate buffer to post */
  2326. if (cnt > 1) {
  2327. mp2 = kmalloc(sizeof (struct lpfc_dmabuf), GFP_KERNEL);
  2328. if (mp2)
  2329. mp2->virt = lpfc_mbuf_alloc(phba, MEM_PRI,
  2330. &mp2->phys);
  2331. if (!mp2 || !mp2->virt) {
  2332. kfree(mp2);
  2333. lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
  2334. kfree(mp1);
  2335. lpfc_sli_release_iocbq(phba, iocb);
  2336. pring->missbufcnt = cnt;
  2337. return cnt;
  2338. }
  2339. INIT_LIST_HEAD(&mp2->list);
  2340. } else {
  2341. mp2 = NULL;
  2342. }
  2343. icmd->un.cont64[0].addrHigh = putPaddrHigh(mp1->phys);
  2344. icmd->un.cont64[0].addrLow = putPaddrLow(mp1->phys);
  2345. icmd->un.cont64[0].tus.f.bdeSize = FCELSSIZE;
  2346. icmd->ulpBdeCount = 1;
  2347. cnt--;
  2348. if (mp2) {
  2349. icmd->un.cont64[1].addrHigh = putPaddrHigh(mp2->phys);
  2350. icmd->un.cont64[1].addrLow = putPaddrLow(mp2->phys);
  2351. icmd->un.cont64[1].tus.f.bdeSize = FCELSSIZE;
  2352. cnt--;
  2353. icmd->ulpBdeCount = 2;
  2354. }
  2355. icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
  2356. icmd->ulpLe = 1;
  2357. if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
  2358. IOCB_ERROR) {
  2359. lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
  2360. kfree(mp1);
  2361. cnt++;
  2362. if (mp2) {
  2363. lpfc_mbuf_free(phba, mp2->virt, mp2->phys);
  2364. kfree(mp2);
  2365. cnt++;
  2366. }
  2367. lpfc_sli_release_iocbq(phba, iocb);
  2368. pring->missbufcnt = cnt;
  2369. return cnt;
  2370. }
  2371. lpfc_sli_ringpostbuf_put(phba, pring, mp1);
  2372. if (mp2)
  2373. lpfc_sli_ringpostbuf_put(phba, pring, mp2);
  2374. }
  2375. pring->missbufcnt = 0;
  2376. return 0;
  2377. }
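/*
 * Usage sketch: the return value is the shortfall, which is also cached
 * in pring->missbufcnt so that a later call silently retries it.
 */
#if 0
	int not_posted = lpfc_post_buffer(phba, pring, 64);

	if (not_posted)
		/* ran out of iocbqs or mbufs; retried on the next call */
		;
#endif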
  2378. /**
  2379. * lpfc_post_rcv_buf - Post the initial receive IOCB buffers to ELS ring
  2380. * @phba: pointer to lpfc hba data structure.
  2381. *
 * This routine posts the initial receive IOCB buffers to the ELS ring. The
 * number of initial IOCB buffers is specified by LPFC_BUF_RING0, currently
 * set to 64 IOCBs. SLI3 only.
  2385. *
  2386. * Return codes
  2387. * 0 - success (currently always success)
  2388. **/
  2389. static int
  2390. lpfc_post_rcv_buf(struct lpfc_hba *phba)
  2391. {
  2392. struct lpfc_sli *psli = &phba->sli;
  2393. /* Ring 0, ELS / CT buffers */
  2394. lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
  2395. /* Ring 2 - FCP no buffers needed */
  2396. return 0;
  2397. }
  2398. #define S(N,V) (((V)<<(N))|((V)>>(32-(N))))
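/*
 * Editorial note: S(N, V) is a 32-bit rotate-left of V by N bits, e.g.
 * S(1, 0x80000000) == 0x00000001 and S(5, 0x00000001) == 0x00000020.
 */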
  2399. /**
  2400. * lpfc_sha_init - Set up initial array of hash table entries
  2401. * @HashResultPointer: pointer to an array as hash table.
  2402. *
  2403. * This routine sets up the initial values to the array of hash table entries
  2404. * for the LC HBAs.
  2405. **/
  2406. static void
  2407. lpfc_sha_init(uint32_t * HashResultPointer)
  2408. {
  2409. HashResultPointer[0] = 0x67452301;
  2410. HashResultPointer[1] = 0xEFCDAB89;
  2411. HashResultPointer[2] = 0x98BADCFE;
  2412. HashResultPointer[3] = 0x10325476;
  2413. HashResultPointer[4] = 0xC3D2E1F0;
  2414. }
  2415. /**
  2416. * lpfc_sha_iterate - Iterate initial hash table with the working hash table
  2417. * @HashResultPointer: pointer to an initial/result hash table.
 * @HashWorkingPointer: pointer to a working hash table.
 *
 * This routine iterates an initial hash table pointed to by
 * @HashResultPointer with the values from the working hash table pointed to
 * by @HashWorkingPointer. The results are put back into the initial hash
 * table, returned through @HashResultPointer as the result hash table.
  2424. **/
  2425. static void
  2426. lpfc_sha_iterate(uint32_t * HashResultPointer, uint32_t * HashWorkingPointer)
  2427. {
  2428. int t;
  2429. uint32_t TEMP;
  2430. uint32_t A, B, C, D, E;
  2431. t = 16;
do {
HashWorkingPointer[t] = S(1, HashWorkingPointer[t - 3] ^
	HashWorkingPointer[t - 8] ^ HashWorkingPointer[t - 14] ^
	HashWorkingPointer[t - 16]);
} while (++t <= 79);
  2439. t = 0;
  2440. A = HashResultPointer[0];
  2441. B = HashResultPointer[1];
  2442. C = HashResultPointer[2];
  2443. D = HashResultPointer[3];
  2444. E = HashResultPointer[4];
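/*
 * Editorial note: the loop below is the standard SHA-1 compression
 * schedule (FIPS 180-1). For t = 0..79 it computes
 *	TEMP = ROTL5(A) + f_t(B, C, D) + E + W[t] + K_t
 * where K_t cycles through 0x5A827999, 0x6ED9EBA1, 0x8F1BBCDC and
 * 0xCA62C1D6 for the four 20-round groups.
 */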
  2445. do {
  2446. if (t < 20) {
  2447. TEMP = ((B & C) | ((~B) & D)) + 0x5A827999;
  2448. } else if (t < 40) {
  2449. TEMP = (B ^ C ^ D) + 0x6ED9EBA1;
  2450. } else if (t < 60) {
  2451. TEMP = ((B & C) | (B & D) | (C & D)) + 0x8F1BBCDC;
  2452. } else {
  2453. TEMP = (B ^ C ^ D) + 0xCA62C1D6;
  2454. }
  2455. TEMP += S(5, A) + E + HashWorkingPointer[t];
  2456. E = D;
  2457. D = C;
  2458. C = S(30, B);
  2459. B = A;
  2460. A = TEMP;
  2461. } while (++t <= 79);
  2462. HashResultPointer[0] += A;
  2463. HashResultPointer[1] += B;
  2464. HashResultPointer[2] += C;
  2465. HashResultPointer[3] += D;
  2466. HashResultPointer[4] += E;
  2467. }
  2468. /**
  2469. * lpfc_challenge_key - Create challenge key based on WWPN of the HBA
  2470. * @RandomChallenge: pointer to the entry of host challenge random number array.
  2471. * @HashWorking: pointer to the entry of the working hash array.
  2472. *
 * This routine calculates the working hash array referred to by @HashWorking
 * from the challenge random numbers associated with the host, referred to by
  2475. * @RandomChallenge. The result is put into the entry of the working hash
  2476. * array and returned by reference through @HashWorking.
  2477. **/
  2478. static void
  2479. lpfc_challenge_key(uint32_t * RandomChallenge, uint32_t * HashWorking)
  2480. {
  2481. *HashWorking = (*RandomChallenge ^ *HashWorking);
  2482. }
  2483. /**
  2484. * lpfc_hba_init - Perform special handling for LC HBA initialization
  2485. * @phba: pointer to lpfc hba data structure.
  2486. * @hbainit: pointer to an array of unsigned 32-bit integers.
  2487. *
  2488. * This routine performs the special handling for LC HBA initialization.
  2489. **/
  2490. void
  2491. lpfc_hba_init(struct lpfc_hba *phba, uint32_t *hbainit)
  2492. {
  2493. int t;
  2494. uint32_t *HashWorking;
  2495. uint32_t *pwwnn = (uint32_t *) phba->wwnn;
  2496. HashWorking = kcalloc(80, sizeof(uint32_t), GFP_KERNEL);
  2497. if (!HashWorking)
  2498. return;
  2499. HashWorking[0] = HashWorking[78] = *pwwnn++;
  2500. HashWorking[1] = HashWorking[79] = *pwwnn;
  2501. for (t = 0; t < 7; t++)
  2502. lpfc_challenge_key(phba->RandomData + t, HashWorking + t);
  2503. lpfc_sha_init(hbainit);
  2504. lpfc_sha_iterate(hbainit, HashWorking);
  2505. kfree(HashWorking);
  2506. }
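/*
 * Usage sketch (hypothetical caller): the digest buffer must hold the
 * five 32-bit words produced by the SHA-1 pass above.
 */
#if 0
	uint32_t hbainit[5];

	lpfc_hba_init(phba, hbainit);
	/* hbainit[0..4] now holds the WWNN/challenge digest */
#endif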
  2507. /**
  2508. * lpfc_cleanup - Performs vport cleanups before deleting a vport
  2509. * @vport: pointer to a virtual N_Port data structure.
  2510. *
  2511. * This routine performs the necessary cleanups before deleting the @vport.
  2512. * It invokes the discovery state machine to perform necessary state
  2513. * transitions and to release the ndlps associated with the @vport. Note,
  2514. * the physical port is treated as @vport 0.
  2515. **/
  2516. void
  2517. lpfc_cleanup(struct lpfc_vport *vport)
  2518. {
  2519. struct lpfc_hba *phba = vport->phba;
  2520. struct lpfc_nodelist *ndlp, *next_ndlp;
  2521. int i = 0;
  2522. if (phba->link_state > LPFC_LINK_DOWN)
  2523. lpfc_port_link_failure(vport);
  2524. list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
  2525. if (!NLP_CHK_NODE_ACT(ndlp)) {
  2526. ndlp = lpfc_enable_node(vport, ndlp,
  2527. NLP_STE_UNUSED_NODE);
  2528. if (!ndlp)
  2529. continue;
  2530. spin_lock_irq(&phba->ndlp_lock);
  2531. NLP_SET_FREE_REQ(ndlp);
  2532. spin_unlock_irq(&phba->ndlp_lock);
  2533. /* Trigger the release of the ndlp memory */
  2534. lpfc_nlp_put(ndlp);
  2535. continue;
  2536. }
  2537. spin_lock_irq(&phba->ndlp_lock);
  2538. if (NLP_CHK_FREE_REQ(ndlp)) {
  2539. /* The ndlp should not be in memory free mode already */
  2540. spin_unlock_irq(&phba->ndlp_lock);
  2541. continue;
  2542. } else
  2543. /* Indicate request for freeing ndlp memory */
  2544. NLP_SET_FREE_REQ(ndlp);
  2545. spin_unlock_irq(&phba->ndlp_lock);
  2546. if (vport->port_type != LPFC_PHYSICAL_PORT &&
  2547. ndlp->nlp_DID == Fabric_DID) {
  2548. /* Just free up ndlp with Fabric_DID for vports */
  2549. lpfc_nlp_put(ndlp);
  2550. continue;
  2551. }
/* Take care of nodes in the unused state before the state
 * machine takes action.
 */
  2555. if (ndlp->nlp_state == NLP_STE_UNUSED_NODE) {
  2556. lpfc_nlp_put(ndlp);
  2557. continue;
  2558. }
  2559. if (ndlp->nlp_type & NLP_FABRIC)
  2560. lpfc_disc_state_machine(vport, ndlp, NULL,
  2561. NLP_EVT_DEVICE_RECOVERY);
  2562. lpfc_disc_state_machine(vport, ndlp, NULL,
  2563. NLP_EVT_DEVICE_RM);
  2564. }
/* At this point, ALL ndlps should be gone
 * because of the previous NLP_EVT_DEVICE_RM.
 * Let's wait for this to happen, if needed: the loop below
 * polls every 10 ms and gives up after about 30 seconds.
 */
  2569. while (!list_empty(&vport->fc_nodes)) {
  2570. if (i++ > 3000) {
  2571. lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
  2572. "0233 Nodelist not empty\n");
  2573. list_for_each_entry_safe(ndlp, next_ndlp,
  2574. &vport->fc_nodes, nlp_listp) {
  2575. lpfc_printf_vlog(ndlp->vport, KERN_ERR,
  2576. LOG_NODE,
  2577. "0282 did:x%x ndlp:x%p "
  2578. "usgmap:x%x refcnt:%d\n",
  2579. ndlp->nlp_DID, (void *)ndlp,
  2580. ndlp->nlp_usg_map,
  2581. kref_read(&ndlp->kref));
  2582. }
  2583. break;
  2584. }
  2585. /* Wait for any activity on ndlps to settle */
  2586. msleep(10);
  2587. }
  2588. lpfc_cleanup_vports_rrqs(vport, NULL);
  2589. }
  2590. /**
  2591. * lpfc_stop_vport_timers - Stop all the timers associated with a vport
  2592. * @vport: pointer to a virtual N_Port data structure.
  2593. *
  2594. * This routine stops all the timers associated with a @vport. This function
  2595. * is invoked before disabling or deleting a @vport. Note that the physical
  2596. * port is treated as @vport 0.
  2597. **/
  2598. void
  2599. lpfc_stop_vport_timers(struct lpfc_vport *vport)
  2600. {
  2601. del_timer_sync(&vport->els_tmofunc);
  2602. del_timer_sync(&vport->delayed_disc_tmo);
  2603. lpfc_can_disctmo(vport);
  2604. return;
  2605. }
  2606. /**
  2607. * __lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
  2608. * @phba: pointer to lpfc hba data structure.
  2609. *
 * This routine stops the SLI4 FCF rediscovery wait timer if it's on. The
 * caller of this routine should already hold the hbalock.
  2612. **/
  2613. void
  2614. __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
  2615. {
  2616. /* Clear pending FCF rediscovery wait flag */
  2617. phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
  2618. /* Now, try to stop the timer */
  2619. del_timer(&phba->fcf.redisc_wait);
  2620. }
  2621. /**
  2622. * lpfc_sli4_stop_fcf_redisc_wait_timer - Stop FCF rediscovery wait timer
  2623. * @phba: pointer to lpfc hba data structure.
  2624. *
 * This routine stops the SLI4 FCF rediscovery wait timer if it's on. It
 * checks whether the FCF rediscovery wait timer is pending with the hbalock
 * held before proceeding with disabling the timer and clearing the
 * wait timer pending flag.
  2629. **/
  2630. void
  2631. lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
  2632. {
  2633. spin_lock_irq(&phba->hbalock);
  2634. if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
  2635. /* FCF rediscovery timer already fired or stopped */
  2636. spin_unlock_irq(&phba->hbalock);
  2637. return;
  2638. }
  2639. __lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
  2640. /* Clear failover in progress flags */
  2641. phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
  2642. spin_unlock_irq(&phba->hbalock);
  2643. }
  2644. /**
  2645. * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
  2646. * @phba: pointer to lpfc hba data structure.
  2647. *
  2648. * This routine stops all the timers associated with a HBA. This function is
  2649. * invoked before either putting a HBA offline or unloading the driver.
  2650. **/
  2651. void
  2652. lpfc_stop_hba_timers(struct lpfc_hba *phba)
  2653. {
  2654. lpfc_stop_vport_timers(phba->pport);
  2655. del_timer_sync(&phba->sli.mbox_tmo);
  2656. del_timer_sync(&phba->fabric_block_timer);
  2657. del_timer_sync(&phba->eratt_poll);
  2658. del_timer_sync(&phba->hb_tmofunc);
  2659. if (phba->sli_rev == LPFC_SLI_REV4) {
  2660. del_timer_sync(&phba->rrq_tmr);
  2661. phba->hba_flag &= ~HBA_RRQ_ACTIVE;
  2662. }
  2663. phba->hb_outstanding = 0;
  2664. switch (phba->pci_dev_grp) {
  2665. case LPFC_PCI_DEV_LP:
  2666. /* Stop any LightPulse device specific driver timers */
  2667. del_timer_sync(&phba->fcp_poll_timer);
  2668. break;
  2669. case LPFC_PCI_DEV_OC:
/* Stop any OneConnect device specific driver timers */
  2671. lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
  2672. break;
  2673. default:
  2674. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  2675. "0297 Invalid device group (x%x)\n",
  2676. phba->pci_dev_grp);
  2677. break;
  2678. }
  2679. return;
  2680. }
  2681. /**
  2682. * lpfc_block_mgmt_io - Mark a HBA's management interface as blocked
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: whether to wait for the active mailbox command to complete
 * (LPFC_MBX_WAIT) or return immediately (LPFC_MBX_NO_WAIT).
 *
 * This routine marks a HBA's management interface as blocked. Once the HBA's
 * management interface is marked as blocked, all user space access to the
 * HBA, whether from the sysfs interface or the libdfc interface, will be
 * blocked. The HBA is set to block the management interface when the driver
 * prepares the HBA interface for online or offline operation.
  2690. **/
  2691. static void
  2692. lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
  2693. {
  2694. unsigned long iflag;
  2695. uint8_t actcmd = MBX_HEARTBEAT;
  2696. unsigned long timeout;
  2697. spin_lock_irqsave(&phba->hbalock, iflag);
  2698. phba->sli.sli_flag |= LPFC_BLOCK_MGMT_IO;
  2699. spin_unlock_irqrestore(&phba->hbalock, iflag);
  2700. if (mbx_action == LPFC_MBX_NO_WAIT)
  2701. return;
  2702. timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
  2703. spin_lock_irqsave(&phba->hbalock, iflag);
  2704. if (phba->sli.mbox_active) {
  2705. actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
  2706. /* Determine how long we might wait for the active mailbox
  2707. * command to be gracefully completed by firmware.
  2708. */
  2709. timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
  2710. phba->sli.mbox_active) * 1000) + jiffies;
  2711. }
  2712. spin_unlock_irqrestore(&phba->hbalock, iflag);
/* Wait for the outstanding mailbox command to complete */
  2714. while (phba->sli.mbox_active) {
  2715. /* Check active mailbox complete status every 2ms */
  2716. msleep(2);
  2717. if (time_after(jiffies, timeout)) {
  2718. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  2719. "2813 Mgmt IO is Blocked %x "
  2720. "- mbox cmd %x still active\n",
  2721. phba->sli.sli_flag, actcmd);
  2722. break;
  2723. }
  2724. }
  2725. }
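/*
 * Usage sketch: block/unblock always pair around an online or offline
 * transition, as lpfc_online() below illustrates.
 */
#if 0
	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
	/* ... reinitialize or tear down the HBA interface ... */
	lpfc_unblock_mgmt_io(phba);
#endif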
  2726. /**
  2727. * lpfc_sli4_node_prep - Assign RPIs for active nodes.
  2728. * @phba: pointer to lpfc hba data structure.
  2729. *
  2730. * Allocate RPIs for all active remote nodes. This is needed whenever
 * an SLI4 adapter is reset and the driver is not unloading. Its purpose
 * is to fix up the temporary RPI assignments.
  2733. **/
  2734. void
  2735. lpfc_sli4_node_prep(struct lpfc_hba *phba)
  2736. {
  2737. struct lpfc_nodelist *ndlp, *next_ndlp;
  2738. struct lpfc_vport **vports;
  2739. int i, rpi;
  2740. unsigned long flags;
  2741. if (phba->sli_rev != LPFC_SLI_REV4)
  2742. return;
  2743. vports = lpfc_create_vport_work_array(phba);
  2744. if (vports == NULL)
  2745. return;
  2746. for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
  2747. if (vports[i]->load_flag & FC_UNLOADING)
  2748. continue;
  2749. list_for_each_entry_safe(ndlp, next_ndlp,
  2750. &vports[i]->fc_nodes,
  2751. nlp_listp) {
  2752. if (!NLP_CHK_NODE_ACT(ndlp))
  2753. continue;
  2754. rpi = lpfc_sli4_alloc_rpi(phba);
  2755. if (rpi == LPFC_RPI_ALLOC_ERROR) {
  2756. spin_lock_irqsave(&phba->ndlp_lock, flags);
  2757. NLP_CLR_NODE_ACT(ndlp);
  2758. spin_unlock_irqrestore(&phba->ndlp_lock, flags);
  2759. continue;
  2760. }
  2761. ndlp->nlp_rpi = rpi;
  2762. lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE,
  2763. "0009 rpi:%x DID:%x "
  2764. "flg:%x map:%x %p\n", ndlp->nlp_rpi,
  2765. ndlp->nlp_DID, ndlp->nlp_flag,
  2766. ndlp->nlp_usg_map, ndlp);
  2767. }
  2768. }
  2769. lpfc_destroy_vport_work_array(phba, vports);
  2770. }
  2771. /**
  2772. * lpfc_online - Initialize and bring a HBA online
  2773. * @phba: pointer to lpfc hba data structure.
  2774. *
  2775. * This routine initializes the HBA and brings a HBA online. During this
  2776. * process, the management interface is blocked to prevent user space access
  2777. * to the HBA interfering with the driver initialization.
  2778. *
  2779. * Return codes
  2780. * 0 - successful
  2781. * 1 - failed
  2782. **/
  2783. int
  2784. lpfc_online(struct lpfc_hba *phba)
  2785. {
  2786. struct lpfc_vport *vport;
  2787. struct lpfc_vport **vports;
  2788. int i, error = 0;
  2789. bool vpis_cleared = false;
  2790. if (!phba)
  2791. return 0;
  2792. vport = phba->pport;
  2793. if (!(vport->fc_flag & FC_OFFLINE_MODE))
  2794. return 0;
  2795. lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  2796. "0458 Bring Adapter online\n");
  2797. lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);
  2798. if (phba->sli_rev == LPFC_SLI_REV4) {
  2799. if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
  2800. lpfc_unblock_mgmt_io(phba);
  2801. return 1;
  2802. }
  2803. spin_lock_irq(&phba->hbalock);
  2804. if (!phba->sli4_hba.max_cfg_param.vpi_used)
  2805. vpis_cleared = true;
  2806. spin_unlock_irq(&phba->hbalock);
  2807. /* Reestablish the local initiator port.
  2808. * The offline process destroyed the previous lport.
  2809. */
  2810. if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME &&
  2811. !phba->nvmet_support) {
  2812. error = lpfc_nvme_create_localport(phba->pport);
  2813. if (error)
  2814. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  2815. "6132 NVME restore reg failed "
  2816. "on nvmei error x%x\n", error);
  2817. }
  2818. } else {
  2819. lpfc_sli_queue_init(phba);
  2820. if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
  2821. lpfc_unblock_mgmt_io(phba);
  2822. return 1;
  2823. }
  2824. }
  2825. vports = lpfc_create_vport_work_array(phba);
  2826. if (vports != NULL) {
  2827. for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
  2828. struct Scsi_Host *shost;
  2829. shost = lpfc_shost_from_vport(vports[i]);
  2830. spin_lock_irq(shost->host_lock);
  2831. vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
  2832. if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
  2833. vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
  2834. if (phba->sli_rev == LPFC_SLI_REV4) {
  2835. vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
  2836. if ((vpis_cleared) &&
  2837. (vports[i]->port_type !=
  2838. LPFC_PHYSICAL_PORT))
  2839. vports[i]->vpi = 0;
  2840. }
  2841. spin_unlock_irq(shost->host_lock);
  2842. }
  2843. }
  2844. lpfc_destroy_vport_work_array(phba, vports);
  2845. lpfc_unblock_mgmt_io(phba);
  2846. return 0;
  2847. }
  2848. /**
  2849. * lpfc_unblock_mgmt_io - Mark a HBA's management interface to be not blocked
  2850. * @phba: pointer to lpfc hba data structure.
  2851. *
  2852. * This routine marks a HBA's management interface as not blocked. Once the
  2853. * HBA's management interface is marked as not blocked, all the user space
  2854. * access to the HBA, whether they are from sysfs interface or libdfc
  2855. * interface will be allowed. The HBA is set to block the management interface
  2856. * when the driver prepares the HBA interface for online or offline and then
  2857. * set to unblock the management interface afterwards.
  2858. **/
  2859. void
  2860. lpfc_unblock_mgmt_io(struct lpfc_hba * phba)
  2861. {
  2862. unsigned long iflag;
  2863. spin_lock_irqsave(&phba->hbalock, iflag);
  2864. phba->sli.sli_flag &= ~LPFC_BLOCK_MGMT_IO;
  2865. spin_unlock_irqrestore(&phba->hbalock, iflag);
  2866. }
  2867. /**
  2868. * lpfc_offline_prep - Prepare a HBA to be brought offline
 * @phba: pointer to lpfc hba data structure.
 * @mbx_action: flag for the mailbox shutdown action.
 *
 * This routine is invoked to prepare a HBA to be brought offline. It
 * unregisters the login for all nodes on all vports and flushes the mailbox
 * queue to make the HBA ready to be brought offline.
  2874. **/
  2875. void
  2876. lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action)
  2877. {
  2878. struct lpfc_vport *vport = phba->pport;
  2879. struct lpfc_nodelist *ndlp, *next_ndlp;
  2880. struct lpfc_vport **vports;
  2881. struct Scsi_Host *shost;
  2882. int i;
  2883. if (vport->fc_flag & FC_OFFLINE_MODE)
  2884. return;
  2885. lpfc_block_mgmt_io(phba, mbx_action);
  2886. lpfc_linkdown(phba);
  2887. /* Issue an unreg_login to all nodes on all vports */
  2888. vports = lpfc_create_vport_work_array(phba);
  2889. if (vports != NULL) {
  2890. for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
  2891. if (vports[i]->load_flag & FC_UNLOADING)
  2892. continue;
  2893. shost = lpfc_shost_from_vport(vports[i]);
  2894. spin_lock_irq(shost->host_lock);
  2895. vports[i]->vpi_state &= ~LPFC_VPI_REGISTERED;
  2896. vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
  2897. vports[i]->fc_flag &= ~FC_VFI_REGISTERED;
  2898. spin_unlock_irq(shost->host_lock);
  2899. shost = lpfc_shost_from_vport(vports[i]);
  2900. list_for_each_entry_safe(ndlp, next_ndlp,
  2901. &vports[i]->fc_nodes,
  2902. nlp_listp) {
  2903. if (!NLP_CHK_NODE_ACT(ndlp))
  2904. continue;
  2905. if (ndlp->nlp_state == NLP_STE_UNUSED_NODE)
  2906. continue;
  2907. if (ndlp->nlp_type & NLP_FABRIC) {
  2908. lpfc_disc_state_machine(vports[i], ndlp,
  2909. NULL, NLP_EVT_DEVICE_RECOVERY);
  2910. lpfc_disc_state_machine(vports[i], ndlp,
  2911. NULL, NLP_EVT_DEVICE_RM);
  2912. }
  2913. spin_lock_irq(shost->host_lock);
  2914. ndlp->nlp_flag &= ~NLP_NPR_ADISC;
  2915. spin_unlock_irq(shost->host_lock);
  2916. /*
  2917. * Whenever an SLI4 port goes offline, free the
  2918. * RPI. Get a new RPI when the adapter port
  2919. * comes back online.
  2920. */
  2921. if (phba->sli_rev == LPFC_SLI_REV4) {
  2922. lpfc_printf_vlog(ndlp->vport,
  2923. KERN_INFO, LOG_NODE,
  2924. "0011 lpfc_offline: "
  2925. "ndlp:x%p did %x "
  2926. "usgmap:x%x rpi:%x\n",
  2927. ndlp, ndlp->nlp_DID,
  2928. ndlp->nlp_usg_map,
  2929. ndlp->nlp_rpi);
  2930. lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi);
  2931. }
  2932. lpfc_unreg_rpi(vports[i], ndlp);
  2933. }
  2934. }
  2935. }
  2936. lpfc_destroy_vport_work_array(phba, vports);
  2937. lpfc_sli_mbox_sys_shutdown(phba, mbx_action);
  2938. if (phba->wq)
  2939. flush_workqueue(phba->wq);
  2940. }
  2941. /**
  2942. * lpfc_offline - Bring a HBA offline
  2943. * @phba: pointer to lpfc hba data structure.
  2944. *
  2945. * This routine actually brings a HBA offline. It stops all the timers
  2946. * associated with the HBA, brings down the SLI layer, and eventually
  2947. * marks the HBA as in offline state for the upper layer protocol.
  2948. **/
  2949. void
  2950. lpfc_offline(struct lpfc_hba *phba)
  2951. {
  2952. struct Scsi_Host *shost;
  2953. struct lpfc_vport **vports;
  2954. int i;
  2955. if (phba->pport->fc_flag & FC_OFFLINE_MODE)
  2956. return;
  2957. /* stop port and all timers associated with this hba */
  2958. lpfc_stop_port(phba);
/* Tear down the local and target port registrations. The
 * nvme transports need to clean up.
 */
  2962. lpfc_nvmet_destroy_targetport(phba);
  2963. lpfc_nvme_destroy_localport(phba->pport);
  2964. vports = lpfc_create_vport_work_array(phba);
  2965. if (vports != NULL)
  2966. for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
  2967. lpfc_stop_vport_timers(vports[i]);
  2968. lpfc_destroy_vport_work_array(phba, vports);
  2969. lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
  2970. "0460 Bring Adapter offline\n");
/* Bring down the SLI layer and clean up. The HBA is offline now. */
  2973. lpfc_sli_hba_down(phba);
  2974. spin_lock_irq(&phba->hbalock);
  2975. phba->work_ha = 0;
  2976. spin_unlock_irq(&phba->hbalock);
  2977. vports = lpfc_create_vport_work_array(phba);
  2978. if (vports != NULL)
  2979. for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
  2980. shost = lpfc_shost_from_vport(vports[i]);
  2981. spin_lock_irq(shost->host_lock);
  2982. vports[i]->work_port_events = 0;
  2983. vports[i]->fc_flag |= FC_OFFLINE_MODE;
  2984. spin_unlock_irq(shost->host_lock);
  2985. }
  2986. lpfc_destroy_vport_work_array(phba, vports);
  2987. }
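/*
 * Usage sketch: offline is a two-stage sequence throughout this file,
 * as in lpfc_sli4_port_sta_fn_reset() above.
 */
#if 0
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
#endif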
  2988. /**
  2989. * lpfc_scsi_free - Free all the SCSI buffers and IOCBs from driver lists
  2990. * @phba: pointer to lpfc hba data structure.
  2991. *
 * This routine frees all the SCSI buffers and IOCBs from the driver
 * list back to the kernel. It is called from lpfc_pci_remove_one to free
  2994. * the internal resources before the device is removed from the system.
  2995. **/
  2996. static void
  2997. lpfc_scsi_free(struct lpfc_hba *phba)
  2998. {
  2999. struct lpfc_scsi_buf *sb, *sb_next;
  3000. if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
  3001. return;
  3002. spin_lock_irq(&phba->hbalock);
  3003. /* Release all the lpfc_scsi_bufs maintained by this host. */
  3004. spin_lock(&phba->scsi_buf_list_put_lock);
  3005. list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
  3006. list) {
  3007. list_del(&sb->list);
  3008. dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
  3009. sb->dma_handle);
  3010. kfree(sb);
  3011. phba->total_scsi_bufs--;
  3012. }
  3013. spin_unlock(&phba->scsi_buf_list_put_lock);
  3014. spin_lock(&phba->scsi_buf_list_get_lock);
  3015. list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
  3016. list) {
  3017. list_del(&sb->list);
  3018. dma_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
  3019. sb->dma_handle);
  3020. kfree(sb);
  3021. phba->total_scsi_bufs--;
  3022. }
  3023. spin_unlock(&phba->scsi_buf_list_get_lock);
  3024. spin_unlock_irq(&phba->hbalock);
  3025. }
  3026. /**
  3027. * lpfc_nvme_free - Free all the NVME buffers and IOCBs from driver lists
  3028. * @phba: pointer to lpfc hba data structure.
  3029. *
 * This routine frees all the NVME buffers and IOCBs from the driver
 * list back to the kernel. It is called from lpfc_pci_remove_one to free
  3032. * the internal resources before the device is removed from the system.
  3033. **/
  3034. static void
  3035. lpfc_nvme_free(struct lpfc_hba *phba)
  3036. {
  3037. struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;
  3038. if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
  3039. return;
  3040. spin_lock_irq(&phba->hbalock);
  3041. /* Release all the lpfc_nvme_bufs maintained by this host. */
  3042. spin_lock(&phba->nvme_buf_list_put_lock);
  3043. list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
  3044. &phba->lpfc_nvme_buf_list_put, list) {
  3045. list_del(&lpfc_ncmd->list);
  3046. dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
  3047. lpfc_ncmd->dma_handle);
  3048. kfree(lpfc_ncmd);
  3049. phba->total_nvme_bufs--;
  3050. }
  3051. spin_unlock(&phba->nvme_buf_list_put_lock);
  3052. spin_lock(&phba->nvme_buf_list_get_lock);
  3053. list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
  3054. &phba->lpfc_nvme_buf_list_get, list) {
  3055. list_del(&lpfc_ncmd->list);
  3056. dma_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
  3057. lpfc_ncmd->dma_handle);
  3058. kfree(lpfc_ncmd);
  3059. phba->total_nvme_bufs--;
  3060. }
  3061. spin_unlock(&phba->nvme_buf_list_get_lock);
  3062. spin_unlock_irq(&phba->hbalock);
  3063. }
  3064. /**
  3065. * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
  3066. * @phba: pointer to lpfc hba data structure.
  3067. *
 * This routine first calculates the sizes of the current els and allocated
 * scsi sgl lists, and then goes through all sgls to update the physical
 * XRIs assigned due to port function reset. During port initialization, the
 * current els and allocated scsi sgl lists are 0s.
  3072. *
  3073. * Return codes
  3074. * 0 - successful (for now, it always returns 0)
  3075. **/
  3076. int
  3077. lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
  3078. {
  3079. struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
  3080. uint16_t i, lxri, xri_cnt, els_xri_cnt;
  3081. LIST_HEAD(els_sgl_list);
  3082. int rc;
  3083. /*
  3084. * update on pci function's els xri-sgl list
  3085. */
  3086. els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
  3087. if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
  3088. /* els xri-sgl expanded */
  3089. xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
  3090. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  3091. "3157 ELS xri-sgl count increased from "
  3092. "%d to %d\n", phba->sli4_hba.els_xri_cnt,
  3093. els_xri_cnt);
  3094. /* allocate the additional els sgls */
  3095. for (i = 0; i < xri_cnt; i++) {
  3096. sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
  3097. GFP_KERNEL);
  3098. if (sglq_entry == NULL) {
  3099. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  3100. "2562 Failure to allocate an "
  3101. "ELS sgl entry:%d\n", i);
  3102. rc = -ENOMEM;
  3103. goto out_free_mem;
  3104. }
  3105. sglq_entry->buff_type = GEN_BUFF_TYPE;
  3106. sglq_entry->virt = lpfc_mbuf_alloc(phba, 0,
  3107. &sglq_entry->phys);
  3108. if (sglq_entry->virt == NULL) {
  3109. kfree(sglq_entry);
  3110. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  3111. "2563 Failure to allocate an "
  3112. "ELS mbuf:%d\n", i);
  3113. rc = -ENOMEM;
  3114. goto out_free_mem;
  3115. }
  3116. sglq_entry->sgl = sglq_entry->virt;
  3117. memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
  3118. sglq_entry->state = SGL_FREED;
  3119. list_add_tail(&sglq_entry->list, &els_sgl_list);
  3120. }
  3121. spin_lock_irq(&phba->hbalock);
  3122. spin_lock(&phba->sli4_hba.sgl_list_lock);
  3123. list_splice_init(&els_sgl_list,
  3124. &phba->sli4_hba.lpfc_els_sgl_list);
  3125. spin_unlock(&phba->sli4_hba.sgl_list_lock);
  3126. spin_unlock_irq(&phba->hbalock);
  3127. } else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
/* els xri-sgl shrunk */
  3129. xri_cnt = phba->sli4_hba.els_xri_cnt - els_xri_cnt;
  3130. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  3131. "3158 ELS xri-sgl count decreased from "
  3132. "%d to %d\n", phba->sli4_hba.els_xri_cnt,
  3133. els_xri_cnt);
  3134. spin_lock_irq(&phba->hbalock);
  3135. spin_lock(&phba->sli4_hba.sgl_list_lock);
  3136. list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
  3137. &els_sgl_list);
  3138. /* release extra els sgls from list */
  3139. for (i = 0; i < xri_cnt; i++) {
  3140. list_remove_head(&els_sgl_list,
  3141. sglq_entry, struct lpfc_sglq, list);
  3142. if (sglq_entry) {
  3143. __lpfc_mbuf_free(phba, sglq_entry->virt,
  3144. sglq_entry->phys);
  3145. kfree(sglq_entry);
  3146. }
  3147. }
  3148. list_splice_init(&els_sgl_list,
  3149. &phba->sli4_hba.lpfc_els_sgl_list);
  3150. spin_unlock(&phba->sli4_hba.sgl_list_lock);
  3151. spin_unlock_irq(&phba->hbalock);
  3152. } else
  3153. lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
  3154. "3163 ELS xri-sgl count unchanged: %d\n",
  3155. els_xri_cnt);
  3156. phba->sli4_hba.els_xri_cnt = els_xri_cnt;
  3157. /* update xris to els sgls on the list */
  3158. sglq_entry = NULL;
  3159. sglq_entry_next = NULL;
  3160. list_for_each_entry_safe(sglq_entry, sglq_entry_next,
  3161. &phba->sli4_hba.lpfc_els_sgl_list, list) {
  3162. lxri = lpfc_sli4_next_xritag(phba);
  3163. if (lxri == NO_XRI) {
  3164. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  3165. "2400 Failed to allocate xri for "
  3166. "ELS sgl\n");
  3167. rc = -ENOMEM;
  3168. goto out_free_mem;
  3169. }
  3170. sglq_entry->sli4_lxritag = lxri;
  3171. sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
  3172. }
  3173. return 0;
  3174. out_free_mem:
  3175. lpfc_free_els_sgl_list(phba);
  3176. return rc;
  3177. }
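
/*
 * Editor's illustrative sketch (not driver code, values hypothetical): if a
 * port function reset raises the ELS IOCB count from 64 to 96, the expand
 * arm above allocates xri_cnt = 96 - 64 = 32 new sglq entries, splices them
 * onto lpfc_els_sgl_list, and the trailing loop then restamps all 96 list
 * entries with fresh physical XRIs via lpfc_sli4_next_xritag(). A shrink
 * from 96 back to 64 walks the same list and frees the 32 surplus entries
 * instead; either way the list length ends up equal to els_xri_cnt.
 */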

/**
 * lpfc_sli4_nvmet_sgl_update - update nvmet xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the size of the current nvmet xri-sgl list,
 * and then goes through all sgls to update the physical XRIs assigned due
 * to port function reset. During port initialization, the current nvmet
 * xri-sgl list count is 0.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - failed to allocate memory
 **/
int
lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
	uint16_t i, lxri, xri_cnt, els_xri_cnt;
	uint16_t nvmet_xri_cnt;
	LIST_HEAD(nvmet_sgl_list);
	int rc;

	/*
	 * update on pci function's nvmet xri-sgl list
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);

	/* For NVMET, ALL remaining XRIs are dedicated for IO processing */
	nvmet_xri_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
	if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
		/* nvmet xri-sgl expanded */
		xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6302 NVMET xri-sgl cnt grew from %d to %d\n",
				phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
		/* allocate the additional nvmet sgls */
		for (i = 0; i < xri_cnt; i++) {
			sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
					     GFP_KERNEL);
			if (sglq_entry == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"6303 Failure to allocate an "
						"NVMET sgl entry:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->buff_type = NVMET_BUFF_TYPE;
			sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
							&sglq_entry->phys);
			if (sglq_entry->virt == NULL) {
				kfree(sglq_entry);
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"6304 Failure to allocate an "
						"NVMET buf:%d\n", i);
				rc = -ENOMEM;
				goto out_free_mem;
			}
			sglq_entry->sgl = sglq_entry->virt;
			memset(sglq_entry->sgl, 0,
			       phba->cfg_sg_dma_buf_size);
			sglq_entry->state = SGL_FREED;
			list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
		}
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&nvmet_sgl_list,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
		/* nvmet xri-sgl shrunk */
		xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6305 NVMET xri-sgl count decreased from "
				"%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
				nvmet_xri_cnt);
		spin_lock_irq(&phba->hbalock);
		spin_lock(&phba->sli4_hba.sgl_list_lock);
		list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
				 &nvmet_sgl_list);
		/* release extra nvmet sgls from list */
		for (i = 0; i < xri_cnt; i++) {
			list_remove_head(&nvmet_sgl_list,
					 sglq_entry, struct lpfc_sglq, list);
			if (sglq_entry) {
				lpfc_nvmet_buf_free(phba, sglq_entry->virt,
						    sglq_entry->phys);
				kfree(sglq_entry);
			}
		}
		list_splice_init(&nvmet_sgl_list,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list);
		spin_unlock(&phba->sli4_hba.sgl_list_lock);
		spin_unlock_irq(&phba->hbalock);
	} else
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"6306 NVMET xri-sgl count unchanged: %d\n",
				nvmet_xri_cnt);
	phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;

	/* update xris to nvmet sgls on the list */
	sglq_entry = NULL;
	sglq_entry_next = NULL;
	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
				 &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"6307 Failed to allocate xri for "
					"NVMET sgl\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		sglq_entry->sli4_lxritag = lxri;
		sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	return 0;

out_free_mem:
	lpfc_free_nvmet_sgl_list(phba);
	return rc;
}
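
/*
 * Editor's illustrative sketch (values hypothetical): the NVMET budget above
 * is simply "everything the port has, minus ELS". With max_xri = 1024 and
 * els_xri_cnt = 256:
 *
 *	nvmet_xri_cnt = 1024 - 256 = 768
 *
 * so 768 XRIs (and their sglq entries) are dedicated to NVMET IO; there is
 * no percentage split here as there is for the FCP/NVME initiator case.
 */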

/**
 * lpfc_sli4_scsi_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the sizes of the current els and allocated
 * scsi xri-sgl lists, and then goes through all sgls to update the physical
 * XRIs assigned due to port function reset. During port initialization, the
 * current els and allocated scsi sgl list counts are 0.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - failed to allocate memory
 **/
int
lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_scsi_buf *psb, *psb_next;
	uint16_t i, lxri, els_xri_cnt, scsi_xri_cnt;
	LIST_HEAD(scsi_sgl_list);
	int rc;

	/*
	 * update on pci function's els xri-sgl list
	 */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
	phba->total_scsi_bufs = 0;

	/*
	 * update on pci function's allocated scsi xri-sgl list
	 */
	/* maximum number of xris available for scsi buffers */
	phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
				      els_xri_cnt;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
		return 0;

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		phba->sli4_hba.scsi_xri_max =  /* Split them up */
			(phba->sli4_hba.scsi_xri_max *
			 phba->cfg_xri_split) / 100;

	spin_lock_irq(&phba->scsi_buf_list_get_lock);
	spin_lock(&phba->scsi_buf_list_put_lock);
	list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list);
	list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list);
	spin_unlock(&phba->scsi_buf_list_put_lock);
	spin_unlock_irq(&phba->scsi_buf_list_get_lock);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"6060 Current allocated SCSI xri-sgl count:%d, "
			"maximum SCSI xri count:%d (split:%d)\n",
			phba->sli4_hba.scsi_xri_cnt,
			phba->sli4_hba.scsi_xri_max, phba->cfg_xri_split);

	if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) {
		/* max scsi xri shrunk below the allocated scsi buffers */
		scsi_xri_cnt = phba->sli4_hba.scsi_xri_cnt -
					phba->sli4_hba.scsi_xri_max;
		/* release the extra allocated scsi buffers */
		for (i = 0; i < scsi_xri_cnt; i++) {
			list_remove_head(&scsi_sgl_list, psb,
					 struct lpfc_scsi_buf, list);
			if (psb) {
				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
					      psb->data, psb->dma_handle);
				kfree(psb);
			}
		}
		spin_lock_irq(&phba->scsi_buf_list_get_lock);
		phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
		spin_unlock_irq(&phba->scsi_buf_list_get_lock);
	}

	/* update xris associated to remaining allocated scsi buffers */
	psb = NULL;
	psb_next = NULL;
	list_for_each_entry_safe(psb, psb_next, &scsi_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"2560 Failed to allocate xri for "
					"scsi buffer\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		psb->cur_iocbq.sli4_lxritag = lxri;
		psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	spin_lock_irq(&phba->scsi_buf_list_get_lock);
	spin_lock(&phba->scsi_buf_list_put_lock);
	list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get);
	INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
	spin_unlock(&phba->scsi_buf_list_put_lock);
	spin_unlock_irq(&phba->scsi_buf_list_get_lock);
	return 0;

out_free_mem:
	lpfc_scsi_free(phba);
	return rc;
}
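
/*
 * Editor's worked example (values hypothetical): how the cfg_xri_split
 * percentage carves up the IO pool when both FCP and NVME are enabled.
 * With max_xri = 1024, els_xri_cnt = 256 and cfg_xri_split = 50:
 *
 *	scsi_xri_max = 1024 - 256 = 768		(all non-ELS XRIs)
 *	scsi_xri_max = (768 * 50) / 100 = 384	(SCSI's share of the split)
 *
 * leaving the remaining 384 XRIs for the NVME side; see
 * lpfc_sli4_nvme_sgl_update() below, which subtracts scsi_xri_max from the
 * non-ELS total to arrive at the same complement.
 */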

/**
 * lpfc_get_wwpn - Read the WWPN of the HBA instance via a READ_NV mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * Return: the WWPN in CPU byte order, or (uint64_t)-1 on failure.
 **/
static uint64_t
lpfc_get_wwpn(struct lpfc_hba *phba)
{
	uint64_t wwn;
	int rc;
	LPFC_MBOXQ_t *mboxq;
	MAILBOX_t *mb;

	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						GFP_KERNEL);
	if (!mboxq)
		return (uint64_t)-1;

	/* First get WWN of HBA instance */
	lpfc_read_nv(phba, mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"6019 Mailbox failed, mbxCmd x%x "
				"READ_NV, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
		mempool_free(mboxq, phba->mbox_mem_pool);
		return (uint64_t)-1;
	}
	mb = &mboxq->u.mb;
	memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
	/* wwn is WWPN of HBA instance */
	mempool_free(mboxq, phba->mbox_mem_pool);
	if (phba->sli_rev == LPFC_SLI_REV4)
		return be64_to_cpu(wwn);
	else
		return rol64(wwn, 32);
}
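
/*
 * Editor's note (informational): the READ_NV payload carries the port name
 * as raw bytes. On SLI-4 the value is big-endian, so be64_to_cpu() yields
 * the WWPN directly; on SLI-3 the two 32-bit words arrive swapped relative
 * to that layout, which is why rol64(wwn, 32) - a 32-bit rotate, i.e. an
 * exchange of the word halves - is used instead of a full byte swap.
 */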

/**
 * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine first calculates the size of the allocated nvme xri-sgl
 * list, and then goes through all sgls to update the physical XRIs assigned
 * due to port function reset. During port initialization, the allocated
 * nvme xri-sgl list count is 0.
 *
 * Return codes
 *   0 - successful
 *   -ENOMEM - failed to allocate memory
 **/
int
lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
{
	struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
	uint16_t i, lxri, els_xri_cnt;
	uint16_t nvme_xri_cnt, nvme_xri_max;
	LIST_HEAD(nvme_sgl_list);
	int rc;

	phba->total_nvme_bufs = 0;

	if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
		return 0;
	/*
	 * update on pci function's allocated nvme xri-sgl list
	 */

	/* maximum number of xris available for nvme buffers */
	els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
	nvme_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
	phba->sli4_hba.nvme_xri_max = nvme_xri_max;
	phba->sli4_hba.nvme_xri_max -= phba->sli4_hba.scsi_xri_max;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"6074 Current allocated NVME xri-sgl count:%d, "
			"maximum NVME xri count:%d\n",
			phba->sli4_hba.nvme_xri_cnt,
			phba->sli4_hba.nvme_xri_max);

	spin_lock_irq(&phba->nvme_buf_list_get_lock);
	spin_lock(&phba->nvme_buf_list_put_lock);
	list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list);
	list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list);
	spin_unlock(&phba->nvme_buf_list_put_lock);
	spin_unlock_irq(&phba->nvme_buf_list_get_lock);

	if (phba->sli4_hba.nvme_xri_cnt > phba->sli4_hba.nvme_xri_max) {
		/* max nvme xri shrunk below the allocated nvme buffers */
		spin_lock_irq(&phba->nvme_buf_list_get_lock);
		nvme_xri_cnt = phba->sli4_hba.nvme_xri_cnt -
					phba->sli4_hba.nvme_xri_max;
		spin_unlock_irq(&phba->nvme_buf_list_get_lock);
		/* release the extra allocated nvme buffers */
		for (i = 0; i < nvme_xri_cnt; i++) {
			list_remove_head(&nvme_sgl_list, lpfc_ncmd,
					 struct lpfc_nvme_buf, list);
			if (lpfc_ncmd) {
				dma_pool_free(phba->lpfc_sg_dma_buf_pool,
					      lpfc_ncmd->data,
					      lpfc_ncmd->dma_handle);
				kfree(lpfc_ncmd);
			}
		}
		spin_lock_irq(&phba->nvme_buf_list_get_lock);
		phba->sli4_hba.nvme_xri_cnt -= nvme_xri_cnt;
		spin_unlock_irq(&phba->nvme_buf_list_get_lock);
	}

	/* update xris associated to remaining allocated nvme buffers */
	lpfc_ncmd = NULL;
	lpfc_ncmd_next = NULL;
	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
				 &nvme_sgl_list, list) {
		lxri = lpfc_sli4_next_xritag(phba);
		if (lxri == NO_XRI) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"6075 Failed to allocate xri for "
					"nvme buffer\n");
			rc = -ENOMEM;
			goto out_free_mem;
		}
		lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
		lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
	}
	spin_lock_irq(&phba->nvme_buf_list_get_lock);
	spin_lock(&phba->nvme_buf_list_put_lock);
	list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get);
	INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
	spin_unlock(&phba->nvme_buf_list_put_lock);
	spin_unlock_irq(&phba->nvme_buf_list_get_lock);
	return 0;

out_free_mem:
	lpfc_nvme_free(phba);
	return rc;
}

/**
 * lpfc_create_port - Create an FC port
 * @phba: pointer to lpfc hba data structure.
 * @instance: a unique integer ID to this FC port.
 * @dev: pointer to the device data structure.
 *
 * This routine creates an FC port for the upper layer protocol. The FC port
 * can be created on top of either a physical port or a virtual port provided
 * by the HBA. This routine also allocates a SCSI host data structure (shost)
 * and associates it with the FC port before adding the shost to the SCSI
 * midlayer.
 *
 * Return codes
 *   @vport - pointer to the virtual N_Port data structure.
 *   NULL - port create failed.
 **/
struct lpfc_vport *
lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
	struct lpfc_vport *vport;
	struct Scsi_Host  *shost = NULL;
	int error = 0;
	int i;
	uint64_t wwn;
	bool use_no_reset_hba = false;
	int rc;

	if (lpfc_no_hba_reset_cnt) {
		if (phba->sli_rev < LPFC_SLI_REV4 &&
		    dev == &phba->pcidev->dev) {
			/* Reset the port first */
			lpfc_sli_brdrestart(phba);
			rc = lpfc_sli_chipset_init(phba);
			if (rc)
				return NULL;
		}
		wwn = lpfc_get_wwpn(phba);
	}

	for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
		if (wwn == lpfc_no_hba_reset[i]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"6020 Setting use_no_reset port=%llx\n",
					wwn);
			use_no_reset_hba = true;
			break;
		}
	}

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
		if (dev != &phba->pcidev->dev) {
			shost = scsi_host_alloc(&lpfc_vport_template,
						sizeof(struct lpfc_vport));
		} else {
			if (!use_no_reset_hba)
				shost = scsi_host_alloc(&lpfc_template,
						sizeof(struct lpfc_vport));
			else
				shost = scsi_host_alloc(&lpfc_template_no_hr,
						sizeof(struct lpfc_vport));
		}
	} else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		shost = scsi_host_alloc(&lpfc_template_nvme,
					sizeof(struct lpfc_vport));
	}
	if (!shost)
		goto out;
	vport = (struct lpfc_vport *) shost->hostdata;
	vport->phba = phba;
	vport->load_flag |= FC_LOADING;
	vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
	vport->fc_rscn_flush = 0;
	lpfc_get_vport_cfgparam(vport);

	shost->unique_id = instance;
	shost->max_id = LPFC_MAX_TARGET;
	shost->max_lun = vport->cfg_max_luns;
	shost->this_id = -1;
	shost->max_cmd_len = 16;
	shost->nr_hw_queues = phba->cfg_fcp_io_channel;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		shost->dma_boundary =
			phba->sli4_hba.pc_sli4_params.sge_supp_len-1;
		shost->sg_tablesize = phba->cfg_sg_seg_cnt;
	}

	/*
	 * Set initial can_queue value since 0 is no longer supported and
	 * scsi_add_host will fail. This will be adjusted later based on the
	 * max xri value determined in hba setup.
	 */
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (dev != &phba->pcidev->dev) {
		shost->transportt = lpfc_vport_transport_template;
		vport->port_type = LPFC_NPIV_PORT;
	} else {
		shost->transportt = lpfc_transport_template;
		vport->port_type = LPFC_PHYSICAL_PORT;
	}

	/* Initialize all internally managed lists. */
	INIT_LIST_HEAD(&vport->fc_nodes);
	INIT_LIST_HEAD(&vport->rcv_buffer_list);
	spin_lock_init(&vport->work_port_lock);

	timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0);

	timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0);

	timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);

	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
	if (error)
		goto out_put_shost;

	spin_lock_irq(&phba->hbalock);
	list_add_tail(&vport->listentry, &phba->port_list);
	spin_unlock_irq(&phba->hbalock);
	return vport;

out_put_shost:
	scsi_host_put(shost);
out:
	return NULL;
}

/**
 * destroy_port - destroy an FC port
 * @vport: pointer to an lpfc virtual N_Port data structure.
 *
 * This routine destroys an FC port from the upper layer protocol. All the
 * resources associated with the port are released.
 **/
void
destroy_port(struct lpfc_vport *vport)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	struct lpfc_hba  *phba = vport->phba;

	lpfc_debugfs_terminate(vport);
	fc_remove_host(shost);
	scsi_remove_host(shost);

	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	lpfc_cleanup(vport);
	return;
}

/**
 * lpfc_get_instance - Get a unique integer ID
 *
 * This routine allocates a unique integer ID from lpfc_hba_index pool. It
 * uses the kernel idr facility to perform the task.
 *
 * Return codes:
 *   instance - a unique integer ID allocated as the new instance.
 *   -1 - lpfc get instance failed.
 **/
int
lpfc_get_instance(void)
{
	int ret;

	ret = idr_alloc(&lpfc_hba_index, NULL, 0, 0, GFP_KERNEL);
	return ret < 0 ? -1 : ret;
}
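
/*
 * Editor's sketch of the idr contract used above (illustrative, not driver
 * code): idr_alloc() with start = 0 and end = 0 means "any non-negative ID",
 * so instances count up from 0 and holes left by removed HBAs can be reused.
 * A matching release on teardown would look roughly like:
 */
#if 0	/* example only */
	int instance = lpfc_get_instance();	/* e.g. 0 on first call */
	if (instance == -1)
		return -ENOMEM;
	/* ... instance feeds lpfc_create_port(phba, instance, dev) ... */
	idr_remove(&lpfc_hba_index, instance);	/* on teardown */
#endif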

/**
 * lpfc_scan_finished - method for SCSI layer to detect whether scan is done
 * @shost: pointer to SCSI host data structure.
 * @time: elapsed time of the scan in jiffies.
 *
 * This routine is called by the SCSI layer with a SCSI host to determine
 * whether the host scan has finished.
 *
 * Note: there is no scan_start function as adapter initialization will have
 * asynchronously kicked off the link initialization.
 *
 * Return codes
 *   0 - SCSI host scan is not over yet.
 *   1 - SCSI host scan is over.
 **/
int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	int stat = 0;

	spin_lock_irq(shost->host_lock);

	if (vport->load_flag & FC_UNLOADING) {
		stat = 1;
		goto finished;
	}
	if (time >= msecs_to_jiffies(30 * 1000)) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0461 Scanning longer than 30 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}
	if (time >= msecs_to_jiffies(15 * 1000) &&
	    phba->link_state <= LPFC_LINK_DOWN) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0465 Link down longer than 15 "
				"seconds. Continuing initialization\n");
		stat = 1;
		goto finished;
	}

	if (vport->port_state != LPFC_VPORT_READY)
		goto finished;
	if (vport->num_disc_nodes || vport->fc_prli_sent)
		goto finished;
	if (vport->fc_map_cnt == 0 && time < msecs_to_jiffies(2 * 1000))
		goto finished;
	if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
		goto finished;

	stat = 1;

finished:
	spin_unlock_irq(shost->host_lock);
	return stat;
}
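
/*
 * Editor's summary of the scan cut-offs above (informational): the midlayer
 * polls this routine with "time" in jiffies since scan start, and the
 * comparisons translate as:
 *
 *	time >= msecs_to_jiffies(30 * 1000)	-> give up after 30s always
 *	time >= msecs_to_jiffies(15 * 1000)	-> give up after 15s if the
 *						   link never came up
 *	time <  msecs_to_jiffies(2 * 1000)	-> keep waiting at least 2s
 *						   for a first mapped node
 */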

/**
 * lpfc_host_attrib_init - Initialize SCSI host attributes on an FC port
 * @shost: pointer to SCSI host data structure.
 *
 * This routine initializes a given SCSI host attributes on an FC port. The
 * SCSI host can be either on top of a physical port or a virtual port.
 **/
void lpfc_host_attrib_init(struct Scsi_Host *shost)
{
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_hba   *phba = vport->phba;
	/*
	 * Set fixed host attributes. Must be done after lpfc_sli_hba_setup().
	 */

	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
	fc_host_supported_classes(shost) = FC_COS_CLASS3;

	memset(fc_host_supported_fc4s(shost), 0,
	       sizeof(fc_host_supported_fc4s(shost)));
	fc_host_supported_fc4s(shost)[2] = 1;
	fc_host_supported_fc4s(shost)[7] = 1;

	lpfc_vport_symbolic_node_name(vport, fc_host_symbolic_name(shost),
				      sizeof fc_host_symbolic_name(shost));

	fc_host_supported_speeds(shost) = 0;
	if (phba->lmt & LMT_32Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_32GBIT;
	if (phba->lmt & LMT_16Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_16GBIT;
	if (phba->lmt & LMT_10Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_10GBIT;
	if (phba->lmt & LMT_8Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_8GBIT;
	if (phba->lmt & LMT_4Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_4GBIT;
	if (phba->lmt & LMT_2Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_2GBIT;
	if (phba->lmt & LMT_1Gb)
		fc_host_supported_speeds(shost) |= FC_PORTSPEED_1GBIT;

	fc_host_maxframe_size(shost) =
	    (((uint32_t) vport->fc_sparam.cmn.bbRcvSizeMsb & 0x0F) << 8) |
	    (uint32_t) vport->fc_sparam.cmn.bbRcvSizeLsb;

	fc_host_dev_loss_tmo(shost) = vport->cfg_devloss_tmo;

	/* This value is also unchanging */
	memset(fc_host_active_fc4s(shost), 0,
	       sizeof(fc_host_active_fc4s(shost)));
	fc_host_active_fc4s(shost)[2] = 1;
	fc_host_active_fc4s(shost)[7] = 1;

	fc_host_max_npiv_vports(shost) = phba->max_vpi;
	spin_lock_irq(shost->host_lock);
	vport->load_flag &= ~FC_LOADING;
	spin_unlock_irq(shost->host_lock);
}
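
/*
 * Editor's worked example for the maxframe computation above (values
 * hypothetical): bbRcvSizeMsb's low nibble supplies bits 8-11 and
 * bbRcvSizeLsb bits 0-7 of the receive size, so with bbRcvSizeMsb = 0x08
 * and bbRcvSizeLsb = 0x00:
 *
 *	maxframe = ((0x08 & 0x0F) << 8) | 0x00 = 0x800 = 2048 bytes
 */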

/**
 * lpfc_stop_port_s3 - Stop SLI3 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI3 device port. It stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s3(struct lpfc_hba *phba)
{
	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	/* Clear all pending interrupts */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */

	/* Reset some HBA SLI setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
}

/**
 * lpfc_stop_port_s4 - Stop SLI4 device port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to stop an SLI4 device port. It stops the device
 * from generating interrupts and stops the device driver's timers for the
 * device.
 **/
static void
lpfc_stop_port_s4(struct lpfc_hba *phba)
{
	/* Reset some HBA SLI4 setup states */
	lpfc_stop_hba_timers(phba);
	phba->pport->work_port_events = 0;
	phba->sli4_hba.intr_enable = 0;
}

/**
 * lpfc_stop_port - Wrapper function for stopping hba port
 * @phba: Pointer to HBA context object.
 *
 * This routine wraps the actual SLI3 or SLI4 hba stop port routine via the
 * API jump table function pointer from the lpfc_hba struct.
 **/
void
lpfc_stop_port(struct lpfc_hba *phba)
{
	phba->lpfc_stop_port(phba);

	if (phba->wq)
		flush_workqueue(phba->wq);
}
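
/*
 * Editor's note (hedged): phba->lpfc_stop_port is one of the SLI-specific
 * jump-table entries; the API setup code is expected to point it at
 * lpfc_stop_port_s3 or lpfc_stop_port_s4 according to the device group,
 * roughly along these lines (sketch only - see lpfc_init_api_table_setup):
 */
#if 0	/* illustrative sketch, not the authoritative implementation */
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:			/* SLI-3 parts */
		phba->lpfc_stop_port = lpfc_stop_port_s3;
		break;
	case LPFC_PCI_DEV_OC:			/* SLI-4 parts */
		phba->lpfc_stop_port = lpfc_stop_port_s4;
		break;
	}
#endif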

/**
 * lpfc_fcf_redisc_wait_start_timer - Start fcf rediscover wait timer
 * @phba: Pointer to hba for which this call is being executed.
 *
 * This routine starts the timer waiting for the FCF rediscovery to complete.
 **/
void
lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba)
{
	unsigned long fcf_redisc_wait_tmo =
		(jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO));
	/* Start fcf rediscovery wait period timer */
	mod_timer(&phba->fcf.redisc_wait, fcf_redisc_wait_tmo);
	spin_lock_irq(&phba->hbalock);
	/* Allow action to new fcf asynchronous event */
	phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
	/* Mark the FCF rediscovery pending state */
	phba->fcf.fcf_flag |= FCF_REDISC_PEND;
	spin_unlock_irq(&phba->hbalock);
}
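
/*
 * Editor's note (informational): the expiry arithmetic above is the
 * standard kernel timer idiom - an absolute deadline of "now + N ms"
 * expressed in jiffies:
 *
 *	expires = jiffies + msecs_to_jiffies(LPFC_FCF_REDISCOVER_WAIT_TMO);
 *	mod_timer(&phba->fcf.redisc_wait, expires);
 *
 * mod_timer() both arms an idle timer and re-arms an already pending one,
 * so repeated calls simply push the deadline out.
 */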

/**
 * lpfc_sli4_fcf_redisc_wait_tmo - FCF table rediscover wait timeout
 * @t: pointer to the timer embedded in the lpfc_hba FCF structure.
 *
 * This routine is invoked when the wait for FCF table rediscovery has
 * timed out. If new FCF record(s) has (have) been discovered during the
 * wait period, a new FCF event shall be added to the FCOE async event
 * list, and then the worker thread shall be woken up for processing from
 * the worker thread context.
 **/
static void
lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t)
{
	struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait);

	/* Don't send FCF rediscovery event if timer cancelled */
	spin_lock_irq(&phba->hbalock);
	if (!(phba->fcf.fcf_flag & FCF_REDISC_PEND)) {
		spin_unlock_irq(&phba->hbalock);
		return;
	}
	/* Clear FCF rediscovery timer pending flag */
	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
	/* FCF rediscovery event to worker thread */
	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
	spin_unlock_irq(&phba->hbalock);
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
			"2776 FCF rediscover quiescent timer expired\n");
	/* wake up worker thread */
	lpfc_worker_wake_up(phba);
}

/**
 * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link-attention link fault code and
 * translate it into the base driver's read link attention mailbox command
 * status.
 *
 * Return: Link-attention status in terms of base driver's coding.
 **/
static uint16_t
lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
			   struct lpfc_acqe_link *acqe_link)
{
	uint16_t latt_fault;

	switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
	case LPFC_ASYNC_LINK_FAULT_NONE:
	case LPFC_ASYNC_LINK_FAULT_LOCAL:
	case LPFC_ASYNC_LINK_FAULT_REMOTE:
		latt_fault = 0;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0398 Invalid link fault code: x%x\n",
				bf_get(lpfc_acqe_link_fault, acqe_link));
		latt_fault = MBXERR_ERROR;
		break;
	}
	return latt_fault;
}

/**
 * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to parse the SLI4 link attention type and translate it
 * into the base driver's link attention type coding.
 *
 * Return: Link attention type in terms of base driver's coding.
 **/
static uint8_t
lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
			  struct lpfc_acqe_link *acqe_link)
{
	uint8_t att_type;

	switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
	case LPFC_ASYNC_LINK_STATUS_DOWN:
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
		att_type = LPFC_ATT_LINK_DOWN;
		break;
	case LPFC_ASYNC_LINK_STATUS_UP:
		/* Ignore physical link up events - wait for logical link up */
		att_type = LPFC_ATT_RESERVED;
		break;
	case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
		att_type = LPFC_ATT_LINK_UP;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0399 Invalid link attention type: x%x\n",
				bf_get(lpfc_acqe_link_status, acqe_link));
		att_type = LPFC_ATT_RESERVED;
		break;
	}
	return att_type;
}

/**
 * lpfc_sli_port_speed_get - Translate sli3 link speed code to link speed
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to get an SLI3 FC port's link speed in Mbps.
 *
 * Return: link speed in terms of Mbps.
 **/
uint32_t
lpfc_sli_port_speed_get(struct lpfc_hba *phba)
{
	uint32_t link_speed;

	if (!lpfc_is_link_up(phba))
		return 0;

	if (phba->sli_rev <= LPFC_SLI_REV3) {
		switch (phba->fc_linkspeed) {
		case LPFC_LINK_SPEED_1GHZ:
			link_speed = 1000;
			break;
		case LPFC_LINK_SPEED_2GHZ:
			link_speed = 2000;
			break;
		case LPFC_LINK_SPEED_4GHZ:
			link_speed = 4000;
			break;
		case LPFC_LINK_SPEED_8GHZ:
			link_speed = 8000;
			break;
		case LPFC_LINK_SPEED_10GHZ:
			link_speed = 10000;
			break;
		case LPFC_LINK_SPEED_16GHZ:
			link_speed = 16000;
			break;
		default:
			link_speed = 0;
		}
	} else {
		if (phba->sli4_hba.link_state.logical_speed)
			link_speed =
				phba->sli4_hba.link_state.logical_speed;
		else
			link_speed = phba->sli4_hba.link_state.speed;
	}
	return link_speed;
}

/**
 * lpfc_sli4_port_speed_parse - Parse async evt link speed code to link speed
 * @phba: pointer to lpfc hba data structure.
 * @evt_code: asynchronous event code.
 * @speed_code: asynchronous event link speed code.
 *
 * This routine is to parse the given SLI4 async event link speed code into
 * value of Mbps for the link speed.
 *
 * Return: link speed in terms of Mbps.
 **/
static uint32_t
lpfc_sli4_port_speed_parse(struct lpfc_hba *phba, uint32_t evt_code,
			   uint8_t speed_code)
{
	uint32_t port_speed;

	switch (evt_code) {
	case LPFC_TRAILER_CODE_LINK:
		switch (speed_code) {
		case LPFC_ASYNC_LINK_SPEED_ZERO:
			port_speed = 0;
			break;
		case LPFC_ASYNC_LINK_SPEED_10MBPS:
			port_speed = 10;
			break;
		case LPFC_ASYNC_LINK_SPEED_100MBPS:
			port_speed = 100;
			break;
		case LPFC_ASYNC_LINK_SPEED_1GBPS:
			port_speed = 1000;
			break;
		case LPFC_ASYNC_LINK_SPEED_10GBPS:
			port_speed = 10000;
			break;
		case LPFC_ASYNC_LINK_SPEED_20GBPS:
			port_speed = 20000;
			break;
		case LPFC_ASYNC_LINK_SPEED_25GBPS:
			port_speed = 25000;
			break;
		case LPFC_ASYNC_LINK_SPEED_40GBPS:
			port_speed = 40000;
			break;
		default:
			port_speed = 0;
		}
		break;
	case LPFC_TRAILER_CODE_FC:
		switch (speed_code) {
		case LPFC_FC_LA_SPEED_UNKNOWN:
			port_speed = 0;
			break;
		case LPFC_FC_LA_SPEED_1G:
			port_speed = 1000;
			break;
		case LPFC_FC_LA_SPEED_2G:
			port_speed = 2000;
			break;
		case LPFC_FC_LA_SPEED_4G:
			port_speed = 4000;
			break;
		case LPFC_FC_LA_SPEED_8G:
			port_speed = 8000;
			break;
		case LPFC_FC_LA_SPEED_10G:
			port_speed = 10000;
			break;
		case LPFC_FC_LA_SPEED_16G:
			port_speed = 16000;
			break;
		case LPFC_FC_LA_SPEED_32G:
			port_speed = 32000;
			break;
		default:
			port_speed = 0;
		}
		break;
	default:
		port_speed = 0;
	}
	return port_speed;
}
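
/*
 * Editor's example (informational): for a native FC trailer event carrying
 * LPFC_FC_LA_SPEED_16G, the routine above returns 16000 - link speeds are
 * normalized to Mbps whether the async event arrived on the FCoE
 * (LPFC_TRAILER_CODE_LINK) or native FC (LPFC_TRAILER_CODE_FC) path, and
 * any unrecognized code collapses to 0.
 */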

/**
 * lpfc_sli4_async_link_evt - Process the asynchronous FCoE link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_link: pointer to the async link completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FCoE link event.
 **/
static void
lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_link *acqe_link)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	uint8_t att_type;
	int rc;

	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
	if (att_type != LPFC_ATT_LINK_DOWN && att_type != LPFC_ATT_LINK_UP)
		return;
	phba->fcoe_eventtag = acqe_link->event_tag;
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0395 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0396 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0397 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have finished processing this link event */
	phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_LINK,
				bf_get(lpfc_acqe_link_speed, acqe_link));
	phba->sli4_hba.link_state.duplex =
				bf_get(lpfc_acqe_link_duplex, acqe_link);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_link_status, acqe_link);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_link_type, acqe_link);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_link_number, acqe_link);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_link);
	phba->sli4_hba.link_state.logical_speed =
			bf_get(lpfc_acqe_logical_link_speed, acqe_link) * 10;

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2900 Async FC/FCoE Link event - Speed:%dGBit "
			"duplex:x%x LA Type:x%x Port Type:%d Port Number:%d "
			"Logical speed:%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	/*
	 * For FC Mode: issue the READ_TOPOLOGY mailbox command to fetch
	 * topology info. Note: Optional for non FC-AL ports.
	 */
	if (!(phba->hba_flag & HBA_FCOE_MODE)) {
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc == MBX_NOT_FINISHED)
			goto out_free_dmabuf;
		return;
	}
	/*
	 * For FCoE Mode: fill in all the topology information we need and call
	 * the READ_TOPOLOGY completion routine to continue without actually
	 * sending the READ_TOPOLOGY mailbox command to the port.
	 */
	/* Parse and translate status field */
	mb = &pmb->u.mb;
	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);

	/* Parse and translate link attention fields */
	la = (struct lpfc_mbx_read_top *) &pmb->u.mb.un.varReadTop;
	la->eventTag = acqe_link->event_tag;
	bf_set(lpfc_mbx_read_top_att_type, la, att_type);
	bf_set(lpfc_mbx_read_top_link_spd, la,
	       (bf_get(lpfc_acqe_link_speed, acqe_link)));

	/* Fake the following irrelevant fields */
	bf_set(lpfc_mbx_read_top_topology, la, LPFC_TOPOLOGY_PT_PT);
	bf_set(lpfc_mbx_read_top_alpa_granted, la, 0);
	bf_set(lpfc_mbx_read_top_il, la, 0);
	bf_set(lpfc_mbx_read_top_pb, la, 0);
	bf_set(lpfc_mbx_read_top_fa, la, 0);
	bf_set(lpfc_mbx_read_top_mm, la, 0);

	/* Invoke the lpfc_handle_latt mailbox command callback function */
	lpfc_mbx_cmpl_read_topology(phba, pmb);

	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_async_fc_evt - Process the asynchronous FC link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fc: pointer to the async fc completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous FC event. It will simply log
 * that the event was received and then issue a read_topology mailbox command so
 * that the rest of the driver will treat it the same as SLI3.
 **/
static void
lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
{
	struct lpfc_dmabuf *mp;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *mb;
	struct lpfc_mbx_read_top *la;
	int rc;

	if (bf_get(lpfc_trailer_type, acqe_fc) !=
	    LPFC_FC_LA_EVENT_TYPE_FC_LINK) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2895 Non FC link Event detected.(%d)\n",
				bf_get(lpfc_trailer_type, acqe_fc));
		return;
	}
	/* Keep the link status for extra SLI4 state machine reference */
	phba->sli4_hba.link_state.speed =
			lpfc_sli4_port_speed_parse(phba, LPFC_TRAILER_CODE_FC,
				bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
	phba->sli4_hba.link_state.duplex = LPFC_ASYNC_LINK_DUPLEX_FULL;
	phba->sli4_hba.link_state.topology =
				bf_get(lpfc_acqe_fc_la_topology, acqe_fc);
	phba->sli4_hba.link_state.status =
				bf_get(lpfc_acqe_fc_la_att_type, acqe_fc);
	phba->sli4_hba.link_state.type =
				bf_get(lpfc_acqe_fc_la_port_type, acqe_fc);
	phba->sli4_hba.link_state.number =
				bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
	phba->sli4_hba.link_state.fault =
				bf_get(lpfc_acqe_link_fault, acqe_fc);
	phba->sli4_hba.link_state.logical_speed =
				bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2896 Async FC event - Speed:%dGBaud Topology:x%x "
			"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
			"%dMbps Fault:%d\n",
			phba->sli4_hba.link_state.speed,
			phba->sli4_hba.link_state.topology,
			phba->sli4_hba.link_state.status,
			phba->sli4_hba.link_state.type,
			phba->sli4_hba.link_state.number,
			phba->sli4_hba.link_state.logical_speed,
			phba->sli4_hba.link_state.fault);
	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2897 The mboxq allocation failed\n");
		return;
	}
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2898 The lpfc_dmabuf allocation failed\n");
		goto out_free_pmb;
	}
	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2899 The mbuf allocation failed\n");
		goto out_free_dmabuf;
	}

	/* Cleanup any outstanding ELS commands */
	lpfc_els_flush_all_cmd(phba);

	/* Block ELS IOCBs until we have finished processing this link event */
	phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;

	/* Update link event statistics */
	phba->sli.slistat.link_event++;

	/* Create lpfc_handle_latt mailbox command from link ACQE */
	lpfc_read_topology(phba, pmb, mp);
	pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
	pmb->vport = phba->pport;

	if (phba->sli4_hba.link_state.status != LPFC_FC_LA_TYPE_LINK_UP) {
		phba->link_flag &= ~(LS_MDS_LINK_DOWN | LS_MDS_LOOPBACK);

		switch (phba->sli4_hba.link_state.status) {
		case LPFC_FC_LA_TYPE_MDS_LINK_DOWN:
			phba->link_flag |= LS_MDS_LINK_DOWN;
			break;
		case LPFC_FC_LA_TYPE_MDS_LOOPBACK:
			phba->link_flag |= LS_MDS_LOOPBACK;
			break;
		default:
			break;
		}

		/* Parse and translate status field */
		mb = &pmb->u.mb;
		mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba,
							   (void *)acqe_fc);

		/* Parse and translate link attention fields */
		la = (struct lpfc_mbx_read_top *)&pmb->u.mb.un.varReadTop;
		la->eventTag = acqe_fc->event_tag;

		if (phba->sli4_hba.link_state.status ==
		    LPFC_FC_LA_TYPE_UNEXP_WWPN) {
			bf_set(lpfc_mbx_read_top_att_type, la,
			       LPFC_FC_LA_TYPE_UNEXP_WWPN);
		} else {
			bf_set(lpfc_mbx_read_top_att_type, la,
			       LPFC_FC_LA_TYPE_LINK_DOWN);
		}
		/* Invoke the mailbox command callback function */
		lpfc_mbx_cmpl_read_topology(phba, pmb);

		return;
	}

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		goto out_free_dmabuf;
	return;

out_free_dmabuf:
	kfree(mp);
out_free_pmb:
	mempool_free(pmb, phba->mbox_mem_pool);
}

/**
 * lpfc_sli4_async_sli_evt - Process the asynchronous SLI link event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_sli: pointer to the async SLI completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous SLI events.
 **/
static void
lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
{
	char port_name;
	char message[128];
	uint8_t status;
	uint8_t evt_type;
	uint8_t operational = 0;
	struct temp_event temp_event_data;
	struct lpfc_acqe_misconfigured_event *misconfigured;
	struct Scsi_Host  *shost;

	evt_type = bf_get(lpfc_trailer_type, acqe_sli);

	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2901 Async SLI event - Event Data1:x%08x Event Data2:"
			"x%08x SLI Event Type:%d\n",
			acqe_sli->event_data1, acqe_sli->event_data2,
			evt_type);

	port_name = phba->Port[0];
	if (port_name == 0x00)
		port_name = '?'; /* port name is empty */

	switch (evt_type) {
	case LPFC_SLI_EVENT_TYPE_OVER_TEMP:
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"3190 Over Temperature:%d Celsius- Port Name %c\n",
				acqe_sli->event_data1, port_name);

		phba->sfp_warning |= LPFC_TRANSGRESSION_HIGH_TEMPERATURE;
		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_NORM_TEMP:
		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
		temp_event_data.event_code = LPFC_NORMAL_TEMP;
		temp_event_data.data = (uint32_t)acqe_sli->event_data1;

		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3191 Normal Temperature:%d Celsius - Port Name %c\n",
				acqe_sli->event_data1, port_name);

		shost = lpfc_shost_from_vport(phba->pport);
		fc_host_post_vendor_event(shost, fc_get_event_number(),
					  sizeof(temp_event_data),
					  (char *)&temp_event_data,
					  SCSI_NL_VID_TYPE_PCI
					  | PCI_VENDOR_ID_EMULEX);
		break;
	case LPFC_SLI_EVENT_TYPE_MISCONFIGURED:
		misconfigured = (struct lpfc_acqe_misconfigured_event *)
					&acqe_sli->event_data1;

		/* fetch the status for this port */
		switch (phba->sli4_hba.lnk_info.lnk_no) {
		case LPFC_LINK_NUMBER_0:
			status = bf_get(lpfc_sli_misconfigured_port0_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port0_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_1:
			status = bf_get(lpfc_sli_misconfigured_port1_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port1_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_2:
			status = bf_get(lpfc_sli_misconfigured_port2_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port2_op,
					&misconfigured->theEvent);
			break;
		case LPFC_LINK_NUMBER_3:
			status = bf_get(lpfc_sli_misconfigured_port3_state,
					&misconfigured->theEvent);
			operational = bf_get(lpfc_sli_misconfigured_port3_op,
					&misconfigured->theEvent);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"3296 "
					"LPFC_SLI_EVENT_TYPE_MISCONFIGURED "
					"event: Invalid link %d",
					phba->sli4_hba.lnk_info.lnk_no);
			return;
		}

		/* Skip if optic state unchanged */
		if (phba->sli4_hba.lnk_info.optic_state == status)
			return;

		switch (status) {
		case LPFC_SLI_EVENT_STATUS_VALID:
			sprintf(message, "Physical Link is functional");
			break;
		case LPFC_SLI_EVENT_STATUS_NOT_PRESENT:
			sprintf(message, "Optics faulted/incorrectly "
				"installed/not installed - Reseat optics, "
				"if issue not resolved, replace.");
			break;
		case LPFC_SLI_EVENT_STATUS_WRONG_TYPE:
			sprintf(message,
				"Optics of two types installed - Remove one "
				"optic or install matching pair of optics.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNSUPPORTED:
			sprintf(message, "Incompatible optics - Replace with "
				"compatible optics for card to function.");
			break;
		case LPFC_SLI_EVENT_STATUS_UNQUALIFIED:
			sprintf(message, "Unqualified optics - Replace with "
				"Avago optics for Warranty and Technical "
				"Support - Link is%s operational",
				(operational) ? "" : " not");
			break;
		case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
			sprintf(message, "Uncertified optics - Replace with "
				"Avago-certified optics to enable link "
				"operation - Link is%s operational",
				(operational) ? "" : " not");
			break;
		default:
			/* firmware is reporting a status we don't know about */
			sprintf(message, "Unknown event status x%02x", status);
			break;
		}
		phba->sli4_hba.lnk_info.optic_state = status;
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3176 Port Name %c %s\n", port_name, message);
		break;
	case LPFC_SLI_EVENT_TYPE_REMOTE_DPORT:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3192 Remote DPort Test Initiated - "
				"Event Data1:x%08x Event Data2: x%08x\n",
				acqe_sli->event_data1, acqe_sli->event_data2);
		break;
	default:
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3193 Async SLI event - Event Data1:x%08x Event Data2:"
				"x%08x SLI Event Type:%d\n",
				acqe_sli->event_data1, acqe_sli->event_data2,
				evt_type);
		break;
	}
}

/**
 * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
 * @vport: pointer to vport data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on a vport in
 * response to a CVL event.
 *
 * Return the pointer to the ndlp with the vport if successful, otherwise
 * return NULL.
 **/
static struct lpfc_nodelist *
lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host *shost;
	struct lpfc_hba *phba;

	if (!vport)
		return NULL;
	phba = vport->phba;
	if (!phba)
		return NULL;
	ndlp = lpfc_findnode_did(vport, Fabric_DID);
	if (!ndlp) {
		/* Cannot find existing Fabric ndlp, so allocate a new one */
		ndlp = lpfc_nlp_init(vport, Fabric_DID);
		if (!ndlp)
			return NULL;
		/* Set the node type */
		ndlp->nlp_type |= NLP_FABRIC;
		/* Put ndlp onto node list */
		lpfc_enqueue_node(vport, ndlp);
	} else if (!NLP_CHK_NODE_ACT(ndlp)) {
		/* re-setup ndlp without removing from node list */
		ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
		if (!ndlp)
			return NULL;
	}
	if ((phba->pport->port_state < LPFC_FLOGI) &&
	    (phba->pport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	/* If virtual link is not yet instantiated ignore CVL */
	if ((vport != phba->pport) && (vport->port_state < LPFC_FDISC)
	    && (vport->port_state != LPFC_VPORT_FAILED))
		return NULL;
	shost = lpfc_shost_from_vport(vport);
	if (!shost)
		return NULL;
	lpfc_linkdown_port(vport);
	lpfc_cleanup_pending_mbox(vport);
	spin_lock_irq(shost->host_lock);
	vport->fc_flag |= FC_VPORT_CVL_RCVD;
	spin_unlock_irq(shost->host_lock);

	return ndlp;
}

/**
 * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is to perform Clear Virtual Link (CVL) on all vports in
 * response to a FCF dead event.
 **/
static void
lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
{
	struct lpfc_vport **vports;
	int i;

	vports = lpfc_create_vport_work_array(phba);
	if (vports)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			lpfc_sli4_perform_vport_cvl(vports[i]);
	lpfc_destroy_vport_work_array(phba, vports);
}
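
/*
 * Editor's sketch of the vport iteration idiom used above (illustrative,
 * not driver code): lpfc_create_vport_work_array() returns a NULL-terminated
 * snapshot of the active vports, so callers can walk it safely and must
 * always pair it with lpfc_destroy_vport_work_array():
 */
#if 0	/* example only */
	struct lpfc_vport **vports = lpfc_create_vport_work_array(phba);
	int i;

	if (vports)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
			/* act on vports[i] */;
	lpfc_destroy_vport_work_array(phba, vports);
#endif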

/**
 * lpfc_sli4_async_fip_evt - Process the asynchronous FCoE FIP event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_fip: pointer to the async fcoe completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous fcoe event.
 **/
static void
lpfc_sli4_async_fip_evt(struct lpfc_hba *phba,
			struct lpfc_acqe_fip *acqe_fip)
{
	uint8_t event_type = bf_get(lpfc_trailer_type, acqe_fip);
	int rc;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	struct Scsi_Host  *shost;
	int active_vlink_present;
	struct lpfc_vport **vports;
	int i;

	phba->fc_eventTag = acqe_fip->event_tag;
	phba->fcoe_eventtag = acqe_fip->event_tag;
	switch (event_type) {
	case LPFC_FIP_EVENT_TYPE_NEW_FCF:
	case LPFC_FIP_EVENT_TYPE_FCF_PARAM_MOD:
		if (event_type == LPFC_FIP_EVENT_TYPE_NEW_FCF)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2546 New FCF event, evt_tag:x%x, "
					"index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		else
			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
					LOG_DISCOVERY,
					"2788 FCF param modified event, "
					"evt_tag:x%x, index:x%x\n",
					acqe_fip->event_tag,
					acqe_fip->index);
		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
			/*
			 * During period of FCF discovery, read the FCF
			 * table record indexed by the event to update
			 * FCF roundrobin failover eligible FCF bmask.
			 */
			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
					LOG_DISCOVERY,
					"2779 Read FCF (x%x) for updating "
					"roundrobin FCF failover bmask\n",
					acqe_fip->index);
			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fip->index);
		}

		/* If the FCF discovery is in progress, do nothing. */
		spin_lock_irq(&phba->hbalock);
		if (phba->hba_flag & FCF_TS_INPROG) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		/* If fast FCF failover rescan event is pending, do nothing */
		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}

		/* If the FCF has been in discovered state, do nothing. */
		if (phba->fcf.fcf_flag & FCF_SCAN_DONE) {
			spin_unlock_irq(&phba->hbalock);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* Otherwise, scan the entire FCF table and re-discover SAN */
		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2770 Start FCF table scan per async FCF "
				"event, evt_tag:x%x, index:x%x\n",
				acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
						     LPFC_FCOE_FCF_GET_FIRST);
		if (rc)
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
					"2547 Issue FCF scan read FCF mailbox "
					"command failed (x%x)\n", rc);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_TABLE_FULL:
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2548 FCF Table full count 0x%x tag 0x%x\n",
				bf_get(lpfc_acqe_fip_fcf_count, acqe_fip),
				acqe_fip->event_tag);
		break;

	case LPFC_FIP_EVENT_TYPE_FCF_DEAD:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2549 FCF (x%x) disconnected from network, "
				"tag:x%x\n", acqe_fip->index,
				acqe_fip->event_tag);
		/*
		 * If we are in the middle of FCF failover process, clear
		 * the corresponding FCF bit in the roundrobin bitmap.
		 */
		spin_lock_irq(&phba->hbalock);
		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
		    (phba->fcf.current_rec.fcf_indx != acqe_fip->index)) {
			spin_unlock_irq(&phba->hbalock);
			/* Update FLOGI FCF failover eligible FCF bmask */
			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fip->index);
			break;
		}
		spin_unlock_irq(&phba->hbalock);

		/* If the event is not for currently used fcf do nothing */
		if (phba->fcf.current_rec.fcf_indx != acqe_fip->index)
			break;

		/*
		 * Otherwise, request the port to rediscover the entire FCF
		 * table for a fast recovery from case that the current FCF
		 * is no longer valid as we are not in the middle of FCF
		 * failover process already.
		 */
		spin_lock_irq(&phba->hbalock);
		/* Mark the fast failover process in progress */
		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
		spin_unlock_irq(&phba->hbalock);

		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
				"2771 Start FCF fast failover process due to "
				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
				"\n", acqe_fip->event_tag, acqe_fip->index);
		rc = lpfc_sli4_redisc_fcf_table(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
					LOG_DISCOVERY,
					"2772 Issue FCF rediscover mailbox "
					"command failed, fail through to FCF "
					"dead event\n");
			spin_lock_irq(&phba->hbalock);
			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
			spin_unlock_irq(&phba->hbalock);
			/*
			 * Last resort will fail over by treating this
			 * as a link down to FCF registration.
			 */
			lpfc_sli4_fcf_dead_failthrough(phba);
		} else {
			/* Reset FCF roundrobin bmask for new discovery */
			lpfc_sli4_clear_fcf_rr_bmask(phba);
			/*
			 * Handling fast FCF failover to a DEAD FCF event is
			 * considered equivalent to receiving CVL on all
			 * vports.
			 */
			lpfc_sli4_perform_all_vport_cvl(phba);
		}
		break;
	case LPFC_FIP_EVENT_TYPE_CVL:
		phba->fcoe_cvl_eventtag = acqe_fip->event_tag;
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2718 Clear Virtual Link Received for VPI 0x%x"
				" tag 0x%x\n", acqe_fip->index,
				acqe_fip->event_tag);

		vport = lpfc_find_vport_by_vpid(phba,
						acqe_fip->index);
		ndlp = lpfc_sli4_perform_vport_cvl(vport);
		if (!ndlp)
			break;
		active_vlink_present = 0;

		vports = lpfc_create_vport_work_array(phba);
		if (vports) {
			for (i = 0; i <= phba->max_vports && vports[i] != NULL;
			     i++) {
				if ((!(vports[i]->fc_flag &
					FC_VPORT_CVL_RCVD)) &&
				    (vports[i]->port_state > LPFC_FDISC)) {
					active_vlink_present = 1;
					break;
				}
			}
			lpfc_destroy_vport_work_array(phba, vports);
		}

		/*
		 * Don't re-instantiate if vport is marked for deletion.
		 * If we are here first then vport_delete is going to wait
		 * for discovery to complete.
		 */
		if (!(vport->load_flag & FC_UNLOADING) &&
		    active_vlink_present) {
			/*
			 * If there are other active VLinks present,
			 * re-instantiate the Vlink using FDISC.
			 */
			mod_timer(&ndlp->nlp_delayfunc,
				  jiffies + msecs_to_jiffies(1000));
			shost = lpfc_shost_from_vport(vport);
			spin_lock_irq(shost->host_lock);
			ndlp->nlp_flag |= NLP_DELAY_TMO;
			spin_unlock_irq(shost->host_lock);
  4705. ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
  4706. vport->port_state = LPFC_FDISC;
  4707. } else {
  4708. /*
  4709. * Otherwise, we request port to rediscover
  4710. * the entire FCF table for a fast recovery
  4711. * from possible case that the current FCF
  4712. * is no longer valid if we are not already
  4713. * in the FCF failover process.
  4714. */
  4715. spin_lock_irq(&phba->hbalock);
  4716. if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
  4717. spin_unlock_irq(&phba->hbalock);
  4718. break;
  4719. }
  4720. /* Mark the fast failover process in progress */
  4721. phba->fcf.fcf_flag |= FCF_ACVL_DISC;
  4722. spin_unlock_irq(&phba->hbalock);
  4723. lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
  4724. LOG_DISCOVERY,
  4725. "2773 Start FCF failover per CVL, "
  4726. "evt_tag:x%x\n", acqe_fip->event_tag);
  4727. rc = lpfc_sli4_redisc_fcf_table(phba);
  4728. if (rc) {
  4729. lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
  4730. LOG_DISCOVERY,
  4731. "2774 Issue FCF rediscover "
  4732. "mabilbox command failed, "
  4733. "through to CVL event\n");
  4734. spin_lock_irq(&phba->hbalock);
  4735. phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
  4736. spin_unlock_irq(&phba->hbalock);
  4737. /*
  4738. * Last resort will be re-try on the
  4739. * the current registered FCF entry.
  4740. */
  4741. lpfc_retry_pport_discovery(phba);
  4742. } else
  4743. /*
  4744. * Reset FCF roundrobin bmask for new
  4745. * discovery.
  4746. */
  4747. lpfc_sli4_clear_fcf_rr_bmask(phba);
  4748. }
  4749. break;
  4750. default:
  4751. lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
  4752. "0288 Unknown FCoE event type 0x%x event tag "
  4753. "0x%x\n", event_type, acqe_fip->event_tag);
  4754. break;
  4755. }
  4756. }
/**
 * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_dcbx: pointer to the async dcbx completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous dcbx event.
 **/
static void
lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_dcbx *acqe_dcbx)
{
	phba->fc_eventTag = acqe_dcbx->event_tag;
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0290 The SLI4 DCBX asynchronous event is not "
			"handled yet\n");
}
/**
 * lpfc_sli4_async_grp5_evt - Process the asynchronous group5 event
 * @phba: pointer to lpfc hba data structure.
 * @acqe_grp5: pointer to the async grp5 completion queue entry.
 *
 * This routine is to handle the SLI4 asynchronous grp5 event. A grp5 event
 * is an asynchronous notification of a logical link speed change. The Port
 * reports the logical link speed in units of 10Mbps.
 **/
static void
lpfc_sli4_async_grp5_evt(struct lpfc_hba *phba,
			 struct lpfc_acqe_grp5 *acqe_grp5)
{
	uint16_t prev_ll_spd;

	phba->fc_eventTag = acqe_grp5->event_tag;
	phba->fcoe_eventtag = acqe_grp5->event_tag;
	prev_ll_spd = phba->sli4_hba.link_state.logical_speed;
	phba->sli4_hba.link_state.logical_speed =
		(bf_get(lpfc_acqe_grp5_llink_spd, acqe_grp5)) * 10;
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2789 GRP5 Async Event: Updating logical link speed "
			"from %dMbps to %dMbps\n", prev_ll_spd,
			phba->sli4_hba.link_state.logical_speed);
}
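
/*
 * Worked example of the 10Mbps encoding documented above (illustrative
 * values only): a raw lpfc_acqe_grp5_llink_spd field of 100 decodes to
 * 100 * 10 = 1000 Mbps (1 Gbps), and 1600 decodes to 16000 Mbps (16 Gbps).
 * The stored link_state.logical_speed is therefore always in Mbps.
 */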
/**
 * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 asynchronous events.
 **/
void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;

	/* First, declare the async event has been handled */
	spin_lock_irq(&phba->hbalock);
	phba->hba_flag &= ~ASYNC_EVENT;
	spin_unlock_irq(&phba->hbalock);
	/* Now, handle all the async events */
	while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
		/* Get the first event from the head of the event queue */
		spin_lock_irq(&phba->hbalock);
		list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
				 cq_event, struct lpfc_cq_event, list);
		spin_unlock_irq(&phba->hbalock);
		/* Process the asynchronous event */
		switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
		case LPFC_TRAILER_CODE_LINK:
			lpfc_sli4_async_link_evt(phba,
						 &cq_event->cqe.acqe_link);
			break;
		case LPFC_TRAILER_CODE_FCOE:
			lpfc_sli4_async_fip_evt(phba, &cq_event->cqe.acqe_fip);
			break;
		case LPFC_TRAILER_CODE_DCBX:
			lpfc_sli4_async_dcbx_evt(phba,
						 &cq_event->cqe.acqe_dcbx);
			break;
		case LPFC_TRAILER_CODE_GRP5:
			lpfc_sli4_async_grp5_evt(phba,
						 &cq_event->cqe.acqe_grp5);
			break;
		case LPFC_TRAILER_CODE_FC:
			lpfc_sli4_async_fc_evt(phba, &cq_event->cqe.acqe_fc);
			break;
		case LPFC_TRAILER_CODE_SLI:
			lpfc_sli4_async_sli_evt(phba, &cq_event->cqe.acqe_sli);
			break;
		default:
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"1804 Invalid asynchronous event code: "
					"x%x\n", bf_get(lpfc_trailer_code,
					&cq_event->cqe.mcqe_cmpl));
			break;
		}
		/* Free the completion event processed to the free pool */
		lpfc_sli4_cq_event_release(phba, cq_event);
	}
}
/**
 * lpfc_sli4_fcf_redisc_event_proc - Process fcf table rediscovery event
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked by the worker thread to process FCF table
 * rediscovery pending completion event.
 **/
void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
{
	int rc;

	spin_lock_irq(&phba->hbalock);
	/* Clear FCF rediscovery timeout event */
	phba->fcf.fcf_flag &= ~FCF_REDISC_EVT;
	/* Clear driver fast failover FCF record flag */
	phba->fcf.failover_rec.flag = 0;
	/* Set state for FCF fast failover */
	phba->fcf.fcf_flag |= FCF_REDISC_FOV;
	spin_unlock_irq(&phba->hbalock);

	/* Scan FCF table from the first entry to re-discover SAN */
	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
			"2777 Start post-quiescent FCF table scan\n");
	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
	if (rc)
		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
				"2747 Issue FCF scan read FCF mailbox "
				"command failed 0x%x\n", rc);
}
/**
 * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
 * @phba: pointer to lpfc hba data structure.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine is invoked to set up the per HBA PCI-Device group function
 * API jump table entries.
 *
 * Return: 0 if success, otherwise -ENODEV
 **/
int
lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	int rc;

	/* Set up lpfc PCI-device group */
	phba->pci_dev_grp = dev_grp;

	/* The LPFC_PCI_DEV_OC uses SLI4 */
	if (dev_grp == LPFC_PCI_DEV_OC)
		phba->sli_rev = LPFC_SLI_REV4;

	/* Set up device INIT API function jump table */
	rc = lpfc_init_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SCSI API function jump table */
	rc = lpfc_scsi_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up SLI API function jump table */
	rc = lpfc_sli_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;
	/* Set up MBOX API function jump table */
	rc = lpfc_mbox_api_table_setup(phba, dev_grp);
	if (rc)
		return -ENODEV;

	return 0;
}
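
/*
 * Usage sketch (hypothetical caller, for illustration only -- the label
 * name is not from this file): a pci probe path for an SLI-4 capable
 * function would be expected to do something like
 *
 *	rc = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
 *	if (rc)
 *		goto out_free_phba;
 *
 * after which phba->sli_rev is LPFC_SLI_REV4 and the init/scsi/sli/mbox
 * jump tables all dispatch to their SLI-4 (_s4) variants.
 */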
/**
 * lpfc_log_intr_mode - Log the active interrupt mode
 * @phba: pointer to lpfc hba data structure.
 * @intr_mode: active interrupt mode adopted.
 *
 * This routine is invoked to log the currently used active interrupt mode
 * to the device.
 **/
static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
{
	switch (intr_mode) {
	case 0:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0470 Enabled INTx interrupt mode.\n");
		break;
	case 1:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0481 Enabled MSI interrupt mode.\n");
		break;
	case 2:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0480 Enabled MSI-X interrupt mode.\n");
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0482 Illegal interrupt mode.\n");
		break;
	}
}
/**
 * lpfc_enable_pci_dev - Enable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the PCI device that is common to all
 * PCI devices.
 *
 * Return codes
 * 0 - successful
 * other values - error
 **/
static int
lpfc_enable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		goto out_error;
	else
		pdev = phba->pcidev;
	/* Enable PCI device */
	if (pci_enable_device_mem(pdev))
		goto out_error;
	/* Request PCI resource for the device */
	if (pci_request_mem_regions(pdev, LPFC_DRIVER_NAME))
		goto out_disable_device;
	/* Set up device as PCI master and save state for EEH */
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);
	pci_save_state(pdev);

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	if (pci_is_pcie(pdev))
		pdev->needs_freset = 1;

	return 0;

out_disable_device:
	pci_disable_device(pdev);
out_error:
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1401 Failed to enable pci device\n");
	return -ENODEV;
}
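
/*
 * Usage sketch (assumed pairing, not taken verbatim from a caller):
 *
 *	if (lpfc_enable_pci_dev(phba))
 *		return -ENODEV;
 *	...
 *	lpfc_disable_pci_dev(phba);
 *
 * Every error path taken after a successful enable, and the detach path,
 * would be expected to unwind through lpfc_disable_pci_dev() below, which
 * releases the memory regions requested here and disables the function.
 */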
/**
 * lpfc_disable_pci_dev - Disable a generic PCI device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable the PCI device that is common to all
 * PCI devices.
 **/
static void
lpfc_disable_pci_dev(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;
	/* Release PCI resource and disable PCI device */
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
/**
 * lpfc_reset_hba - Reset a hba
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to reset a hba device. It brings the HBA
 * offline, performs a board restart, and then brings the board back
 * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
 * outstanding mailbox commands.
 **/
void
lpfc_reset_hba(struct lpfc_hba *phba)
{
	/* If resets are disabled then set error state and return. */
	if (!phba->cfg_enable_hba_reset) {
		phba->link_state = LPFC_HBA_ERROR;
		return;
	}
	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	else
		lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);
	lpfc_unblock_mgmt_io(phba);
}
/**
 * lpfc_sli_sriov_nr_virtfn_get - Get the number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 *
 * This function reads the PCI SR-IOV extended capability of the physical
 * function to determine the maximum number of virtual functions the device
 * supports (TotalVFs).
 *
 * Returns: the maximum number of virtual functions, or 0 if the device has
 * no SR-IOV capability.
 **/
uint16_t
lpfc_sli_sriov_nr_virtfn_get(struct lpfc_hba *phba)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t nr_virtfn;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos == 0)
		return 0;

	pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF, &nr_virtfn);
	return nr_virtfn;
}
/**
 * lpfc_sli_probe_sriov_nr_virtfn - Enable a number of sr-iov virtual functions
 * @phba: pointer to lpfc hba data structure.
 * @nr_vfn: number of virtual functions to be enabled.
 *
 * This function enables the PCI SR-IOV virtual functions to a physical
 * function. It invokes the PCI SR-IOV api with the @nr_vfn provided to
 * enable the number of virtual functions to the physical function. As
 * not all devices support SR-IOV, the return code from the pci_enable_sriov()
 * API call is not considered an error condition for most devices.
 **/
int
lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
{
	struct pci_dev *pdev = phba->pcidev;
	uint16_t max_nr_vfn;
	int rc;

	max_nr_vfn = lpfc_sli_sriov_nr_virtfn_get(phba);
	if (nr_vfn > max_nr_vfn) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3057 Requested vfs (%d) greater than "
				"supported vfs (%d)\n", nr_vfn, max_nr_vfn);
		return -EINVAL;
	}

	rc = pci_enable_sriov(pdev, nr_vfn);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2806 Failed to enable sriov on this device "
				"with vfn number nr_vf:%d, rc:%d\n",
				nr_vfn, rc);
	} else
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"2807 Successfully enabled sriov on this "
				"device with vfn number nr_vf:%d\n", nr_vfn);
	return rc;
}
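
/*
 * Example (illustrative values only): with the lpfc_sriov_nr_virtfn module
 * parameter set to 4 on a port whose SR-IOV capability reports TotalVFs of
 * 2, the range check above fails with -EINVAL and the caller is expected
 * to clear cfg_sriov_nr_virtfn; with TotalVFs of 8, pci_enable_sriov() is
 * asked to bring up exactly 4 virtual functions.
 */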
/**
 * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources before the
 * device specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 * 0 - successful
 * other values - error
 **/
static int
lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;

	/*
	 * Driver resources common to all SLI revisions
	 */
	atomic_set(&phba->fast_event_count, 0);
	spin_lock_init(&phba->hbalock);

	/* Initialize ndlp management spinlock */
	spin_lock_init(&phba->ndlp_lock);

	INIT_LIST_HEAD(&phba->port_list);
	INIT_LIST_HEAD(&phba->work_list);
	init_waitqueue_head(&phba->wait_4_mlo_m_q);

	/* Initialize the wait queue head for the kernel thread */
	init_waitqueue_head(&phba->work_waitq);

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1403 Protocols supported %s %s %s\n",
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
				"SCSI" : " "),
			((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
				"NVME" : " "),
			(phba->nvmet_support ? "NVMET" : " "));

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
		/* Initialize the scsi buffer list used by driver for scsi IO */
		spin_lock_init(&phba->scsi_buf_list_get_lock);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
		spin_lock_init(&phba->scsi_buf_list_put_lock);
		INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
	}

	if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
	    (phba->nvmet_support == 0)) {
		/* Initialize the NVME buffer list used by driver for NVME IO */
		spin_lock_init(&phba->nvme_buf_list_get_lock);
		INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get);
		spin_lock_init(&phba->nvme_buf_list_put_lock);
		INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
	}

	/* Initialize the fabric iocb list */
	INIT_LIST_HEAD(&phba->fabric_iocb_list);

	/* Initialize list to save ELS buffers */
	INIT_LIST_HEAD(&phba->elsbuf);

	/* Initialize FCF connection rec list */
	INIT_LIST_HEAD(&phba->fcf_conn_rec_list);

	/* Initialize OAS configuration list */
	spin_lock_init(&phba->devicelock);
	INIT_LIST_HEAD(&phba->luns);

	/* MBOX heartbeat timer */
	timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0);
	/* Fabric block timer */
	timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0);
	/* EA polling mode timer */
	timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0);
	/* Heartbeat timer */
	timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0);

	return 0;
}
/**
 * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-3 HBA device it attached to.
 *
 * Return codes
 * 0 - successful
 * other values - error
 **/
static int
lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
{
	int rc;

	/*
	 * Initialize timers used by driver
	 */

	/* FCP polling mode timer */
	timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0);

	/* Host attention work mask setup */
	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
	phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);

	/* Set up phase-1 common device driver resources */
	rc = lpfc_setup_driver_resource_phase1(phba);
	if (rc)
		return -ENODEV;

	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
		phba->menlo_flag |= HBA_MENLO_SUPPORT;
		/* check for menlo minimum sg count */
		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT)
			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
	}

	if (!phba->sli.sli3_ring)
		phba->sli.sli3_ring = kzalloc(LPFC_SLI3_MAX_RING *
					      sizeof(struct lpfc_sli_ring),
					      GFP_KERNEL);
	if (!phba->sli.sli3_ring)
		return -ENOMEM;

	/*
	 * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be dynamically calculated.
	 */

	/* Initialize the host templates with the configured values. */
	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;

	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
	if (phba->cfg_enable_bg) {
		/*
		 * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
		 * the FCP rsp, and a BDE for each. Since we have no control
		 * over how many protection data segments the SCSI Layer
		 * will hand us (ie: there could be one for every block
		 * in the IO), we just allocate enough BDEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt to
		 * minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			(LPFC_MAX_SG_SEG_CNT * sizeof(struct ulp_bde64));

		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SEG_CNT_DIF)
			phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT_DIF;

		/* Total BDEs in BPL for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SG_SEG_CNT;
	} else {
		/*
		 * The scsi_buf for a regular I/O will hold the FCP cmnd,
		 * the FCP rsp, a BDE for each, and a BDE for up to
		 * cfg_sg_seg_cnt data segments.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) *
			 sizeof(struct ulp_bde64));

		/* Total BDEs in BPL for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
	}
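	/*
	 * Worked example of the sizing above (structure sizes here are
	 * illustrative assumptions, not taken from the headers): with
	 * sizeof(struct fcp_cmnd) == 32, sizeof(struct fcp_rsp) == 96,
	 * sizeof(struct ulp_bde64) == 12 and cfg_sg_seg_cnt == 64, the
	 * non-BG branch yields 32 + 96 + ((64 + 2) * 12) = 920 bytes per
	 * sg_dma_buf and a cfg_total_seg_cnt of 66.
	 */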
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9088 sg_tablesize:%d dmabuf_size:%d total_bde:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt);

	phba->max_vpi = LPFC_MAX_VPI;
	/* This will be set to correct value after config_port mbox */
	phba->max_vports = 0;

	/*
	 * Initialize the SLI Layer to run with lpfc HBAs.
	 */
	lpfc_sli_setup(phba);
	lpfc_sli_queue_init(phba);

	/* Allocate device driver memory */
	if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
		return -ENOMEM;

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						    phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"2808 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;
}
/**
 * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-3 HBA device it attached to.
 **/
static void
lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
{
	/* Free device driver memory allocated */
	lpfc_mem_free_all(phba);
}
/**
 * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources specific to
 * support the SLI-4 HBA device it attached to.
 *
 * Return codes
 * 0 - successful
 * other values - error
 **/
static int
lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	MAILBOX_t *mb;
	int rc, i, max_buf_size;
	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
	struct lpfc_mqe *mqe;
	int longs;
	int fof_vectors = 0;
	uint64_t wwn;

	phba->sli4_hba.num_online_cpu = num_online_cpus();
	phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
	phba->sli4_hba.curr_disp_cpu = 0;

	/* Get all the module params for configuring this host */
	lpfc_get_cfgparam(phba);

	/* Set up phase-1 common device driver resources */
	rc = lpfc_setup_driver_resource_phase1(phba);
	if (rc)
		return -ENODEV;

	/* Before proceed, wait for POST done and device ready */
	rc = lpfc_sli4_post_status_check(phba);
	if (rc)
		return -ENODEV;

	/*
	 * Initialize timers used by driver
	 */
	timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0);

	/* FCF rediscover timer */
	timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0);

	/*
	 * Control structure for handling external multi-buffer mailbox
	 * command pass-through.
	 */
	memset((uint8_t *)&phba->mbox_ext_buf_ctx, 0,
		sizeof(struct lpfc_mbox_ext_buf_ctx));
	INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);

	phba->max_vpi = LPFC_MAX_VPI;

	/* This will be set to correct value after the read_config mbox */
	phba->max_vports = 0;

	/* Program the default value of vlan_id and fc_map */
	phba->valid_vlan = 0;
	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;

	/*
	 * For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
	 * we will associate a new ring, for each EQ/CQ/WQ tuple.
	 * The WQ create will allocate the ring.
	 */

	/*
	 * It doesn't matter what family our adapter is in, we are
	 * limited to 2 Pages, 512 SGEs, for our SGL.
	 * There are going to be 2 reserved SGEs: 1 FCP cmnd + 1 FCP rsp
	 */
	max_buf_size = (2 * SLI4_PAGE_SIZE);
	if (phba->cfg_sg_seg_cnt > LPFC_MAX_SGL_SEG_CNT - 2)
		phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;

	/*
	 * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
	 * used to create the sg_dma_buf_pool must be calculated.
	 */
	if (phba->cfg_enable_bg) {
		/*
		 * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
		 * the FCP rsp, and a SGE. Since we have no control
		 * over how many protection segments the SCSI Layer
		 * will hand us (ie: there could be one for every block
		 * in the IO), just allocate enough SGEs to accommodate
		 * our max amount and we need to limit lpfc_sg_seg_cnt
		 * to minimize the risk of running out.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) + max_buf_size;

		/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
		phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;

		if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
			phba->cfg_sg_seg_cnt =
				LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
	} else {
		/*
		 * The scsi_buf for a regular I/O holds the FCP cmnd,
		 * the FCP rsp, a SGE for each, and a SGE for up to
		 * cfg_sg_seg_cnt data segments.
		 */
		phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
			sizeof(struct fcp_rsp) +
			((phba->cfg_sg_seg_cnt + 2) *
			 sizeof(struct sli4_sge));

		/* Total SGEs for scsi_sg_list */
		phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;

		/*
		 * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only
		 * need to post 1 page for the SGL.
		 */
	}

	/* Initialize the host templates with the updated values. */
	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
	lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;

	if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
		phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
	else
		phba->cfg_sg_dma_buf_size =
			SLI4_PAGE_ALIGN(phba->cfg_sg_dma_buf_size);

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP,
			"9087 sg_tablesize:%d dmabuf_size:%d total_sge:%d\n",
			phba->cfg_sg_seg_cnt, phba->cfg_sg_dma_buf_size,
			phba->cfg_total_seg_cnt);

	/* Initialize buffer queue management fields */
	INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;

	/*
	 * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
	 */
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
		/* Initialize the Abort scsi buffer list used by driver */
		spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
	}

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* Initialize the Abort nvme buffer list used by driver */
		spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);

		/* Fast-path XRI aborted CQ Event work queue list */
		INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
	}

	/* This abort list used by worker thread */
	spin_lock_init(&phba->sli4_hba.sgl_list_lock);
	spin_lock_init(&phba->sli4_hba.nvmet_io_wait_lock);

	/*
	 * Initialize driver internal slow-path work queues
	 */

	/* Driver internal slow-path CQ Event pool */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
	/* Response IOCB work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_queue_event);
	/* Asynchronous event CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
	/* Fast-path XRI aborted CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
	/* Slow-path XRI aborted CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
	/* Receive queue CQ Event work queue list */
	INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);

	/* Initialize extent block lists. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_blk_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_xri_blk_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_vfi_blk_list);
	INIT_LIST_HEAD(&phba->lpfc_vpi_blk_list);

	/* Initialize mboxq lists. If the early init routines fail
	 * these lists need to be correctly initialized.
	 */
	INIT_LIST_HEAD(&phba->sli.mboxq);
	INIT_LIST_HEAD(&phba->sli.mboxq_cmpl);

	/* initialize optic_state to 0xFF */
	phba->sli4_hba.lnk_info.optic_state = 0xff;

	/* Allocate device driver memory */
	rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
	if (rc)
		return -ENOMEM;

	/* IF Type 2 ports get initialized now. */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_2) {
		rc = lpfc_pci_function_reset(phba);
		if (unlikely(rc)) {
			rc = -ENODEV;
			goto out_free_mem;
		}
		phba->temp_sensor_support = 1;
	}

	/* Create the bootstrap mailbox command */
	rc = lpfc_create_bootstrap_mbox(phba);
	if (unlikely(rc))
		goto out_free_mem;

	/* Set up the host's endian order with the device. */
	rc = lpfc_setup_endian_order(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* Set up the hba's configuration parameters. */
	rc = lpfc_sli4_read_config(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	rc = lpfc_mem_alloc_active_rrq_pool_s4(phba);
	if (unlikely(rc))
		goto out_free_bsmbx;

	/* IF Type 0 ports get initialized now. */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
	    LPFC_SLI_INTF_IF_TYPE_0) {
		rc = lpfc_pci_function_reset(phba);
		if (unlikely(rc))
			goto out_free_bsmbx;
	}

	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
					       GFP_KERNEL);
	if (!mboxq) {
		rc = -ENOMEM;
		goto out_free_bsmbx;
	}

	/* Check for NVMET being configured */
	phba->nvmet_support = 0;
	if (lpfc_enable_nvmet_cnt) {
		/* First get WWN of HBA instance */
		lpfc_read_nv(phba, mboxq);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"6016 Mailbox failed, mbxCmd x%x "
					"READ_NV, mbxStatus x%x\n",
					bf_get(lpfc_mqe_command, &mboxq->u.mqe),
					bf_get(lpfc_mqe_status, &mboxq->u.mqe));
			mempool_free(mboxq, phba->mbox_mem_pool);
			rc = -EIO;
			goto out_free_bsmbx;
		}
		mb = &mboxq->u.mb;
		memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
		       sizeof(uint64_t));
		wwn = cpu_to_be64(wwn);
		phba->sli4_hba.wwnn.u.name = wwn;
		memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
		       sizeof(uint64_t));
		/* wwn is WWPN of HBA instance */
		wwn = cpu_to_be64(wwn);
		phba->sli4_hba.wwpn.u.name = wwn;

		/* Check to see if it matches any module parameter */
		for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
			if (wwn == lpfc_enable_nvmet[i]) {
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
				if (lpfc_nvmet_mem_alloc(phba))
					break;

				phba->nvmet_support = 1; /* a match */

				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6017 NVME Target %016llx\n",
						wwn);
#else
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6021 Can't enable NVME Target."
						" NVME_TARGET_FC infrastructure"
						" is not in kernel\n");
#endif
				break;
			}
		}
	}
	lpfc_nvme_mod_param_dep(phba);

	/* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
	lpfc_supported_pages(mboxq);
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	if (!rc) {
		mqe = &mboxq->u.mqe;
		memcpy(&pn_page[0], ((uint8_t *)&mqe->un.supp_pages.word3),
		       LPFC_MAX_SUPPORTED_PAGES);
		for (i = 0; i < LPFC_MAX_SUPPORTED_PAGES; i++) {
			switch (pn_page[i]) {
			case LPFC_SLI4_PARAMETERS:
				phba->sli4_hba.pc_sli4_params.supported = 1;
				break;
			default:
				break;
			}
		}
		/* Read the port's SLI4 Parameters capabilities if supported. */
		if (phba->sli4_hba.pc_sli4_params.supported)
			rc = lpfc_pc_sli4_params_get(phba, mboxq);
		if (rc) {
			mempool_free(mboxq, phba->mbox_mem_pool);
			rc = -EIO;
			goto out_free_bsmbx;
		}
	}

	/*
	 * Get sli4 parameters that override parameters from Port capabilities.
	 * If this call fails, it isn't critical unless the SLI4 parameters come
	 * back in conflict.
	 */
	rc = lpfc_get_sli4_parameters(phba, mboxq);
	if (rc) {
		if (phba->sli4_hba.extents_in_use &&
		    phba->sli4_hba.rpi_hdrs_in_use) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2999 Unsupported SLI4 Parameters "
					"Extents and RPI headers enabled.\n");
		}
		mempool_free(mboxq, phba->mbox_mem_pool);
		rc = -EIO;
		goto out_free_bsmbx;
	}

	mempool_free(mboxq, phba->mbox_mem_pool);

	/* Verify OAS is supported */
	lpfc_sli4_oas_verify(phba);
	if (phba->cfg_fof)
		fof_vectors = 1;

	/* Verify all the SLI4 queues */
	rc = lpfc_sli4_queue_verify(phba);
	if (rc)
		goto out_free_bsmbx;

	/* Create driver internal CQE event pool */
	rc = lpfc_sli4_cq_event_pool_create(phba);
	if (rc)
		goto out_free_bsmbx;

	/* Initialize sgl lists per host */
	lpfc_init_sgl_list(phba);

	/* Allocate and initialize active sgl array */
	rc = lpfc_init_active_sgl_array(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1430 Failed to initialize sgl list.\n");
		goto out_destroy_cq_event_pool;
	}
	rc = lpfc_sli4_init_rpi_hdrs(phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1432 Failed to initialize rpi headers.\n");
		goto out_free_active_sgl;
	}

	/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
					 GFP_KERNEL);
	if (!phba->fcf.fcf_rr_bmask) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2759 Failed allocate memory for FCF round "
				"robin failover bmask\n");
		rc = -ENOMEM;
		goto out_remove_rpi_hdrs;
	}
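	/*
	 * Sizing example for the bmask above (constant values are
	 * illustrative assumptions): with LPFC_SLI4_FCF_TBL_INDX_MAX of 64
	 * and BITS_PER_LONG of 64, longs is (64 + 63) / 64 = 1, so the
	 * roundrobin failover bmask costs a single unsigned long.
	 */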
	phba->sli4_hba.hba_eq_hdl = kcalloc(fof_vectors + phba->io_channel_irqs,
					    sizeof(struct lpfc_hba_eq_hdl),
					    GFP_KERNEL);
	if (!phba->sli4_hba.hba_eq_hdl) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2572 Failed allocate memory for "
				"fast-path per-EQ handle array\n");
		rc = -ENOMEM;
		goto out_free_fcf_rr_bmask;
	}

	phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_present_cpu,
					 sizeof(struct lpfc_vector_map_info),
					 GFP_KERNEL);
	if (!phba->sli4_hba.cpu_map) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3327 Failed allocate memory for msi-x "
				"interrupt vector mapping\n");
		rc = -ENOMEM;
		goto out_free_hba_eq_hdl;
	}
	if (lpfc_used_cpu == NULL) {
		lpfc_used_cpu = kcalloc(lpfc_present_cpu, sizeof(uint16_t),
					GFP_KERNEL);
		if (!lpfc_used_cpu) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3335 Failed allocate memory for msi-x "
					"interrupt vector mapping\n");
			kfree(phba->sli4_hba.cpu_map);
			rc = -ENOMEM;
			goto out_free_hba_eq_hdl;
		}
		for (i = 0; i < lpfc_present_cpu; i++)
			lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY;
	}

	/*
	 * Enable sr-iov virtual functions if supported and configured
	 * through the module parameter.
	 */
	if (phba->cfg_sriov_nr_virtfn > 0) {
		rc = lpfc_sli_probe_sriov_nr_virtfn(phba,
						    phba->cfg_sriov_nr_virtfn);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"3020 Requested number of SR-IOV "
					"virtual functions (%d) is not "
					"supported\n",
					phba->cfg_sriov_nr_virtfn);
			phba->cfg_sriov_nr_virtfn = 0;
		}
	}

	return 0;

out_free_hba_eq_hdl:
	kfree(phba->sli4_hba.hba_eq_hdl);
out_free_fcf_rr_bmask:
	kfree(phba->fcf.fcf_rr_bmask);
out_remove_rpi_hdrs:
	lpfc_sli4_remove_rpi_hdrs(phba);
out_free_active_sgl:
	lpfc_free_active_sgl(phba);
out_destroy_cq_event_pool:
	lpfc_sli4_cq_event_pool_destroy(phba);
out_free_bsmbx:
	lpfc_destroy_bootstrap_mbox(phba);
out_free_mem:
	lpfc_mem_free(phba);
	return rc;
}
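
/*
 * Note on the error unwinding above: the out_* labels form the usual kernel
 * "inverse order" ladder -- each label releases exactly what was acquired
 * before the corresponding failure point, ending at lpfc_mem_free(). A
 * generic sketch of the shape (hypothetical names, not additional code):
 *
 *	rc = acquire_a();  if (rc) return rc;
 *	rc = acquire_b();  if (rc) goto undo_a;
 *	rc = acquire_c();  if (rc) goto undo_b;
 *	return 0;
 * undo_b:
 *	release_b();
 * undo_a:
 *	release_a();
 *	return rc;
 */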
/**
 * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up
 * specific for supporting the SLI-4 HBA device it attached to.
 **/
static void
lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
{
	struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;

	/* Free memory allocated for msi-x interrupt vector to CPU mapping */
	kfree(phba->sli4_hba.cpu_map);
	phba->sli4_hba.num_present_cpu = 0;
	phba->sli4_hba.num_online_cpu = 0;
	phba->sli4_hba.curr_disp_cpu = 0;

	/* Free memory allocated for fast-path work queue handles */
	kfree(phba->sli4_hba.hba_eq_hdl);

	/* Free the allocated rpi headers. */
	lpfc_sli4_remove_rpi_hdrs(phba);
	lpfc_sli4_remove_rpis(phba);

	/* Free eligible FCF index bmask */
	kfree(phba->fcf.fcf_rr_bmask);

	/* Free the ELS sgl list */
	lpfc_free_active_sgl(phba);
	lpfc_free_els_sgl_list(phba);
	lpfc_free_nvmet_sgl_list(phba);

	/* Free the completion queue EQ event pool */
	lpfc_sli4_cq_event_release_all(phba);
	lpfc_sli4_cq_event_pool_destroy(phba);

	/* Release resource identifiers. */
	lpfc_sli4_dealloc_resource_identifiers(phba);

	/* Free the bsmbx region. */
	lpfc_destroy_bootstrap_mbox(phba);

	/* Free the SLI Layer memory with SLI4 HBAs */
	lpfc_mem_free_all(phba);

	/* Free the current connect table */
	list_for_each_entry_safe(conn_entry, next_conn_entry,
				 &phba->fcf_conn_rec_list, list) {
		list_del_init(&conn_entry->list);
		kfree(conn_entry);
	}
}
/**
 * lpfc_init_api_table_setup - Set up init api function jump table
 * @phba: The hba struct for which this call is being executed.
 * @dev_grp: The HBA PCI-Device group number.
 *
 * This routine sets up the device INIT interface API function jump table
 * in @phba struct.
 *
 * Returns: 0 - success, -ENODEV - failure.
 **/
int
lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
{
	phba->lpfc_hba_init_link = lpfc_hba_init_link;
	phba->lpfc_hba_down_link = lpfc_hba_down_link;
	phba->lpfc_selective_reset = lpfc_selective_reset;
	switch (dev_grp) {
	case LPFC_PCI_DEV_LP:
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
		phba->lpfc_stop_port = lpfc_stop_port_s3;
		break;
	case LPFC_PCI_DEV_OC:
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
		phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
		phba->lpfc_stop_port = lpfc_stop_port_s4;
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1431 Invalid HBA PCI-device group: 0x%x\n",
				dev_grp);
		return -ENODEV;
	}
	return 0;
}
/**
 * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the driver internal resources after the
 * device specific resource setup to support the HBA device it attached to.
 *
 * Return codes
 * 0 - successful
 * other values - error
 **/
static int
lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
{
	int error;

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		return error;
	}

	/* workqueue for deferred irq use; check the allocation so a failed
	 * allocation cannot be dereferenced later.
	 */
	phba->wq = alloc_workqueue("lpfc_wq", WQ_MEM_RECLAIM, 0);
	if (!phba->wq) {
		kthread_stop(phba->worker_thread);
		return -ENOMEM;
	}

	return 0;
}
/**
 * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the driver internal resources set up after
 * the device specific resource setup for supporting the HBA device it
 * attached to.
 **/
static void
lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
{
	if (phba->wq) {
		flush_workqueue(phba->wq);
		destroy_workqueue(phba->wq);
		phba->wq = NULL;
	}

	/* Stop kernel worker thread */
	kthread_stop(phba->worker_thread);
}

/**
 * lpfc_free_iocb_list - Free iocb list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's IOCB list and memory.
 **/
void
lpfc_free_iocb_list(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;

	spin_lock_irq(&phba->hbalock);
	list_for_each_entry_safe(iocbq_entry, iocbq_next,
				 &phba->lpfc_iocb_list, list) {
		list_del(&iocbq_entry->list);
		kfree(iocbq_entry);
		phba->total_iocbq_bufs--;
	}
	spin_unlock_irq(&phba->hbalock);
}
/**
 * lpfc_init_iocb_list - Allocate and initialize iocb list.
 * @phba: pointer to lpfc hba data structure.
 * @iocb_count: number of IOCB entries to allocate for the list.
 *
 * This routine is invoked to allocate and initialize the driver's IOCB
 * list and set up the IOCB tag array accordingly.
 *
 * Return codes
 * 0 - successful
 * other values - error
 **/
int
lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
{
	struct lpfc_iocbq *iocbq_entry = NULL;
	uint16_t iotag;
	int i;

	/* Initialize and populate the iocb list per host. */
	INIT_LIST_HEAD(&phba->lpfc_iocb_list);
	for (i = 0; i < iocb_count; i++) {
		iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
		if (iocbq_entry == NULL) {
			printk(KERN_ERR "%s: only allocated %d iocbs of "
				"expected %d count. Unloading driver.\n",
				__func__, i, LPFC_IOCB_LIST_CNT);
			goto out_free_iocbq;
		}

		iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
		if (iotag == 0) {
			kfree(iocbq_entry);
			printk(KERN_ERR "%s: failed to allocate IOTAG. "
				"Unloading driver.\n", __func__);
			goto out_free_iocbq;
		}
		iocbq_entry->sli4_lxritag = NO_XRI;
		iocbq_entry->sli4_xritag = NO_XRI;

		spin_lock_irq(&phba->hbalock);
		list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
		phba->total_iocbq_bufs++;
		spin_unlock_irq(&phba->hbalock);
	}

	return 0;

out_free_iocbq:
	lpfc_free_iocb_list(phba);
	return -ENOMEM;
}
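
/*
 * Example (illustrative caller): SLI-3 setup would be expected to size the
 * pool with the driver default referenced in the error message above, e.g.
 *
 *	rc = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
 *
 * A failure returns -ENOMEM with the partially built list already torn
 * down by lpfc_free_iocb_list(), so the caller has nothing to unwind.
 */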
/**
 * lpfc_free_sgl_list - Free a given sgl list.
 * @phba: pointer to lpfc hba data structure.
 * @sglq_list: pointer to the head of sgl list.
 *
 * This routine is invoked to free a given sgl list and memory.
 **/
void
lpfc_free_sgl_list(struct lpfc_hba *phba, struct list_head *sglq_list)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;

	list_for_each_entry_safe(sglq_entry, sglq_next, sglq_list, list) {
		list_del(&sglq_entry->list);
		lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
		kfree(sglq_entry);
	}
}
/**
 * lpfc_free_els_sgl_list - Free els sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's els sgl list and memory.
 **/
static void
lpfc_free_els_sgl_list(struct lpfc_hba *phba)
{
	LIST_HEAD(sglq_list);

	/* Retrieve all els sgls from driver list */
	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Now free the sgl list */
	lpfc_free_sgl_list(phba, &sglq_list);
}
/**
 * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver's nvmet sgl list and memory.
 **/
static void
lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
{
	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
	LIST_HEAD(sglq_list);

	/* Retrieve all nvmet sgls from driver list */
	spin_lock_irq(&phba->hbalock);
	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	spin_unlock_irq(&phba->hbalock);

	/* Now free the sgl list */
	list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
		list_del(&sglq_entry->list);
		lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
		kfree(sglq_entry);
	}

	/* Update the nvmet_xri_cnt to reflect no current sgls.
	 * The next initialization cycle sets the count and allocates
	 * the sgls over again.
	 */
	phba->sli4_hba.nvmet_xri_cnt = 0;
}
/**
 * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate the driver's active sgl memory.
 * This array will hold the sglq_entry's for active IOs.
 **/
static int
lpfc_init_active_sgl_array(struct lpfc_hba *phba)
{
	int size;

	size = sizeof(struct lpfc_sglq *);
	size *= phba->sli4_hba.max_cfg_param.max_xri;

	phba->sli4_hba.lpfc_sglq_active_list =
		kzalloc(size, GFP_KERNEL);
	if (!phba->sli4_hba.lpfc_sglq_active_list)
		return -ENOMEM;
	return 0;
}
/**
 * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to walk through the array of active sglq entries
 * and free all of the resources.
 * This is just a placeholder for now.
 **/
static void
lpfc_free_active_sgl(struct lpfc_hba *phba)
{
	kfree(phba->sli4_hba.lpfc_sglq_active_list);
}
/**
 * lpfc_init_sgl_list - Allocate and initialize sgl list.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and initialize the driver's sgl
 * list and set up the sgl xritag tag array accordingly.
 **/
static void
lpfc_init_sgl_list(struct lpfc_hba *phba)
{
	/* Initialize and populate the sglq list per host/VF. */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);

	/* els xri-sgl book keeping */
	phba->sli4_hba.els_xri_cnt = 0;

	/* scsi xri-buffer book keeping */
	phba->sli4_hba.scsi_xri_cnt = 0;

	/* nvme xri-buffer book keeping */
	phba->sli4_hba.nvme_xri_cnt = 0;
}
/**
 * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to post rpi header templates to the
 * port for those SLI4 ports that do not support extents. This routine
 * posts a PAGE_SIZE memory region to the port to hold up to
 * PAGE_SIZE / 64 rpi context headers (each header occupies 64 bytes).
 * This is an initialization routine and should be called only when
 * interrupts are disabled.
 *
 * Return codes
 * 0 - successful
 * -ERROR - otherwise.
 **/
int
lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
{
	int rc = 0;
	struct lpfc_rpi_hdr *rpi_hdr;

	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return rc;
	if (phba->sli4_hba.extents_in_use)
		return -EIO;

	rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
	if (!rpi_hdr) {
		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
				"0391 Error during rpi post operation\n");
		lpfc_sli4_remove_rpis(phba);
		rc = -ENODEV;
	}

	return rc;
}
/**
 * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate a single 4KB memory region to
 * support rpis and stores them in the phba.  This single region
 * provides support for up to 64 rpis.  The region is used globally
 * by the device.
 *
 * Returns:
 *   A valid rpi hdr on success.
 *   A NULL pointer on any failure.
 **/
struct lpfc_rpi_hdr *
lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
{
	uint16_t rpi_limit, curr_rpi_range;
	struct lpfc_dmabuf *dmabuf;
	struct lpfc_rpi_hdr *rpi_hdr;

	/*
	 * If the SLI4 port supports extents, posting the rpi header isn't
	 * required.  Set the expected maximum count and let the actual value
	 * get set when extents are fully allocated.
	 */
	if (!phba->sli4_hba.rpi_hdrs_in_use)
		return NULL;
	if (phba->sli4_hba.extents_in_use)
		return NULL;

	/* The limit on the logical index is just the max_rpi count. */
	rpi_limit = phba->sli4_hba.max_cfg_param.max_rpi;

	spin_lock_irq(&phba->hbalock);
	/*
	 * Establish the starting RPI in this header block.  The starting
	 * rpi is normalized to a zero base because the physical rpi is
	 * port based.
	 */
	curr_rpi_range = phba->sli4_hba.next_rpi;
	spin_unlock_irq(&phba->hbalock);

	/* Reached full RPI range */
	if (curr_rpi_range == rpi_limit)
		return NULL;

	/*
	 * First allocate the protocol header region for the port.  The
	 * port expects a 4KB DMA-mapped memory region that is 4K aligned.
	 */
	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return NULL;

	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
					   LPFC_HDR_TEMPLATE_SIZE,
					   &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		rpi_hdr = NULL;
		goto err_free_dmabuf;
	}

	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
		rpi_hdr = NULL;
		goto err_free_coherent;
	}

	/* Save the rpi header data for cleanup later. */
	rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
	if (!rpi_hdr)
		goto err_free_coherent;

	rpi_hdr->dmabuf = dmabuf;
	rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
	rpi_hdr->page_count = 1;

	spin_lock_irq(&phba->hbalock);

	/* The rpi_hdr stores the logical index only. */
	rpi_hdr->start_rpi = curr_rpi_range;
	rpi_hdr->next_rpi = phba->sli4_hba.next_rpi + LPFC_RPI_HDR_COUNT;
	list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);

	spin_unlock_irq(&phba->hbalock);
	return rpi_hdr;

 err_free_coherent:
	dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
			  dmabuf->virt, dmabuf->phys);
 err_free_dmabuf:
	kfree(dmabuf);
	return NULL;
}
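/*
 * Example (illustrative note, not driver code): with the 4KB
 * LPFC_HDR_TEMPLATE_SIZE region described above, a dmabuf->phys of
 * 0x7f3000 satisfies the IS_ALIGNED() check, while 0x7f3800 does not
 * and takes the err_free_coherent path.  Each successful call advances
 * next_rpi by LPFC_RPI_HDR_COUNT (64), so a port reporting max_rpi of
 * 256 can accumulate at most four of these header regions.
 */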
/**
 * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to remove all memory resources allocated
 * to support rpis for SLI4 ports not supporting extents. This routine
 * presumes the caller has released all rpis consumed by fabric or port
 * logins and is prepared to have the header pages removed.
 **/
void
lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
{
	struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;

	if (!phba->sli4_hba.rpi_hdrs_in_use)
		goto exit;

	list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
				 &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
		list_del(&rpi_hdr->list);
		dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
				  rpi_hdr->dmabuf->virt,
				  rpi_hdr->dmabuf->phys);
		kfree(rpi_hdr->dmabuf);
		kfree(rpi_hdr);
	}
 exit:
	/* There are no rpis available to the port now. */
	phba->sli4_hba.next_rpi = 0;
}
/**
 * lpfc_hba_alloc - Allocate driver hba data structure for a device.
 * @pdev: pointer to pci device data structure.
 *
 * This routine is invoked to allocate the driver hba data structure for an
 * HBA device. If the allocation is successful, the phba reference to the
 * PCI device data structure is set.
 *
 * Return codes
 *      pointer to @phba - successful
 *      NULL - error
 **/
static struct lpfc_hba *
lpfc_hba_alloc(struct pci_dev *pdev)
{
	struct lpfc_hba *phba;

	/* Allocate memory for HBA structure */
	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
	if (!phba) {
		dev_err(&pdev->dev, "failed to allocate hba struct\n");
		return NULL;
	}

	/* Set reference to PCI device in HBA structure */
	phba->pcidev = pdev;

	/* Assign an unused board number */
	phba->brd_no = lpfc_get_instance();
	if (phba->brd_no < 0) {
		kfree(phba);
		return NULL;
	}
	phba->eratt_poll_interval = LPFC_ERATT_POLL_INTERVAL;

	spin_lock_init(&phba->ct_ev_lock);
	INIT_LIST_HEAD(&phba->ct_ev_waiters);

	return phba;
}
/**
 * lpfc_hba_free - Free driver hba data structure with a device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the driver hba data structure with an
 * HBA device.
 **/
static void
lpfc_hba_free(struct lpfc_hba *phba)
{
	/* Release the driver assigned board number */
	idr_remove(&lpfc_hba_index, phba->brd_no);

	/* Free memory allocated with sli3 rings */
	kfree(phba->sli.sli3_ring);
	phba->sli.sli3_ring = NULL;

	kfree(phba);
	return;
}
/**
 * lpfc_create_shost - Create hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create HBA physical port and associate a SCSI
 * host with it.
 *
 * Return codes
 *      0 - successful
 *      other values - error
 **/
static int
lpfc_create_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport;
	struct Scsi_Host  *shost;

	/* Initialize HBA FC structure */
	phba->fc_edtov = FF_DEF_EDTOV;
	phba->fc_ratov = FF_DEF_RATOV;
	phba->fc_altov = FF_DEF_ALTOV;
	phba->fc_arbtov = FF_DEF_ARBTOV;

	atomic_set(&phba->sdev_cnt, 0);
	atomic_set(&phba->fc4ScsiInputRequests, 0);
	atomic_set(&phba->fc4ScsiOutputRequests, 0);
	atomic_set(&phba->fc4ScsiControlRequests, 0);
	atomic_set(&phba->fc4ScsiIoCmpls, 0);
	atomic_set(&phba->fc4NvmeInputRequests, 0);
	atomic_set(&phba->fc4NvmeOutputRequests, 0);
	atomic_set(&phba->fc4NvmeControlRequests, 0);
	atomic_set(&phba->fc4NvmeIoCmpls, 0);
	atomic_set(&phba->fc4NvmeLsRequests, 0);
	atomic_set(&phba->fc4NvmeLsCmpls, 0);
	vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
	if (!vport)
		return -ENODEV;

	shost = lpfc_shost_from_vport(vport);
	phba->pport = vport;

	if (phba->nvmet_support) {
		/* Only 1 vport (pport) will support NVME target */
		if (phba->txrdy_payload_pool == NULL) {
			phba->txrdy_payload_pool = dma_pool_create(
				"txrdy_pool", &phba->pcidev->dev,
				TXRDY_PAYLOAD_LEN, 16, 0);
			if (phba->txrdy_payload_pool) {
				phba->targetport = NULL;
				phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
				lpfc_printf_log(phba, KERN_INFO,
						LOG_INIT | LOG_NVME_DISC,
						"6076 NVME Target Found\n");
			}
		}
	}

	lpfc_debugfs_initialize(vport);
	/* Put reference to SCSI host to driver's device private data */
	pci_set_drvdata(phba->pcidev, shost);

	/*
	 * At this point we are fully registered with PSA. In addition,
	 * any initial discovery should be completed.
	 */
	vport->load_flag |= FC_ALLOW_FDMI;
	if (phba->cfg_enable_SmartSAN ||
	    (phba->cfg_fdmi_on == LPFC_FDMI_SUPPORT)) {

		/* Setup appropriate attribute masks */
		vport->fdmi_hba_mask = LPFC_FDMI2_HBA_ATTR;
		if (phba->cfg_enable_SmartSAN)
			vport->fdmi_port_mask = LPFC_FDMI2_SMART_ATTR;
		else
			vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR;
	}
	return 0;
}
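/*
 * Example (illustrative sketch, not part of the driver): TXRDY payload
 * buffers from the pool created above would be obtained and released
 * with the standard dma_pool API:
 *
 *	dma_addr_t pdma;
 *	void *txrdy;
 *
 *	txrdy = dma_pool_alloc(phba->txrdy_payload_pool, GFP_KERNEL, &pdma);
 *	if (txrdy)
 *		dma_pool_free(phba->txrdy_payload_pool, txrdy, pdma);
 *
 * Every buffer is TXRDY_PAYLOAD_LEN bytes and 16-byte aligned, per the
 * dma_pool_create() arguments above.
 */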
/**
 * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to destroy HBA physical port and the associated
 * SCSI host.
 **/
static void
lpfc_destroy_shost(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;

	/* Destroy physical port that associated with the SCSI host */
	destroy_port(vport);

	return;
}
/**
 * lpfc_setup_bg - Setup Block guard structures and debug areas.
 * @phba: pointer to lpfc hba data structure.
 * @shost: the shost to be used to detect Block guard settings.
 *
 * This routine sets up the local Block guard protocol settings for @shost.
 * This routine also allocates memory for debugging bg buffers.
 **/
static void
lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
{
	uint32_t old_mask;
	uint32_t old_guard;

	int pagecnt = 10;
	if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"1478 Registering BlockGuard with the "
				"SCSI layer\n");

		old_mask = phba->cfg_prot_mask;
		old_guard = phba->cfg_prot_guard;

		/* Only allow supported values */
		phba->cfg_prot_mask &= (SHOST_DIF_TYPE1_PROTECTION |
			SHOST_DIX_TYPE0_PROTECTION |
			SHOST_DIX_TYPE1_PROTECTION);
		phba->cfg_prot_guard &= (SHOST_DIX_GUARD_IP |
					 SHOST_DIX_GUARD_CRC);

		/* DIF Type 1 protection for profiles AST1/C1 is end to end */
		if (phba->cfg_prot_mask == SHOST_DIX_TYPE1_PROTECTION)
			phba->cfg_prot_mask |= SHOST_DIF_TYPE1_PROTECTION;

		if (phba->cfg_prot_mask && phba->cfg_prot_guard) {
			if ((old_mask != phba->cfg_prot_mask) ||
			    (old_guard != phba->cfg_prot_guard))
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1475 Registering BlockGuard with the "
					"SCSI layer: mask %d guard %d\n",
					phba->cfg_prot_mask,
					phba->cfg_prot_guard);

			scsi_host_set_prot(shost, phba->cfg_prot_mask);
			scsi_host_set_guard(shost, phba->cfg_prot_guard);
		} else
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1479 Not Registering BlockGuard with the SCSI "
				"layer, Bad protection parameters: %d %d\n",
				old_mask, old_guard);
	}

	if (!_dump_buf_data) {
		while (pagecnt) {
			spin_lock_init(&_dump_buf_lock);
			_dump_buf_data =
				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
			if (_dump_buf_data) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9043 BLKGRD: allocated %d pages for "
					"_dump_buf_data at 0x%p\n",
					(1 << pagecnt), _dump_buf_data);
				_dump_buf_data_order = pagecnt;
				memset(_dump_buf_data, 0,
				       ((1 << PAGE_SHIFT) << pagecnt));
				break;
			} else
				--pagecnt;
		}
		if (!_dump_buf_data_order)
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9044 BLKGRD: ERROR unable to allocate "
				"memory for hexdump\n");
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9045 BLKGRD: already allocated _dump_buf_data=0x%p"
			"\n", _dump_buf_data);
	if (!_dump_buf_dif) {
		while (pagecnt) {
			_dump_buf_dif =
				(char *) __get_free_pages(GFP_KERNEL, pagecnt);
			if (_dump_buf_dif) {
				lpfc_printf_log(phba, KERN_ERR, LOG_BG,
					"9046 BLKGRD: allocated %d pages for "
					"_dump_buf_dif at 0x%p\n",
					(1 << pagecnt), _dump_buf_dif);
				_dump_buf_dif_order = pagecnt;
				memset(_dump_buf_dif, 0,
				       ((1 << PAGE_SHIFT) << pagecnt));
				break;
			} else
				--pagecnt;
		}
		if (!_dump_buf_dif_order)
			lpfc_printf_log(phba, KERN_ERR, LOG_BG,
				"9047 BLKGRD: ERROR unable to allocate "
				"memory for hexdump\n");
	} else
		lpfc_printf_log(phba, KERN_ERR, LOG_BG,
			"9048 BLKGRD: already allocated _dump_buf_dif=0x%p\n",
			_dump_buf_dif);
}
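/*
 * Worked sizing example (illustrative only): __get_free_pages() takes a
 * page *order*, so the first attempt above with pagecnt = 10 asks for
 * 2^10 = 1024 pages (4MB with 4KB pages), halving the request on each
 * allocation failure.  The memset() length ((1 << PAGE_SHIFT) << pagecnt)
 * is the same quantity expressed in bytes, i.e. PAGE_SIZE << order.
 */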
/**
 * lpfc_post_init_setup - Perform necessary device post initialization setup.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to perform all the necessary post initialization
 * setup for the device.
 **/
static void
lpfc_post_init_setup(struct lpfc_hba *phba)
{
	struct Scsi_Host  *shost;
	struct lpfc_adapter_event_header adapter_event;

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/*
	 * hba setup may have changed the hba_queue_depth so we need to
	 * adjust the value of can_queue.
	 */
	shost = pci_get_drvdata(phba->pcidev);
	shost->can_queue = phba->cfg_hba_queue_depth - 10;
	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
		lpfc_setup_bg(phba, shost);

	lpfc_host_attrib_init(shost);

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		spin_lock_irq(shost->host_lock);
		lpfc_poll_start_timer(phba);
		spin_unlock_irq(shost->host_lock);
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0428 Perform SCSI scan\n");

	/* Send board arrival event to upper layer */
	adapter_event.event_type = FC_REG_ADAPTER_EVENT;
	adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
	fc_host_post_vendor_event(shost, fc_get_event_number(),
				  sizeof(adapter_event),
				  (char *) &adapter_event,
				  LPFC_NL_VENDOR_ID);
	return;
}
/**
 * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-3 interface spec.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	unsigned long bar0map_len, bar2map_len;
	int i, hbq_count;
	void *ptr;
	int error = -ENODEV;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return error;
	else
		pdev = phba->pcidev;

	/* Set the device DMA mask size */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			return error;
		}
	}

	/* Get the bus address of Bar0 and Bar2 and the number of bytes
	 * required by each mapping.
	 */
	phba->pci_bar0_map = pci_resource_start(pdev, 0);
	bar0map_len = pci_resource_len(pdev, 0);

	phba->pci_bar2_map = pci_resource_start(pdev, 2);
	bar2map_len = pci_resource_len(pdev, 2);

	/* Map HBA SLIM to a kernel virtual address. */
	phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
	if (!phba->slim_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLIM memory.\n");
		goto out;
	}

	/* Map HBA Control Registers to a kernel virtual address. */
	phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
	if (!phba->ctrl_regs_memmap_p) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for HBA control registers.\n");
		goto out_iounmap_slim;
	}

	/* Allocate memory for SLI-2 structures */
	phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
						&phba->slim2p.phys, GFP_KERNEL);
	if (!phba->slim2p.virt)
		goto out_iounmap;

	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
	phba->mbox_ext = (phba->slim2p.virt +
		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
	phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
	phba->IOCBs = (phba->slim2p.virt +
		       offsetof(struct lpfc_sli2_slim, IOCBs));

	phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
						 lpfc_sli_hbq_size(),
						 &phba->hbqslimp.phys,
						 GFP_KERNEL);
	if (!phba->hbqslimp.virt)
		goto out_free_slim;

	hbq_count = lpfc_sli_hbq_count();
	ptr = phba->hbqslimp.virt;
	for (i = 0; i < hbq_count; ++i) {
		phba->hbqs[i].hbq_virt = ptr;
		INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
		ptr += (lpfc_hbq_defs[i]->entry_count *
			sizeof(struct lpfc_hbq_entry));
	}
	phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
	phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;

	memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());

	phba->MBslimaddr = phba->slim_memmap_p;
	phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
	phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
	phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
	phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;

	return 0;

out_free_slim:
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);
out_iounmap:
	iounmap(phba->ctrl_regs_memmap_p);
out_iounmap_slim:
	iounmap(phba->slim_memmap_p);
out:
	return error;
}
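/*
 * Note (sketch, assuming the generic DMA API): the 64-then-32 bit mask
 * fallback above can equivalently be written with the combined helper
 * available on current kernels:
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return error;
 */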
/**
 * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-3 interface spec.
 **/
static void
lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return;
	else
		pdev = phba->pcidev;

	/* Free coherent DMA memory allocated */
	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* I/O memory unmap */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	return;
}
/**
 * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to wait for SLI4 device Power On Self Test (POST)
 * done and check status.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
int
lpfc_sli4_post_status_check(struct lpfc_hba *phba)
{
	struct lpfc_register portsmphr_reg, uerrlo_reg, uerrhi_reg;
	struct lpfc_register reg_data;
	int i, port_error = 0;
	uint32_t if_type;

	memset(&portsmphr_reg, 0, sizeof(portsmphr_reg));
	memset(&reg_data, 0, sizeof(reg_data));
	if (!phba->sli4_hba.PSMPHRregaddr)
		return -ENODEV;

	/* Wait up to 30 seconds for the SLI Port POST done and ready */
	for (i = 0; i < 3000; i++) {
		if (lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
			       &portsmphr_reg.word0) ||
		    (bf_get(lpfc_port_smphr_perr, &portsmphr_reg))) {
			/* Port has a fatal POST error, break out */
			port_error = -ENODEV;
			break;
		}
		if (LPFC_POST_STAGE_PORT_READY ==
		    bf_get(lpfc_port_smphr_port_status, &portsmphr_reg))
			break;
		msleep(10);
	}

	/*
	 * If there was a port error during POST, then don't proceed with
	 * other register reads as the data may not be valid.  Just exit.
	 */
	if (port_error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"1408 Port Failed POST - portsmphr=0x%x, "
			"perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, scr1=x%x, "
			"scr2=x%x, hscratch=x%x, pstatus=x%x\n",
			portsmphr_reg.word0,
			bf_get(lpfc_port_smphr_perr, &portsmphr_reg),
			bf_get(lpfc_port_smphr_sfi, &portsmphr_reg),
			bf_get(lpfc_port_smphr_nip, &portsmphr_reg),
			bf_get(lpfc_port_smphr_ipc, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr1, &portsmphr_reg),
			bf_get(lpfc_port_smphr_scr2, &portsmphr_reg),
			bf_get(lpfc_port_smphr_host_scratch, &portsmphr_reg),
			bf_get(lpfc_port_smphr_port_status, &portsmphr_reg));
	} else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2534 Device Info: SLIFamily=0x%x, "
				"SLIRev=0x%x, IFType=0x%x, SLIHint_1=0x%x, "
				"SLIHint_2=0x%x, FT=0x%x\n",
				bf_get(lpfc_sli_intf_sli_family,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_slirev,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_if_type,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint1,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_sli_hint2,
				       &phba->sli4_hba.sli_intf),
				bf_get(lpfc_sli_intf_func_type,
				       &phba->sli4_hba.sli_intf));
		/*
		 * Check for other Port errors during the initialization
		 * process.  Fail the load if the port did not come up
		 * correctly.
		 */
		if_type = bf_get(lpfc_sli_intf_if_type,
				 &phba->sli4_hba.sli_intf);
		switch (if_type) {
		case LPFC_SLI_INTF_IF_TYPE_0:
			phba->sli4_hba.ue_mask_lo =
			    readl(phba->sli4_hba.u.if_type0.UEMASKLOregaddr);
			phba->sli4_hba.ue_mask_hi =
			    readl(phba->sli4_hba.u.if_type0.UEMASKHIregaddr);
			uerrlo_reg.word0 =
			    readl(phba->sli4_hba.u.if_type0.UERRLOregaddr);
			uerrhi_reg.word0 =
			    readl(phba->sli4_hba.u.if_type0.UERRHIregaddr);
			if ((~phba->sli4_hba.ue_mask_lo & uerrlo_reg.word0) ||
			    (~phba->sli4_hba.ue_mask_hi & uerrhi_reg.word0)) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"1422 Unrecoverable Error "
						"Detected during POST "
						"uerr_lo_reg=0x%x, "
						"uerr_hi_reg=0x%x, "
						"ue_mask_lo_reg=0x%x, "
						"ue_mask_hi_reg=0x%x\n",
						uerrlo_reg.word0,
						uerrhi_reg.word0,
						phba->sli4_hba.ue_mask_lo,
						phba->sli4_hba.ue_mask_hi);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_2:
			/* Final checks.  The port status should be clean. */
			if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
				       &reg_data.word0) ||
			    (bf_get(lpfc_sliport_status_err, &reg_data) &&
			     !bf_get(lpfc_sliport_status_rn, &reg_data))) {
				phba->work_status[0] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR1regaddr);
				phba->work_status[1] =
					readl(phba->sli4_hba.u.if_type2.
					      ERR2regaddr);
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2888 Unrecoverable port error "
					"following POST: port status reg "
					"0x%x, port_smphr reg 0x%x, "
					"error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					portsmphr_reg.word0,
					phba->work_status[0],
					phba->work_status[1]);
				port_error = -ENODEV;
			}
			break;
		case LPFC_SLI_INTF_IF_TYPE_1:
		default:
			break;
		}
	}
	return port_error;
}
/**
 * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @if_type: The SLI4 interface type getting configured.
 *
 * This routine is invoked to set up SLI4 BAR0 PCI config space register
 * memory map.
 **/
static void
lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
{
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		phba->sli4_hba.u.if_type0.UERRLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_LO;
		phba->sli4_hba.u.if_type0.UERRHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UERR_STATUS_HI;
		phba->sli4_hba.u.if_type0.UEMASKLOregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_LO;
		phba->sli4_hba.u.if_type0.UEMASKHIregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_UE_MASK_HI;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		phba->sli4_hba.u.if_type2.EQDregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_EQ_DELAY_OFFSET;
		phba->sli4_hba.u.if_type2.ERR1regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER1_OFFSET;
		phba->sli4_hba.u.if_type2.ERR2regaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_ER2_OFFSET;
		phba->sli4_hba.u.if_type2.CTRLregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_CTL_OFFSET;
		phba->sli4_hba.u.if_type2.STATUSregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_STA_OFFSET;
		phba->sli4_hba.SLIINTFregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_SLI_INTF;
		phba->sli4_hba.PSMPHRregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_CTL_PORT_SEM_OFFSET;
		phba->sli4_hba.RQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_RQ_DOORBELL;
		phba->sli4_hba.WQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p +
						LPFC_ULP0_WQ_DOORBELL;
		phba->sli4_hba.EQCQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
		phba->sli4_hba.MQDBregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_MQ_DOORBELL;
		phba->sli4_hba.BMBXregaddr =
			phba->sli4_hba.conf_regs_memmap_p + LPFC_BMBX;
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}
/**
 * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
 * memory map.
 **/
static void
lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
{
	phba->sli4_hba.PSMPHRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_SLIPORT_IF0_SMPHR;
	phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_HST_ISR0;
	phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_HST_IMR0;
	phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
		LPFC_HST_ISCR0;
}
/**
 * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
 * @phba: pointer to lpfc hba data structure.
 * @vf: virtual function number
 *
 * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
 * based on the given virtual function number, @vf.
 *
 * Return 0 if successful, otherwise -ENODEV.
 **/
static int
lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
{
	if (vf > LPFC_VIR_FUNC_MAX)
		return -ENODEV;

	phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_ULP0_RQ_DOORBELL);
	phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE +
					LPFC_ULP0_WQ_DOORBELL);
	phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
	phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
	phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
				vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
	return 0;
}
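/*
 * Worked example (illustrative only): assuming a 4KB doorbell page per
 * virtual function (LPFC_VFR_PAGE_SIZE), vf 2's RQ doorbell resolves to
 *
 *	drbl_regs_memmap_p + 2 * LPFC_VFR_PAGE_SIZE + LPFC_ULP0_RQ_DOORBELL
 *
 * i.e. the same offset as vf 0's doorbell, two pages higher, which is
 * why a single bounds check against LPFC_VIR_FUNC_MAX suffices.
 */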
/**
 * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to create the bootstrap mailbox
 * region consistent with the SLI-4 interface spec.  This
 * routine allocates all memory necessary to communicate
 * mailbox commands to the port and sets up all alignment
 * needs.  No locks are expected to be held when calling
 * this routine.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - could not allocate memory.
 **/
static int
lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
{
	uint32_t bmbx_size;
	struct lpfc_dmabuf *dmabuf;
	struct dma_address *dma_address;
	uint32_t pa_addr;
	uint64_t phys_addr;

	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!dmabuf)
		return -ENOMEM;

	/*
	 * The bootstrap mailbox region is comprised of 2 parts
	 * plus an alignment restriction of 16 bytes.
	 */
	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
					   &dmabuf->phys, GFP_KERNEL);
	if (!dmabuf->virt) {
		kfree(dmabuf);
		return -ENOMEM;
	}

	/*
	 * Initialize the bootstrap mailbox pointers now so that the register
	 * operations are simple later.  The mailbox dma address is required
	 * to be 16-byte aligned.  Also align the virtual memory as each
	 * mailbox is copied into the bmbx mailbox region before issuing the
	 * command to the port.
	 */
	phba->sli4_hba.bmbx.dmabuf = dmabuf;
	phba->sli4_hba.bmbx.bmbx_size = bmbx_size;

	phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
					      LPFC_ALIGN_16_BYTE);
	phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
					  LPFC_ALIGN_16_BYTE);

	/*
	 * Set the high and low physical addresses now.  The SLI4 alignment
	 * requirement is 16 bytes and the mailbox is posted to the port
	 * as two 30-bit addresses.  The other data is a bit marking whether
	 * the 30-bit address is the high or low address.
	 * Upcast bmbx aphys to 64bits so shift instruction compiles
	 * clean on 32 bit machines.
	 */
	dma_address = &phba->sli4_hba.bmbx.dma_address;
	phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
	pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
	dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_HI);

	pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
	dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
					   LPFC_BMBX_BIT1_ADDR_LO);
	return 0;
}
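/*
 * Worked example of the split above (illustrative arithmetic only): for
 * an aligned bmbx.aphys of 0x12_3456_7890,
 *
 *	(phys_addr >> 34) & 0x3fffffff	= 0x4		(bits 63:34)
 *	(aphys     >>  4) & 0x3fffffff	= 0x23456789	(bits 33:4)
 *
 * Each 30-bit value is then shifted left by 2 and tagged with its
 * LPFC_BMBX_BIT1_ADDR_{HI,LO} flag before being written to the port.
 */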
/**
 * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to teardown the bootstrap mailbox
 * region and release all host resources.  This routine requires
 * the caller to ensure all mailbox commands have completed, no
 * additional mailbox commands are sent, and interrupts are disabled
 * before calling this routine.
 *
 **/
static void
lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
{
	dma_free_coherent(&phba->pcidev->dev,
			  phba->sli4_hba.bmbx.bmbx_size,
			  phba->sli4_hba.bmbx.dmabuf->virt,
			  phba->sli4_hba.bmbx.dmabuf->phys);

	kfree(phba->sli4_hba.bmbx.dmabuf);
	memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
}
/**
 * lpfc_sli4_read_config - Get the config parameters.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to read the configuration parameters from the HBA.
 * The configuration parameters are used to set the base and maximum values
 * for RPI's XRI's VPI's VFI's and FCFIs. These values also affect the resource
 * allocation for the port.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_read_config(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmb;
	struct lpfc_mbx_read_config *rd_config;
	union lpfc_sli4_cfg_shdr *shdr;
	uint32_t shdr_status, shdr_add_status;
	struct lpfc_mbx_get_func_cfg *get_func_cfg;
	struct lpfc_rsrc_desc_fcfcoe *desc;
	char *pdesc_0;
	uint16_t forced_link_speed;
	uint32_t if_type;
	int length, i, rc = 0, rc2;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2011 Unable to allocate memory for issuing "
				"SLI_CONFIG_SPECIAL mailbox command\n");
		return -ENOMEM;
	}

	lpfc_read_config(phba, pmb);

	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"2012 Mailbox failed, mbxCmd x%x "
				"READ_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		rc = -EIO;
	} else {
		rd_config = &pmb->u.mqe.un.rd_config;
		if (bf_get(lpfc_mbx_rd_conf_lnk_ldv, rd_config)) {
			phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
			phba->sli4_hba.lnk_info.lnk_tp =
				bf_get(lpfc_mbx_rd_conf_lnk_type, rd_config);
			phba->sli4_hba.lnk_info.lnk_no =
				bf_get(lpfc_mbx_rd_conf_lnk_numb, rd_config);
			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
					"3081 lnk_type:%d, lnk_numb:%d\n",
					phba->sli4_hba.lnk_info.lnk_tp,
					phba->sli4_hba.lnk_info.lnk_no);
		} else
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"3082 Mailbox (x%x) returned ldv:x0\n",
					bf_get(lpfc_mqe_command, &pmb->u.mqe));
		if (bf_get(lpfc_mbx_rd_conf_bbscn_def, rd_config)) {
			phba->bbcredit_support = 1;
			phba->sli4_hba.bbscn_params.word0 = rd_config->word8;
		}

		phba->sli4_hba.extents_in_use =
			bf_get(lpfc_mbx_rd_conf_extnts_inuse, rd_config);
		phba->sli4_hba.max_cfg_param.max_xri =
			bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
		phba->sli4_hba.max_cfg_param.xri_base =
			bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vpi =
			bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vpi_base =
			bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_rpi =
			bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
		phba->sli4_hba.max_cfg_param.rpi_base =
			bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_vfi =
			bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.vfi_base =
			bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
		phba->sli4_hba.max_cfg_param.max_fcfi =
			bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_eq =
			bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_rq =
			bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_wq =
			bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
		phba->sli4_hba.max_cfg_param.max_cq =
			bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
		phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
		phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
		phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
		phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
		phba->max_vpi = (phba->sli4_hba.max_cfg_param.max_vpi > 0) ?
				(phba->sli4_hba.max_cfg_param.max_vpi - 1) : 0;
		phba->max_vports = phba->max_vpi;
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"2003 cfg params Extents? %d "
				"XRI(B:%d M:%d), "
				"VPI(B:%d M:%d) "
				"VFI(B:%d M:%d) "
				"RPI(B:%d M:%d) "
				"FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n",
				phba->sli4_hba.extents_in_use,
				phba->sli4_hba.max_cfg_param.xri_base,
				phba->sli4_hba.max_cfg_param.max_xri,
				phba->sli4_hba.max_cfg_param.vpi_base,
				phba->sli4_hba.max_cfg_param.max_vpi,
				phba->sli4_hba.max_cfg_param.vfi_base,
				phba->sli4_hba.max_cfg_param.max_vfi,
				phba->sli4_hba.max_cfg_param.rpi_base,
				phba->sli4_hba.max_cfg_param.max_rpi,
				phba->sli4_hba.max_cfg_param.max_fcfi,
				phba->sli4_hba.max_cfg_param.max_eq,
				phba->sli4_hba.max_cfg_param.max_cq,
				phba->sli4_hba.max_cfg_param.max_wq,
				phba->sli4_hba.max_cfg_param.max_rq);
	}

	if (rc)
		goto read_cfg_out;

	/* Update link speed if forced link speed is supported */
	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
		forced_link_speed =
			bf_get(lpfc_mbx_rd_conf_link_speed, rd_config);
		if (forced_link_speed) {
			phba->hba_flag |= HBA_FORCED_LINK_SPEED;

			switch (forced_link_speed) {
			case LINK_SPEED_1G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_1G;
				break;
			case LINK_SPEED_2G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_2G;
				break;
			case LINK_SPEED_4G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_4G;
				break;
			case LINK_SPEED_8G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_8G;
				break;
			case LINK_SPEED_10G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_10G;
				break;
			case LINK_SPEED_16G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_16G;
				break;
			case LINK_SPEED_32G:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_32G;
				break;
			case 0xffff:
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_AUTO;
				break;
			default:
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0047 Unrecognized link "
						"speed : %d\n",
						forced_link_speed);
				phba->cfg_link_speed =
					LPFC_USER_LINK_SPEED_AUTO;
			}
		}
	}

	/* Reset the DFT_HBA_Q_DEPTH to the max xri */
	length = phba->sli4_hba.max_cfg_param.max_xri -
			lpfc_sli4_get_els_iocb_cnt(phba);
	if (phba->cfg_hba_queue_depth > length) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"3361 HBA queue depth changed from %d to %d\n",
				phba->cfg_hba_queue_depth, length);
		phba->cfg_hba_queue_depth = length;
	}

	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_2)
		goto read_cfg_out;

	/* get the pf# and vf# for SLI4 if_type 2 port */
	length = (sizeof(struct lpfc_mbx_get_func_cfg) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, pmb, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_FUNCTION_CONFIG,
			 length, LPFC_SLI4_MBX_EMBED);

	rc2 = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	shdr = (union lpfc_sli4_cfg_shdr *)
				&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (rc2 || shdr_status || shdr_add_status) {
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3026 Mailbox failed, mbxCmd x%x "
				"GET_FUNCTION_CONFIG, mbxStatus x%x\n",
				bf_get(lpfc_mqe_command, &pmb->u.mqe),
				bf_get(lpfc_mqe_status, &pmb->u.mqe));
		goto read_cfg_out;
	}

	/* search for fc_fcoe resource descriptor */
	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;

	pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
	desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
	length = bf_get(lpfc_rsrc_desc_fcfcoe_length, desc);
	if (length == LPFC_RSRC_DESC_TYPE_FCFCOE_V0_RSVD)
		length = LPFC_RSRC_DESC_TYPE_FCFCOE_V0_LENGTH;
	else if (length != LPFC_RSRC_DESC_TYPE_FCFCOE_V1_LENGTH)
		goto read_cfg_out;

	for (i = 0; i < LPFC_RSRC_DESC_MAX_NUM; i++) {
		desc = (struct lpfc_rsrc_desc_fcfcoe *)(pdesc_0 + length * i);
		if (LPFC_RSRC_DESC_TYPE_FCFCOE ==
		    bf_get(lpfc_rsrc_desc_fcfcoe_type, desc)) {
			phba->sli4_hba.iov.pf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_pfnum, desc);
			phba->sli4_hba.iov.vf_number =
				bf_get(lpfc_rsrc_desc_fcfcoe_vfnum, desc);
			break;
		}
	}

	if (i < LPFC_RSRC_DESC_MAX_NUM)
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3027 GET_FUNCTION_CONFIG: pf_number:%d, "
				"vf_number:%d\n", phba->sli4_hba.iov.pf_number,
				phba->sli4_hba.iov.vf_number);
	else
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"3028 GET_FUNCTION_CONFIG: failed to find "
				"Resource Descriptor:x%x\n",
				LPFC_RSRC_DESC_TYPE_FCFCOE);

read_cfg_out:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
/**
 * lpfc_setup_endian_order - Write endian order to an SLI4 if_type 0 port.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to setup the port-side endian order when
 * the port if_type is 0.  This routine has no function for other
 * if_types.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
static int
lpfc_setup_endian_order(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t if_type, rc = 0;
	uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
				      HOST_ENDIAN_HIGH_WORD1};

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0492 Unable to allocate memory for "
					"issuing SLI_CONFIG_SPECIAL mailbox "
					"command\n");
			return -ENOMEM;
		}

		/*
		 * The SLI4_CONFIG_SPECIAL mailbox command requires the first
		 * two words to contain special data values and no other data.
		 */
		memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
		memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0493 SLI_CONFIG_SPECIAL mailbox "
					"failed with status x%x\n",
					rc);
			rc = -EIO;
		}
		mempool_free(mboxq, phba->mbox_mem_pool);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}
	return rc;
}
/**
 * lpfc_sli4_queue_verify - Verify and update EQ counts
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to check the user settable queue counts for EQs.
 * After this routine is called the counts will be set to valid values that
 * adhere to the constraints of the system's interrupt vectors and the port's
 * queue resources.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/
static int
lpfc_sli4_queue_verify(struct lpfc_hba *phba)
{
	int io_channel;
	int fof_vectors = phba->cfg_fof ? 1 : 0;

	/*
	 * Sanity check for configured queue parameters against the run-time
	 * device parameters
	 */

	/* Sanity check on HBA EQ parameters */
	io_channel = phba->io_channel_irqs;

	if (phba->sli4_hba.num_online_cpu < io_channel) {
		lpfc_printf_log(phba,
				KERN_ERR, LOG_INIT,
				"3188 Reducing IO channels to match number of "
				"online CPUs: from %d to %d\n",
				io_channel, phba->sli4_hba.num_online_cpu);
		io_channel = phba->sli4_hba.num_online_cpu;
	}

	if (io_channel + fof_vectors > phba->sli4_hba.max_cfg_param.max_eq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2575 Reducing IO channels to match number of "
				"available EQs: from %d to %d\n",
				io_channel,
				phba->sli4_hba.max_cfg_param.max_eq);
		io_channel = phba->sli4_hba.max_cfg_param.max_eq - fof_vectors;
	}

	/* The actual number of FCP / NVME event queues adopted */
	if (io_channel != phba->io_channel_irqs)
		phba->io_channel_irqs = io_channel;
	if (phba->cfg_fcp_io_channel > io_channel)
		phba->cfg_fcp_io_channel = io_channel;
	if (phba->cfg_nvme_io_channel > io_channel)
		phba->cfg_nvme_io_channel = io_channel;
	if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
		phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
			phba->io_channel_irqs, phba->cfg_fcp_io_channel,
			phba->cfg_nvme_io_channel, phba->cfg_nvmet_mrq);

	/* Get EQ depth from module parameter, fake the default for now */
	phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
	phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;

	/* Get CQ depth from module parameter, fake the default for now */
	phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
	phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
	return 0;
}
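/*
 * Worked example (illustrative only): on a host with 16 online CPUs,
 * io_channel_irqs = 32, max_eq = 20 and one FOF vector, the first check
 * above trims the IO channels to 16 (the CPU count); 16 + 1 still fits
 * under the 20 available EQs, so no further reduction occurs, and the
 * FCP/NVME channel counts and nvmet_mrq are then capped at 16 as well.
 */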
static int
lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
{
	struct lpfc_queue *qdesc;
	int cnt;

	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0508 Failed allocate fast-path NVME CQ (%d)\n",
				wqidx);
		return 1;
	}
	phba->sli4_hba.nvme_cq[wqidx] = qdesc;

	cnt = LPFC_NVME_WQSIZE;
	qdesc = lpfc_sli4_queue_alloc(phba, LPFC_WQE128_SIZE, cnt);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0509 Failed allocate fast-path NVME WQ (%d)\n",
				wqidx);
		return 1;
	}
	phba->sli4_hba.nvme_wq[wqidx] = qdesc;
	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
	return 0;
}
static int
lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
{
	struct lpfc_queue *qdesc;
	uint32_t wqesize;

	/* Create Fast Path FCP CQs */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
		return 1;
	}
	phba->sli4_hba.fcp_cq[wqidx] = qdesc;

	/* Create Fast Path FCP WQs */
	wqesize = (phba->fcp_embed_io) ?
		LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
	qdesc = lpfc_sli4_queue_alloc(phba, wqesize, phba->sli4_hba.wq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0503 Failed allocate fast-path FCP WQ (%d)\n",
				wqidx);
		return 1;
	}
	phba->sli4_hba.fcp_wq[wqidx] = qdesc;
	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
	return 0;
}
  7283. /**
  7284. * lpfc_sli4_queue_create - Create all the SLI4 queues
  7285. * @phba: pointer to lpfc hba data structure.
  7286. *
  7287. * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
  7288. * operation. For each SLI4 queue type, the parameters such as queue entry
  7289. * count (queue depth) shall be taken from the module parameter. For now,
  7290. * we just use some constant number as place holder.
  7291. *
  7292. * Return codes
  7293. * 0 - successful
  7294. * -ENOMEM - No availble memory
  7295. * -EIO - The mailbox failed to complete successfully.
  7296. **/
  7297. int
  7298. lpfc_sli4_queue_create(struct lpfc_hba *phba)
  7299. {
  7300. struct lpfc_queue *qdesc;
  7301. int idx, io_channel;
  7302. /*
  7303. * Create HBA Record arrays.
  7304. * Both NVME and FCP will share that same vectors / EQs
  7305. */
  7306. io_channel = phba->io_channel_irqs;
  7307. if (!io_channel)
  7308. return -ERANGE;
  7309. phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
  7310. phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
  7311. phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
  7312. phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
  7313. phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
  7314. phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
  7315. phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
  7316. phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
  7317. phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
  7318. phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
  7319. phba->sli4_hba.hba_eq = kcalloc(io_channel,
  7320. sizeof(struct lpfc_queue *),
  7321. GFP_KERNEL);
  7322. if (!phba->sli4_hba.hba_eq) {
  7323. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7324. "2576 Failed allocate memory for "
  7325. "fast-path EQ record array\n");
  7326. goto out_error;
  7327. }
  7328. if (phba->cfg_fcp_io_channel) {
  7329. phba->sli4_hba.fcp_cq = kcalloc(phba->cfg_fcp_io_channel,
  7330. sizeof(struct lpfc_queue *),
  7331. GFP_KERNEL);
  7332. if (!phba->sli4_hba.fcp_cq) {
  7333. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7334. "2577 Failed allocate memory for "
  7335. "fast-path CQ record array\n");
  7336. goto out_error;
  7337. }
  7338. phba->sli4_hba.fcp_wq = kcalloc(phba->cfg_fcp_io_channel,
  7339. sizeof(struct lpfc_queue *),
  7340. GFP_KERNEL);
  7341. if (!phba->sli4_hba.fcp_wq) {
  7342. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7343. "2578 Failed allocate memory for "
  7344. "fast-path FCP WQ record array\n");
  7345. goto out_error;
  7346. }
  7347. /*
  7348. * Since the first EQ can have multiple CQs associated with it,
  7349. * this array is used to quickly see if we have a FCP fast-path
  7350. * CQ match.
  7351. */
  7352. phba->sli4_hba.fcp_cq_map = kcalloc(phba->cfg_fcp_io_channel,
  7353. sizeof(uint16_t),
  7354. GFP_KERNEL);
  7355. if (!phba->sli4_hba.fcp_cq_map) {
  7356. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7357. "2545 Failed allocate memory for "
  7358. "fast-path CQ map\n");
  7359. goto out_error;
  7360. }
  7361. }
  7362. if (phba->cfg_nvme_io_channel) {
  7363. phba->sli4_hba.nvme_cq = kcalloc(phba->cfg_nvme_io_channel,
  7364. sizeof(struct lpfc_queue *),
  7365. GFP_KERNEL);
  7366. if (!phba->sli4_hba.nvme_cq) {
  7367. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7368. "6077 Failed allocate memory for "
  7369. "fast-path CQ record array\n");
  7370. goto out_error;
  7371. }
  7372. phba->sli4_hba.nvme_wq = kcalloc(phba->cfg_nvme_io_channel,
  7373. sizeof(struct lpfc_queue *),
  7374. GFP_KERNEL);
  7375. if (!phba->sli4_hba.nvme_wq) {
  7376. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7377. "2581 Failed allocate memory for "
  7378. "fast-path NVME WQ record array\n");
  7379. goto out_error;
  7380. }
  7381. /*
  7382. * Since the first EQ can have multiple CQs associated with it,
  7383. * this array is used to quickly see if we have a NVME fast-path
  7384. * CQ match.
  7385. */
  7386. phba->sli4_hba.nvme_cq_map = kcalloc(phba->cfg_nvme_io_channel,
  7387. sizeof(uint16_t),
  7388. GFP_KERNEL);
  7389. if (!phba->sli4_hba.nvme_cq_map) {
  7390. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7391. "6078 Failed allocate memory for "
  7392. "fast-path CQ map\n");
  7393. goto out_error;
  7394. }
  7395. if (phba->nvmet_support) {
  7396. phba->sli4_hba.nvmet_cqset = kcalloc(
  7397. phba->cfg_nvmet_mrq,
  7398. sizeof(struct lpfc_queue *),
  7399. GFP_KERNEL);
  7400. if (!phba->sli4_hba.nvmet_cqset) {
  7401. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7402. "3121 Fail allocate memory for "
  7403. "fast-path CQ set array\n");
  7404. goto out_error;
  7405. }
  7406. phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
  7407. phba->cfg_nvmet_mrq,
  7408. sizeof(struct lpfc_queue *),
  7409. GFP_KERNEL);
  7410. if (!phba->sli4_hba.nvmet_mrq_hdr) {
  7411. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7412. "3122 Fail allocate memory for "
  7413. "fast-path RQ set hdr array\n");
  7414. goto out_error;
  7415. }
  7416. phba->sli4_hba.nvmet_mrq_data = kcalloc(
  7417. phba->cfg_nvmet_mrq,
  7418. sizeof(struct lpfc_queue *),
  7419. GFP_KERNEL);
  7420. if (!phba->sli4_hba.nvmet_mrq_data) {
  7421. lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
  7422. "3124 Fail allocate memory for "
  7423. "fast-path RQ set data array\n");
				goto out_error;
			}
		}
	}

	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);

	/* Create HBA Event Queues (EQs) */
	for (idx = 0; idx < io_channel; idx++) {
		/* Create EQs */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
					      phba->sli4_hba.eq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0497 Failed allocate EQ (%d)\n", idx);
			goto out_error;
		}
		phba->sli4_hba.hba_eq[idx] = qdesc;
	}

	/* FCP and NVME io channels are not required to be balanced */

	for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
		if (lpfc_alloc_fcp_wq_cq(phba, idx))
			goto out_error;

	for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
		if (lpfc_alloc_nvme_wq_cq(phba, idx))
			goto out_error;

	if (phba->nvmet_support) {
		for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
			qdesc = lpfc_sli4_queue_alloc(phba,
					phba->sli4_hba.cq_esize,
					phba->sli4_hba.cq_ecount);
			if (!qdesc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3142 Failed allocate NVME "
						"CQ Set (%d)\n", idx);
				goto out_error;
			}
			phba->sli4_hba.nvmet_cqset[idx] = qdesc;
		}
	}

	/*
	 * Create Slow Path Completion Queues (CQs)
	 */

	/* Create slow-path Mailbox Command Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0500 Failed allocate slow-path mailbox CQ\n");
		goto out_error;
	}
	phba->sli4_hba.mbx_cq = qdesc;

	/* Create slow-path ELS Complete Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
				      phba->sli4_hba.cq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0501 Failed allocate slow-path ELS CQ\n");
		goto out_error;
	}
	phba->sli4_hba.els_cq = qdesc;

	/*
	 * Create Slow Path Work Queues (WQs)
	 */

	/* Create Mailbox Command Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
				      phba->sli4_hba.mq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0505 Failed allocate slow-path MQ\n");
		goto out_error;
	}
	phba->sli4_hba.mbx_wq = qdesc;

	/*
	 * Create ELS Work Queues
	 */

	/* Create slow-path ELS Work Queue */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
				      phba->sli4_hba.wq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0504 Failed allocate slow-path ELS WQ\n");
		goto out_error;
	}
	phba->sli4_hba.els_wq = qdesc;
	list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* Create NVME LS Complete Queue */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6079 Failed allocate NVME LS CQ\n");
			goto out_error;
		}
		phba->sli4_hba.nvmels_cq = qdesc;

		/* Create NVME LS Work Queue */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
					      phba->sli4_hba.wq_ecount);
		if (!qdesc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6080 Failed allocate NVME LS WQ\n");
			goto out_error;
		}
		phba->sli4_hba.nvmels_wq = qdesc;
		list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
	}

	/*
	 * Create Receive Queue (RQ)
	 */

	/* Create Receive Queue for header */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0506 Failed allocate receive HRQ\n");
		goto out_error;
	}
	phba->sli4_hba.hdr_rq = qdesc;

	/* Create Receive Queue for data */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
				      phba->sli4_hba.rq_ecount);
	if (!qdesc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0507 Failed allocate receive DRQ\n");
		goto out_error;
	}
	phba->sli4_hba.dat_rq = qdesc;

	if (phba->nvmet_support) {
		for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
			/* Create NVMET Receive Queue for header */
			qdesc = lpfc_sli4_queue_alloc(phba,
					phba->sli4_hba.rq_esize,
					LPFC_NVMET_RQE_DEF_COUNT);
			if (!qdesc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3146 Failed allocate "
						"receive HRQ\n");
				goto out_error;
			}
			phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;

			/* Only needed for header of RQ pair */
			qdesc->rqbp = kzalloc(sizeof(struct lpfc_rqb),
					      GFP_KERNEL);
			if (qdesc->rqbp == NULL) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6131 Failed allocate "
						"Header RQBP\n");
				goto out_error;
			}

			/* Put list in known state in case driver load fails. */
			INIT_LIST_HEAD(&qdesc->rqbp->rqb_buffer_list);

			/* Create NVMET Receive Queue for data */
			qdesc = lpfc_sli4_queue_alloc(phba,
					phba->sli4_hba.rq_esize,
					LPFC_NVMET_RQE_DEF_COUNT);
			if (!qdesc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3156 Failed allocate "
						"receive DRQ\n");
				goto out_error;
			}
			phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
		}
	}

	/* Create the Queues needed for Flash Optimized Fabric operations */
	if (phba->cfg_fof)
		lpfc_fof_queue_create(phba);
	return 0;

out_error:
	lpfc_sli4_queue_destroy(phba);
	return -ENOMEM;
}
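
/**
 * __lpfc_sli4_release_queue - Free a queue and clear the caller's pointer
 * @qp: address of the queue pointer to release.
 *
 * Helper for the queue teardown paths: frees the queue, if one was
 * allocated, and NULLs the caller's pointer so repeated teardown is safe.
 **/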
static inline void
__lpfc_sli4_release_queue(struct lpfc_queue **qp)
{
	if (*qp != NULL) {
		lpfc_sli4_queue_free(*qp);
		*qp = NULL;
	}
}
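
/**
 * lpfc_sli4_release_queues - Free an array of queues and the array itself
 * @qs: address of the queue-pointer array to release.
 * @max: number of entries in the array.
 *
 * Releases every queue in the array, then frees the array and NULLs the
 * caller's pointer so repeated teardown is safe.
 **/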
static inline void
lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
{
	int idx;

	if (*qs == NULL)
		return;

	for (idx = 0; idx < max; idx++)
		__lpfc_sli4_release_queue(&(*qs)[idx]);

	kfree(*qs);
	*qs = NULL;
}
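
/**
 * lpfc_sli4_release_queue_map - Free a CQ fast-lookup map
 * @qmap: address of the CQ-id mapping array to release.
 *
 * Frees the mapping array, if one was allocated, and NULLs the caller's
 * pointer.
 **/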
static inline void
lpfc_sli4_release_queue_map(uint16_t **qmap)
{
	if (*qmap != NULL) {
		kfree(*qmap);
		*qmap = NULL;
	}
}

/**
 * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the SLI4 queues allocated for the
 * FCoE HBA operation.
 **/
void
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
{
	if (phba->cfg_fof)
		lpfc_fof_queue_destroy(phba);

	/* Release HBA eqs */
	lpfc_sli4_release_queues(&phba->sli4_hba.hba_eq, phba->io_channel_irqs);

	/* Release FCP cqs */
	lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq,
				 phba->cfg_fcp_io_channel);

	/* Release FCP wqs */
	lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq,
				 phba->cfg_fcp_io_channel);

	/* Release FCP CQ mapping array */
	lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map);

	/* Release NVME cqs */
	lpfc_sli4_release_queues(&phba->sli4_hba.nvme_cq,
				 phba->cfg_nvme_io_channel);

	/* Release NVME wqs */
	lpfc_sli4_release_queues(&phba->sli4_hba.nvme_wq,
				 phba->cfg_nvme_io_channel);

	/* Release NVME CQ mapping array */
	lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);

	/* Release NVMET CQ set and MRQ header/data queues */
	lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
				 phba->cfg_nvmet_mrq);

	lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
				 phba->cfg_nvmet_mrq);
	lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
				 phba->cfg_nvmet_mrq);

	/* Release mailbox command work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);

	/* Release ELS work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);

	/* Release NVME LS work queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);

	/* Release unsolicited receive queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
	__lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);

	/* Release ELS complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);

	/* Release NVME LS complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);

	/* Release mailbox command complete queue */
	__lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);

	/* Everything on this list has been freed */
	INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
}
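
/**
 * lpfc_free_rq_buffer - Free all the posted buffers on a receive queue
 * @phba: pointer to lpfc hba data structure.
 * @rq: pointer to the receive queue whose buffer list is drained.
 *
 * Walks the RQ's posted-buffer list, handing each buffer back through the
 * queue's rqb_free_buffer callback and decrementing the posted-buffer count.
 * Always returns 1.
 **/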
int
lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
{
	struct lpfc_rqb *rqbp;
	struct lpfc_dmabuf *h_buf;
	struct rqb_dmabuf *rqb_buffer;

	rqbp = rq->rqbp;
	while (!list_empty(&rqbp->rqb_buffer_list)) {
		list_remove_head(&rqbp->rqb_buffer_list, h_buf,
				 struct lpfc_dmabuf, list);
		rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
		(rqbp->rqb_free_buffer)(phba, rqb_buffer);
		rqbp->buffer_count--;
	}
	return 1;
}
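
/**
 * lpfc_create_wq_cq - Create a WQ/CQ (or MQ/CQ) pair on a parent EQ
 * @phba: pointer to lpfc hba data structure.
 * @eq: parent event queue for the new completion queue.
 * @cq: completion queue to create.
 * @wq: work queue (mailbox queue for LPFC_MBOX) to create on the CQ.
 * @cq_map: optional fast-lookup slot to record the new CQ id in.
 * @qidx: index of the queue pair, used for logging.
 * @qtype: queue type (LPFC_MBOX, LPFC_FCP, LPFC_NVME, ...).
 *
 * Creates the CQ on the given EQ, then the WQ (or MQ) on that CQ, binding
 * the fast-path WQ to its SLI ring.
 *
 * Return: 0 on success; -ENOMEM or the queue-create return code on failure.
 **/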
static int
lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
		  struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
		  int qidx, uint32_t qtype)
{
	struct lpfc_sli_ring *pring;
	int rc;

	if (!eq || !cq || !wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6085 Fast-path %s (%d) not allocated\n",
				((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
		return -ENOMEM;
	}

	/* create the CQ first */
	rc = lpfc_cq_create(phba, cq, eq,
			    (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6086 Failed setup of CQ (%d), rc = 0x%x\n",
				qidx, (uint32_t)rc);
		return rc;
	}

	if (qtype != LPFC_MBOX) {
		/* Record the CQ id in the cq_map for fast lookup */
		if (cq_map)
			*cq_map = cq->queue_id;

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
			qidx, cq->queue_id, qidx, eq->queue_id);

		/* create the WQ */
		rc = lpfc_wq_create(phba, wq, cq, qtype);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6123 Fail setup fastpath WQ (%d), rc = 0x%x\n",
				qidx, (uint32_t)rc);
			/* no need to tear down cq - caller will do so */
			return rc;
		}

		/* Bind this CQ/WQ to its SLI ring */
		pring = wq->pring;
		pring->sli.sli4.wqp = (void *)wq;
		cq->pring = pring;

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
			qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
	} else {
		rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0539 Failed setup of slow-path MQ: "
				"rc = 0x%x\n", rc);
			/* no need to tear down cq - caller will do so */
			return rc;
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.mbx_wq->queue_id,
			phba->sli4_hba.mbx_cq->queue_id);
	}

	return 0;
}

/**
 * lpfc_sli4_queue_setup - Set up all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
 * operation.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_sli4_queue_setup(struct lpfc_hba *phba)
{
	uint32_t shdr_status, shdr_add_status;
	union lpfc_sli4_cfg_shdr *shdr;
	LPFC_MBOXQ_t *mboxq;
	int qidx;
	uint32_t length, io_channel;
	int rc = -ENOMEM;

	/* Check for dual-ULP support */
	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mboxq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3249 Unable to allocate memory for "
				"QUERY_FW_CFG mailbox command\n");
		return -ENOMEM;
	}
	length = (sizeof(struct lpfc_mbx_query_fw_config) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_QUERY_FW_CFG,
			 length, LPFC_SLI4_MBX_EMBED);

	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	shdr = (union lpfc_sli4_cfg_shdr *)
		&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
	if (shdr_status || shdr_add_status || rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3250 QUERY_FW_CFG mailbox failed with status "
				"x%x add_status x%x, mbx status x%x\n",
				shdr_status, shdr_add_status, rc);
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
		rc = -ENXIO;
		goto out_error;
	}

	phba->sli4_hba.fw_func_mode =
			mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
	phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
	phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
	phba->sli4_hba.physical_port =
			mboxq->u.mqe.un.query_fw_cfg.rsp.physical_port;
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
			"ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
			phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);

	if (rc != MBX_TIMEOUT)
		mempool_free(mboxq, phba->mbox_mem_pool);

	/*
	 * Set up HBA Event Queues (EQs)
	 */
	io_channel = phba->io_channel_irqs;

	/* Set up HBA event queue */
	if (io_channel && !phba->sli4_hba.hba_eq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3147 Fast-path EQs not allocated\n");
		rc = -ENOMEM;
		goto out_error;
	}
	for (qidx = 0; qidx < io_channel; qidx++) {
		if (!phba->sli4_hba.hba_eq[qidx]) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0522 Fast-path EQ (%d) not "
					"allocated\n", qidx);
			rc = -ENOMEM;
			goto out_destroy;
		}
		rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[qidx],
				    phba->cfg_fcp_imax);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0523 Failed setup of fast-path EQ "
					"(%d), rc = 0x%x\n", qidx,
					(uint32_t)rc);
			goto out_destroy;
		}
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"2584 HBA EQ setup: queue[%d]-id=%d\n",
				qidx, phba->sli4_hba.hba_eq[qidx]->queue_id);
	}

	if (phba->cfg_nvme_io_channel) {
		if (!phba->sli4_hba.nvme_cq || !phba->sli4_hba.nvme_wq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"6084 Fast-path NVME %s array not allocated\n",
				(phba->sli4_hba.nvme_cq) ? "WQ" : "CQ");
			rc = -ENOMEM;
			goto out_destroy;
		}

		for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
			rc = lpfc_create_wq_cq(phba,
					phba->sli4_hba.hba_eq[
						qidx % io_channel],
					phba->sli4_hba.nvme_cq[qidx],
					phba->sli4_hba.nvme_wq[qidx],
					&phba->sli4_hba.nvme_cq_map[qidx],
					qidx, LPFC_NVME);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6123 Failed to setup fastpath "
					"NVME WQ/CQ (%d), rc = 0x%x\n",
					qidx, (uint32_t)rc);
				goto out_destroy;
			}
		}
	}

	if (phba->cfg_fcp_io_channel) {
		/* Set up fast-path FCP Response Complete Queue */
		if (!phba->sli4_hba.fcp_cq || !phba->sli4_hba.fcp_wq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3148 Fast-path FCP %s array not allocated\n",
				phba->sli4_hba.fcp_cq ? "WQ" : "CQ");
			rc = -ENOMEM;
			goto out_destroy;
		}

		for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
			rc = lpfc_create_wq_cq(phba,
					phba->sli4_hba.hba_eq[
						qidx % io_channel],
					phba->sli4_hba.fcp_cq[qidx],
					phba->sli4_hba.fcp_wq[qidx],
					&phba->sli4_hba.fcp_cq_map[qidx],
					qidx, LPFC_FCP);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0535 Failed to setup fastpath "
					"FCP WQ/CQ (%d), rc = 0x%x\n",
					qidx, (uint32_t)rc);
				goto out_destroy;
			}
		}
	}

	/*
	 * Set up Slow Path Complete Queues (CQs)
	 */

	/* Set up slow-path MBOX CQ/MQ */
	if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0528 %s not allocated\n",
				phba->sli4_hba.mbx_cq ?
				"Mailbox WQ" : "Mailbox CQ");
		rc = -ENOMEM;
		goto out_destroy;
	}

	rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
			       phba->sli4_hba.mbx_cq,
			       phba->sli4_hba.mbx_wq,
			       NULL, 0, LPFC_MBOX);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
			(uint32_t)rc);
		goto out_destroy;
	}

	if (phba->nvmet_support) {
		if (!phba->sli4_hba.nvmet_cqset) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"3165 Fast-path NVME CQ Set "
					"array not allocated\n");
			rc = -ENOMEM;
			goto out_destroy;
		}
		if (phba->cfg_nvmet_mrq > 1) {
			rc = lpfc_cq_create_set(phba,
					phba->sli4_hba.nvmet_cqset,
					phba->sli4_hba.hba_eq,
					LPFC_WCQ, LPFC_NVMET);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"3164 Failed setup of NVME CQ "
						"Set, rc = 0x%x\n",
						(uint32_t)rc);
				goto out_destroy;
			}
		} else {
			/* Set up NVMET Receive Complete Queue */
			rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
					    phba->sli4_hba.hba_eq[0],
					    LPFC_WCQ, LPFC_NVMET);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6089 Failed setup NVMET CQ: "
						"rc = 0x%x\n", (uint32_t)rc);
				goto out_destroy;
			}
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"6090 NVMET CQ setup: cq-id=%d, "
					"parent eq-id=%d\n",
					phba->sli4_hba.nvmet_cqset[0]->queue_id,
					phba->sli4_hba.hba_eq[0]->queue_id);
		}
	}

	/* Set up slow-path ELS WQ/CQ */
	if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0530 ELS %s not allocated\n",
				phba->sli4_hba.els_cq ? "WQ" : "CQ");
		rc = -ENOMEM;
		goto out_destroy;
	}
	rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
			       phba->sli4_hba.els_cq,
			       phba->sli4_hba.els_wq,
			       NULL, 0, LPFC_ELS);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"0529 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
			(uint32_t)rc);
		goto out_destroy;
	}
	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
			phba->sli4_hba.els_wq->queue_id,
			phba->sli4_hba.els_cq->queue_id);

	if (phba->cfg_nvme_io_channel) {
		/* Set up NVME LS Complete Queue */
		if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6091 LS %s not allocated\n",
					phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
			rc = -ENOMEM;
			goto out_destroy;
		}
		rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
				       phba->sli4_hba.nvmels_cq,
				       phba->sli4_hba.nvmels_wq,
				       NULL, 0, LPFC_NVME_LS);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0529 Failed setup of NVME LS WQ/CQ: "
				"rc = 0x%x\n", (uint32_t)rc);
			goto out_destroy;
		}

		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"6096 NVME LS WQ setup: wq-id=%d, "
				"parent cq-id=%d\n",
				phba->sli4_hba.nvmels_wq->queue_id,
				phba->sli4_hba.nvmels_cq->queue_id);
	}

	/*
	 * Create NVMET Receive Queue (RQ)
	 */
	if (phba->nvmet_support) {
		if ((!phba->sli4_hba.nvmet_cqset) ||
		    (!phba->sli4_hba.nvmet_mrq_hdr) ||
		    (!phba->sli4_hba.nvmet_mrq_data)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6130 MRQ CQ Queues not "
					"allocated\n");
			rc = -ENOMEM;
			goto out_destroy;
		}
		if (phba->cfg_nvmet_mrq > 1) {
			rc = lpfc_mrq_create(phba,
					     phba->sli4_hba.nvmet_mrq_hdr,
					     phba->sli4_hba.nvmet_mrq_data,
					     phba->sli4_hba.nvmet_cqset,
					     LPFC_NVMET);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6098 Failed setup of NVMET "
						"MRQ: rc = 0x%x\n",
						(uint32_t)rc);
				goto out_destroy;
			}
		} else {
			rc = lpfc_rq_create(phba,
					    phba->sli4_hba.nvmet_mrq_hdr[0],
					    phba->sli4_hba.nvmet_mrq_data[0],
					    phba->sli4_hba.nvmet_cqset[0],
					    LPFC_NVMET);
			if (rc) {
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6057 Failed setup of NVMET "
						"Receive Queue: rc = 0x%x\n",
						(uint32_t)rc);
				goto out_destroy;
			}

			lpfc_printf_log(
				phba, KERN_INFO, LOG_INIT,
				"6099 NVMET RQ setup: hdr-rq-id=%d, "
				"dat-rq-id=%d parent cq-id=%d\n",
				phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
				phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
				phba->sli4_hba.nvmet_cqset[0]->queue_id);
		}
	}

	if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0540 Receive Queue not allocated\n");
		rc = -ENOMEM;
		goto out_destroy;
	}

	rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
			    phba->sli4_hba.els_cq, LPFC_USOL);
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0541 Failed setup of Receive Queue: "
				"rc = 0x%x\n", (uint32_t)rc);
		goto out_destroy;
	}

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
			"parent cq-id=%d\n",
			phba->sli4_hba.hdr_rq->queue_id,
			phba->sli4_hba.dat_rq->queue_id,
			phba->sli4_hba.els_cq->queue_id);

	if (phba->cfg_fof) {
		rc = lpfc_fof_queue_setup(phba);
		if (rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0549 Failed setup of FOF Queues: "
					"rc = 0x%x\n", rc);
			goto out_destroy;
		}
	}
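
	/*
	 * Apply the configured interrupt coalescing (EQ delay) to the EQs
	 * in batches; each call covers up to LPFC_MAX_EQ_DELAY_EQID_CNT
	 * event queues.
	 */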
	for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
		lpfc_modify_hba_eq_delay(phba, qidx, LPFC_MAX_EQ_DELAY_EQID_CNT,
					 phba->cfg_fcp_imax);

	return 0;

out_destroy:
	lpfc_sli4_queue_unset(phba);
out_error:
	return rc;
}

/**
 * lpfc_sli4_queue_unset - Unset all the SLI4 queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset (destroy) all the SLI4 queues created
 * for the FCoE HBA operation.
 **/
void
lpfc_sli4_queue_unset(struct lpfc_hba *phba)
{
	int qidx;

	/* Unset the queues created for Flash Optimized Fabric operations */
	if (phba->cfg_fof)
		lpfc_fof_queue_destroy(phba);

	/* Unset mailbox command work queue */
	if (phba->sli4_hba.mbx_wq)
		lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);

	/* Unset NVME LS work queue */
	if (phba->sli4_hba.nvmels_wq)
		lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);

	/* Unset ELS work queue */
	if (phba->sli4_hba.els_wq)
		lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);

	/* Unset unsolicited receive queue */
	if (phba->sli4_hba.hdr_rq)
		lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
				phba->sli4_hba.dat_rq);

	/* Unset FCP work queue */
	if (phba->sli4_hba.fcp_wq)
		for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
			lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[qidx]);

	/* Unset NVME work queue */
	if (phba->sli4_hba.nvme_wq) {
		for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
			lpfc_wq_destroy(phba, phba->sli4_hba.nvme_wq[qidx]);
	}

	/* Unset mailbox command complete queue */
	if (phba->sli4_hba.mbx_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);

	/* Unset ELS complete queue */
	if (phba->sli4_hba.els_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);

	/* Unset NVME LS complete queue */
	if (phba->sli4_hba.nvmels_cq)
		lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);

	/* Unset NVME response complete queue */
	if (phba->sli4_hba.nvme_cq)
		for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
			lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);

	/* Unset NVMET MRQ queue */
	if (phba->sli4_hba.nvmet_mrq_hdr) {
		for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
			lpfc_rq_destroy(phba,
					phba->sli4_hba.nvmet_mrq_hdr[qidx],
					phba->sli4_hba.nvmet_mrq_data[qidx]);
	}

	/* Unset NVMET CQ Set complete queue */
	if (phba->sli4_hba.nvmet_cqset) {
		for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
			lpfc_cq_destroy(phba,
					phba->sli4_hba.nvmet_cqset[qidx]);
	}

	/* Unset FCP response complete queue */
	if (phba->sli4_hba.fcp_cq)
		for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
			lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[qidx]);

	/* Unset fast-path event queue */
	if (phba->sli4_hba.hba_eq)
		for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
			lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[qidx]);
}

/**
 * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate and set up a pool of completion queue
 * events. The body of the completion queue event is a completion queue entry
 * (CQE). For now, this pool is used for the interrupt service routine to
 * queue the following HBA completion queue events for the worker thread to
 * process:
 *   - Mailbox asynchronous events
 *   - Receive queue completion unsolicited events
 * Later, this can be used for all the slow-path events.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/
static int
lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	int i;

	for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
		cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
		if (!cq_event)
			goto out_pool_create_fail;
		list_add_tail(&cq_event->list,
			      &phba->sli4_hba.sp_cqe_event_pool);
	}
	return 0;

out_pool_create_fail:
	lpfc_sli4_cq_event_pool_destroy(phba);
	return -ENOMEM;
}

/**
 * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to free the pool of completion queue events at
 * driver unload time. Note that it is the responsibility of the driver
 * cleanup routine to release all outstanding completion-queue events
 * allocated from this pool back into the pool before invoking this routine
 * to destroy the pool.
 **/
static void
lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event, *next_cq_event;

	list_for_each_entry_safe(cq_event, next_cq_event,
				 &phba->sli4_hba.sp_cqe_event_pool, list) {
		list_del(&cq_event->list);
		kfree(cq_event);
	}
}

/**
 * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the lock-free version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event = NULL;

	list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
			 struct lpfc_cq_event, list);
	return cq_event;
}

/**
 * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is the locked version of the API invoked to allocate a
 * completion-queue event from the free pool.
 *
 * Return: Pointer to the newly allocated completion-queue event if successful
 *         NULL otherwise.
 **/
struct lpfc_cq_event *
lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
{
	struct lpfc_cq_event *cq_event;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	cq_event = __lpfc_sli4_cq_event_alloc(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return cq_event;
}

/**
 * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the lock-free version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			     struct lpfc_cq_event *cq_event)
{
	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
}

/**
 * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
 * @phba: pointer to lpfc hba data structure.
 * @cq_event: pointer to the completion queue event to be freed.
 *
 * This routine is the locked version of the API invoked to release a
 * completion-queue event back into the free pool.
 **/
void
lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
			   struct lpfc_cq_event *cq_event)
{
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli4_cq_event_release(phba, cq_event);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/**
 * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine releases all the pending completion-queue events back into
 * the free pool for device reset.
 **/
static void
lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
{
	LIST_HEAD(cqelist);
	struct lpfc_cq_event *cqe;
	unsigned long iflags;

	/* Retrieve all the pending WCQEs from pending WCQE lists */
	spin_lock_irqsave(&phba->hbalock, iflags);

	/* Pending FCP XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
			 &cqelist);

	/* Pending ELS XRI abort events */
	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
			 &cqelist);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		/* Pending NVME XRI abort events */
		list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
				 &cqelist);
	}

	/* Pending async events */
	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
			 &cqelist);
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	while (!list_empty(&cqelist)) {
		list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
		lpfc_sli4_cq_event_release(phba, cqe);
	}
}

/**
 * lpfc_pci_function_reset - Reset pci function.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to request a PCI function reset. It destroys all
 * resources assigned to the PCI function which originates this request.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_pci_function_reset(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mboxq;
	uint32_t rc = 0, if_type;
	uint32_t shdr_status, shdr_add_status;
	uint32_t rdy_chk;
	uint32_t port_reset = 0;
	union lpfc_sli4_cfg_shdr *shdr;
	struct lpfc_register reg_data;
	uint16_t devid;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						       GFP_KERNEL);
		if (!mboxq) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0494 Unable to allocate memory for "
					"issuing SLI_FUNCTION_RESET mailbox "
					"command\n");
			return -ENOMEM;
		}

		/* Setup PCI function reset mailbox-ioctl command */
		lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
				 LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
				 LPFC_SLI4_MBX_EMBED);
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
		shdr = (union lpfc_sli4_cfg_shdr *)
			&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
		shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
		shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
					 &shdr->response);
		if (rc != MBX_TIMEOUT)
			mempool_free(mboxq, phba->mbox_mem_pool);
		if (shdr_status || shdr_add_status || rc) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0495 SLI_FUNCTION_RESET mailbox "
					"failed with status x%x add_status x%x,"
					" mbx status x%x\n",
					shdr_status, shdr_add_status, rc);
			rc = -ENXIO;
		}
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
wait:
		/*
		 * Poll the Port Status Register and wait for RDY for
		 * up to 30 seconds. If the port doesn't respond, treat
		 * it as an error.
		 */
		for (rdy_chk = 0; rdy_chk < 1500; rdy_chk++) {
			if (lpfc_readl(phba->sli4_hba.u.if_type2.
				STATUSregaddr, &reg_data.word0)) {
				rc = -ENODEV;
				goto out;
			}
			if (bf_get(lpfc_sliport_status_rdy, &reg_data))
				break;
			msleep(20);
		}

		if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
			phba->work_status[0] = readl(
				phba->sli4_hba.u.if_type2.ERR1regaddr);
			phba->work_status[1] = readl(
				phba->sli4_hba.u.if_type2.ERR2regaddr);
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"2890 Port not ready, port status reg "
					"0x%x error 1=0x%x, error 2=0x%x\n",
					reg_data.word0,
					phba->work_status[0],
					phba->work_status[1]);
			rc = -ENODEV;
			goto out;
		}

		if (!port_reset) {
			/*
			 * Reset the port now
			 */
			reg_data.word0 = 0;
			bf_set(lpfc_sliport_ctrl_end, &reg_data,
			       LPFC_SLIPORT_LITTLE_ENDIAN);
			bf_set(lpfc_sliport_ctrl_ip, &reg_data,
			       LPFC_SLIPORT_INIT_PORT);
			writel(reg_data.word0, phba->sli4_hba.u.if_type2.
			       CTRLregaddr);
			/* flush */
			pci_read_config_word(phba->pcidev,
					     PCI_DEVICE_ID, &devid);

			port_reset = 1;
			msleep(20);
			goto wait;
		} else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
			rc = -ENODEV;
			goto out;
		}
		break;

	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		break;
	}

out:
	/* Catch the not-ready port failure after a port reset. */
	if (rc) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3317 HBA not functional: IP Reset Failed "
				"try: echo fw_reset > board_mode\n");
		rc = -ENODEV;
	}

	return rc;
}

/**
 * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up the PCI device memory space for device
 * with SLI-4 interface spec.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
{
	struct pci_dev *pdev;
	unsigned long bar0map_len, bar1map_len, bar2map_len;
	int error = -ENODEV;
	uint32_t if_type;

	/* Obtain PCI device reference */
	if (!phba->pcidev)
		return error;
	else
		pdev = phba->pcidev;

	/* Set the device DMA mask size */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0
	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0
		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			return error;
		}
	}

	/*
	 * The BARs and register set definitions and offset locations are
	 * dependent on the if_type.
	 */
	if (pci_read_config_dword(pdev, LPFC_SLI_INTF,
				  &phba->sli4_hba.sli_intf.word0)) {
		return error;
	}

	/* There is no SLI3 failback for SLI4 devices. */
	if (bf_get(lpfc_sli_intf_valid, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_VALID) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2894 SLI_INTF reg contents invalid "
				"sli_intf reg 0x%x\n",
				phba->sli4_hba.sli_intf.word0);
		return error;
	}

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);

	/*
	 * Get the bus address of SLI4 device Bar regions and the
	 * number of bytes required by each mapping. The mapping of the
	 * particular PCI BARs regions is dependent on the type of
	 * SLI4 device.
	 */
	if (pci_resource_start(pdev, PCI_64BIT_BAR0)) {
		phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
		bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);

		/*
		 * Map SLI4 PCI Config Space Register base to a kernel virtual
		 * addr
		 */
		phba->sli4_hba.conf_regs_memmap_p =
			ioremap(phba->pci_bar0_map, bar0map_len);
		if (!phba->sli4_hba.conf_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 PCI config "
				   "registers.\n");
			goto out;
		}
		phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p;
		/* Set up BAR0 PCI config space register memory map */
		lpfc_sli4_bar0_register_memmap(phba, if_type);
	} else {
		phba->pci_bar0_map = pci_resource_start(pdev, 1);
		bar0map_len = pci_resource_len(pdev, 1);
		if (if_type == LPFC_SLI_INTF_IF_TYPE_2) {
			dev_printk(KERN_ERR, &pdev->dev,
			   "FATAL - No BAR0 mapping for SLI4, if_type 2\n");
			goto out;
		}
		phba->sli4_hba.conf_regs_memmap_p =
			ioremap(phba->pci_bar0_map, bar0map_len);
		if (!phba->sli4_hba.conf_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
				   "ioremap failed for SLI4 PCI config "
				   "registers.\n");
			goto out;
		}
		lpfc_sli4_bar0_register_memmap(phba, if_type);
	}

	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
	    (pci_resource_start(pdev, PCI_64BIT_BAR2))) {
		/*
		 * Map SLI4 if type 0 HBA Control Register base to a kernel
		 * virtual address and setup the registers.
		 */
		phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
		bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
		phba->sli4_hba.ctrl_regs_memmap_p =
			ioremap(phba->pci_bar1_map, bar1map_len);
		if (!phba->sli4_hba.ctrl_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLI4 HBA control registers.\n");
			goto out_iounmap_conf;
		}
		phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p;
		lpfc_sli4_bar1_register_memmap(phba);
	}

	if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) &&
	    (pci_resource_start(pdev, PCI_64BIT_BAR4))) {
		/*
		 * Map SLI4 if type 0 HBA Doorbell Register base to a kernel
		 * virtual address and setup the registers.
		 */
		phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
		bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
		phba->sli4_hba.drbl_regs_memmap_p =
			ioremap(phba->pci_bar2_map, bar2map_len);
		if (!phba->sli4_hba.drbl_regs_memmap_p) {
			dev_printk(KERN_ERR, &pdev->dev,
			   "ioremap failed for SLI4 HBA doorbell registers.\n");
			goto out_iounmap_ctrl;
		}
		phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p;
		error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
		if (error)
			goto out_iounmap_all;
	}

	return 0;

out_iounmap_all:
	iounmap(phba->sli4_hba.drbl_regs_memmap_p);
out_iounmap_ctrl:
	iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
out_iounmap_conf:
	iounmap(phba->sli4_hba.conf_regs_memmap_p);
out:
	return error;
}

/**
 * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the PCI device memory space for device
 * with SLI-4 interface spec.
 **/
static void
lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
{
	uint32_t if_type;

	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);

	switch (if_type) {
	case LPFC_SLI_INTF_IF_TYPE_0:
		iounmap(phba->sli4_hba.drbl_regs_memmap_p);
		iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_2:
		iounmap(phba->sli4_hba.conf_regs_memmap_p);
		break;
	case LPFC_SLI_INTF_IF_TYPE_1:
	default:
		dev_printk(KERN_ERR, &phba->pcidev->dev,
			   "FATAL - unsupported SLI4 interface type - %d\n",
			   if_type);
		break;
	}
}

/**
 * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-3 interface specs.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli_enable_msix(struct lpfc_hba *phba)
{
	int rc;
	LPFC_MBOXQ_t *pmb;

	/* Set up MSI-X multi-message vectors */
	rc = pci_alloc_irq_vectors(phba->pcidev,
			LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
	if (rc < 0) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0420 PCI enable MSI-X failed (%d)\n", rc);
		goto vec_fail_out;
	}

	/*
	 * Assign MSI-X vectors to interrupt handlers
	 */

	/* vector-0 is associated to slow-path handler */
	rc = request_irq(pci_irq_vector(phba->pcidev, 0),
			 &lpfc_sli_sp_intr_handler, 0,
			 LPFC_SP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0421 MSI-X slow-path request_irq failed "
				"(%d)\n", rc);
		goto msi_fail_out;
	}

	/* vector-1 is associated to fast-path handler */
	rc = request_irq(pci_irq_vector(phba->pcidev, 1),
			 &lpfc_sli_fp_intr_handler, 0,
			 LPFC_FP_DRIVER_HANDLER_NAME, phba);
	if (rc) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0429 MSI-X fast-path request_irq failed "
				"(%d)\n", rc);
		goto irq_fail_out;
	}

	/*
	 * Configure HBA MSI-X attention conditions to messages
	 */
	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		rc = -ENOMEM;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0474 Unable to allocate memory for issuing "
				"MBOX_CONFIG_MSI command\n");
		goto mem_fail_out;
	}
	rc = lpfc_config_msi(phba, pmb);
	if (rc)
		goto mbx_fail_out;
	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
	if (rc != MBX_SUCCESS) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
				"0351 Config MSI mailbox command failed, "
				"mbxCmd x%x, mbxStatus x%x\n",
				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
		goto mbx_fail_out;
	}

	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;

mbx_fail_out:
	/* Free memory allocated for mailbox command */
	mempool_free(pmb, phba->mbox_mem_pool);

mem_fail_out:
	/* free the irq already requested */
	free_irq(pci_irq_vector(phba->pcidev, 1), phba);

irq_fail_out:
	/* free the irq already requested */
	free_irq(pci_irq_vector(phba->pcidev, 0), phba);

msi_fail_out:
	/* Unconfigure MSI-X capability structure */
	pci_free_irq_vectors(phba->pcidev);

vec_fail_out:
	return rc;
}

/**
 * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
 * enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 */
static int
lpfc_sli_enable_msi(struct lpfc_hba *phba)
{
	int rc;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0462 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0471 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
			 0, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0478 MSI request_irq failed (%d)\n", rc);
	}
	return rc;
}

/**
 * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: interrupt mode configured for the driver.
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-3 interface
 * spec. Depending on the interrupt mode configured for the driver, it will
 * try to fall back from the configured interrupt mode to an interrupt mode
 * which is supported by the platform, kernel, and device, in the order of:
 * MSI-X -> MSI -> IRQ.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static uint32_t
lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
{
	uint32_t intr_mode = LPFC_INTR_ERROR;
	int retval;

	if (cfg_mode == 2) {
		/* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
		retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
		if (!retval) {
			/* Now, try to enable MSI-X interrupt mode */
			retval = lpfc_sli_enable_msix(phba);
			if (!retval) {
				/* Indicate initialization to MSI-X mode */
				phba->intr_type = MSIX;
				intr_mode = 2;
			}
		}
	}

	/* Fallback to MSI if MSI-X initialization failed */
	if (cfg_mode >= 1 && phba->intr_type == NONE) {
		retval = lpfc_sli_enable_msi(phba);
		if (!retval) {
			/* Indicate initialization to MSI mode */
			phba->intr_type = MSI;
			intr_mode = 1;
		}
	}

	/* Fallback to INTx if both MSI-X/MSI initialization failed */
	if (phba->intr_type == NONE) {
		retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
				     IRQF_SHARED, LPFC_DRIVER_NAME, phba);
		if (!retval) {
			/* Indicate initialization to INTx mode */
			phba->intr_type = INTx;
			intr_mode = 0;
		}
	}
	return intr_mode;
}

/**
 * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate the
 * driver's interrupt handler(s) from interrupt vector(s) to device with
 * SLI-3 interface spec. Depending on the interrupt mode, the driver will
 * release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli_disable_intr(struct lpfc_hba *phba)
{
	int nr_irqs, i;

	if (phba->intr_type == MSIX)
		nr_irqs = LPFC_MSIX_VECTORS;
	else
		nr_irqs = 1;

	for (i = 0; i < nr_irqs; i++)
		free_irq(pci_irq_vector(phba->pcidev, i), phba);
	pci_free_irq_vectors(phba->pcidev);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;
}

/**
 * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
 * @phba: pointer to lpfc hba data structure.
 * @vectors: number of msix vectors allocated.
 *
 * The routine will figure out the CPU affinity assignment for every
 * MSI-X vector allocated for the HBA. The hba_eq_hdl will be updated
 * with a pointer to the CPU mask that defines ALL the CPUs this vector
 * can be associated with. If the vector can be uniquely associated with
 * a single CPU, that CPU will be recorded in hba_eq_hdl[index].cpu.
 * In addition, the CPU to IO channel mapping will be calculated
 * and the phba->sli4_hba.cpu_map array will reflect this.
 */
static void
lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
{
	struct lpfc_vector_map_info *cpup;
	int index = 0;
	int vec = 0;
	int cpu;
#ifdef CONFIG_X86
	struct cpuinfo_x86 *cpuinfo;
#endif

	/* Init cpu_map array */
	memset(phba->sli4_hba.cpu_map, 0xff,
	       (sizeof(struct lpfc_vector_map_info) *
	       phba->sli4_hba.num_present_cpu));

	/* Update CPU map with physical id and core id of each CPU */
	cpup = phba->sli4_hba.cpu_map;
	for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
#ifdef CONFIG_X86
		cpuinfo = &cpu_data(cpu);
		cpup->phys_id = cpuinfo->phys_proc_id;
		cpup->core_id = cpuinfo->cpu_core_id;
#else
		/* No distinction between CPUs for other platforms */
		cpup->phys_id = 0;
		cpup->core_id = 0;
#endif
		cpup->channel_id = index;	/* For now round robin */
		cpup->irq = pci_irq_vector(phba->pcidev, vec);
		vec++;
		if (vec >= vectors)
			vec = 0;
		index++;
		if (index >= phba->cfg_fcp_io_channel)
			index = 0;
		cpup++;
	}
}

/**
 * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI-X interrupt vectors to device
 * with SLI-4 interface spec.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli4_enable_msix(struct lpfc_hba *phba)
{
	int vectors, rc, index;
	char *name;

	/* Set up MSI-X multi-message vectors */
	vectors = phba->io_channel_irqs;
	if (phba->cfg_fof)
		vectors++;

	rc = pci_alloc_irq_vectors(phba->pcidev,
				(phba->nvmet_support) ? 1 : 2,
				vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
	if (rc < 0) {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0484 PCI enable MSI-X failed (%d)\n", rc);
		goto vec_fail_out;
	}
	vectors = rc;

	/* Assign MSI-X vectors to interrupt handlers */
	for (index = 0; index < vectors; index++) {
		name = phba->sli4_hba.hba_eq_hdl[index].handler_name;
		memset(name, 0, LPFC_SLI4_HANDLER_NAME_SZ);
		snprintf(name, LPFC_SLI4_HANDLER_NAME_SZ,
			 LPFC_DRIVER_HANDLER_NAME"%d", index);

		phba->sli4_hba.hba_eq_hdl[index].idx = index;
		phba->sli4_hba.hba_eq_hdl[index].phba = phba;
		atomic_set(&phba->sli4_hba.hba_eq_hdl[index].hba_eq_in_use, 1);
		if (phba->cfg_fof && (index == (vectors - 1)))
			rc = request_irq(pci_irq_vector(phba->pcidev, index),
				 &lpfc_sli4_fof_intr_handler, 0,
				 name,
				 &phba->sli4_hba.hba_eq_hdl[index]);
		else
			rc = request_irq(pci_irq_vector(phba->pcidev, index),
				 &lpfc_sli4_hba_intr_handler, 0,
				 name,
				 &phba->sli4_hba.hba_eq_hdl[index]);
		if (rc) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
					"0486 MSI-X fast-path (%d) "
					"request_irq failed (%d)\n", index, rc);
			goto cfg_fail_out;
		}
	}

	if (phba->cfg_fof)
		vectors--;

	if (vectors != phba->io_channel_irqs) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3238 Reducing IO channels to match number of "
				"MSI-X vectors, requested %d got %d\n",
				phba->io_channel_irqs, vectors);
		if (phba->cfg_fcp_io_channel > vectors)
			phba->cfg_fcp_io_channel = vectors;
		if (phba->cfg_nvme_io_channel > vectors)
			phba->cfg_nvme_io_channel = vectors;
		if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
			phba->io_channel_irqs = phba->cfg_fcp_io_channel;
		else
			phba->io_channel_irqs = phba->cfg_nvme_io_channel;
	}
	lpfc_cpu_affinity_check(phba, vectors);

	return rc;

cfg_fail_out:
	/* free the irq already requested */
	for (--index; index >= 0; index--)
		free_irq(pci_irq_vector(phba->pcidev, index),
			 &phba->sli4_hba.hba_eq_hdl[index]);

	/* Unconfigure MSI-X capability structure */
	pci_free_irq_vectors(phba->pcidev);

vec_fail_out:
	return rc;
}

/**
 * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to enable the MSI interrupt mode to device with
 * SLI-4 interface spec. The kernel function pci_enable_msi() is called
 * to enable the MSI vector. The device driver is responsible for calling
 * request_irq() to register the MSI vector with an interrupt handler,
 * which is done in this function.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
static int
lpfc_sli4_enable_msi(struct lpfc_hba *phba)
{
	int rc, index;

	rc = pci_enable_msi(phba->pcidev);
	if (!rc)
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0487 PCI enable MSI mode success.\n");
	else {
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
				"0488 PCI enable MSI mode failed (%d)\n", rc);
		return rc;
	}

	rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
			 0, LPFC_DRIVER_NAME, phba);
	if (rc) {
		pci_disable_msi(phba->pcidev);
		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
				"0490 MSI request_irq failed (%d)\n", rc);
		return rc;
	}

	for (index = 0; index < phba->io_channel_irqs; index++) {
		phba->sli4_hba.hba_eq_hdl[index].idx = index;
		phba->sli4_hba.hba_eq_hdl[index].phba = phba;
	}

	if (phba->cfg_fof) {
		phba->sli4_hba.hba_eq_hdl[index].idx = index;
		phba->sli4_hba.hba_eq_hdl[index].phba = phba;
	}
	return 0;
}

/**
 * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 * @cfg_mode: interrupt mode configured for the driver.
 *
 * This routine is invoked to enable device interrupt and associate driver's
 * interrupt handler(s) to interrupt vector(s) to device with SLI-4
 * interface spec. Depending on the interrupt mode configured for the
 * driver, it will try to fall back from the configured interrupt mode to
 * an interrupt mode which is supported by the platform, kernel, and
 * device, in the order of:
 * MSI-X -> MSI -> IRQ.
 *
 * Return codes
 *	0 - successful
 *	other values - error
 **/
  9015. static uint32_t
  9016. lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
  9017. {
  9018. uint32_t intr_mode = LPFC_INTR_ERROR;
  9019. int retval, idx;
  9020. if (cfg_mode == 2) {
  9021. /* Preparation before conf_msi mbox cmd */
  9022. retval = 0;
  9023. if (!retval) {
  9024. /* Now, try to enable MSI-X interrupt mode */
  9025. retval = lpfc_sli4_enable_msix(phba);
  9026. if (!retval) {
  9027. /* Indicate initialization to MSI-X mode */
  9028. phba->intr_type = MSIX;
  9029. intr_mode = 2;
  9030. }
  9031. }
  9032. }
  9033. /* Fallback to MSI if MSI-X initialization failed */
  9034. if (cfg_mode >= 1 && phba->intr_type == NONE) {
  9035. retval = lpfc_sli4_enable_msi(phba);
  9036. if (!retval) {
  9037. /* Indicate initialization to MSI mode */
  9038. phba->intr_type = MSI;
  9039. intr_mode = 1;
  9040. }
  9041. }
  9042. /* Fallback to INTx if both MSI-X/MSI initalization failed */
  9043. if (phba->intr_type == NONE) {
  9044. retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
  9045. IRQF_SHARED, LPFC_DRIVER_NAME, phba);
  9046. if (!retval) {
  9047. struct lpfc_hba_eq_hdl *eqhdl;
  9048. /* Indicate initialization to INTx mode */
  9049. phba->intr_type = INTx;
  9050. intr_mode = 0;
  9051. for (idx = 0; idx < phba->io_channel_irqs; idx++) {
  9052. eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
  9053. eqhdl->idx = idx;
  9054. eqhdl->phba = phba;
  9055. atomic_set(&eqhdl->hba_eq_in_use, 1);
  9056. }
  9057. if (phba->cfg_fof) {
  9058. eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
  9059. eqhdl->idx = idx;
  9060. eqhdl->phba = phba;
  9061. atomic_set(&eqhdl->hba_eq_in_use, 1);
  9062. }
  9063. }
  9064. }
  9065. return intr_mode;
  9066. }
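/*
 * Editor's note: on current kernels the same MSI-X -> MSI -> INTx
 * fallback can be delegated to the PCI core in a single call. This is a
 * hedged sketch of that alternative, not what this driver does; foo_ is
 * hypothetical.
 */
#if 0	/* illustrative only */
static int foo_enable_intr(struct pci_dev *pdev, unsigned int nvec)
{
	/*
	 * PCI_IRQ_ALL_TYPES lets the core try MSI-X, then MSI, then
	 * legacy INTx, mirroring the manual fallback above. Returns the
	 * number of vectors allocated, or a negative errno.
	 */
	return pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_ALL_TYPES);
}
#endif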
/**
 * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to disable device interrupt and disassociate
 * the driver's interrupt handler(s) from interrupt vector(s) to device
 * with SLI-4 interface spec. Depending on the interrupt mode, the driver
 * will release the interrupt vector(s) for the message signaled interrupt.
 **/
static void
lpfc_sli4_disable_intr(struct lpfc_hba *phba)
{
	/* Disable the currently initialized interrupt mode */
	if (phba->intr_type == MSIX) {
		int index;

		/* Free up MSI-X multi-message vectors */
		for (index = 0; index < phba->io_channel_irqs; index++)
			free_irq(pci_irq_vector(phba->pcidev, index),
				 &phba->sli4_hba.hba_eq_hdl[index]);

		if (phba->cfg_fof)
			free_irq(pci_irq_vector(phba->pcidev, index),
				 &phba->sli4_hba.hba_eq_hdl[index]);
	} else {
		free_irq(phba->pcidev->irq, phba);
	}

	pci_free_irq_vectors(phba->pcidev);

	/* Reset interrupt management states */
	phba->intr_type = NONE;
	phba->sli.slistat.sli_intr = 0;
}
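/*
 * Editor's note: the ordering above matters in general -- every
 * request_irq() must be balanced by a free_irq() before
 * pci_free_irq_vectors() tears the vectors down. A hedged sketch of the
 * generic teardown, with hypothetical foo_ names:
 */
#if 0	/* illustrative only */
static void foo_disable_intr(struct pci_dev *pdev, void **dev_ids, int n)
{
	int i;

	for (i = 0; i < n; i++)
		free_irq(pci_irq_vector(pdev, i), dev_ids[i]);
	pci_free_irq_vectors(pdev);	/* only after all handlers freed */
}
#endif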
/**
 * lpfc_unset_hba - Unset SLI3 hba device initialization
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to unset the HBA device initialization steps to
 * a device with SLI-3 interface spec.
 **/
static void
lpfc_unset_hba(struct lpfc_hba *phba)
{
	struct lpfc_vport *vport = phba->pport;
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);

	spin_lock_irq(shost->host_lock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(shost->host_lock);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);

	phba->pport->work_port_events = 0;

	lpfc_sli_hba_down(phba);

	lpfc_sli_brdrestart(phba);

	lpfc_sli_disable_intr(phba);
}
/**
 * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to wait for completion
 * of the device's XRI exchange busy. It checks the XRI exchange busy
 * on outstanding FCP and ELS I/Os every 10ms for up to 10 seconds; after
 * that, it checks every 30 seconds and logs an error message, waiting
 * indefinitely. Only when all XRI exchange busy have completed does the
 * driver unload proceed with issuing the function reset mailbox command
 * to the CNA and releasing the rest of the driver unload resources.
 **/
static void
lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
{
	int wait_time = 0;
	int nvme_xri_cmpl = 1;
	int nvmet_xri_cmpl = 1;
	int fcp_xri_cmpl = 1;
	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);

	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
		fcp_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
		nvme_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
		nvmet_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
	}

	while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl ||
	       !nvmet_xri_cmpl) {
		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
			if (!nvme_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"6100 NVME XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			if (!fcp_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"2877 FCP XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			if (!els_xri_cmpl)
				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
						"2878 ELS XRI exchange busy "
						"wait time: %d seconds.\n",
						wait_time/1000);
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
		} else {
			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
		}
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			nvme_xri_cmpl = list_empty(
				&phba->sli4_hba.lpfc_abts_nvme_buf_list);
			nvmet_xri_cmpl = list_empty(
				&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
		}
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
			fcp_xri_cmpl = list_empty(
				&phba->sli4_hba.lpfc_abts_scsi_buf_list);
		els_xri_cmpl =
			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
	}
}
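/*
 * Editor's note: the loop above is a two-phase poll: a short interval
 * (T1) until an overall threshold is crossed, then a long interval (T2)
 * with periodic error logging, waiting indefinitely. A generic sketch of
 * the pattern; the FOO_* constants and callback are hypothetical.
 */
#if 0	/* illustrative only */
static void foo_wait_two_phase(bool (*done)(void *), void *arg)
{
	int wait_ms = 0;

	while (!done(arg)) {
		if (wait_ms > FOO_WAIT_TMO_MS) {
			pr_err("foo: still busy after %d s\n",
			       wait_ms / 1000);
			msleep(FOO_T2_MS);	/* slow poll, keep logging */
			wait_ms += FOO_T2_MS;
		} else {
			msleep(FOO_T1_MS);	/* fast poll at first */
			wait_ms += FOO_T1_MS;
		}
	}
}
#endif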
/**
 * lpfc_sli4_hba_unset - Unset the fcoe hba
 * @phba: Pointer to HBA context object.
 *
 * This function is called in the SLI4 code path to reset the HBA's FCoE
 * function. The caller is not required to hold any lock. This routine
 * issues the PCI function reset mailbox command to reset the FCoE function.
 * At the end of the function, it calls the lpfc_hba_down_post function to
 * free any pending commands.
 **/
static void
lpfc_sli4_hba_unset(struct lpfc_hba *phba)
{
	int wait_cnt = 0;
	LPFC_MBOXQ_t *mboxq;
	struct pci_dev *pdev = phba->pcidev;

	lpfc_stop_hba_timers(phba);
	phba->sli4_hba.intr_enable = 0;

	/*
	 * Gracefully wait out the potential current outstanding asynchronous
	 * mailbox command.
	 */

	/* First, block any pending async mailbox command from being posted */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
	spin_unlock_irq(&phba->hbalock);

	/* Now, try to wait it out if we can */
	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		msleep(10);
		if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
			break;
	}

	/* Forcefully release the outstanding mailbox command if timed out */
	if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		spin_lock_irq(&phba->hbalock);
		mboxq = phba->sli.mbox_active;
		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
		__lpfc_mbox_cmpl_put(phba, mboxq);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		phba->sli.mbox_active = NULL;
		spin_unlock_irq(&phba->hbalock);
	}

	/* Abort all iocbs associated with the hba */
	lpfc_sli_hba_iocb_abort(phba);

	/* Wait for completion of device XRI exchange busy */
	lpfc_sli4_xri_exchange_busy_wait(phba);

	/* Disable PCI subsystem interrupt */
	lpfc_sli4_disable_intr(phba);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);

	/* Unset the queues shared with the hardware then release all
	 * allocated resources.
	 */
	lpfc_sli4_queue_unset(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Reset SLI4 HBA FCoE function */
	lpfc_pci_function_reset(phba);

	/* Stop the SLI4 device port */
	phba->pport->work_port_events = 0;
}
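/*
 * Editor's note: the mailbox quiesce above is a common block/drain/force
 * pattern -- set a flag so nothing new posts, poll for the active command
 * to finish, then force-complete it on timeout. Condensed from the code
 * above for reference (same driver fields, not new logic):
 */
#if 0	/* illustrative only, mirrors the routine above */
	spin_lock_irq(&phba->hbalock);
	phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;	/* 1. block */
	spin_unlock_irq(&phba->hbalock);

	while ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) &&
	       ++wait_cnt <= LPFC_ACTIVE_MBOX_WAIT_CNT)
		msleep(10);				/* 2. drain */

	/* 3. on timeout, complete the stuck command with an error status */
#endif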
/**
 * lpfc_pc_sli4_params_get - Get the SLI4_PARAMS port capabilities.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion. The expectation is that this routine is called
 * typically from probe_one or from the online routine.
 **/
int
lpfc_pc_sli4_params_get(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;

	rc = 0;
	mqe = &mboxq->u.mqe;

	/* Read the port's SLI4 Parameters port capabilities */
	lpfc_pc_sli4_params(mboxq);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}

	if (unlikely(rc))
		return 1;

	sli4_params = &phba->sli4_hba.pc_sli4_params;
	sli4_params->if_type = bf_get(if_type, &mqe->un.sli4_params);
	sli4_params->sli_rev = bf_get(sli_rev, &mqe->un.sli4_params);
	sli4_params->sli_family = bf_get(sli_family, &mqe->un.sli4_params);
	sli4_params->featurelevel_1 = bf_get(featurelevel_1,
					     &mqe->un.sli4_params);
	sli4_params->featurelevel_2 = bf_get(featurelevel_2,
					     &mqe->un.sli4_params);
	sli4_params->proto_types = mqe->un.sli4_params.word3;
	sli4_params->sge_supp_len = mqe->un.sli4_params.sge_supp_len;
	sli4_params->if_page_sz = bf_get(if_page_sz, &mqe->un.sli4_params);
	sli4_params->rq_db_window = bf_get(rq_db_window, &mqe->un.sli4_params);
	sli4_params->loopbk_scope = bf_get(loopbk_scope, &mqe->un.sli4_params);
	sli4_params->eq_pages_max = bf_get(eq_pages, &mqe->un.sli4_params);
	sli4_params->eqe_size = bf_get(eqe_size, &mqe->un.sli4_params);
	sli4_params->cq_pages_max = bf_get(cq_pages, &mqe->un.sli4_params);
	sli4_params->cqe_size = bf_get(cqe_size, &mqe->un.sli4_params);
	sli4_params->mq_pages_max = bf_get(mq_pages, &mqe->un.sli4_params);
	sli4_params->mqe_size = bf_get(mqe_size, &mqe->un.sli4_params);
	sli4_params->mq_elem_cnt = bf_get(mq_elem_cnt, &mqe->un.sli4_params);
	sli4_params->wq_pages_max = bf_get(wq_pages, &mqe->un.sli4_params);
	sli4_params->wqe_size = bf_get(wqe_size, &mqe->un.sli4_params);
	sli4_params->rq_pages_max = bf_get(rq_pages, &mqe->un.sli4_params);
	sli4_params->rqe_size = bf_get(rqe_size, &mqe->un.sli4_params);
	sli4_params->hdr_pages_max = bf_get(hdr_pages, &mqe->un.sli4_params);
	sli4_params->hdr_size = bf_get(hdr_size, &mqe->un.sli4_params);
	sli4_params->hdr_pp_align = bf_get(hdr_pp_align, &mqe->un.sli4_params);
	sli4_params->sgl_pages_max = bf_get(sgl_pages, &mqe->un.sli4_params);
	sli4_params->sgl_pp_align = bf_get(sgl_pp_align, &mqe->un.sli4_params);

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	return rc;
}
/**
 * lpfc_get_sli4_parameters - Get the SLI4 Config PARAMETERS.
 * @phba: Pointer to HBA context object.
 * @mboxq: Pointer to the mailboxq memory for the mailbox command response.
 *
 * This function is called in the SLI4 code path to read the port's
 * sli4 capabilities.
 *
 * This function may be called from any context that can block-wait
 * for the completion. The expectation is that this routine is called
 * typically from probe_one or from the online routine.
 **/
int
lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;
	struct lpfc_mqe *mqe = &mboxq->u.mqe;
	struct lpfc_pc_sli4_params *sli4_params;
	uint32_t mbox_tmo;
	int length;
	struct lpfc_sli4_parameters *mbx_sli4_parameters;

	/*
	 * By default, the driver assumes the SLI4 port requires RPI
	 * header postings. The SLI4_PARAM response will correct this
	 * assumption.
	 */
	phba->sli4_hba.rpi_hdrs_in_use = 1;

	/* Read the port's SLI4 Config Parameters */
	length = (sizeof(struct lpfc_mbx_get_sli4_parameters) -
		  sizeof(struct lpfc_sli4_cfg_mhdr));
	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
			 LPFC_MBOX_OPCODE_GET_SLI4_PARAMETERS,
			 length, LPFC_SLI4_MBX_EMBED);
	if (!phba->sli4_hba.intr_enable)
		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
	else {
		mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
		rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
	}
	if (unlikely(rc))
		return rc;

	sli4_params = &phba->sli4_hba.pc_sli4_params;
	mbx_sli4_parameters = &mqe->un.get_sli4_parameters.sli4_parameters;
	sli4_params->if_type = bf_get(cfg_if_type, mbx_sli4_parameters);
	sli4_params->sli_rev = bf_get(cfg_sli_rev, mbx_sli4_parameters);
	sli4_params->sli_family = bf_get(cfg_sli_family, mbx_sli4_parameters);
	sli4_params->featurelevel_1 = bf_get(cfg_sli_hint_1,
					     mbx_sli4_parameters);
	sli4_params->featurelevel_2 = bf_get(cfg_sli_hint_2,
					     mbx_sli4_parameters);
	if (bf_get(cfg_phwq, mbx_sli4_parameters))
		phba->sli3_options |= LPFC_SLI4_PHWQ_ENABLED;
	else
		phba->sli3_options &= ~LPFC_SLI4_PHWQ_ENABLED;
	sli4_params->sge_supp_len = mbx_sli4_parameters->sge_supp_len;
	sli4_params->loopbk_scope = bf_get(loopbk_scope, mbx_sli4_parameters);
	sli4_params->oas_supported = bf_get(cfg_oas, mbx_sli4_parameters);
	sli4_params->cqv = bf_get(cfg_cqv, mbx_sli4_parameters);
	sli4_params->mqv = bf_get(cfg_mqv, mbx_sli4_parameters);
	sli4_params->wqv = bf_get(cfg_wqv, mbx_sli4_parameters);
	sli4_params->rqv = bf_get(cfg_rqv, mbx_sli4_parameters);
	sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
	sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
					    mbx_sli4_parameters);
	sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
	sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
					   mbx_sli4_parameters);
	phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
	phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
	phba->nvme_support = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
			      bf_get(cfg_xib, mbx_sli4_parameters));

	if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) ||
	    !phba->nvme_support) {
		phba->nvme_support = 0;
		phba->nvmet_support = 0;
		phba->cfg_nvmet_mrq = 0;
		phba->cfg_nvme_io_channel = 0;
		phba->io_channel_irqs = phba->cfg_fcp_io_channel;
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
				"6101 Disabling NVME support: "
				"Not supported by firmware: %d %d\n",
				bf_get(cfg_nvme, mbx_sli4_parameters),
				bf_get(cfg_xib, mbx_sli4_parameters));

		/* If firmware doesn't support NVME, just use SCSI support */
		if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
			return -ENODEV;
		phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
	}

	if (bf_get(cfg_xib, mbx_sli4_parameters) && phba->cfg_suppress_rsp)
		phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;

	if (bf_get(cfg_eqdr, mbx_sli4_parameters))
		phba->sli.sli_flag |= LPFC_SLI_USE_EQDR;

	/* Make sure that sge_supp_len can be handled by the driver */
	if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
		sli4_params->sge_supp_len = LPFC_MAX_SGE_SIZE;

	/*
	 * Issue IOs with CDB embedded in WQE to minimize the number
	 * of DMAs the firmware has to do. Setting this to 1 also forces
	 * the driver to use 128-byte WQEs for FCP IOs.
	 */
	if (bf_get(cfg_ext_embed_cb, mbx_sli4_parameters))
		phba->fcp_embed_io = 1;
	else
		phba->fcp_embed_io = 0;

	/*
	 * Check if the SLI port supports MDS Diagnostics
	 */
	if (bf_get(cfg_mds_diags, mbx_sli4_parameters))
		phba->mds_diags_support = 1;
	else
		phba->mds_diags_support = 0;

	return 0;
}
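/*
 * Editor's note: the bf_get() accessor used throughout the two routines
 * above is lpfc's bitfield helper from lpfc_hw4.h. It expands, roughly,
 * to a shift-and-mask on the word that holds the named field (paraphrase
 * for reference, not a redefinition):
 */
#if 0	/* paraphrase of the real macro */
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
#endif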
/**
 * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be called to attach a device with SLI-3 interface spec
 * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
 * presented on the PCI bus, the kernel PCI subsystem looks at PCI
 * device-specific information of the device and driver to see if the driver
 * indicates that it can support this kind of device. If the match is
 * successful, the driver core invokes this routine. If this routine
 * determines it can claim the HBA, it does all the initialization that it
 * needs to do to handle the HBA properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-0 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-3 specific device PCI memory space */
	error = lpfc_sli_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1402 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up SLI-3 specific device driver resources */
	error = lpfc_sli_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1404 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s3;
	}

	/* Initialize and populate the iocb list per host */
	error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1405 Failed to initialize iocb list.\n");
		goto out_unset_driver_resource_s3;
	}

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1406 Failed to set up driver resource.\n");
		goto out_free_iocb_list;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1407 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1476 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */

	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;
	while (true) {
		/* Put device to a known state before enabling interrupt */
		lpfc_stop_port(phba);

		/* Configure and enable interrupt */
		intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
		if (intr_mode == LPFC_INTR_ERROR) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0431 Failed to enable interrupt.\n");
			error = -ENODEV;
			goto out_free_sysfs_attr;
		}

		/* SLI-3 HBA setup */
		if (lpfc_sli_hba_setup(phba)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"1477 Failed to set up hba\n");
			error = -ENODEV;
			goto out_remove_device;
		}

		/* Wait 50ms for the interrupts of previous mailbox commands */
		msleep(50);

		/* Check active interrupts on message signaled interrupts */
		if (intr_mode == 0 ||
		    phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
			/* Log the current active interrupt mode */
			phba->intr_mode = intr_mode;
			lpfc_log_intr_mode(phba, intr_mode);
			break;
		} else {
			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
					"0447 Configure interrupt mode (%d) "
					"failed active interrupt test.\n",
					intr_mode);
			/* Disable the current interrupt mode */
			lpfc_sli_disable_intr(phba);
			/* Try next level of interrupt mode */
			cfg_mode = --intr_mode;
		}
	}

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_remove_device:
	lpfc_unset_hba(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_free_iocb_list:
	lpfc_free_iocb_list(phba);
out_unset_driver_resource_s3:
	lpfc_sli_driver_resource_unset(phba);
out_unset_pci_mem_s3:
	lpfc_sli_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
/**
 * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
 * @pdev: pointer to PCI device
 *
 * This routine is to be called to detach a device with SLI-3 interface
 * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec
 * is removed from the PCI bus, it performs all the necessary cleanup for the
 * HBA device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	lpfc_cleanup(vport);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA.
	 */

	/* HBA interrupt will be disabled after this call */
	lpfc_sli_hba_down(phba);
	/* Stop kthread signal shall trigger work_done one more time */
	kthread_stop(phba->worker_thread);
	/* Final cleanup of txcmplq and reset the HBA */
	lpfc_sli_brdrestart(phba);

	kfree(phba->vpi_bmask);
	kfree(phba->vpi_ids);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	lpfc_debugfs_terminate(vport);

	/* Disable SR-IOV if enabled */
	if (phba->cfg_sriov_nr_virtfn)
		pci_disable_sriov(pdev);

	/* Disable interrupt */
	lpfc_sli_disable_intr(phba);

	scsi_host_put(shost);

	/*
	 * Call scsi_free before mem_free since scsi bufs are released to their
	 * corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_mem_free_all(phba);

	dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
			  phba->hbqslimp.virt, phba->hbqslimp.phys);

	/* Free resources associated with SLI2 interface */
	dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
			  phba->slim2p.virt, phba->slim2p.phys);

	/* unmap adapter SLIM and Control Registers */
	iounmap(phba->ctrl_regs_memmap_p);
	iounmap(phba->slim_memmap_p);

	lpfc_hba_free(phba);

	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}
/**
 * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) on a device with SLI-3 interface spec. When
 * PM invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off the device's interrupt and DMA,
 * and bringing the device offline. Note that the driver implements only the
 * minimum PM requirements of a power-aware driver: all possible PM messages
 * (SUSPEND, HIBERNATE, FREEZE) to the suspend() method call are treated as
 * SUSPEND, and the driver fully reinitializes its device during the resume()
 * method call. Therefore the driver sets the device to the PCI_D3hot state
 * in PCI config space instead of setting it according to the @msg provided
 * by the PM.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0473 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli_disable_intr(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
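/*
 * Editor's note: the routine above follows the legacy PCI PM shape --
 * quiesce, disable IRQs, pci_save_state(), then drop to D3hot. A minimal
 * hedged skeleton of that sequence for an arbitrary driver (foo_ names
 * are hypothetical; the device-specific steps are left as comments):
 */
#if 0	/* illustrative only */
static int foo_suspend(struct pci_dev *pdev, pm_message_t msg)
{
	/* 1. stop the device's own activity (threads, DMA, timers, ...) */
	/* 2. disable its interrupts and free/park the handlers */
	pci_save_state(pdev);			/* 3. save config space */
	pci_set_power_state(pdev, PCI_D3hot);	/* 4. power the device down */
	return 0;
}
#endif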
/**
 * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is to be called from the kernel's PCI subsystem to support
 * system Power Management (PM) on a device with SLI-3 interface spec. When
 * PM invokes this method, it restores the device's PCI config space state,
 * fully reinitializes the device, and brings it online. Note that the
 * driver implements only the minimum PM requirements of a power-aware
 * driver: all possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the
 * suspend() method call are treated as SUSPEND, and the driver fully
 * reinitializes its device during the resume() method call. Hence the
 * device is set to PCI_D0 directly in PCI config space before restoring
 * the state.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int
lpfc_pci_resume_one_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0452 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * the device saved_state flag, we need to save the restored state
	 * again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0434 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0430 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}
/**
 * lpfc_sli_prep_dev_for_recover - Prepare SLI3 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot recovery.
 * It aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2723 PCI channel I/O abort preparing for recovery\n");

	/*
	 * There may be errored I/Os through the HBA; abort all I/Os on the
	 * txcmplq and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}
/**
 * lpfc_sli_prep_dev_for_reset - Prepare SLI3 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2710 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding SCSI I/Os as we are about to reset */
	lpfc_sli_flush_fcp_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli_disable_intr(phba);
	pci_disable_device(phba->pcidev);
}
/**
 * lpfc_sli_prep_dev_for_perm_failure - Prepare SLI3 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI3 device for PCI slot permanently
 * disabling. It blocks the SCSI transport layer traffic and flushes the FCP
 * pending I/Os.
 **/
static void
lpfc_sli_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2711 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);
}
/**
 * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for I/O error handling to
 * device with SLI-3 interface spec. This function is called by the PCI
 * subsystem after a PCI bus error affecting this device has been detected.
 * When this function is invoked, it will need to stop all the I/Os and
 * interrupt(s) to the device. Once that is done, it will return
 * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
 * as desired.
 *
 * Return codes
 *	PCI_ERS_RESULT_CAN_RECOVER - can be recovered with reset_link
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0472 Unknown PCI error state: x%x\n", state);
		lpfc_sli_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
/**
 * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to
 * device with SLI-3 interface spec. This is called after the PCI bus has
 * been reset to restart the PCI card from scratch, as if from a cold-boot.
 * During the PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method
 * to recover the device. This function will initialize the HBA device and
 * enable the interrupt, but it will just put the HBA to offline state
 * without passing any I/O traffic.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 */
static pci_ers_result_t
lpfc_io_slot_reset_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
		       "PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * the device saved_state flag, we need to save the restored state
	 * again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0427 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Take device offline, it will perform cleanup */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	lpfc_sli_brdrestart(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-3 interface spec. It is called when the kernel error recovery
 * tells the lpfc driver that it is ok to resume normal PCI operation after
 * PCI bus error recovery. After this call, traffic can start to flow from
 * this device again.
 */
static void
lpfc_io_resume_s3(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/* Bring device online, it will be no-op for non-fatal error resume */
	lpfc_online(phba);

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}
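/*
 * Editor's note: the three routines above implement the kernel's PCI
 * error-recovery callbacks. They are wired into the PCI core through a
 * struct pci_error_handlers hung off struct pci_driver. A hedged sketch
 * of how such a table looks (the real lpfc table lives elsewhere in the
 * driver and dispatches through SLI-rev-agnostic wrappers):
 */
#if 0	/* illustrative only */
static const struct pci_error_handlers foo_err_handler = {
	.error_detected	= lpfc_io_error_detected_s3,
	.slot_reset	= lpfc_io_slot_reset_s3,
	.resume		= lpfc_io_resume_s3,
};
/* then, in the driver registration:
 * static struct pci_driver foo_driver = {
 *	.err_handler = &foo_err_handler,
 *	...
 * };
 */
#endif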
/**
 * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns the number of ELS/CT IOCBs to reserve.
 **/
int
lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = phba->sli4_hba.max_cfg_param.max_xri;

	if (phba->sli_rev == LPFC_SLI_REV4) {
		if (max_xri <= 100)
			return 10;
		else if (max_xri <= 256)
			return 25;
		else if (max_xri <= 512)
			return 50;
		else if (max_xri <= 1024)
			return 100;
		else if (max_xri <= 1536)
			return 150;
		else if (max_xri <= 2048)
			return 200;
		else
			return 250;
	} else
		return 0;
}
/**
 * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
 * @phba: pointer to lpfc hba data structure.
 *
 * Returns the number of ELS/CT + NVMET IOCBs to reserve.
 **/
int
lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
{
	int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);

	if (phba->nvmet_support)
		max_xri += LPFC_NVMET_BUF_POST;
	return max_xri;
}
/**
 * lpfc_write_firmware - attempt to write a firmware image to the port
 * @fw: pointer to firmware image returned from request_firmware.
 * @context: pointer to lpfc hba data structure (passed in as a void *).
 *
 **/
static void
lpfc_write_firmware(const struct firmware *fw, void *context)
{
	struct lpfc_hba *phba = (struct lpfc_hba *)context;
	char fwrev[FW_REV_STR_SIZE];
	struct lpfc_grp_hdr *image;
	struct list_head dma_buffer_list;
	int i, rc = 0;
	struct lpfc_dmabuf *dmabuf, *next;
	uint32_t offset = 0, temp_offset = 0;
	uint32_t magic_number, ftype, fid, fsize;

	/* It can be NULL in no-wait mode; sanity check */
	if (!fw) {
		rc = -ENXIO;
		goto out;
	}
	image = (struct lpfc_grp_hdr *)fw->data;

	magic_number = be32_to_cpu(image->magic_number);
	ftype = bf_get_be32(lpfc_grp_hdr_file_type, image);
	fid = bf_get_be32(lpfc_grp_hdr_id, image);
	fsize = be32_to_cpu(image->size);

	INIT_LIST_HEAD(&dma_buffer_list);
	if ((magic_number != LPFC_GROUP_OJECT_MAGIC_G5 &&
	     magic_number != LPFC_GROUP_OJECT_MAGIC_G6) ||
	    ftype != LPFC_FILE_TYPE_GROUP || fsize != fw->size) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3022 Invalid FW image found. "
				"Magic:%x Type:%x ID:%x Size %d %zd\n",
				magic_number, ftype, fid, fsize, fw->size);
		rc = -EINVAL;
		goto release_out;
	}
	lpfc_decode_firmware_rev(phba, fwrev, 1);
	if (strncmp(fwrev, image->revision, strnlen(image->revision, 16))) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"3023 Updating Firmware, Current Version:%s "
				"New Version:%s\n",
				fwrev, image->revision);
		for (i = 0; i < LPFC_MBX_WR_CONFIG_MAX_BDE; i++) {
			dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
					 GFP_KERNEL);
			if (!dmabuf) {
				rc = -ENOMEM;
				goto release_out;
			}
			dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
							  SLI4_PAGE_SIZE,
							  &dmabuf->phys,
							  GFP_KERNEL);
			if (!dmabuf->virt) {
				kfree(dmabuf);
				rc = -ENOMEM;
				goto release_out;
			}
			list_add_tail(&dmabuf->list, &dma_buffer_list);
		}
		while (offset < fw->size) {
			temp_offset = offset;
			list_for_each_entry(dmabuf, &dma_buffer_list, list) {
				if (temp_offset + SLI4_PAGE_SIZE > fw->size) {
					memcpy(dmabuf->virt,
					       fw->data + temp_offset,
					       fw->size - temp_offset);
					temp_offset = fw->size;
					break;
				}
				memcpy(dmabuf->virt, fw->data + temp_offset,
				       SLI4_PAGE_SIZE);
				temp_offset += SLI4_PAGE_SIZE;
			}
			rc = lpfc_wr_object(phba, &dma_buffer_list,
					    (fw->size - offset), &offset);
			if (rc)
				goto release_out;
		}
		rc = offset;
	}

release_out:
	list_for_each_entry_safe(dmabuf, next, &dma_buffer_list, list) {
		list_del(&dmabuf->list);
		dma_free_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
				  dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}
	release_firmware(fw);
out:
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"3024 Firmware update done: %d.\n", rc);
}
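/*
 * Editor's note: the download loop above streams the image through a
 * small ring of SLI4_PAGE_SIZE DMA buffers: fill each buffer with
 * min(page, remaining) bytes until the window is exhausted, then hand
 * the window to the hardware, which advances the offset. A hedged
 * sketch of the core pattern with hypothetical names:
 */
#if 0	/* illustrative only */
	size_t off = 0, pos;

	while (off < img_len) {
		pos = off;
		list_for_each_entry(buf, &bufs, list) {
			size_t n = min_t(size_t, PAGE_LEN, img_len - pos);

			memcpy(buf->virt, img + pos, n);
			pos += n;
			if (pos == img_len)
				break;	/* last, partial page */
		}
		/* write the filled window; the device updates 'off' */
		if (foo_wr_object(&bufs, img_len - off, &off))
			break;
	}
#endif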
/**
 * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
 * @phba: pointer to lpfc hba data structure.
 * @fw_upgrade: INT_FW_UPGRADE to request the upgrade asynchronously,
 *              RUN_FW_UPGRADE to run it synchronously.
 *
 * This routine is called to perform a Linux generic firmware upgrade on a
 * device that supports such a feature.
 **/
int
lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
{
	uint8_t file_name[ELX_MODEL_NAME_SIZE];
	int ret;
	const struct firmware *fw;

	/* Only supported on SLI4 interface type 2 for now */
	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
	    LPFC_SLI_INTF_IF_TYPE_2)
		return -EPERM;

	snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);

	if (fw_upgrade == INT_FW_UPGRADE) {
		ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
					      file_name, &phba->pcidev->dev,
					      GFP_KERNEL, (void *)phba,
					      lpfc_write_firmware);
	} else if (fw_upgrade == RUN_FW_UPGRADE) {
		ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
		if (!ret)
			lpfc_write_firmware(fw, (void *)phba);
	} else {
		ret = -EINVAL;
	}

	return ret;
}
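/*
 * Editor's note: the two branches above map to the kernel's two
 * firmware-loading entry points -- asynchronous (the completion callback
 * runs later with the image) versus synchronous (the caller blocks and
 * owns the struct firmware until it is released). Condensed from the
 * routine above:
 */
#if 0	/* illustrative only */
	/* async: lpfc_write_firmware() is invoked later with the image */
	request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, name,
				dev, GFP_KERNEL, ctx, lpfc_write_firmware);

	/* sync: the image is valid until release_firmware(), which
	 * lpfc_write_firmware() performs itself on its release path */
	if (!request_firmware(&fw, name, dev))
		lpfc_write_firmware(fw, ctx);
#endif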
/**
 * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is called from the kernel's PCI subsystem to attach a device
 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * presented on the PCI bus, the kernel PCI subsystem looks at PCI
 * device-specific information of the device and driver to see if the driver
 * indicates that it can support this kind of device. If the match is
 * successful, the driver core invokes this routine. If this routine
 * determines it can claim the HBA, it does all the initialization that it
 * needs to do to handle the HBA properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct lpfc_hba *phba;
	struct lpfc_vport *vport = NULL;
	struct Scsi_Host *shost = NULL;
	int error;
	uint32_t cfg_mode, intr_mode;

	/* Allocate memory for HBA structure */
	phba = lpfc_hba_alloc(pdev);
	if (!phba)
		return -ENOMEM;

	/* Perform generic PCI device enabling operation */
	error = lpfc_enable_pci_dev(phba);
	if (error)
		goto out_free_phba;

	/* Set up SLI API function jump table for PCI-device group-1 HBAs */
	error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (error)
		goto out_disable_pci_dev;

	/* Set up SLI-4 specific device PCI memory space */
	error = lpfc_sli4_pci_mem_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1410 Failed to set up pci memory space.\n");
		goto out_disable_pci_dev;
	}

	/* Set up SLI-4 specific device driver resources */
	error = lpfc_sli4_driver_resource_setup(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1412 Failed to set up driver resource.\n");
		goto out_unset_pci_mem_s4;
	}

	INIT_LIST_HEAD(&phba->active_rrq_list);
	INIT_LIST_HEAD(&phba->fcf.fcf_pri_list);

	/* Set up common device driver resources */
	error = lpfc_setup_driver_resource_phase2(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1414 Failed to set up driver resource.\n");
		goto out_unset_driver_resource_s4;
	}

	/* Get the default values for Model Name and Description */
	lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);

	/* Create SCSI host to the physical port */
	error = lpfc_create_shost(phba);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1415 Failed to create scsi host.\n");
		goto out_unset_driver_resource;
	}

	/* Configure sysfs attributes */
	vport = phba->pport;
	error = lpfc_alloc_sysfs_attr(vport);
	if (error) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1416 Failed to allocate sysfs attr\n");
		goto out_destroy_shost;
	}

	shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */

	/* Now, trying to enable interrupt and bring up the device */
	cfg_mode = phba->cfg_use_msi;

	/* Put device to a known state before enabling interrupt */
	lpfc_stop_port(phba);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0426 Failed to enable interrupt.\n");
		error = -ENODEV;
		goto out_free_sysfs_attr;
	}

	/* Default to single EQ for non-MSI-X */
	if (phba->intr_type != MSIX) {
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
			phba->cfg_fcp_io_channel = 1;
		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
			phba->cfg_nvme_io_channel = 1;
			if (phba->nvmet_support)
				phba->cfg_nvmet_mrq = 1;
		}
		phba->io_channel_irqs = 1;
	}

	/* Set up SLI-4 HBA */
	if (lpfc_sli4_hba_setup(phba)) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1421 Failed to set up hba\n");
		error = -ENODEV;
		goto out_disable_intr;
	}

	/* Log the current active interrupt mode */
	phba->intr_mode = intr_mode;
	lpfc_log_intr_mode(phba, intr_mode);

	/* Perform post initialization setup */
	lpfc_post_init_setup(phba);

	/* Checking NVME support in FW earlier in the driver load corrects
	 * the FC4 type, making a separate check for nvme_support
	 * unnecessary.
	 */
	if ((phba->nvmet_support == 0) &&
	    (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
		/* Create NVME binding with nvme_fc_transport. This
		 * ensures the vport is initialized. If the localport
		 * create fails, it should not unload the driver to
		 * support field issues.
		 */
		error = lpfc_nvme_create_localport(vport);
		if (error) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"6004 NVME registration failed, "
					"error x%x\n",
					error);
		}
	}

	/* check for firmware upgrade or downgrade */
	if (phba->cfg_request_firmware_upgrade)
		lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);

	/* Check if there are static vports to be created. */
	lpfc_create_static_vport(phba);

	return 0;

out_disable_intr:
	lpfc_sli4_disable_intr(phba);
out_free_sysfs_attr:
	lpfc_free_sysfs_attr(vport);
out_destroy_shost:
	lpfc_destroy_shost(phba);
out_unset_driver_resource:
	lpfc_unset_driver_resource_phase2(phba);
out_unset_driver_resource_s4:
	lpfc_sli4_driver_resource_unset(phba);
out_unset_pci_mem_s4:
	lpfc_sli4_pci_mem_unset(phba);
out_disable_pci_dev:
	lpfc_disable_pci_dev(phba);
	if (shost)
		scsi_host_put(shost);
out_free_phba:
	lpfc_hba_free(phba);
	return error;
}
/**
 * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to detach a device
 * with SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
 * removed from the PCI bus, it performs all the necessary cleanup for the
 * HBA device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
	struct lpfc_vport **vports;
	struct lpfc_hba *phba = vport->phba;
	int i;

	/* Mark the device unloading flag */
	spin_lock_irq(&phba->hbalock);
	vport->load_flag |= FC_UNLOADING;
	spin_unlock_irq(&phba->hbalock);

	/* Free the HBA sysfs attributes */
	lpfc_free_sysfs_attr(vport);

	/* Release all the vports against this physical port */
	vports = lpfc_create_vport_work_array(phba);
	if (vports != NULL)
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
				continue;
			fc_vport_terminate(vports[i]->fc_vport);
		}
	lpfc_destroy_vport_work_array(phba, vports);

	/* Remove FC host and then SCSI host with the physical port */
	fc_remove_host(shost);
	scsi_remove_host(shost);

	/*
	 * Bring down the SLI Layer. This step disables all interrupts,
	 * clears the rings, discards all mailbox commands, and resets
	 * the HBA FCoE function.
	 */
	lpfc_debugfs_terminate(vport);
	lpfc_sli4_hba_unset(phba);

	/* Perform ndlp cleanup on the physical port. The nvme and nvmet
	 * localports are destroyed afterwards to clean up all transport
	 * memory.
	 */
	lpfc_cleanup(vport);
	lpfc_nvmet_destroy_targetport(phba);
	lpfc_nvme_destroy_localport(vport);

	lpfc_stop_hba_timers(phba);
	spin_lock_irq(&phba->hbalock);
	list_del_init(&vport->listentry);
	spin_unlock_irq(&phba->hbalock);

	/* Perform scsi free before driver resource_unset since scsi
	 * buffers are released to their corresponding pools here.
	 */
	lpfc_scsi_free(phba);
	lpfc_nvme_free(phba);
	lpfc_free_iocb_list(phba);

	lpfc_sli4_driver_resource_unset(phba);

	/* Unmap adapter Control and Doorbell registers */
	lpfc_sli4_pci_mem_unset(phba);

	/* Release PCI resources and disable device's PCI function */
	scsi_host_put(shost);
	lpfc_disable_pci_dev(phba);

	/* Finally, free the driver's device data structure */
	lpfc_hba_free(phba);
}
/**
 * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) on a device with SLI-4 interface spec. When PM
 * invokes this method, it quiesces the device by stopping the driver's
 * worker thread for the device, turning off the device's interrupt and DMA,
 * and bringing the device offline. Note that the driver implements only the
 * minimum PM requirements of a power-aware driver: all possible PM messages
 * (SUSPEND, HIBERNATE, FREEZE) to the suspend() method call are treated as
 * SUSPEND, and the driver fully reinitializes its device during the resume()
 * method call. Therefore the driver sets the device to the PCI_D3hot state
 * in PCI config space instead of setting it according to the @msg provided
 * by the PM.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int
lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"2843 PCI device Power Management suspend.\n");

	/* Bring down the device */
	lpfc_offline_prep(phba, LPFC_MBX_WAIT);
	lpfc_offline(phba);
	kthread_stop(phba->worker_thread);

	/* Disable interrupt from device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);

	/* Save device state to PCI config space */
	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
/**
 * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
 * @pdev: pointer to PCI device
 *
 * This routine is called from the kernel's PCI subsystem to support system
 * Power Management (PM) to device with SLI-4 interface spec. When PM invokes
 * this method, it restores the device's PCI config space state and fully
 * reinitializes the device and brings it online. Note that as the driver
 * implements the minimum PM requirements to a power-aware driver's PM for
 * suspend/resume -- all the possible PM messages (SUSPEND, HIBERNATE, FREEZE)
 * to the suspend() method call will be treated as SUSPEND and the driver
 * will fully reinitialize its device during resume() method call, the device
 * will be set to PCI_D0 directly in PCI config space before restoring the
 * state.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int
lpfc_pci_resume_one_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	uint32_t intr_mode;
	int error;

	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"0292 PCI device Power Management resume.\n");

	/* Restore device state from PCI config space */
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	/* Startup the kernel thread for this host adapter. */
	phba->worker_thread = kthread_run(lpfc_do_work, phba,
					  "lpfc_worker_%d", phba->brd_no);
	if (IS_ERR(phba->worker_thread)) {
		error = PTR_ERR(phba->worker_thread);
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0293 PM resume failed to start worker "
				"thread: error=x%x.\n", error);
		return error;
	}

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"0294 PM resume Failed to enable interrupt\n");
		return -EIO;
	} else
		phba->intr_mode = intr_mode;

	/* Restart HBA and bring it online */
	lpfc_sli_brdrestart(phba);
	lpfc_online(phba);

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return 0;
}
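
/*
 * Illustrative sketch (not part of the driver): the bare PCI power
 * management idiom the two routines above are built around. The second
 * pci_save_state() in the resume path matters because pci_restore_state()
 * clears the device's saved_state flag, as the comment above notes.
 * "example_suspend"/"example_resume" are hypothetical names.
 */
#if 0
static int example_suspend(struct pci_dev *pdev, pm_message_t msg)
{
	/* quiesce device-specific activity first, then: */
	pci_save_state(pdev);			/* snapshot config space */
	pci_set_power_state(pdev, PCI_D3hot);	/* enter low-power state */
	return 0;
}

static int example_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);	/* back to full power */
	pci_restore_state(pdev);		/* replay config space */
	pci_save_state(pdev);			/* re-save; restore cleared it */
	return 0;
}
#endif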
/**
 * lpfc_sli4_prep_dev_for_recover - Prepare SLI4 device for pci slot recover
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot recover. It
 * aborts all the outstanding SCSI I/Os to the pci device.
 **/
static void
lpfc_sli4_prep_dev_for_recover(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2828 PCI channel I/O abort preparing for recovery\n");
	/*
	 * There may be errored I/Os through HBA, abort all I/Os on txcmplq
	 * and let the SCSI mid-layer retry them to recover.
	 */
	lpfc_sli_abort_fcp_rings(phba);
}
/**
 * lpfc_sli4_prep_dev_for_reset - Prepare SLI4 device for pci slot reset
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for PCI slot reset. It
 * disables the device interrupt and pci device, and aborts the internal FCP
 * pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_reset(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2826 PCI channel disable preparing for reset\n");

	/* Block any management I/Os to the device */
	lpfc_block_mgmt_io(phba, LPFC_MBX_NO_WAIT);

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* Flush all driver's outstanding SCSI I/Os as we are to reset */
	lpfc_sli_flush_fcp_rings(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Disable interrupt and pci device */
	lpfc_sli4_disable_intr(phba);
	lpfc_sli4_queue_destroy(phba);
	pci_disable_device(phba->pcidev);
}
/**
 * lpfc_sli4_prep_dev_for_perm_failure - Prepare SLI4 dev for pci slot disable
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is called to prepare the SLI4 device for permanently disabling
 * the PCI slot. It blocks the SCSI transport layer traffic and flushes the
 * FCP pending I/Os.
 **/
static void
lpfc_sli4_prep_dev_for_perm_failure(struct lpfc_hba *phba)
{
	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"2827 PCI channel permanent disable for failure\n");

	/* Block all SCSI devices' I/Os on the host */
	lpfc_scsi_dev_block(phba);

	/* stop all timers */
	lpfc_stop_hba_timers(phba);

	/* Clean up all driver's outstanding SCSI I/Os */
	lpfc_sli_flush_fcp_rings(phba);
}
/**
 * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. This function is called by the PCI subsystem
 * after a PCI bus error affecting this device has been detected. When this
 * function is invoked, it will need to stop all the I/Os and interrupt(s)
 * to the device. Once that is done, it will return the result code for the
 * PCI subsystem to perform proper recovery as desired.
 *
 * Return codes
 *	PCI_ERS_RESULT_CAN_RECOVER - can be recovered without slot reset
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (state) {
	case pci_channel_io_normal:
		/* Non-fatal error, prepare for recovery */
		lpfc_sli4_prep_dev_for_recover(phba);
		return PCI_ERS_RESULT_CAN_RECOVER;
	case pci_channel_io_frozen:
		/* Fatal error, prepare for slot reset */
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	case pci_channel_io_perm_failure:
		/* Permanent failure, prepare for device down */
		lpfc_sli4_prep_dev_for_perm_failure(phba);
		return PCI_ERS_RESULT_DISCONNECT;
	default:
		/* Unknown state, prepare and request slot reset */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2825 Unknown PCI error state: x%x\n", state);
		lpfc_sli4_prep_dev_for_reset(phba);
		return PCI_ERS_RESULT_NEED_RESET;
	}
}
/**
 * lpfc_io_slot_reset_s4 - Method for restarting PCI SLI-4 device from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called after PCI bus has been reset to
 * restart the PCI card from scratch, as if from a cold-boot. During the
 * PCI subsystem error recovery, after the driver returns
 * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
 * recovery and then call this routine before calling the .resume method to
 * recover the device. This function will initialize the HBA device and
 * enable its interrupt, but it will just put the HBA in an offline state
 * without passing any I/O traffic.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t intr_mode;

	dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
	if (pci_enable_device_mem(pdev)) {
		printk(KERN_ERR "lpfc: Cannot re-enable "
			"PCI device after reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_restore_state(pdev);

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * device saved_state flag, need to save the restored state again.
	 */
	pci_save_state(pdev);

	if (pdev->is_busmaster)
		pci_set_master(pdev);

	spin_lock_irq(&phba->hbalock);
	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
	spin_unlock_irq(&phba->hbalock);

	/* Configure and enable interrupt */
	intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
	if (intr_mode == LPFC_INTR_ERROR) {
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"2824 Cannot re-enable interrupt after "
				"slot reset.\n");
		return PCI_ERS_RESULT_DISCONNECT;
	} else
		phba->intr_mode = intr_mode;

	/* Log the current active interrupt mode */
	lpfc_log_intr_mode(phba, phba->intr_mode);

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
 * @pdev: pointer to PCI device
 *
 * This routine is called from the PCI subsystem for error handling to device
 * with SLI-4 interface spec. It is called when kernel error recovery tells
 * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
 * error recovery. After this call, traffic can start to flow from this device
 * again.
 **/
static void
lpfc_io_resume_s4(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	/*
	 * In case of slot reset, as function reset is performed through
	 * mailbox command which needs DMA to be enabled, this operation
	 * has to be moved to the io resume phase. Taking device offline
	 * will perform the necessary cleanup.
	 */
	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) {
		/* Perform device reset */
		lpfc_offline_prep(phba, LPFC_MBX_WAIT);
		lpfc_offline(phba);
		lpfc_sli_brdrestart(phba);
		/* Bring the device back online */
		lpfc_online(phba);
	}

	/* Clean up Advanced Error Reporting (AER) if needed */
	if (phba->hba_flag & HBA_AER_ENABLED)
		pci_cleanup_aer_uncorrect_error_status(pdev);
}
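
/*
 * Illustrative sketch (not part of the driver): the PCI core drives AER
 * recovery through the three callbacks above in a fixed order --
 * error_detected -> (if NEED_RESET was returned) slot_reset -> resume.
 * A driver publishes them via a struct pci_error_handlers; lpfc's real
 * table appears near the end of this file and dispatches to the SLI-3 or
 * SLI-4 variants. "example_s4_err_handler" is a hypothetical name.
 */
#if 0
static const struct pci_error_handlers example_s4_err_handler = {
	.error_detected	= lpfc_io_error_detected_s4,	/* quiesce, pick result */
	.slot_reset	= lpfc_io_slot_reset_s4,	/* re-init after bus reset */
	.resume		= lpfc_io_resume_s4,		/* restart I/O traffic */
};
#endif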
/**
 * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
 * @pdev: pointer to PCI device
 * @pid: pointer to PCI device identifier
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA device is presented on PCI bus, the kernel PCI subsystem looks
 * at PCI device-specific information of the device and driver to see if the
 * driver can support this kind of device. If the match is successful, the
 * driver core invokes this routine. This routine dispatches the action to the
 * proper SLI-3 or SLI-4 device probing routine, which will do all the
 * initialization that it needs to do to handle the HBA device properly.
 *
 * Return code
 *	0 - driver can claim the device
 *	negative value - driver can not claim the device
 **/
static int
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	int rc;
	struct lpfc_sli_intf intf;

	if (pci_read_config_dword(pdev, LPFC_SLI_INTF, &intf.word0))
		return -ENODEV;

	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
	    (bf_get(lpfc_sli_intf_slirev, &intf) == LPFC_SLI_INTF_REV_SLI4))
		rc = lpfc_pci_probe_one_s4(pdev, pid);
	else
		rc = lpfc_pci_probe_one_s3(pdev, pid);

	return rc;
}
/**
 * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem. When an
 * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
 * This routine dispatches the action to the proper SLI-3 or SLI-4 device
 * remove routine, which will perform all the necessary cleanup for the
 * device to be removed from the PCI subsystem properly.
 **/
static void
lpfc_pci_remove_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_pci_remove_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_pci_remove_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1424 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}
/**
 * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
 * @pdev: pointer to PCI device
 * @msg: power management message
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
 * suspend the device.
 *
 * Return code
 *	0 - driver suspended the device
 *	Error otherwise
 **/
static int
lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_suspend_one_s3(pdev, msg);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_suspend_one_s4(pdev, msg);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1425 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
 * @pdev: pointer to PCI device
 *
 * This routine is to be registered to the kernel's PCI subsystem to support
 * system Power Management (PM). When PM invokes this method, it dispatches
 * the action to the proper SLI-3 or SLI-4 device resume routine, which will
 * resume the device.
 *
 * Return code
 *	0 - driver resumed the device
 *	Error otherwise
 **/
static int
lpfc_pci_resume_one(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	int rc = -ENODEV;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_pci_resume_one_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_pci_resume_one_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1426 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_error_detected - lpfc method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the action to the proper SLI-3 or SLI-4 device error detected handling
 * routine, which will perform the proper error detected operation.
 *
 * Return codes
 *	PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_error_detected_s3(pdev, state);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_error_detected_s4(pdev, state);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1427 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_slot_reset - lpfc method for restarting PCI dev from scratch
 * @pdev: pointer to PCI device.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called after PCI bus has been reset to restart the PCI card
 * from scratch, as if from a cold-boot. When this routine is invoked, it
 * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
 * routine, which will perform the proper device reset.
 *
 * Return codes
 *	PCI_ERS_RESULT_RECOVERED - the device has been recovered
 *	PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 **/
static pci_ers_result_t
lpfc_io_slot_reset(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		rc = lpfc_io_slot_reset_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		rc = lpfc_io_slot_reset_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1428 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return rc;
}
/**
 * lpfc_io_resume - lpfc method for resuming PCI I/O operation
 * @pdev: pointer to PCI device
 *
 * This routine is registered to the PCI subsystem for error handling. It
 * is called when kernel error recovery tells the lpfc driver that it is
 * OK to resume normal PCI operation after PCI bus error recovery. When
 * this routine is invoked, it dispatches the action to the proper SLI-3
 * or SLI-4 device io_resume routine, which will resume the device operation.
 **/
static void
lpfc_io_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *shost = pci_get_drvdata(pdev);
	struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;

	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:
		lpfc_io_resume_s3(pdev);
		break;
	case LPFC_PCI_DEV_OC:
		lpfc_io_resume_s4(pdev);
		break;
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"1429 Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		break;
	}
	return;
}
/**
 * lpfc_sli4_oas_verify - Verify OAS is supported by this adapter
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine checks to see if OAS is supported for this adapter. If
 * supported, the configure Flash Optimized Fabric flag is set. Otherwise,
 * the enable oas flag is cleared and the pool created for OAS device data
 * is destroyed.
 **/
void
lpfc_sli4_oas_verify(struct lpfc_hba *phba)
{
	if (!phba->cfg_EnableXLane)
		return;

	if (phba->sli4_hba.pc_sli4_params.oas_supported) {
		phba->cfg_fof = 1;
	} else {
		phba->cfg_fof = 0;
		if (phba->device_data_mem_pool)
			mempool_destroy(phba->device_data_mem_pool);
		phba->device_data_mem_pool = NULL;
	}

	return;
}
/**
 * lpfc_fof_queue_setup - Set up all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to set up all the fof queues for the FC HBA
 * operation.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 **/
int
lpfc_fof_queue_setup(struct lpfc_hba *phba)
{
	struct lpfc_sli_ring *pring;
	int rc;

	rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
	if (rc)
		return -ENOMEM;

	if (phba->cfg_fof) {

		rc = lpfc_cq_create(phba, phba->sli4_hba.oas_cq,
				    phba->sli4_hba.fof_eq, LPFC_WCQ, LPFC_FCP);
		if (rc)
			goto out_oas_cq;

		rc = lpfc_wq_create(phba, phba->sli4_hba.oas_wq,
				    phba->sli4_hba.oas_cq, LPFC_FCP);
		if (rc)
			goto out_oas_wq;

		/* Bind this CQ/WQ to the OAS ring */
		pring = phba->sli4_hba.oas_wq->pring;
		pring->sli.sli4.wqp =
			(void *)phba->sli4_hba.oas_wq;
		phba->sli4_hba.oas_cq->pring = pring;
	}
	return 0;

out_oas_wq:
	lpfc_cq_destroy(phba, phba->sli4_hba.oas_cq);
out_oas_cq:
	lpfc_eq_destroy(phba, phba->sli4_hba.fof_eq);
	return rc;
}
/**
 * lpfc_fof_queue_create - Create all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to allocate all the fof queues for the FC HBA
 * operation. For each SLI4 queue type, the parameters such as queue entry
 * count (queue depth) shall be taken from the module parameter. For now,
 * we just use some constant number as placeholder.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - No available memory
 *	-EIO - The mailbox failed to complete successfully.
 **/
int
lpfc_fof_queue_create(struct lpfc_hba *phba)
{
	struct lpfc_queue *qdesc;
	uint32_t wqesize;

	/* Create FOF EQ */
	qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
				      phba->sli4_hba.eq_ecount);
	if (!qdesc)
		goto out_error;

	phba->sli4_hba.fof_eq = qdesc;

	if (phba->cfg_fof) {

		/* Create OAS CQ */
		qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
					      phba->sli4_hba.cq_ecount);
		if (!qdesc)
			goto out_error;

		phba->sli4_hba.oas_cq = qdesc;

		/* Create OAS WQ */
		wqesize = (phba->fcp_embed_io) ?
				LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
		qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
					      phba->sli4_hba.wq_ecount);
		if (!qdesc)
			goto out_error;

		phba->sli4_hba.oas_wq = qdesc;
		list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
	}
	return 0;

out_error:
	lpfc_fof_queue_destroy(phba);
	return -ENOMEM;
}
/**
 * lpfc_fof_queue_destroy - Destroy all the fof queues
 * @phba: pointer to lpfc hba data structure.
 *
 * This routine is invoked to release all the fof SLI4 queues used for the
 * FC HBA operation.
 *
 * Return codes
 *	0 - successful
 **/
int
lpfc_fof_queue_destroy(struct lpfc_hba *phba)
{
	/* Release FOF Event queue */
	if (phba->sli4_hba.fof_eq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.fof_eq);
		phba->sli4_hba.fof_eq = NULL;
	}

	/* Release OAS Completion queue */
	if (phba->sli4_hba.oas_cq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.oas_cq);
		phba->sli4_hba.oas_cq = NULL;
	}

	/* Release OAS Work queue */
	if (phba->sli4_hba.oas_wq != NULL) {
		lpfc_sli4_queue_free(phba->sli4_hba.oas_wq);
		phba->sli4_hba.oas_wq = NULL;
	}
	return 0;
}
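
/*
 * Illustrative sketch (not part of the driver): how the three fof queue
 * helpers above fit together. Allocation (create) is separate from posting
 * the queues to the hardware (setup), and destroy tolerates partially
 * created state because each release is NULL-checked, so a caller can
 * unwind simply. "example_fof_bringup" is a hypothetical name.
 */
#if 0
static int example_fof_bringup(struct lpfc_hba *phba)
{
	int rc;

	rc = lpfc_fof_queue_create(phba);	/* allocate queue memory */
	if (rc)
		return rc;
	rc = lpfc_fof_queue_setup(phba);	/* post queues to the HBA */
	if (rc)
		lpfc_fof_queue_destroy(phba);	/* free everything on failure */
	return rc;
}
#endif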
MODULE_DEVICE_TABLE(pci, lpfc_id_table);

static const struct pci_error_handlers lpfc_err_handler = {
	.error_detected = lpfc_io_error_detected,
	.slot_reset = lpfc_io_slot_reset,
	.resume = lpfc_io_resume,
};

static struct pci_driver lpfc_driver = {
	.name		= LPFC_DRIVER_NAME,
	.id_table	= lpfc_id_table,
	.probe		= lpfc_pci_probe_one,
	.remove		= lpfc_pci_remove_one,
	.shutdown	= lpfc_pci_remove_one,
	.suspend	= lpfc_pci_suspend_one,
	.resume		= lpfc_pci_resume_one,
	.err_handler    = &lpfc_err_handler,
};
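
/*
 * Illustrative aside (not part of the driver): a PCI driver with no
 * module-level work beyond registration could replace a hand-written
 * module_init()/module_exit() pair with the one-line helper below. lpfc
 * cannot, because lpfc_init()/lpfc_exit() further down also manage the
 * FC transport templates and the lpfcmgmt misc device.
 */
#if 0
module_pci_driver(lpfc_driver);
#endif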
static const struct file_operations lpfc_mgmt_fop = {
	.owner = THIS_MODULE,
};

static struct miscdevice lpfc_mgmt_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "lpfcmgmt",
	.fops = &lpfc_mgmt_fop,
};
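
/*
 * Illustrative sketch (not part of the driver): registering lpfc_mgmt_dev
 * with MISC_DYNAMIC_MINOR creates a character device node, typically
 * /dev/lpfcmgmt. Since lpfc_mgmt_fop only sets .owner, opening the node
 * mostly signals the driver's presence and pins the module while held
 * open. A hypothetical userspace check:
 */
#if 0	/* userspace code, not kernel code */
#include <fcntl.h>
#include <unistd.h>

int example_lpfcmgmt_present(void)
{
	int fd = open("/dev/lpfcmgmt", O_RDONLY);

	if (fd < 0)
		return 0;	/* node absent: lpfc not loaded */
	close(fd);
	return 1;
}
#endif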
/**
 * lpfc_init - lpfc module initialization routine
 *
 * This routine is to be invoked when the lpfc module is loaded into the
 * kernel. The special kernel macro module_init() is used to indicate the
 * role of this routine to the kernel as lpfc module entry point.
 *
 * Return codes
 *	0 - successful
 *	-ENOMEM - FC attach transport failed
 *	all others - failed
 */
static int __init
lpfc_init(void)
{
	int error = 0;

	printk(LPFC_MODULE_DESC "\n");
	printk(LPFC_COPYRIGHT "\n");

	error = misc_register(&lpfc_mgmt_dev);
	if (error)
		printk(KERN_ERR "Could not register lpfcmgmt device, "
			"misc_register returned with status %d\n", error);

	lpfc_transport_functions.vport_create = lpfc_vport_create;
	lpfc_transport_functions.vport_delete = lpfc_vport_delete;
	lpfc_transport_template =
				fc_attach_transport(&lpfc_transport_functions);
	if (lpfc_transport_template == NULL)
		return -ENOMEM;
	lpfc_vport_transport_template =
		fc_attach_transport(&lpfc_vport_transport_functions);
	if (lpfc_vport_transport_template == NULL) {
		fc_release_transport(lpfc_transport_template);
		return -ENOMEM;
	}

	/* Initialize in case vector mapping is needed */
	lpfc_used_cpu = NULL;
	lpfc_present_cpu = num_present_cpus();

	error = pci_register_driver(&lpfc_driver);
	if (error) {
		fc_release_transport(lpfc_transport_template);
		fc_release_transport(lpfc_vport_transport_template);
	}

	return error;
}
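
/*
 * Illustrative sketch (not part of the driver): the goto-based unwind
 * idiom module init paths conventionally use. Note that lpfc_init() above
 * leaves lpfc_mgmt_dev registered when fc_attach_transport() fails; a
 * fully unwound variant would deregister it, as sketched here with the
 * hypothetical "example_init".
 */
#if 0
static int __init example_init(void)
{
	int error;

	error = misc_register(&lpfc_mgmt_dev);
	if (error)
		return error;

	error = pci_register_driver(&lpfc_driver);
	if (error)
		goto out_misc;

	return 0;

out_misc:
	misc_deregister(&lpfc_mgmt_dev);
	return error;
}
#endif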
/**
 * lpfc_exit - lpfc module removal routine
 *
 * This routine is invoked when the lpfc module is removed from the kernel.
 * The special kernel macro module_exit() is used to indicate the role of
 * this routine to the kernel as lpfc module exit point.
 */
static void __exit
lpfc_exit(void)
{
	misc_deregister(&lpfc_mgmt_dev);
	pci_unregister_driver(&lpfc_driver);
	fc_release_transport(lpfc_transport_template);
	fc_release_transport(lpfc_vport_transport_template);
	if (_dump_buf_data) {
		printk(KERN_ERR	"9062 BLKGRD: freeing %lu pages for "
				"_dump_buf_data at 0x%p\n",
				(1L << _dump_buf_data_order), _dump_buf_data);
		free_pages((unsigned long)_dump_buf_data, _dump_buf_data_order);
	}

	if (_dump_buf_dif) {
		printk(KERN_ERR	"9049 BLKGRD: freeing %lu pages for "
				"_dump_buf_dif at 0x%p\n",
				(1L << _dump_buf_dif_order), _dump_buf_dif);
		free_pages((unsigned long)_dump_buf_dif, _dump_buf_dif_order);
	}
	kfree(lpfc_used_cpu);
	idr_destroy(&lpfc_hba_index);
}
module_init(lpfc_init);
module_exit(lpfc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
MODULE_AUTHOR("Broadcom");
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);