mpt3sas_scsih.c

/*
 * Scsi Host Layer for MPT (Message Passing Technology) based controllers
 *
 * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
 * Copyright (C) 2012-2014  LSI Corporation
 * Copyright (C) 2013-2014 Avago Technologies
 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
 * USA.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/blkdev.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/aer.h>
#include <linux/raid_class.h>
#include <asm/unaligned.h>

#include "mpt3sas_base.h"

#define RAID_CHANNEL 1
#define PCIE_CHANNEL 2

/* forward proto's */
static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
        struct _sas_node *sas_expander);
static void _firmware_event_work(struct work_struct *work);

static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
        struct _sas_device *sas_device);
static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
        u8 retry_count, u8 is_pd);
static int _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
static void _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
        struct _pcie_device *pcie_device);
static void
_scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle);
static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);

/* global parameters */
LIST_HEAD(mpt3sas_ioc_list);
/* global ioc lock for list operations */
DEFINE_SPINLOCK(gioc_lock);

MODULE_AUTHOR(MPT3SAS_AUTHOR);
MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
MODULE_LICENSE("GPL");
MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
MODULE_ALIAS("mpt2sas");

/* local parameters */
static u8 scsi_io_cb_idx = -1;
static u8 tm_cb_idx = -1;
static u8 ctl_cb_idx = -1;
static u8 base_cb_idx = -1;
static u8 port_enable_cb_idx = -1;
static u8 transport_cb_idx = -1;
static u8 scsih_cb_idx = -1;
static u8 config_cb_idx = -1;
static int mpt2_ids;
static int mpt3_ids;

static u8 tm_tr_cb_idx = -1;
static u8 tm_tr_volume_cb_idx = -1;
static u8 tm_sas_control_cb_idx = -1;
/* command line options */
static u32 logging_level;
MODULE_PARM_DESC(logging_level,
        " bits for enabling additional logging info (default=0)");

static ushort max_sectors = 0xFFFF;
module_param(max_sectors, ushort, 0);
MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");

static int missing_delay[2] = {-1, -1};
module_param_array(missing_delay, int, NULL, 0);
MODULE_PARM_DESC(missing_delay, " device missing delay , io missing delay");

/* scsi-mid layer global parameter is max_report_luns, which is 511 */
#define MPT3SAS_MAX_LUN (16895)
static u64 max_lun = MPT3SAS_MAX_LUN;
module_param(max_lun, ullong, 0);
MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");

static ushort hbas_to_enumerate;
module_param(hbas_to_enumerate, ushort, 0);
MODULE_PARM_DESC(hbas_to_enumerate,
        " 0 - enumerates both SAS 2.0 & SAS 3.0 generation HBAs\n \
          1 - enumerates only SAS 2.0 generation HBAs\n \
          2 - enumerates only SAS 3.0 generation HBAs (default=0)");

/* diag_buffer_enable is bitwise
 * bit 0 set = TRACE
 * bit 1 set = SNAPSHOT
 * bit 2 set = EXTENDED
 *
 * Any combination of these bits can be set.
 */
static int diag_buffer_enable = -1;
module_param(diag_buffer_enable, int, 0);
MODULE_PARM_DESC(diag_buffer_enable,
        " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
static int disable_discovery = -1;
module_param(disable_discovery, int, 0);
MODULE_PARM_DESC(disable_discovery, " disable discovery ");

/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
static int prot_mask = -1;
module_param(prot_mask, int, 0);
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");

/* raid transport support */
static struct raid_template *mpt3sas_raid_template;
static struct raid_template *mpt2sas_raid_template;
/**
 * struct sense_info - common structure for obtaining sense keys
 * @skey: sense key
 * @asc: additional sense code
 * @ascq: additional sense code qualifier
 */
struct sense_info {
        u8 skey;
        u8 asc;
        u8 ascq;
};

#define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
#define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
#define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
#define MPT3SAS_ABRT_TASK_SET (0xFFFE)
#define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)

/**
 * struct fw_event_work - firmware event struct
 * @list: link list framework
 * @work: work object (ioc->fault_reset_work_q)
 * @ioc: per adapter object
 * @device_handle: device handle
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @ignore: flag meaning this event has been marked to ignore
 * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
 * @refcount: kref for this event
 * @event_data: reply event data payload follows
 *
 * This object is stored on ioc->fw_event_list.
 */
struct fw_event_work {
        struct list_head list;
        struct work_struct work;

        struct MPT3SAS_ADAPTER *ioc;
        u16 device_handle;
        u8 VF_ID;
        u8 VP_ID;
        u8 ignore;
        u16 event;
        struct kref refcount;
        char event_data[0] __aligned(4);
};
static void fw_event_work_free(struct kref *r)
{
        kfree(container_of(r, struct fw_event_work, refcount));
}

static void fw_event_work_get(struct fw_event_work *fw_work)
{
        kref_get(&fw_work->refcount);
}

static void fw_event_work_put(struct fw_event_work *fw_work)
{
        kref_put(&fw_work->refcount, fw_event_work_free);
}

static struct fw_event_work *alloc_fw_event_work(int len)
{
        struct fw_event_work *fw_event;

        fw_event = kzalloc(sizeof(*fw_event) + len, GFP_ATOMIC);
        if (!fw_event)
                return NULL;
        kref_init(&fw_event->refcount);
        return fw_event;
}
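
/*
 * Illustrative sketch only (not part of the original driver): how the kref
 * helpers above are typically combined.  alloc_fw_event_work() returns an
 * object with a refcount of 1; each additional user takes a reference with
 * fw_event_work_get() and drops it with fw_event_work_put(), which frees the
 * object once the last reference is gone.  The function name and the
 * queueing step below are hypothetical.
 */
static void _scsih_fw_event_lifecycle_sketch(struct MPT3SAS_ADAPTER *ioc)
{
        struct fw_event_work *fw_event;

        fw_event = alloc_fw_event_work(0);      /* refcount == 1 */
        if (!fw_event)
                return;
        fw_event->ioc = ioc;
        fw_event_work_get(fw_event);            /* reference held by the work queue */
        /* ... INIT_WORK()/queue_work() of fw_event->work would go here ... */
        fw_event_work_put(fw_event);            /* drop the allocator's reference */
}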
/**
 * struct _scsi_io_transfer - scsi io transfer
 * @handle: sas device handle (assigned by firmware)
 * @is_raid: flag set for hidden raid components
 * @dir: DMA_TO_DEVICE, DMA_FROM_DEVICE,
 * @data_length: data transfer length
 * @data_dma: dma pointer to data
 * @sense: sense data
 * @lun: lun number
 * @cdb_length: cdb length
 * @cdb: cdb contents
 * @timeout: timeout for this command
 * @VF_ID: virtual function id
 * @VP_ID: virtual port id
 * @valid_reply: flag set for reply message
 * @sense_length: sense length
 * @ioc_status: ioc status
 * @scsi_state: scsi state
 * @scsi_status: scsi status
 * @log_info: log information
 * @transfer_length: data length transfer when there is a reply message
 *
 * Used for sending internal scsi commands to devices within this module.
 * Refer to _scsi_send_scsi_io().
 */
struct _scsi_io_transfer {
        u16 handle;
        u8 is_raid;
        enum dma_data_direction dir;
        u32 data_length;
        dma_addr_t data_dma;
        u8 sense[SCSI_SENSE_BUFFERSIZE];
        u32 lun;
        u8 cdb_length;
        u8 cdb[32];
        u8 timeout;
        u8 VF_ID;
        u8 VP_ID;
        u8 valid_reply;
        /* the following bits are only valid when 'valid_reply = 1' */
        u32 sense_length;
        u16 ioc_status;
        u8 scsi_state;
        u8 scsi_status;
        u32 log_info;
        u32 transfer_length;
};
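
/*
 * Illustrative sketch only (not part of the original driver): how a caller
 * might populate a _scsi_io_transfer for an internal TEST UNIT READY before
 * handing it to the _scsi_send_scsi_io() helper referenced in the comment
 * above (not shown in this excerpt).  The function name and the timeout
 * value are hypothetical.
 */
static void _scsih_fill_tur_sketch(struct _scsi_io_transfer *transfer_packet,
        u16 handle, u32 lun)
{
        memset(transfer_packet, 0, sizeof(struct _scsi_io_transfer));
        transfer_packet->handle = handle;
        transfer_packet->lun = lun;
        transfer_packet->dir = DMA_NONE;        /* no data transfer */
        transfer_packet->data_length = 0;
        transfer_packet->cdb_length = 6;
        transfer_packet->cdb[0] = 0x00;         /* TEST UNIT READY opcode */
        transfer_packet->timeout = 10;          /* seconds */
}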
/**
 * _scsih_set_debug_level - global setting of ioc->logging_level.
 *
 * Note: The logging levels are defined in mpt3sas_debug.h.
 */
static int
_scsih_set_debug_level(const char *val, const struct kernel_param *kp)
{
        int ret = param_set_int(val, kp);
        struct MPT3SAS_ADAPTER *ioc;

        if (ret)
                return ret;

        pr_info("setting logging_level(0x%08x)\n", logging_level);
        spin_lock(&gioc_lock);
        list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
                ioc->logging_level = logging_level;
        spin_unlock(&gioc_lock);
        return 0;
}
module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
        &logging_level, 0644);
/**
 * _scsih_srch_boot_sas_address - search based on sas_address
 * @sas_address: sas address
 * @boot_device: boot device object from bios page 2
 *
 * Returns 1 when there's a match, 0 means no match.
 */
static inline int
_scsih_srch_boot_sas_address(u64 sas_address,
        Mpi2BootDeviceSasWwid_t *boot_device)
{
        return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
}
  283. /**
  284. * _scsih_srch_boot_device_name - search based on device name
285. * @device_name: device name specified in the IDENTIFY frame
  286. * @boot_device: boot device object from bios page 2
  287. *
  288. * Returns 1 when there's a match, 0 means no match.
  289. */
  290. static inline int
  291. _scsih_srch_boot_device_name(u64 device_name,
  292. Mpi2BootDeviceDeviceName_t *boot_device)
  293. {
  294. return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
  295. }
  296. /**
  297. * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
  298. * @enclosure_logical_id: enclosure logical id
  299. * @slot_number: slot number
  300. * @boot_device: boot device object from bios page 2
  301. *
  302. * Returns 1 when there's a match, 0 means no match.
  303. */
  304. static inline int
  305. _scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
  306. Mpi2BootDeviceEnclosureSlot_t *boot_device)
  307. {
  308. return (enclosure_logical_id == le64_to_cpu(boot_device->
  309. EnclosureLogicalID) && slot_number == le16_to_cpu(boot_device->
  310. SlotNumber)) ? 1 : 0;
  311. }
  312. /**
  313. * _scsih_is_boot_device - search for matching boot device.
  314. * @sas_address: sas address
315. * @device_name: device name specified in the IDENTIFY frame
  316. * @enclosure_logical_id: enclosure logical id
317. * @slot: slot number
  318. * @form: specifies boot device form
  319. * @boot_device: boot device object from bios page 2
  320. *
  321. * Returns 1 when there's a match, 0 means no match.
  322. */
  323. static int
  324. _scsih_is_boot_device(u64 sas_address, u64 device_name,
  325. u64 enclosure_logical_id, u16 slot, u8 form,
  326. Mpi2BiosPage2BootDevice_t *boot_device)
  327. {
  328. int rc = 0;
  329. switch (form) {
  330. case MPI2_BIOSPAGE2_FORM_SAS_WWID:
  331. if (!sas_address)
  332. break;
  333. rc = _scsih_srch_boot_sas_address(
  334. sas_address, &boot_device->SasWwid);
  335. break;
  336. case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
  337. if (!enclosure_logical_id)
  338. break;
  339. rc = _scsih_srch_boot_encl_slot(
  340. enclosure_logical_id,
  341. slot, &boot_device->EnclosureSlot);
  342. break;
  343. case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
  344. if (!device_name)
  345. break;
  346. rc = _scsih_srch_boot_device_name(
  347. device_name, &boot_device->DeviceName);
  348. break;
  349. case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
  350. break;
  351. }
  352. return rc;
  353. }
  354. /**
  355. * _scsih_get_sas_address - set the sas_address for given device handle
  356. * @handle: device handle
  357. * @sas_address: sas address
  358. *
359. * Returns 0 on success, non-zero on failure.
  360. */
  361. static int
  362. _scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
  363. u64 *sas_address)
  364. {
  365. Mpi2SasDevicePage0_t sas_device_pg0;
  366. Mpi2ConfigReply_t mpi_reply;
  367. u32 ioc_status;
  368. *sas_address = 0;
  369. if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
  370. MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
  371. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
  372. __FILE__, __LINE__, __func__);
  373. return -ENXIO;
  374. }
  375. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
  376. if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
  377. /* For HBA, vSES doesn't return HBA SAS address. Instead return
  378. * vSES's sas address.
  379. */
  380. if ((handle <= ioc->sas_hba.num_phys) &&
  381. (!(le32_to_cpu(sas_device_pg0.DeviceInfo) &
  382. MPI2_SAS_DEVICE_INFO_SEP)))
  383. *sas_address = ioc->sas_hba.sas_address;
  384. else
  385. *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
  386. return 0;
  387. }
  388. /* we hit this because the given parent handle doesn't exist */
  389. if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
  390. return -ENXIO;
  391. /* else error case */
  392. pr_err(MPT3SAS_FMT
  393. "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
  394. ioc->name, handle, ioc_status,
  395. __FILE__, __LINE__, __func__);
  396. return -EIO;
  397. }
  398. /**
  399. * _scsih_determine_boot_device - determine boot device.
  400. * @ioc: per adapter object
401. * @device: sas_device, pcie_device, or raid_device object
402. * @channel: SAS, PCIe, or RAID channel
  403. *
404. * Determines whether this device should be the first device reported to
405. * scsi-ml or the sas transport; this is done to support persistent boot
406. * devices. There are primary, alternate, and current entries in bios page 2,
407. * and the priority order is primary, alternate, then current. This routine
408. * saves the matching device object.
409. * The saved data is used later in _scsih_probe_boot_devices().
  410. */
  411. static void
  412. _scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc, void *device,
  413. u32 channel)
  414. {
  415. struct _sas_device *sas_device;
  416. struct _pcie_device *pcie_device;
  417. struct _raid_device *raid_device;
  418. u64 sas_address;
  419. u64 device_name;
  420. u64 enclosure_logical_id;
  421. u16 slot;
  422. /* only process this function when driver loads */
  423. if (!ioc->is_driver_loading)
  424. return;
  425. /* no Bios, return immediately */
  426. if (!ioc->bios_pg3.BiosVersion)
  427. return;
  428. if (channel == RAID_CHANNEL) {
  429. raid_device = device;
  430. sas_address = raid_device->wwid;
  431. device_name = 0;
  432. enclosure_logical_id = 0;
  433. slot = 0;
  434. } else if (channel == PCIE_CHANNEL) {
  435. pcie_device = device;
  436. sas_address = pcie_device->wwid;
  437. device_name = 0;
  438. enclosure_logical_id = 0;
  439. slot = 0;
  440. } else {
  441. sas_device = device;
  442. sas_address = sas_device->sas_address;
  443. device_name = sas_device->device_name;
  444. enclosure_logical_id = sas_device->enclosure_logical_id;
  445. slot = sas_device->slot;
  446. }
  447. if (!ioc->req_boot_device.device) {
  448. if (_scsih_is_boot_device(sas_address, device_name,
  449. enclosure_logical_id, slot,
  450. (ioc->bios_pg2.ReqBootDeviceForm &
  451. MPI2_BIOSPAGE2_FORM_MASK),
  452. &ioc->bios_pg2.RequestedBootDevice)) {
  453. dinitprintk(ioc, pr_info(MPT3SAS_FMT
  454. "%s: req_boot_device(0x%016llx)\n",
  455. ioc->name, __func__,
  456. (unsigned long long)sas_address));
  457. ioc->req_boot_device.device = device;
  458. ioc->req_boot_device.channel = channel;
  459. }
  460. }
  461. if (!ioc->req_alt_boot_device.device) {
  462. if (_scsih_is_boot_device(sas_address, device_name,
  463. enclosure_logical_id, slot,
  464. (ioc->bios_pg2.ReqAltBootDeviceForm &
  465. MPI2_BIOSPAGE2_FORM_MASK),
  466. &ioc->bios_pg2.RequestedAltBootDevice)) {
  467. dinitprintk(ioc, pr_info(MPT3SAS_FMT
  468. "%s: req_alt_boot_device(0x%016llx)\n",
  469. ioc->name, __func__,
  470. (unsigned long long)sas_address));
  471. ioc->req_alt_boot_device.device = device;
  472. ioc->req_alt_boot_device.channel = channel;
  473. }
  474. }
  475. if (!ioc->current_boot_device.device) {
  476. if (_scsih_is_boot_device(sas_address, device_name,
  477. enclosure_logical_id, slot,
  478. (ioc->bios_pg2.CurrentBootDeviceForm &
  479. MPI2_BIOSPAGE2_FORM_MASK),
  480. &ioc->bios_pg2.CurrentBootDevice)) {
  481. dinitprintk(ioc, pr_info(MPT3SAS_FMT
  482. "%s: current_boot_device(0x%016llx)\n",
  483. ioc->name, __func__,
  484. (unsigned long long)sas_address));
  485. ioc->current_boot_device.device = device;
  486. ioc->current_boot_device.channel = channel;
  487. }
  488. }
  489. }
  490. static struct _sas_device *
  491. __mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
  492. struct MPT3SAS_TARGET *tgt_priv)
  493. {
  494. struct _sas_device *ret;
  495. assert_spin_locked(&ioc->sas_device_lock);
  496. ret = tgt_priv->sas_dev;
  497. if (ret)
  498. sas_device_get(ret);
  499. return ret;
  500. }
  501. static struct _sas_device *
  502. mpt3sas_get_sdev_from_target(struct MPT3SAS_ADAPTER *ioc,
  503. struct MPT3SAS_TARGET *tgt_priv)
  504. {
  505. struct _sas_device *ret;
  506. unsigned long flags;
  507. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  508. ret = __mpt3sas_get_sdev_from_target(ioc, tgt_priv);
  509. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  510. return ret;
  511. }
  512. static struct _pcie_device *
  513. __mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
  514. struct MPT3SAS_TARGET *tgt_priv)
  515. {
  516. struct _pcie_device *ret;
  517. assert_spin_locked(&ioc->pcie_device_lock);
  518. ret = tgt_priv->pcie_dev;
  519. if (ret)
  520. pcie_device_get(ret);
  521. return ret;
  522. }
  523. /**
  524. * mpt3sas_get_pdev_from_target - pcie device search
  525. * @ioc: per adapter object
  526. * @tgt_priv: starget private object
  527. *
  528. * Context: This function will acquire ioc->pcie_device_lock and will release
529. * it before returning the pcie_device object.
  530. *
  531. * This searches for pcie_device from target, then return pcie_device object.
  532. */
  533. static struct _pcie_device *
  534. mpt3sas_get_pdev_from_target(struct MPT3SAS_ADAPTER *ioc,
  535. struct MPT3SAS_TARGET *tgt_priv)
  536. {
  537. struct _pcie_device *ret;
  538. unsigned long flags;
  539. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  540. ret = __mpt3sas_get_pdev_from_target(ioc, tgt_priv);
  541. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  542. return ret;
  543. }
  544. struct _sas_device *
  545. __mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
  546. u64 sas_address)
  547. {
  548. struct _sas_device *sas_device;
  549. assert_spin_locked(&ioc->sas_device_lock);
  550. list_for_each_entry(sas_device, &ioc->sas_device_list, list)
  551. if (sas_device->sas_address == sas_address)
  552. goto found_device;
  553. list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
  554. if (sas_device->sas_address == sas_address)
  555. goto found_device;
  556. return NULL;
  557. found_device:
  558. sas_device_get(sas_device);
  559. return sas_device;
  560. }
  561. /**
  562. * mpt3sas_get_sdev_by_addr - sas device search
  563. * @ioc: per adapter object
  564. * @sas_address: sas address
565. * Context: This function acquires and releases ioc->sas_device_lock itself.
566. *
567. * This searches for a sas_device based on sas_address, then returns the
568. * sas_device object with its reference count incremented.
  569. */
  570. struct _sas_device *
  571. mpt3sas_get_sdev_by_addr(struct MPT3SAS_ADAPTER *ioc,
  572. u64 sas_address)
  573. {
  574. struct _sas_device *sas_device;
  575. unsigned long flags;
  576. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  577. sas_device = __mpt3sas_get_sdev_by_addr(ioc,
  578. sas_address);
  579. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  580. return sas_device;
  581. }
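/*
 * Editor's note (usage sketch, not part of the driver source): each lookup
 * helper above returns the sas_device with its reference count raised, so a
 * caller is expected to drop that reference with sas_device_put() when done:
 *
 *	struct _sas_device *sas_device;
 *
 *	sas_device = mpt3sas_get_sdev_by_addr(ioc, sas_address);
 *	if (sas_device) {
 *		// ... use sas_device fields ...
 *		sas_device_put(sas_device);
 *	}
 *
 * The __mpt3sas_get_sdev_by_addr() variant follows the same contract but
 * requires the caller to already hold ioc->sas_device_lock.
 */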
  582. static struct _sas_device *
  583. __mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  584. {
  585. struct _sas_device *sas_device;
  586. assert_spin_locked(&ioc->sas_device_lock);
  587. list_for_each_entry(sas_device, &ioc->sas_device_list, list)
  588. if (sas_device->handle == handle)
  589. goto found_device;
  590. list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
  591. if (sas_device->handle == handle)
  592. goto found_device;
  593. return NULL;
  594. found_device:
  595. sas_device_get(sas_device);
  596. return sas_device;
  597. }
  598. /**
  599. * mpt3sas_get_sdev_by_handle - sas device search
  600. * @ioc: per adapter object
  601. * @handle: sas device handle (assigned by firmware)
602. * Context: This function acquires and releases ioc->sas_device_lock itself.
603. *
604. * This searches for a sas_device based on handle, then returns the
605. * sas_device object with its reference count incremented.
  606. */
  607. struct _sas_device *
  608. mpt3sas_get_sdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  609. {
  610. struct _sas_device *sas_device;
  611. unsigned long flags;
  612. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  613. sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
  614. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  615. return sas_device;
  616. }
  617. /**
  618. * _scsih_display_enclosure_chassis_info - display device location info
  619. * @ioc: per adapter object
  620. * @sas_device: per sas device object
  621. * @sdev: scsi device struct
  622. * @starget: scsi target struct
  623. *
  624. * Returns nothing.
  625. */
  626. static void
  627. _scsih_display_enclosure_chassis_info(struct MPT3SAS_ADAPTER *ioc,
  628. struct _sas_device *sas_device, struct scsi_device *sdev,
  629. struct scsi_target *starget)
  630. {
  631. if (sdev) {
  632. if (sas_device->enclosure_handle != 0)
  633. sdev_printk(KERN_INFO, sdev,
  634. "enclosure logical id (0x%016llx), slot(%d) \n",
  635. (unsigned long long)
  636. sas_device->enclosure_logical_id,
  637. sas_device->slot);
  638. if (sas_device->connector_name[0] != '\0')
  639. sdev_printk(KERN_INFO, sdev,
  640. "enclosure level(0x%04x), connector name( %s)\n",
  641. sas_device->enclosure_level,
  642. sas_device->connector_name);
  643. if (sas_device->is_chassis_slot_valid)
  644. sdev_printk(KERN_INFO, sdev, "chassis slot(0x%04x)\n",
  645. sas_device->chassis_slot);
  646. } else if (starget) {
  647. if (sas_device->enclosure_handle != 0)
  648. starget_printk(KERN_INFO, starget,
  649. "enclosure logical id(0x%016llx), slot(%d) \n",
  650. (unsigned long long)
  651. sas_device->enclosure_logical_id,
  652. sas_device->slot);
  653. if (sas_device->connector_name[0] != '\0')
  654. starget_printk(KERN_INFO, starget,
  655. "enclosure level(0x%04x), connector name( %s)\n",
  656. sas_device->enclosure_level,
  657. sas_device->connector_name);
  658. if (sas_device->is_chassis_slot_valid)
  659. starget_printk(KERN_INFO, starget,
  660. "chassis slot(0x%04x)\n",
  661. sas_device->chassis_slot);
  662. } else {
  663. if (sas_device->enclosure_handle != 0)
  664. pr_info(MPT3SAS_FMT
  665. "enclosure logical id(0x%016llx), slot(%d) \n",
  666. ioc->name, (unsigned long long)
  667. sas_device->enclosure_logical_id,
  668. sas_device->slot);
  669. if (sas_device->connector_name[0] != '\0')
  670. pr_info(MPT3SAS_FMT
  671. "enclosure level(0x%04x), connector name( %s)\n",
  672. ioc->name, sas_device->enclosure_level,
  673. sas_device->connector_name);
  674. if (sas_device->is_chassis_slot_valid)
  675. pr_info(MPT3SAS_FMT "chassis slot(0x%04x)\n",
  676. ioc->name, sas_device->chassis_slot);
  677. }
  678. }
  679. /**
  680. * _scsih_sas_device_remove - remove sas_device from list.
  681. * @ioc: per adapter object
  682. * @sas_device: the sas_device object
  683. * Context: This function will acquire ioc->sas_device_lock.
  684. *
  685. * If sas_device is on the list, remove it and decrement its reference count.
  686. */
  687. static void
  688. _scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
  689. struct _sas_device *sas_device)
  690. {
  691. unsigned long flags;
  692. if (!sas_device)
  693. return;
  694. pr_info(MPT3SAS_FMT
  695. "removing handle(0x%04x), sas_addr(0x%016llx)\n",
  696. ioc->name, sas_device->handle,
  697. (unsigned long long) sas_device->sas_address);
  698. _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
  699. /*
  700. * The lock serializes access to the list, but we still need to verify
  701. * that nobody removed the entry while we were waiting on the lock.
  702. */
  703. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  704. if (!list_empty(&sas_device->list)) {
  705. list_del_init(&sas_device->list);
  706. sas_device_put(sas_device);
  707. }
  708. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  709. }
  710. /**
  711. * _scsih_device_remove_by_handle - removing device object by handle
  712. * @ioc: per adapter object
  713. * @handle: device handle
  714. *
  715. * Return nothing.
  716. */
  717. static void
  718. _scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  719. {
  720. struct _sas_device *sas_device;
  721. unsigned long flags;
  722. if (ioc->shost_recovery)
  723. return;
  724. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  725. sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
  726. if (sas_device) {
  727. list_del_init(&sas_device->list);
  728. sas_device_put(sas_device);
  729. }
  730. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  731. if (sas_device) {
  732. _scsih_remove_device(ioc, sas_device);
  733. sas_device_put(sas_device);
  734. }
  735. }
  736. /**
  737. * mpt3sas_device_remove_by_sas_address - removing device object by sas address
  738. * @ioc: per adapter object
  739. * @sas_address: device sas_address
  740. *
  741. * Return nothing.
  742. */
  743. void
  744. mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
  745. u64 sas_address)
  746. {
  747. struct _sas_device *sas_device;
  748. unsigned long flags;
  749. if (ioc->shost_recovery)
  750. return;
  751. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  752. sas_device = __mpt3sas_get_sdev_by_addr(ioc, sas_address);
  753. if (sas_device) {
  754. list_del_init(&sas_device->list);
  755. sas_device_put(sas_device);
  756. }
  757. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  758. if (sas_device) {
  759. _scsih_remove_device(ioc, sas_device);
  760. sas_device_put(sas_device);
  761. }
  762. }
  763. /**
  764. * _scsih_sas_device_add - insert sas_device to the list.
  765. * @ioc: per adapter object
  766. * @sas_device: the sas_device object
  767. * Context: This function will acquire ioc->sas_device_lock.
  768. *
  769. * Adding new object to the ioc->sas_device_list.
  770. */
  771. static void
  772. _scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
  773. struct _sas_device *sas_device)
  774. {
  775. unsigned long flags;
  776. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  777. "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
  778. ioc->name, __func__, sas_device->handle,
  779. (unsigned long long)sas_device->sas_address));
  780. dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
  781. NULL, NULL));
  782. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  783. sas_device_get(sas_device);
  784. list_add_tail(&sas_device->list, &ioc->sas_device_list);
  785. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  786. if (ioc->hide_drives) {
  787. clear_bit(sas_device->handle, ioc->pend_os_device_add);
  788. return;
  789. }
  790. if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
  791. sas_device->sas_address_parent)) {
  792. _scsih_sas_device_remove(ioc, sas_device);
  793. } else if (!sas_device->starget) {
  794. /*
795. * When async scanning is enabled, it is not possible to remove
796. * devices while scanning is turned on, due to an oops in
  797. * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
  798. */
  799. if (!ioc->is_driver_loading) {
  800. mpt3sas_transport_port_remove(ioc,
  801. sas_device->sas_address,
  802. sas_device->sas_address_parent);
  803. _scsih_sas_device_remove(ioc, sas_device);
  804. }
  805. } else
  806. clear_bit(sas_device->handle, ioc->pend_os_device_add);
  807. }
  808. /**
  809. * _scsih_sas_device_init_add - insert sas_device to the list.
  810. * @ioc: per adapter object
  811. * @sas_device: the sas_device object
  812. * Context: This function will acquire ioc->sas_device_lock.
  813. *
  814. * Adding new object at driver load time to the ioc->sas_device_init_list.
  815. */
  816. static void
  817. _scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
  818. struct _sas_device *sas_device)
  819. {
  820. unsigned long flags;
  821. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  822. "%s: handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name,
  823. __func__, sas_device->handle,
  824. (unsigned long long)sas_device->sas_address));
  825. dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
  826. NULL, NULL));
  827. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  828. sas_device_get(sas_device);
  829. list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
  830. _scsih_determine_boot_device(ioc, sas_device, 0);
  831. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  832. }
  833. static struct _pcie_device *
  834. __mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
  835. {
  836. struct _pcie_device *pcie_device;
  837. assert_spin_locked(&ioc->pcie_device_lock);
  838. list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
  839. if (pcie_device->wwid == wwid)
  840. goto found_device;
  841. list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
  842. if (pcie_device->wwid == wwid)
  843. goto found_device;
  844. return NULL;
  845. found_device:
  846. pcie_device_get(pcie_device);
  847. return pcie_device;
  848. }
  849. /**
  850. * mpt3sas_get_pdev_by_wwid - pcie device search
  851. * @ioc: per adapter object
  852. * @wwid: wwid
  853. *
  854. * Context: This function will acquire ioc->pcie_device_lock and will release
855. * it before returning the pcie_device object.
  856. *
  857. * This searches for pcie_device based on wwid, then return pcie_device object.
  858. */
  859. static struct _pcie_device *
  860. mpt3sas_get_pdev_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
  861. {
  862. struct _pcie_device *pcie_device;
  863. unsigned long flags;
  864. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  865. pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
  866. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  867. return pcie_device;
  868. }
  869. static struct _pcie_device *
  870. __mpt3sas_get_pdev_by_idchannel(struct MPT3SAS_ADAPTER *ioc, int id,
  871. int channel)
  872. {
  873. struct _pcie_device *pcie_device;
  874. assert_spin_locked(&ioc->pcie_device_lock);
  875. list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
  876. if (pcie_device->id == id && pcie_device->channel == channel)
  877. goto found_device;
  878. list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
  879. if (pcie_device->id == id && pcie_device->channel == channel)
  880. goto found_device;
  881. return NULL;
  882. found_device:
  883. pcie_device_get(pcie_device);
  884. return pcie_device;
  885. }
  886. static struct _pcie_device *
  887. __mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  888. {
  889. struct _pcie_device *pcie_device;
  890. assert_spin_locked(&ioc->pcie_device_lock);
  891. list_for_each_entry(pcie_device, &ioc->pcie_device_list, list)
  892. if (pcie_device->handle == handle)
  893. goto found_device;
  894. list_for_each_entry(pcie_device, &ioc->pcie_device_init_list, list)
  895. if (pcie_device->handle == handle)
  896. goto found_device;
  897. return NULL;
  898. found_device:
  899. pcie_device_get(pcie_device);
  900. return pcie_device;
  901. }
  902. /**
  903. * mpt3sas_get_pdev_by_handle - pcie device search
  904. * @ioc: per adapter object
  905. * @handle: Firmware device handle
  906. *
  907. * Context: This function will acquire ioc->pcie_device_lock and will release
908. * it before returning the pcie_device object.
  909. *
  910. * This searches for pcie_device based on handle, then return pcie_device
  911. * object.
  912. */
  913. struct _pcie_device *
  914. mpt3sas_get_pdev_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  915. {
  916. struct _pcie_device *pcie_device;
  917. unsigned long flags;
  918. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  919. pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
  920. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  921. return pcie_device;
  922. }
  923. /**
  924. * _scsih_pcie_device_remove - remove pcie_device from list.
  925. * @ioc: per adapter object
  926. * @pcie_device: the pcie_device object
  927. * Context: This function will acquire ioc->pcie_device_lock.
  928. *
  929. * If pcie_device is on the list, remove it and decrement its reference count.
  930. */
  931. static void
  932. _scsih_pcie_device_remove(struct MPT3SAS_ADAPTER *ioc,
  933. struct _pcie_device *pcie_device)
  934. {
  935. unsigned long flags;
  936. int was_on_pcie_device_list = 0;
  937. if (!pcie_device)
  938. return;
  939. pr_info(MPT3SAS_FMT
  940. "removing handle(0x%04x), wwid(0x%016llx)\n",
  941. ioc->name, pcie_device->handle,
  942. (unsigned long long) pcie_device->wwid);
  943. if (pcie_device->enclosure_handle != 0)
  944. pr_info(MPT3SAS_FMT
  945. "removing enclosure logical id(0x%016llx), slot(%d)\n",
  946. ioc->name,
  947. (unsigned long long)pcie_device->enclosure_logical_id,
  948. pcie_device->slot);
  949. if (pcie_device->connector_name[0] != '\0')
  950. pr_info(MPT3SAS_FMT
  951. "removing enclosure level(0x%04x), connector name( %s)\n",
  952. ioc->name, pcie_device->enclosure_level,
  953. pcie_device->connector_name);
  954. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  955. if (!list_empty(&pcie_device->list)) {
  956. list_del_init(&pcie_device->list);
  957. was_on_pcie_device_list = 1;
  958. }
  959. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  960. if (was_on_pcie_device_list) {
  961. kfree(pcie_device->serial_number);
  962. pcie_device_put(pcie_device);
  963. }
  964. }
  965. /**
  966. * _scsih_pcie_device_remove_by_handle - removing pcie device object by handle
  967. * @ioc: per adapter object
  968. * @handle: device handle
  969. *
  970. * Return nothing.
  971. */
  972. static void
  973. _scsih_pcie_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  974. {
  975. struct _pcie_device *pcie_device;
  976. unsigned long flags;
  977. int was_on_pcie_device_list = 0;
  978. if (ioc->shost_recovery)
  979. return;
  980. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  981. pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
  982. if (pcie_device) {
  983. if (!list_empty(&pcie_device->list)) {
  984. list_del_init(&pcie_device->list);
  985. was_on_pcie_device_list = 1;
  986. pcie_device_put(pcie_device);
  987. }
  988. }
  989. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  990. if (was_on_pcie_device_list) {
  991. _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
  992. pcie_device_put(pcie_device);
  993. }
  994. }
  995. /**
  996. * _scsih_pcie_device_add - add pcie_device object
  997. * @ioc: per adapter object
  998. * @pcie_device: pcie_device object
  999. *
1000. * This is added to the pcie_device_list linked list.
  1001. */
  1002. static void
  1003. _scsih_pcie_device_add(struct MPT3SAS_ADAPTER *ioc,
  1004. struct _pcie_device *pcie_device)
  1005. {
  1006. unsigned long flags;
  1007. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  1008. "%s: handle (0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
  1009. pcie_device->handle, (unsigned long long)pcie_device->wwid));
  1010. if (pcie_device->enclosure_handle != 0)
  1011. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  1012. "%s: enclosure logical id(0x%016llx), slot( %d)\n",
  1013. ioc->name, __func__,
  1014. (unsigned long long)pcie_device->enclosure_logical_id,
  1015. pcie_device->slot));
  1016. if (pcie_device->connector_name[0] != '\0')
  1017. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  1018. "%s: enclosure level(0x%04x), connector name( %s)\n",
  1019. ioc->name, __func__, pcie_device->enclosure_level,
  1020. pcie_device->connector_name));
  1021. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  1022. pcie_device_get(pcie_device);
  1023. list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
  1024. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  1025. if (scsi_add_device(ioc->shost, PCIE_CHANNEL, pcie_device->id, 0)) {
  1026. _scsih_pcie_device_remove(ioc, pcie_device);
  1027. } else if (!pcie_device->starget) {
  1028. if (!ioc->is_driver_loading) {
  1029. /*TODO-- Need to find out whether this condition will occur or not*/
  1030. clear_bit(pcie_device->handle, ioc->pend_os_device_add);
  1031. }
  1032. } else
  1033. clear_bit(pcie_device->handle, ioc->pend_os_device_add);
  1034. }
1035. /**
  1036. * _scsih_pcie_device_init_add - insert pcie_device to the init list.
  1037. * @ioc: per adapter object
  1038. * @pcie_device: the pcie_device object
  1039. * Context: This function will acquire ioc->pcie_device_lock.
  1040. *
  1041. * Adding new object at driver load time to the ioc->pcie_device_init_list.
  1042. */
  1043. static void
  1044. _scsih_pcie_device_init_add(struct MPT3SAS_ADAPTER *ioc,
  1045. struct _pcie_device *pcie_device)
  1046. {
  1047. unsigned long flags;
  1048. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  1049. "%s: handle (0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
  1050. pcie_device->handle, (unsigned long long)pcie_device->wwid));
  1051. if (pcie_device->enclosure_handle != 0)
  1052. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  1053. "%s: enclosure logical id(0x%016llx), slot( %d)\n",
  1054. ioc->name, __func__,
  1055. (unsigned long long)pcie_device->enclosure_logical_id,
  1056. pcie_device->slot));
  1057. if (pcie_device->connector_name[0] != '\0')
  1058. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  1059. "%s: enclosure level(0x%04x), connector name( %s)\n",
  1060. ioc->name, __func__, pcie_device->enclosure_level,
  1061. pcie_device->connector_name));
  1062. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  1063. pcie_device_get(pcie_device);
  1064. list_add_tail(&pcie_device->list, &ioc->pcie_device_init_list);
  1065. _scsih_determine_boot_device(ioc, pcie_device, PCIE_CHANNEL);
  1066. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  1067. }
  1068. /**
  1069. * _scsih_raid_device_find_by_id - raid device search
  1070. * @ioc: per adapter object
1071. * @id: target id of the raid device
1072. * @channel: channel of the raid device
  1073. * Context: Calling function should acquire ioc->raid_device_lock
  1074. *
  1075. * This searches for raid_device based on target id, then return raid_device
  1076. * object.
  1077. */
  1078. static struct _raid_device *
  1079. _scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
  1080. {
  1081. struct _raid_device *raid_device, *r;
  1082. r = NULL;
  1083. list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
  1084. if (raid_device->id == id && raid_device->channel == channel) {
  1085. r = raid_device;
  1086. goto out;
  1087. }
  1088. }
  1089. out:
  1090. return r;
  1091. }
  1092. /**
  1093. * mpt3sas_raid_device_find_by_handle - raid device search
  1094. * @ioc: per adapter object
  1095. * @handle: sas device handle (assigned by firmware)
  1096. * Context: Calling function should acquire ioc->raid_device_lock
  1097. *
  1098. * This searches for raid_device based on handle, then return raid_device
  1099. * object.
  1100. */
  1101. struct _raid_device *
  1102. mpt3sas_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  1103. {
  1104. struct _raid_device *raid_device, *r;
  1105. r = NULL;
  1106. list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
  1107. if (raid_device->handle != handle)
  1108. continue;
  1109. r = raid_device;
  1110. goto out;
  1111. }
  1112. out:
  1113. return r;
  1114. }
  1115. /**
  1116. * _scsih_raid_device_find_by_wwid - raid device search
  1117. * @ioc: per adapter object
1118. * @wwid: wwid of the raid device (world wide identifier)
  1119. * Context: Calling function should acquire ioc->raid_device_lock
  1120. *
  1121. * This searches for raid_device based on wwid, then return raid_device
  1122. * object.
  1123. */
  1124. static struct _raid_device *
  1125. _scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
  1126. {
  1127. struct _raid_device *raid_device, *r;
  1128. r = NULL;
  1129. list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
  1130. if (raid_device->wwid != wwid)
  1131. continue;
  1132. r = raid_device;
  1133. goto out;
  1134. }
  1135. out:
  1136. return r;
  1137. }
  1138. /**
  1139. * _scsih_raid_device_add - add raid_device object
  1140. * @ioc: per adapter object
  1141. * @raid_device: raid_device object
  1142. *
1143. * This is added to the raid_device_list linked list.
  1144. */
  1145. static void
  1146. _scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
  1147. struct _raid_device *raid_device)
  1148. {
  1149. unsigned long flags;
  1150. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  1151. "%s: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
  1152. raid_device->handle, (unsigned long long)raid_device->wwid));
  1153. spin_lock_irqsave(&ioc->raid_device_lock, flags);
  1154. list_add_tail(&raid_device->list, &ioc->raid_device_list);
  1155. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  1156. }
  1157. /**
  1158. * _scsih_raid_device_remove - delete raid_device object
  1159. * @ioc: per adapter object
  1160. * @raid_device: raid_device object
  1161. *
  1162. */
  1163. static void
  1164. _scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
  1165. struct _raid_device *raid_device)
  1166. {
  1167. unsigned long flags;
  1168. spin_lock_irqsave(&ioc->raid_device_lock, flags);
  1169. list_del(&raid_device->list);
  1170. kfree(raid_device);
  1171. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  1172. }
  1173. /**
  1174. * mpt3sas_scsih_expander_find_by_handle - expander device search
  1175. * @ioc: per adapter object
  1176. * @handle: expander handle (assigned by firmware)
1177. * Context: Calling function should acquire ioc->sas_node_lock
  1178. *
  1179. * This searches for expander device based on handle, then returns the
  1180. * sas_node object.
  1181. */
  1182. struct _sas_node *
  1183. mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  1184. {
  1185. struct _sas_node *sas_expander, *r;
  1186. r = NULL;
  1187. list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
  1188. if (sas_expander->handle != handle)
  1189. continue;
  1190. r = sas_expander;
  1191. goto out;
  1192. }
  1193. out:
  1194. return r;
  1195. }
  1196. /**
1197. * mpt3sas_scsih_enclosure_find_by_handle - enclosure device search
  1198. * @ioc: per adapter object
  1199. * @handle: enclosure handle (assigned by firmware)
  1200. * Context: Calling function should acquire ioc->sas_device_lock
  1201. *
  1202. * This searches for enclosure device based on handle, then returns the
  1203. * enclosure object.
  1204. */
  1205. static struct _enclosure_node *
  1206. mpt3sas_scsih_enclosure_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  1207. {
  1208. struct _enclosure_node *enclosure_dev, *r;
  1209. r = NULL;
  1210. list_for_each_entry(enclosure_dev, &ioc->enclosure_list, list) {
  1211. if (le16_to_cpu(enclosure_dev->pg0.EnclosureHandle) != handle)
  1212. continue;
  1213. r = enclosure_dev;
  1214. goto out;
  1215. }
  1216. out:
  1217. return r;
  1218. }
  1219. /**
  1220. * mpt3sas_scsih_expander_find_by_sas_address - expander device search
  1221. * @ioc: per adapter object
  1222. * @sas_address: sas address
  1223. * Context: Calling function should acquire ioc->sas_node_lock.
  1224. *
  1225. * This searches for expander device based on sas_address, then returns the
  1226. * sas_node object.
  1227. */
  1228. struct _sas_node *
  1229. mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
  1230. u64 sas_address)
  1231. {
  1232. struct _sas_node *sas_expander, *r;
  1233. r = NULL;
  1234. list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
  1235. if (sas_expander->sas_address != sas_address)
  1236. continue;
  1237. r = sas_expander;
  1238. goto out;
  1239. }
  1240. out:
  1241. return r;
  1242. }
  1243. /**
  1244. * _scsih_expander_node_add - insert expander device to the list.
  1245. * @ioc: per adapter object
1246. * @sas_expander: the sas_node object
  1247. * Context: This function will acquire ioc->sas_node_lock.
  1248. *
  1249. * Adding new object to the ioc->sas_expander_list.
  1250. *
  1251. * Return nothing.
  1252. */
  1253. static void
  1254. _scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
  1255. struct _sas_node *sas_expander)
  1256. {
  1257. unsigned long flags;
  1258. spin_lock_irqsave(&ioc->sas_node_lock, flags);
  1259. list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
  1260. spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
  1261. }
  1262. /**
  1263. * _scsih_is_end_device - determines if device is an end device
  1264. * @device_info: bitfield providing information about the device.
  1265. * Context: none
  1266. *
  1267. * Returns 1 if end device.
  1268. */
  1269. static int
  1270. _scsih_is_end_device(u32 device_info)
  1271. {
  1272. if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
  1273. ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
  1274. (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
  1275. (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
  1276. return 1;
  1277. else
  1278. return 0;
  1279. }
  1280. /**
  1281. * _scsih_is_nvme_device - determines if device is an nvme device
  1282. * @device_info: bitfield providing information about the device.
  1283. * Context: none
  1284. *
  1285. * Returns 1 if nvme device.
  1286. */
  1287. static int
  1288. _scsih_is_nvme_device(u32 device_info)
  1289. {
  1290. if ((device_info & MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE)
  1291. == MPI26_PCIE_DEVINFO_NVME)
  1292. return 1;
  1293. else
  1294. return 0;
  1295. }
  1296. /**
  1297. * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
  1298. * @ioc: per adapter object
  1299. * @smid: system request message index
  1300. *
1301. * Returns the scmd pointer stored for the given smid, or NULL when the
1302. * smid does not map to an active scsi command.
  1303. */
  1304. struct scsi_cmnd *
  1305. mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  1306. {
  1307. struct scsi_cmnd *scmd = NULL;
  1308. struct scsiio_tracker *st;
  1309. if (smid > 0 &&
  1310. smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
  1311. u32 unique_tag = smid - 1;
  1312. scmd = scsi_host_find_tag(ioc->shost, unique_tag);
  1313. if (scmd) {
  1314. st = scsi_cmd_priv(scmd);
  1315. if (st->cb_idx == 0xFF)
  1316. scmd = NULL;
  1317. }
  1318. }
  1319. return scmd;
  1320. }
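/*
 * Editor's note: the lookup above appears to rely on SCSI I/O smids being
 * assigned as (block layer tag + 1), so "smid - 1" recovers the unique tag
 * that scsi_host_find_tag() expects, and a cb_idx of 0xFF marks a tracker
 * with no active command. A minimal caller sketch (illustrative only):
 *
 *	struct scsi_cmnd *scmd;
 *
 *	scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
 *	if (!scmd)
 *		return;		// smid has no outstanding SCSI command
 */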
  1321. /**
  1322. * scsih_change_queue_depth - setting device queue depth
  1323. * @sdev: scsi device struct
  1324. * @qdepth: requested queue depth
  1325. *
  1326. * Returns queue depth.
  1327. */
  1328. static int
  1329. scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
  1330. {
  1331. struct Scsi_Host *shost = sdev->host;
  1332. int max_depth;
  1333. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  1334. struct MPT3SAS_DEVICE *sas_device_priv_data;
  1335. struct MPT3SAS_TARGET *sas_target_priv_data;
  1336. struct _sas_device *sas_device;
  1337. unsigned long flags;
  1338. max_depth = shost->can_queue;
  1339. /* limit max device queue for SATA to 32 */
  1340. sas_device_priv_data = sdev->hostdata;
  1341. if (!sas_device_priv_data)
  1342. goto not_sata;
  1343. sas_target_priv_data = sas_device_priv_data->sas_target;
  1344. if (!sas_target_priv_data)
  1345. goto not_sata;
  1346. if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
  1347. goto not_sata;
  1348. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  1349. sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
  1350. if (sas_device) {
  1351. if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
  1352. max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
  1353. sas_device_put(sas_device);
  1354. }
  1355. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  1356. not_sata:
  1357. if (!sdev->tagged_supported)
  1358. max_depth = 1;
  1359. if (qdepth > max_depth)
  1360. qdepth = max_depth;
  1361. return scsi_change_queue_depth(sdev, qdepth);
  1362. }
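/*
 * Editor's note (behavioural sketch): the routine above is effectively a
 * clamp. Assuming shost->can_queue were 1024, a requested depth of 64 on a
 * SATA end device would be reduced to MPT3SAS_SATA_QUEUE_DEPTH (32, per the
 * comment above), an untagged device is always forced to a depth of 1, and
 * all other devices are limited to shost->can_queue before the value is
 * handed to scsi_change_queue_depth().
 */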
  1363. /**
  1364. * scsih_target_alloc - target add routine
  1365. * @starget: scsi target struct
  1366. *
  1367. * Returns 0 if ok. Any other return is assumed to be an error and
  1368. * the device is ignored.
  1369. */
  1370. static int
  1371. scsih_target_alloc(struct scsi_target *starget)
  1372. {
  1373. struct Scsi_Host *shost = dev_to_shost(&starget->dev);
  1374. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  1375. struct MPT3SAS_TARGET *sas_target_priv_data;
  1376. struct _sas_device *sas_device;
  1377. struct _raid_device *raid_device;
  1378. struct _pcie_device *pcie_device;
  1379. unsigned long flags;
  1380. struct sas_rphy *rphy;
  1381. sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data),
  1382. GFP_KERNEL);
  1383. if (!sas_target_priv_data)
  1384. return -ENOMEM;
  1385. starget->hostdata = sas_target_priv_data;
  1386. sas_target_priv_data->starget = starget;
  1387. sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
  1388. /* RAID volumes */
  1389. if (starget->channel == RAID_CHANNEL) {
  1390. spin_lock_irqsave(&ioc->raid_device_lock, flags);
  1391. raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
  1392. starget->channel);
  1393. if (raid_device) {
  1394. sas_target_priv_data->handle = raid_device->handle;
  1395. sas_target_priv_data->sas_address = raid_device->wwid;
  1396. sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
  1397. if (ioc->is_warpdrive)
  1398. sas_target_priv_data->raid_device = raid_device;
  1399. raid_device->starget = starget;
  1400. }
  1401. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  1402. return 0;
  1403. }
  1404. /* PCIe devices */
  1405. if (starget->channel == PCIE_CHANNEL) {
  1406. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  1407. pcie_device = __mpt3sas_get_pdev_by_idchannel(ioc, starget->id,
  1408. starget->channel);
  1409. if (pcie_device) {
  1410. sas_target_priv_data->handle = pcie_device->handle;
  1411. sas_target_priv_data->sas_address = pcie_device->wwid;
  1412. sas_target_priv_data->pcie_dev = pcie_device;
  1413. pcie_device->starget = starget;
  1414. pcie_device->id = starget->id;
  1415. pcie_device->channel = starget->channel;
  1416. sas_target_priv_data->flags |=
  1417. MPT_TARGET_FLAGS_PCIE_DEVICE;
  1418. if (pcie_device->fast_path)
  1419. sas_target_priv_data->flags |=
  1420. MPT_TARGET_FASTPATH_IO;
  1421. }
  1422. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  1423. return 0;
  1424. }
  1425. /* sas/sata devices */
  1426. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  1427. rphy = dev_to_rphy(starget->dev.parent);
  1428. sas_device = __mpt3sas_get_sdev_by_addr(ioc,
  1429. rphy->identify.sas_address);
  1430. if (sas_device) {
  1431. sas_target_priv_data->handle = sas_device->handle;
  1432. sas_target_priv_data->sas_address = sas_device->sas_address;
  1433. sas_target_priv_data->sas_dev = sas_device;
  1434. sas_device->starget = starget;
  1435. sas_device->id = starget->id;
  1436. sas_device->channel = starget->channel;
  1437. if (test_bit(sas_device->handle, ioc->pd_handles))
  1438. sas_target_priv_data->flags |=
  1439. MPT_TARGET_FLAGS_RAID_COMPONENT;
  1440. if (sas_device->fast_path)
  1441. sas_target_priv_data->flags |=
  1442. MPT_TARGET_FASTPATH_IO;
  1443. }
  1444. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  1445. return 0;
  1446. }
  1447. /**
  1448. * scsih_target_destroy - target destroy routine
  1449. * @starget: scsi target struct
  1450. *
  1451. * Returns nothing.
  1452. */
  1453. static void
  1454. scsih_target_destroy(struct scsi_target *starget)
  1455. {
  1456. struct Scsi_Host *shost = dev_to_shost(&starget->dev);
  1457. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  1458. struct MPT3SAS_TARGET *sas_target_priv_data;
  1459. struct _sas_device *sas_device;
  1460. struct _raid_device *raid_device;
  1461. struct _pcie_device *pcie_device;
  1462. unsigned long flags;
  1463. struct sas_rphy *rphy;
  1464. sas_target_priv_data = starget->hostdata;
  1465. if (!sas_target_priv_data)
  1466. return;
  1467. if (starget->channel == RAID_CHANNEL) {
  1468. spin_lock_irqsave(&ioc->raid_device_lock, flags);
  1469. raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
  1470. starget->channel);
  1471. if (raid_device) {
  1472. raid_device->starget = NULL;
  1473. raid_device->sdev = NULL;
  1474. }
  1475. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  1476. goto out;
  1477. }
  1478. if (starget->channel == PCIE_CHANNEL) {
  1479. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  1480. pcie_device = __mpt3sas_get_pdev_from_target(ioc,
  1481. sas_target_priv_data);
  1482. if (pcie_device && (pcie_device->starget == starget) &&
  1483. (pcie_device->id == starget->id) &&
  1484. (pcie_device->channel == starget->channel))
  1485. pcie_device->starget = NULL;
  1486. if (pcie_device) {
  1487. /*
1488. * Corresponding get() is in scsih_target_alloc()
  1489. */
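/*
 * Editor's note: the double put below is intentional; the first balances the
 * reference taken by __mpt3sas_get_pdev_from_target() earlier in this
 * function, the second drops the long-lived reference taken when the target
 * was allocated. The sas_device branch further down follows the same pattern.
 */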
  1490. sas_target_priv_data->pcie_dev = NULL;
  1491. pcie_device_put(pcie_device);
  1492. pcie_device_put(pcie_device);
  1493. }
  1494. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  1495. goto out;
  1496. }
  1497. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  1498. rphy = dev_to_rphy(starget->dev.parent);
  1499. sas_device = __mpt3sas_get_sdev_from_target(ioc, sas_target_priv_data);
  1500. if (sas_device && (sas_device->starget == starget) &&
  1501. (sas_device->id == starget->id) &&
  1502. (sas_device->channel == starget->channel))
  1503. sas_device->starget = NULL;
  1504. if (sas_device) {
  1505. /*
1506. * Corresponding get() is in scsih_target_alloc()
  1507. */
  1508. sas_target_priv_data->sas_dev = NULL;
  1509. sas_device_put(sas_device);
  1510. sas_device_put(sas_device);
  1511. }
  1512. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  1513. out:
  1514. kfree(sas_target_priv_data);
  1515. starget->hostdata = NULL;
  1516. }
  1517. /**
  1518. * scsih_slave_alloc - device add routine
  1519. * @sdev: scsi device struct
  1520. *
  1521. * Returns 0 if ok. Any other return is assumed to be an error and
  1522. * the device is ignored.
  1523. */
  1524. static int
  1525. scsih_slave_alloc(struct scsi_device *sdev)
  1526. {
  1527. struct Scsi_Host *shost;
  1528. struct MPT3SAS_ADAPTER *ioc;
  1529. struct MPT3SAS_TARGET *sas_target_priv_data;
  1530. struct MPT3SAS_DEVICE *sas_device_priv_data;
  1531. struct scsi_target *starget;
  1532. struct _raid_device *raid_device;
  1533. struct _sas_device *sas_device;
  1534. struct _pcie_device *pcie_device;
  1535. unsigned long flags;
  1536. sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data),
  1537. GFP_KERNEL);
  1538. if (!sas_device_priv_data)
  1539. return -ENOMEM;
  1540. sas_device_priv_data->lun = sdev->lun;
  1541. sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
  1542. starget = scsi_target(sdev);
  1543. sas_target_priv_data = starget->hostdata;
  1544. sas_target_priv_data->num_luns++;
  1545. sas_device_priv_data->sas_target = sas_target_priv_data;
  1546. sdev->hostdata = sas_device_priv_data;
  1547. if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
  1548. sdev->no_uld_attach = 1;
  1549. shost = dev_to_shost(&starget->dev);
  1550. ioc = shost_priv(shost);
  1551. if (starget->channel == RAID_CHANNEL) {
  1552. spin_lock_irqsave(&ioc->raid_device_lock, flags);
  1553. raid_device = _scsih_raid_device_find_by_id(ioc,
  1554. starget->id, starget->channel);
  1555. if (raid_device)
  1556. raid_device->sdev = sdev; /* raid is single lun */
  1557. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  1558. }
  1559. if (starget->channel == PCIE_CHANNEL) {
  1560. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  1561. pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
  1562. sas_target_priv_data->sas_address);
  1563. if (pcie_device && (pcie_device->starget == NULL)) {
  1564. sdev_printk(KERN_INFO, sdev,
  1565. "%s : pcie_device->starget set to starget @ %d\n",
  1566. __func__, __LINE__);
  1567. pcie_device->starget = starget;
  1568. }
  1569. if (pcie_device)
  1570. pcie_device_put(pcie_device);
  1571. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  1572. } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
  1573. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  1574. sas_device = __mpt3sas_get_sdev_by_addr(ioc,
  1575. sas_target_priv_data->sas_address);
  1576. if (sas_device && (sas_device->starget == NULL)) {
  1577. sdev_printk(KERN_INFO, sdev,
  1578. "%s : sas_device->starget set to starget @ %d\n",
  1579. __func__, __LINE__);
  1580. sas_device->starget = starget;
  1581. }
  1582. if (sas_device)
  1583. sas_device_put(sas_device);
  1584. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  1585. }
  1586. return 0;
  1587. }
  1588. /**
  1589. * scsih_slave_destroy - device destroy routine
  1590. * @sdev: scsi device struct
  1591. *
  1592. * Returns nothing.
  1593. */
  1594. static void
  1595. scsih_slave_destroy(struct scsi_device *sdev)
  1596. {
  1597. struct MPT3SAS_TARGET *sas_target_priv_data;
  1598. struct scsi_target *starget;
  1599. struct Scsi_Host *shost;
  1600. struct MPT3SAS_ADAPTER *ioc;
  1601. struct _sas_device *sas_device;
  1602. struct _pcie_device *pcie_device;
  1603. unsigned long flags;
  1604. if (!sdev->hostdata)
  1605. return;
  1606. starget = scsi_target(sdev);
  1607. sas_target_priv_data = starget->hostdata;
  1608. sas_target_priv_data->num_luns--;
  1609. shost = dev_to_shost(&starget->dev);
  1610. ioc = shost_priv(shost);
  1611. if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
  1612. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  1613. pcie_device = __mpt3sas_get_pdev_from_target(ioc,
  1614. sas_target_priv_data);
  1615. if (pcie_device && !sas_target_priv_data->num_luns)
  1616. pcie_device->starget = NULL;
  1617. if (pcie_device)
  1618. pcie_device_put(pcie_device);
  1619. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  1620. } else if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
  1621. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  1622. sas_device = __mpt3sas_get_sdev_from_target(ioc,
  1623. sas_target_priv_data);
  1624. if (sas_device && !sas_target_priv_data->num_luns)
  1625. sas_device->starget = NULL;
  1626. if (sas_device)
  1627. sas_device_put(sas_device);
  1628. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  1629. }
  1630. kfree(sdev->hostdata);
  1631. sdev->hostdata = NULL;
  1632. }
  1633. /**
  1634. * _scsih_display_sata_capabilities - sata capabilities
  1635. * @ioc: per adapter object
  1636. * @handle: device handle
  1637. * @sdev: scsi device struct
  1638. */
  1639. static void
  1640. _scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
  1641. u16 handle, struct scsi_device *sdev)
  1642. {
  1643. Mpi2ConfigReply_t mpi_reply;
  1644. Mpi2SasDevicePage0_t sas_device_pg0;
  1645. u32 ioc_status;
  1646. u16 flags;
  1647. u32 device_info;
  1648. if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
  1649. MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
  1650. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  1651. ioc->name, __FILE__, __LINE__, __func__);
  1652. return;
  1653. }
  1654. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  1655. MPI2_IOCSTATUS_MASK;
  1656. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  1657. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  1658. ioc->name, __FILE__, __LINE__, __func__);
  1659. return;
  1660. }
  1661. flags = le16_to_cpu(sas_device_pg0.Flags);
  1662. device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
  1663. sdev_printk(KERN_INFO, sdev,
  1664. "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
  1665. "sw_preserve(%s)\n",
  1666. (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
  1667. (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
  1668. (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
  1669. "n",
  1670. (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
  1671. (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
  1672. (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
  1673. }
  1674. /*
  1675. * raid transport support -
1676. * Enabled for SLES11 and newer; in older kernels the driver will panic when
1677. * the driver is unloaded and then loaded again - I believe that the subroutine
1678. * raid_class_release() is not cleaning up properly.
  1679. */
  1680. /**
  1681. * scsih_is_raid - return boolean indicating device is raid volume
1682. * @dev: the device struct object
  1683. */
  1684. static int
  1685. scsih_is_raid(struct device *dev)
  1686. {
  1687. struct scsi_device *sdev = to_scsi_device(dev);
  1688. struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
  1689. if (ioc->is_warpdrive)
  1690. return 0;
  1691. return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
  1692. }
  1693. static int
  1694. scsih_is_nvme(struct device *dev)
  1695. {
  1696. struct scsi_device *sdev = to_scsi_device(dev);
  1697. return (sdev->channel == PCIE_CHANNEL) ? 1 : 0;
  1698. }
  1699. /**
  1700. * scsih_get_resync - get raid volume resync percent complete
1701. * @dev: the device struct object
  1702. */
  1703. static void
  1704. scsih_get_resync(struct device *dev)
  1705. {
  1706. struct scsi_device *sdev = to_scsi_device(dev);
  1707. struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
1708. struct _raid_device *raid_device;
  1709. unsigned long flags;
  1710. Mpi2RaidVolPage0_t vol_pg0;
  1711. Mpi2ConfigReply_t mpi_reply;
  1712. u32 volume_status_flags;
  1713. u8 percent_complete;
  1714. u16 handle;
  1715. percent_complete = 0;
  1716. handle = 0;
  1717. if (ioc->is_warpdrive)
  1718. goto out;
  1719. spin_lock_irqsave(&ioc->raid_device_lock, flags);
  1720. raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
  1721. sdev->channel);
  1722. if (raid_device) {
  1723. handle = raid_device->handle;
  1724. percent_complete = raid_device->percent_complete;
  1725. }
  1726. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  1727. if (!handle)
  1728. goto out;
  1729. if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
  1730. MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
  1731. sizeof(Mpi2RaidVolPage0_t))) {
  1732. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  1733. ioc->name, __FILE__, __LINE__, __func__);
  1734. percent_complete = 0;
  1735. goto out;
  1736. }
  1737. volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
  1738. if (!(volume_status_flags &
  1739. MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
  1740. percent_complete = 0;
  1741. out:
  1742. switch (ioc->hba_mpi_version_belonged) {
  1743. case MPI2_VERSION:
  1744. raid_set_resync(mpt2sas_raid_template, dev, percent_complete);
  1745. break;
  1746. case MPI25_VERSION:
  1747. case MPI26_VERSION:
  1748. raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
  1749. break;
  1750. }
  1751. }
  1752. /**
1753. * scsih_get_state - get raid volume state
1754. * @dev: the device struct object
  1755. */
  1756. static void
  1757. scsih_get_state(struct device *dev)
  1758. {
  1759. struct scsi_device *sdev = to_scsi_device(dev);
  1760. struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
1761. struct _raid_device *raid_device;
  1762. unsigned long flags;
  1763. Mpi2RaidVolPage0_t vol_pg0;
  1764. Mpi2ConfigReply_t mpi_reply;
  1765. u32 volstate;
  1766. enum raid_state state = RAID_STATE_UNKNOWN;
  1767. u16 handle = 0;
  1768. spin_lock_irqsave(&ioc->raid_device_lock, flags);
  1769. raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
  1770. sdev->channel);
  1771. if (raid_device)
  1772. handle = raid_device->handle;
  1773. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  1774. if (!raid_device)
  1775. goto out;
  1776. if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
  1777. MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
  1778. sizeof(Mpi2RaidVolPage0_t))) {
  1779. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  1780. ioc->name, __FILE__, __LINE__, __func__);
  1781. goto out;
  1782. }
  1783. volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
  1784. if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
  1785. state = RAID_STATE_RESYNCING;
  1786. goto out;
  1787. }
  1788. switch (vol_pg0.VolumeState) {
  1789. case MPI2_RAID_VOL_STATE_OPTIMAL:
  1790. case MPI2_RAID_VOL_STATE_ONLINE:
  1791. state = RAID_STATE_ACTIVE;
  1792. break;
  1793. case MPI2_RAID_VOL_STATE_DEGRADED:
  1794. state = RAID_STATE_DEGRADED;
  1795. break;
  1796. case MPI2_RAID_VOL_STATE_FAILED:
  1797. case MPI2_RAID_VOL_STATE_MISSING:
  1798. state = RAID_STATE_OFFLINE;
  1799. break;
  1800. }
  1801. out:
  1802. switch (ioc->hba_mpi_version_belonged) {
  1803. case MPI2_VERSION:
  1804. raid_set_state(mpt2sas_raid_template, dev, state);
  1805. break;
  1806. case MPI25_VERSION:
  1807. case MPI26_VERSION:
  1808. raid_set_state(mpt3sas_raid_template, dev, state);
  1809. break;
  1810. }
  1811. }
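/*
 * For reference, scsih_get_resync() and scsih_get_state() are raid_class
 * transport callbacks. A minimal sketch of how they are wired up, assuming
 * the generic raid_class interface; the actual initializer lives elsewhere
 * in the driver and the struct/variable names shown here are illustrative:
 *
 *	static struct raid_function_template mpt3sas_raid_functions = {
 *		.cookie		= &mpt3sas_driver,
 *		.is_raid	= scsih_is_raid,
 *		.get_resync	= scsih_get_resync,
 *		.get_state	= scsih_get_state,
 *	};
 *
 *	// at load time, producing the template used by raid_set_resync() etc.
 *	mpt3sas_raid_template = raid_class_attach(&mpt3sas_raid_functions);
 *
 * A second template (mpt2sas_raid_template) is attached the same way for
 * MPI2-generation controllers, which is why the switch statements above
 * pick between the two.
 */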
  1812. /**
  1813. * _scsih_set_level - set raid level
* @ioc: per adapter object
1814. * @sdev: scsi device struct
  1815. * @volume_type: volume type
  1816. */
  1817. static void
  1818. _scsih_set_level(struct MPT3SAS_ADAPTER *ioc,
  1819. struct scsi_device *sdev, u8 volume_type)
  1820. {
  1821. enum raid_level level = RAID_LEVEL_UNKNOWN;
  1822. switch (volume_type) {
  1823. case MPI2_RAID_VOL_TYPE_RAID0:
  1824. level = RAID_LEVEL_0;
  1825. break;
  1826. case MPI2_RAID_VOL_TYPE_RAID10:
  1827. level = RAID_LEVEL_10;
  1828. break;
  1829. case MPI2_RAID_VOL_TYPE_RAID1E:
  1830. level = RAID_LEVEL_1E;
  1831. break;
  1832. case MPI2_RAID_VOL_TYPE_RAID1:
  1833. level = RAID_LEVEL_1;
  1834. break;
  1835. }
  1836. switch (ioc->hba_mpi_version_belonged) {
  1837. case MPI2_VERSION:
  1838. raid_set_level(mpt2sas_raid_template,
  1839. &sdev->sdev_gendev, level);
  1840. break;
  1841. case MPI25_VERSION:
  1842. case MPI26_VERSION:
  1843. raid_set_level(mpt3sas_raid_template,
  1844. &sdev->sdev_gendev, level);
  1845. break;
  1846. }
  1847. }
  1848. /**
  1849. * _scsih_get_volume_capabilities - volume capabilities
  1850. * @ioc: per adapter object
1851. * @raid_device: the raid_device object
  1852. *
  1853. * Returns 0 for success, else 1
  1854. */
  1855. static int
  1856. _scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
  1857. struct _raid_device *raid_device)
  1858. {
  1859. Mpi2RaidVolPage0_t *vol_pg0;
  1860. Mpi2RaidPhysDiskPage0_t pd_pg0;
  1861. Mpi2SasDevicePage0_t sas_device_pg0;
  1862. Mpi2ConfigReply_t mpi_reply;
  1863. u16 sz;
  1864. u8 num_pds;
  1865. if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
  1866. &num_pds)) || !num_pds) {
  1867. dfailprintk(ioc, pr_warn(MPT3SAS_FMT
  1868. "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
  1869. __func__));
  1870. return 1;
  1871. }
  1872. raid_device->num_pds = num_pds;
  1873. sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
  1874. sizeof(Mpi2RaidVol0PhysDisk_t));
  1875. vol_pg0 = kzalloc(sz, GFP_KERNEL);
  1876. if (!vol_pg0) {
  1877. dfailprintk(ioc, pr_warn(MPT3SAS_FMT
  1878. "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
  1879. __func__));
  1880. return 1;
  1881. }
  1882. if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
  1883. MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
  1884. dfailprintk(ioc, pr_warn(MPT3SAS_FMT
  1885. "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
  1886. __func__));
  1887. kfree(vol_pg0);
  1888. return 1;
  1889. }
  1890. raid_device->volume_type = vol_pg0->VolumeType;
  1891. /* figure out what the underlying devices are by
  1892. * obtaining the device_info bits for the 1st device
  1893. */
  1894. if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
  1895. &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
  1896. vol_pg0->PhysDisk[0].PhysDiskNum))) {
  1897. if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
  1898. &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
  1899. le16_to_cpu(pd_pg0.DevHandle)))) {
  1900. raid_device->device_info =
  1901. le32_to_cpu(sas_device_pg0.DeviceInfo);
  1902. }
  1903. }
  1904. kfree(vol_pg0);
  1905. return 0;
  1906. }
  1907. /**
  1908. * _scsih_enable_tlr - setting TLR flags
  1909. * @ioc: per adapter object
  1910. * @sdev: scsi device struct
  1911. *
  1912. * Enabling Transaction Layer Retries for tape devices when
  1913. * vpd page 0x90 is present
  1914. *
  1915. */
  1916. static void
  1917. _scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
  1918. {
  1919. /* only for TAPE */
  1920. if (sdev->type != TYPE_TAPE)
  1921. return;
  1922. if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
  1923. return;
  1924. sas_enable_tlr(sdev);
  1925. sdev_printk(KERN_INFO, sdev, "TLR %s\n",
  1926. sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
  1927. return;
  1928. }
  1929. /**
  1930. * scsih_slave_configure - device configure routine.
  1931. * @sdev: scsi device struct
  1932. *
  1933. * Returns 0 if ok. Any other return is assumed to be an error and
  1934. * the device is ignored.
  1935. */
  1936. static int
  1937. scsih_slave_configure(struct scsi_device *sdev)
  1938. {
  1939. struct Scsi_Host *shost = sdev->host;
  1940. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  1941. struct MPT3SAS_DEVICE *sas_device_priv_data;
  1942. struct MPT3SAS_TARGET *sas_target_priv_data;
  1943. struct _sas_device *sas_device;
  1944. struct _pcie_device *pcie_device;
  1945. struct _raid_device *raid_device;
  1946. unsigned long flags;
  1947. int qdepth;
  1948. u8 ssp_target = 0;
  1949. char *ds = "";
  1950. char *r_level = "";
  1951. u16 handle, volume_handle = 0;
  1952. u64 volume_wwid = 0;
  1953. qdepth = 1;
  1954. sas_device_priv_data = sdev->hostdata;
  1955. sas_device_priv_data->configured_lun = 1;
  1956. sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
  1957. sas_target_priv_data = sas_device_priv_data->sas_target;
  1958. handle = sas_target_priv_data->handle;
  1959. /* raid volume handling */
  1960. if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
  1961. spin_lock_irqsave(&ioc->raid_device_lock, flags);
  1962. raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
  1963. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  1964. if (!raid_device) {
  1965. dfailprintk(ioc, pr_warn(MPT3SAS_FMT
  1966. "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
  1967. __LINE__, __func__));
  1968. return 1;
  1969. }
  1970. if (_scsih_get_volume_capabilities(ioc, raid_device)) {
  1971. dfailprintk(ioc, pr_warn(MPT3SAS_FMT
  1972. "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
  1973. __LINE__, __func__));
  1974. return 1;
  1975. }
  1976. /*
  1977. * WARPDRIVE: Initialize the required data for Direct IO
  1978. */
  1979. mpt3sas_init_warpdrive_properties(ioc, raid_device);
  1980. /* RAID Queue Depth Support
  1981. * IS volume = underlying qdepth of drive type, either
  1982. * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
  1983. * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
  1984. */
  1985. if (raid_device->device_info &
  1986. MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
  1987. qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
  1988. ds = "SSP";
  1989. } else {
  1990. qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
  1991. if (raid_device->device_info &
  1992. MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
  1993. ds = "SATA";
  1994. else
  1995. ds = "STP";
  1996. }
  1997. switch (raid_device->volume_type) {
  1998. case MPI2_RAID_VOL_TYPE_RAID0:
  1999. r_level = "RAID0";
  2000. break;
  2001. case MPI2_RAID_VOL_TYPE_RAID1E:
  2002. qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
  2003. if (ioc->manu_pg10.OEMIdentifier &&
  2004. (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
  2005. MFG10_GF0_R10_DISPLAY) &&
  2006. !(raid_device->num_pds % 2))
  2007. r_level = "RAID10";
  2008. else
  2009. r_level = "RAID1E";
  2010. break;
  2011. case MPI2_RAID_VOL_TYPE_RAID1:
  2012. qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
  2013. r_level = "RAID1";
  2014. break;
  2015. case MPI2_RAID_VOL_TYPE_RAID10:
  2016. qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
  2017. r_level = "RAID10";
  2018. break;
  2019. case MPI2_RAID_VOL_TYPE_UNKNOWN:
  2020. default:
  2021. qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
  2022. r_level = "RAIDX";
  2023. break;
  2024. }
  2025. if (!ioc->hide_ir_msg)
  2026. sdev_printk(KERN_INFO, sdev,
  2027. "%s: handle(0x%04x), wwid(0x%016llx),"
  2028. " pd_count(%d), type(%s)\n",
  2029. r_level, raid_device->handle,
  2030. (unsigned long long)raid_device->wwid,
  2031. raid_device->num_pds, ds);
  2032. if (shost->max_sectors > MPT3SAS_RAID_MAX_SECTORS) {
  2033. blk_queue_max_hw_sectors(sdev->request_queue,
  2034. MPT3SAS_RAID_MAX_SECTORS);
  2035. sdev_printk(KERN_INFO, sdev,
  2036. "Set queue's max_sector to: %u\n",
  2037. MPT3SAS_RAID_MAX_SECTORS);
  2038. }
  2039. scsih_change_queue_depth(sdev, qdepth);
  2040. /* raid transport support */
  2041. if (!ioc->is_warpdrive)
  2042. _scsih_set_level(ioc, sdev, raid_device->volume_type);
  2043. return 0;
  2044. }
  2045. /* non-raid handling */
  2046. if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
  2047. if (mpt3sas_config_get_volume_handle(ioc, handle,
  2048. &volume_handle)) {
  2049. dfailprintk(ioc, pr_warn(MPT3SAS_FMT
  2050. "failure at %s:%d/%s()!\n", ioc->name,
  2051. __FILE__, __LINE__, __func__));
  2052. return 1;
  2053. }
  2054. if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
  2055. volume_handle, &volume_wwid)) {
  2056. dfailprintk(ioc, pr_warn(MPT3SAS_FMT
  2057. "failure at %s:%d/%s()!\n", ioc->name,
  2058. __FILE__, __LINE__, __func__));
  2059. return 1;
  2060. }
  2061. }
  2062. /* PCIe handling */
  2063. if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
  2064. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  2065. pcie_device = __mpt3sas_get_pdev_by_wwid(ioc,
  2066. sas_device_priv_data->sas_target->sas_address);
  2067. if (!pcie_device) {
  2068. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  2069. dfailprintk(ioc, pr_warn(MPT3SAS_FMT
  2070. "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
  2071. __LINE__, __func__));
  2072. return 1;
  2073. }
  2074. qdepth = MPT3SAS_NVME_QUEUE_DEPTH;
  2075. ds = "NVMe";
  2076. sdev_printk(KERN_INFO, sdev,
  2077. "%s: handle(0x%04x), wwid(0x%016llx), port(%d)\n",
  2078. ds, handle, (unsigned long long)pcie_device->wwid,
  2079. pcie_device->port_num);
  2080. if (pcie_device->enclosure_handle != 0)
  2081. sdev_printk(KERN_INFO, sdev,
  2082. "%s: enclosure logical id(0x%016llx), slot(%d)\n",
  2083. ds,
  2084. (unsigned long long)pcie_device->enclosure_logical_id,
  2085. pcie_device->slot);
  2086. if (pcie_device->connector_name[0] != '\0')
  2087. sdev_printk(KERN_INFO, sdev,
  2088. "%s: enclosure level(0x%04x),"
  2089. "connector name( %s)\n", ds,
  2090. pcie_device->enclosure_level,
  2091. pcie_device->connector_name);
2092. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
2093. scsih_change_queue_depth(sdev, qdepth);
2094. if (pcie_device->nvme_mdts)
2095. blk_queue_max_hw_sectors(sdev->request_queue,
2096. pcie_device->nvme_mdts/512);
/* drop the reference only after the last pcie_device dereference */
2097. pcie_device_put(pcie_device);
2098. /* Enable the QUEUE_FLAG_NOMERGES flag so that IOs won't be
2099. * merged, which eliminates holes created during the merging
2100. * operation.
2101. */
  2102. blk_queue_flag_set(QUEUE_FLAG_NOMERGES,
  2103. sdev->request_queue);
  2104. blk_queue_virt_boundary(sdev->request_queue,
  2105. ioc->page_size - 1);
  2106. return 0;
  2107. }
  2108. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  2109. sas_device = __mpt3sas_get_sdev_by_addr(ioc,
  2110. sas_device_priv_data->sas_target->sas_address);
  2111. if (!sas_device) {
  2112. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  2113. dfailprintk(ioc, pr_warn(MPT3SAS_FMT
  2114. "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
  2115. __func__));
  2116. return 1;
  2117. }
  2118. sas_device->volume_handle = volume_handle;
  2119. sas_device->volume_wwid = volume_wwid;
  2120. if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
  2121. qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
  2122. ssp_target = 1;
  2123. if (sas_device->device_info &
  2124. MPI2_SAS_DEVICE_INFO_SEP) {
  2125. sdev_printk(KERN_WARNING, sdev,
  2126. "set ignore_delay_remove for handle(0x%04x)\n",
  2127. sas_device_priv_data->sas_target->handle);
  2128. sas_device_priv_data->ignore_delay_remove = 1;
  2129. ds = "SES";
  2130. } else
  2131. ds = "SSP";
  2132. } else {
  2133. qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
  2134. if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
  2135. ds = "STP";
  2136. else if (sas_device->device_info &
  2137. MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
  2138. ds = "SATA";
  2139. }
  2140. sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
  2141. "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
  2142. ds, handle, (unsigned long long)sas_device->sas_address,
  2143. sas_device->phy, (unsigned long long)sas_device->device_name);
  2144. _scsih_display_enclosure_chassis_info(NULL, sas_device, sdev, NULL);
  2145. sas_device_put(sas_device);
  2146. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  2147. if (!ssp_target)
  2148. _scsih_display_sata_capabilities(ioc, handle, sdev);
  2149. scsih_change_queue_depth(sdev, qdepth);
  2150. if (ssp_target) {
  2151. sas_read_port_mode_page(sdev);
  2152. _scsih_enable_tlr(ioc, sdev);
  2153. }
  2154. return 0;
  2155. }
  2156. /**
  2157. * scsih_bios_param - fetch head, sector, cylinder info for a disk
  2158. * @sdev: scsi device struct
  2159. * @bdev: pointer to block device context
  2160. * @capacity: device size (in 512 byte sectors)
  2161. * @params: three element array to place output:
  2162. * params[0] number of heads (max 255)
  2163. * params[1] number of sectors (max 63)
  2164. * params[2] number of cylinders
  2165. *
2166. * Return: always 0.
  2167. */
  2168. static int
  2169. scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
  2170. sector_t capacity, int params[])
  2171. {
  2172. int heads;
  2173. int sectors;
  2174. sector_t cylinders;
  2175. ulong dummy;
  2176. heads = 64;
  2177. sectors = 32;
  2178. dummy = heads * sectors;
  2179. cylinders = capacity;
  2180. sector_div(cylinders, dummy);
  2181. /*
  2182. * Handle extended translation size for logical drives
2183. * > 1 GB
  2184. */
  2185. if ((ulong)capacity >= 0x200000) {
  2186. heads = 255;
  2187. sectors = 63;
  2188. dummy = heads * sectors;
  2189. cylinders = capacity;
  2190. sector_div(cylinders, dummy);
  2191. }
  2192. /* return result */
  2193. params[0] = heads;
  2194. params[1] = sectors;
  2195. params[2] = cylinders;
  2196. return 0;
  2197. }
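/*
 * A worked example of the translation above (numbers illustrative only):
 * a 1 TiB disk has capacity = 2147483648 512-byte sectors, which is
 * >= 0x200000, so the extended geometry is used:
 *
 *	heads = 255;
 *	sectors = 63;
 *	cylinders = 2147483648 / (255 * 63);	// 16065 -> 133674 (truncated)
 *
 * giving params[] = { 255, 63, 133674 }. Disks below 1 GB
 * (capacity < 0x200000 sectors) keep the default 64/32 geometry.
 */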
  2198. /**
  2199. * _scsih_response_code - translation of device response code
  2200. * @ioc: per adapter object
  2201. * @response_code: response code returned by the device
  2202. *
  2203. * Return nothing.
  2204. */
  2205. static void
  2206. _scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
  2207. {
  2208. char *desc;
  2209. switch (response_code) {
  2210. case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
  2211. desc = "task management request completed";
  2212. break;
  2213. case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
  2214. desc = "invalid frame";
  2215. break;
  2216. case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
  2217. desc = "task management request not supported";
  2218. break;
  2219. case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
  2220. desc = "task management request failed";
  2221. break;
  2222. case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
  2223. desc = "task management request succeeded";
  2224. break;
  2225. case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
  2226. desc = "invalid lun";
  2227. break;
  2228. case 0xA:
  2229. desc = "overlapped tag attempted";
  2230. break;
  2231. case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
  2232. desc = "task queued, however not sent to target";
  2233. break;
  2234. default:
  2235. desc = "unknown";
  2236. break;
  2237. }
  2238. pr_warn(MPT3SAS_FMT "response_code(0x%01x): %s\n",
  2239. ioc->name, response_code, desc);
  2240. }
  2241. /**
  2242. * _scsih_tm_done - tm completion routine
  2243. * @ioc: per adapter object
  2244. * @smid: system request message index
  2245. * @msix_index: MSIX table index supplied by the OS
2246. * @reply: reply message frame (lower 32bit addr)
  2247. * Context: none.
  2248. *
  2249. * The callback handler when using scsih_issue_tm.
  2250. *
  2251. * Return 1 meaning mf should be freed from _base_interrupt
  2252. * 0 means the mf is freed from this function.
  2253. */
  2254. static u8
  2255. _scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
  2256. {
  2257. MPI2DefaultReply_t *mpi_reply;
  2258. if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
  2259. return 1;
  2260. if (ioc->tm_cmds.smid != smid)
  2261. return 1;
  2262. ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
  2263. mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
  2264. if (mpi_reply) {
  2265. memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
  2266. ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
  2267. }
  2268. ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
  2269. complete(&ioc->tm_cmds.done);
  2270. return 1;
  2271. }
  2272. /**
  2273. * mpt3sas_scsih_set_tm_flag - set per target tm_busy
  2274. * @ioc: per adapter object
  2275. * @handle: device handle
  2276. *
2277. * During a task management request, we need to freeze the device queue.
  2278. */
  2279. void
  2280. mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  2281. {
  2282. struct MPT3SAS_DEVICE *sas_device_priv_data;
  2283. struct scsi_device *sdev;
  2284. u8 skip = 0;
  2285. shost_for_each_device(sdev, ioc->shost) {
  2286. if (skip)
  2287. continue;
  2288. sas_device_priv_data = sdev->hostdata;
  2289. if (!sas_device_priv_data)
  2290. continue;
  2291. if (sas_device_priv_data->sas_target->handle == handle) {
  2292. sas_device_priv_data->sas_target->tm_busy = 1;
  2293. skip = 1;
  2294. ioc->ignore_loginfos = 1;
  2295. }
  2296. }
  2297. }
  2298. /**
  2299. * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
  2300. * @ioc: per adapter object
  2301. * @handle: device handle
  2302. *
2303. * Once the task management request completes, unfreeze the device queue.
  2304. */
  2305. void
  2306. mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  2307. {
  2308. struct MPT3SAS_DEVICE *sas_device_priv_data;
  2309. struct scsi_device *sdev;
  2310. u8 skip = 0;
  2311. shost_for_each_device(sdev, ioc->shost) {
  2312. if (skip)
  2313. continue;
  2314. sas_device_priv_data = sdev->hostdata;
  2315. if (!sas_device_priv_data)
  2316. continue;
  2317. if (sas_device_priv_data->sas_target->handle == handle) {
  2318. sas_device_priv_data->sas_target->tm_busy = 0;
  2319. skip = 1;
  2320. ioc->ignore_loginfos = 0;
  2321. }
  2322. }
  2323. }
  2324. /**
  2325. * mpt3sas_scsih_issue_tm - main routine for sending tm requests
  2326. * @ioc: per adapter struct
  2327. * @handle: device handle
  2328. * @lun: lun number
2329. * @type: MPI2_SCSITASKMGMT_TASKTYPE_XXX (defined in mpi2_init.h)
  2330. * @smid_task: smid assigned to the task
  2331. * @msix_task: MSIX table index supplied by the OS
  2332. * @timeout: timeout in seconds
  2333. * @tr_method: Target Reset Method
  2334. * Context: user
  2335. *
  2336. * A generic API for sending task management requests to firmware.
  2337. *
  2338. * The callback index is set inside `ioc->tm_cb_idx`.
2339. * The caller is responsible for checking for outstanding commands.
  2340. *
  2341. * Return SUCCESS or FAILED.
  2342. */
  2343. int
  2344. mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
  2345. u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method)
  2346. {
  2347. Mpi2SCSITaskManagementRequest_t *mpi_request;
  2348. Mpi2SCSITaskManagementReply_t *mpi_reply;
  2349. u16 smid = 0;
  2350. u32 ioc_state;
  2351. int rc;
  2352. lockdep_assert_held(&ioc->tm_cmds.mutex);
  2353. if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
2354. pr_info(MPT3SAS_FMT "%s: tm_cmd busy!!!\n",
2355. ioc->name, __func__);
  2356. return FAILED;
  2357. }
  2358. if (ioc->shost_recovery || ioc->remove_host ||
  2359. ioc->pci_error_recovery) {
2360. pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
2361. ioc->name, __func__);
  2362. return FAILED;
  2363. }
  2364. ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
  2365. if (ioc_state & MPI2_DOORBELL_USED) {
  2366. dhsprintk(ioc, pr_info(MPT3SAS_FMT
  2367. "unexpected doorbell active!\n", ioc->name));
  2368. rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
  2369. return (!rc) ? SUCCESS : FAILED;
  2370. }
  2371. if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
  2372. mpt3sas_base_fault_info(ioc, ioc_state &
  2373. MPI2_DOORBELL_DATA_MASK);
  2374. rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
  2375. return (!rc) ? SUCCESS : FAILED;
  2376. }
  2377. smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
  2378. if (!smid) {
  2379. pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
  2380. ioc->name, __func__);
  2381. return FAILED;
  2382. }
  2383. dtmprintk(ioc, pr_info(MPT3SAS_FMT
  2384. "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d), timeout(%d), tr_method(0x%x)\n",
  2385. ioc->name, handle, type, smid_task, timeout, tr_method));
  2386. ioc->tm_cmds.status = MPT3_CMD_PENDING;
  2387. mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
  2388. ioc->tm_cmds.smid = smid;
  2389. memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
  2390. memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
  2391. mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
  2392. mpi_request->DevHandle = cpu_to_le16(handle);
  2393. mpi_request->TaskType = type;
  2394. mpi_request->MsgFlags = tr_method;
  2395. mpi_request->TaskMID = cpu_to_le16(smid_task);
  2396. int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
  2397. mpt3sas_scsih_set_tm_flag(ioc, handle);
  2398. init_completion(&ioc->tm_cmds.done);
  2399. mpt3sas_base_put_smid_hi_priority(ioc, smid, msix_task);
  2400. wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
  2401. if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
  2402. pr_err(MPT3SAS_FMT "%s: timeout\n",
  2403. ioc->name, __func__);
  2404. _debug_dump_mf(mpi_request,
  2405. sizeof(Mpi2SCSITaskManagementRequest_t)/4);
  2406. if (!(ioc->tm_cmds.status & MPT3_CMD_RESET)) {
  2407. rc = mpt3sas_base_hard_reset_handler(ioc,
  2408. FORCE_BIG_HAMMER);
  2409. rc = (!rc) ? SUCCESS : FAILED;
  2410. goto out;
  2411. }
  2412. }
  2413. /* sync IRQs in case those were busy during flush. */
  2414. mpt3sas_base_sync_reply_irqs(ioc);
  2415. if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
  2416. mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
  2417. mpi_reply = ioc->tm_cmds.reply;
  2418. dtmprintk(ioc, pr_info(MPT3SAS_FMT "complete tm: " \
  2419. "ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
  2420. ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
  2421. le32_to_cpu(mpi_reply->IOCLogInfo),
  2422. le32_to_cpu(mpi_reply->TerminationCount)));
  2423. if (ioc->logging_level & MPT_DEBUG_TM) {
  2424. _scsih_response_code(ioc, mpi_reply->ResponseCode);
  2425. if (mpi_reply->IOCStatus)
  2426. _debug_dump_mf(mpi_request,
  2427. sizeof(Mpi2SCSITaskManagementRequest_t)/4);
  2428. }
  2429. }
  2430. rc = SUCCESS;
  2431. out:
  2432. mpt3sas_scsih_clear_tm_flag(ioc, handle);
  2433. ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
  2434. return rc;
  2435. }
  2436. int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
  2437. u64 lun, u8 type, u16 smid_task, u16 msix_task,
  2438. u8 timeout, u8 tr_method)
  2439. {
  2440. int ret;
  2441. mutex_lock(&ioc->tm_cmds.mutex);
  2442. ret = mpt3sas_scsih_issue_tm(ioc, handle, lun, type, smid_task,
  2443. msix_task, timeout, tr_method);
  2444. mutex_unlock(&ioc->tm_cmds.mutex);
  2445. return ret;
  2446. }
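/*
 * Usage sketch for the locked wrapper above (illustrative only; it mirrors
 * what scsih_dev_reset() below does for a device-level reset, using the
 * default 30 second timeout and link-reset method for non-NVMe devices):
 *
 *	u8 tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
 *
 *	if (mpt3sas_scsih_issue_locked_tm(ioc, handle, sdev->lun,
 *	    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET,
 *	    0, 0, 30, tr_method) != SUCCESS)
 *		sdev_printk(KERN_WARNING, sdev, "LUN reset failed\n");
 *
 * smid_task and msix_task are 0 because a LUN reset is not tied to one
 * outstanding command; an ABORT_TASK caller passes the smid/msix of the
 * command being aborted instead (see scsih_abort() below).
 */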
  2447. /**
  2448. * _scsih_tm_display_info - displays info about the device
  2449. * @ioc: per adapter struct
  2450. * @scmd: pointer to scsi command object
  2451. *
  2452. * Called by task management callback handlers.
  2453. */
  2454. static void
  2455. _scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
  2456. {
  2457. struct scsi_target *starget = scmd->device->sdev_target;
  2458. struct MPT3SAS_TARGET *priv_target = starget->hostdata;
  2459. struct _sas_device *sas_device = NULL;
  2460. struct _pcie_device *pcie_device = NULL;
  2461. unsigned long flags;
  2462. char *device_str = NULL;
  2463. if (!priv_target)
  2464. return;
  2465. if (ioc->hide_ir_msg)
  2466. device_str = "WarpDrive";
  2467. else
  2468. device_str = "volume";
  2469. scsi_print_command(scmd);
  2470. if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
  2471. starget_printk(KERN_INFO, starget,
  2472. "%s handle(0x%04x), %s wwid(0x%016llx)\n",
  2473. device_str, priv_target->handle,
  2474. device_str, (unsigned long long)priv_target->sas_address);
  2475. } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
  2476. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  2477. pcie_device = __mpt3sas_get_pdev_from_target(ioc, priv_target);
  2478. if (pcie_device) {
  2479. starget_printk(KERN_INFO, starget,
  2480. "handle(0x%04x), wwid(0x%016llx), port(%d)\n",
  2481. pcie_device->handle,
  2482. (unsigned long long)pcie_device->wwid,
  2483. pcie_device->port_num);
  2484. if (pcie_device->enclosure_handle != 0)
  2485. starget_printk(KERN_INFO, starget,
  2486. "enclosure logical id(0x%016llx), slot(%d)\n",
  2487. (unsigned long long)
  2488. pcie_device->enclosure_logical_id,
  2489. pcie_device->slot);
  2490. if (pcie_device->connector_name[0] != '\0')
  2491. starget_printk(KERN_INFO, starget,
  2492. "enclosure level(0x%04x), connector name( %s)\n",
  2493. pcie_device->enclosure_level,
  2494. pcie_device->connector_name);
  2495. pcie_device_put(pcie_device);
  2496. }
  2497. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  2498. } else {
  2499. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  2500. sas_device = __mpt3sas_get_sdev_from_target(ioc, priv_target);
  2501. if (sas_device) {
  2502. if (priv_target->flags &
  2503. MPT_TARGET_FLAGS_RAID_COMPONENT) {
  2504. starget_printk(KERN_INFO, starget,
  2505. "volume handle(0x%04x), "
  2506. "volume wwid(0x%016llx)\n",
  2507. sas_device->volume_handle,
  2508. (unsigned long long)sas_device->volume_wwid);
  2509. }
  2510. starget_printk(KERN_INFO, starget,
  2511. "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
  2512. sas_device->handle,
  2513. (unsigned long long)sas_device->sas_address,
  2514. sas_device->phy);
  2515. _scsih_display_enclosure_chassis_info(NULL, sas_device,
  2516. NULL, starget);
  2517. sas_device_put(sas_device);
  2518. }
  2519. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  2520. }
  2521. }
  2522. /**
  2523. * scsih_abort - eh threads main abort routine
  2524. * @scmd: pointer to scsi command object
  2525. *
  2526. * Returns SUCCESS if command aborted else FAILED
  2527. */
  2528. static int
  2529. scsih_abort(struct scsi_cmnd *scmd)
  2530. {
  2531. struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
  2532. struct MPT3SAS_DEVICE *sas_device_priv_data;
  2533. struct scsiio_tracker *st = scsi_cmd_priv(scmd);
  2534. u16 handle;
  2535. int r;
  2536. u8 timeout = 30;
  2537. struct _pcie_device *pcie_device = NULL;
  2538. sdev_printk(KERN_INFO, scmd->device,
  2539. "attempting task abort! scmd(%p)\n", scmd);
  2540. _scsih_tm_display_info(ioc, scmd);
  2541. sas_device_priv_data = scmd->device->hostdata;
  2542. if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
  2543. ioc->remove_host) {
  2544. sdev_printk(KERN_INFO, scmd->device,
  2545. "device been deleted! scmd(%p)\n", scmd);
  2546. scmd->result = DID_NO_CONNECT << 16;
  2547. scmd->scsi_done(scmd);
  2548. r = SUCCESS;
  2549. goto out;
  2550. }
  2551. /* check for completed command */
  2552. if (st == NULL || st->cb_idx == 0xFF) {
  2553. scmd->result = DID_RESET << 16;
  2554. r = SUCCESS;
  2555. goto out;
  2556. }
  2557. /* for hidden raid components and volumes this is not supported */
  2558. if (sas_device_priv_data->sas_target->flags &
  2559. MPT_TARGET_FLAGS_RAID_COMPONENT ||
  2560. sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
  2561. scmd->result = DID_RESET << 16;
  2562. r = FAILED;
  2563. goto out;
  2564. }
  2565. mpt3sas_halt_firmware(ioc);
  2566. handle = sas_device_priv_data->sas_target->handle;
  2567. pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
  2568. if (pcie_device && (!ioc->tm_custom_handling))
  2569. timeout = ioc->nvme_abort_timeout;
  2570. r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
  2571. MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
  2572. st->smid, st->msix_io, timeout, 0);
  2573. /* Command must be cleared after abort */
  2574. if (r == SUCCESS && st->cb_idx != 0xFF)
  2575. r = FAILED;
  2576. out:
  2577. sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
  2578. ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
  2579. if (pcie_device)
  2580. pcie_device_put(pcie_device);
  2581. return r;
  2582. }
  2583. /**
  2584. * scsih_dev_reset - eh threads main device reset routine
  2585. * @scmd: pointer to scsi command object
  2586. *
2587. * Returns SUCCESS if the device was reset, else FAILED
  2588. */
  2589. static int
  2590. scsih_dev_reset(struct scsi_cmnd *scmd)
  2591. {
  2592. struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
  2593. struct MPT3SAS_DEVICE *sas_device_priv_data;
  2594. struct _sas_device *sas_device = NULL;
  2595. struct _pcie_device *pcie_device = NULL;
  2596. u16 handle;
  2597. u8 tr_method = 0;
  2598. u8 tr_timeout = 30;
  2599. int r;
  2600. struct scsi_target *starget = scmd->device->sdev_target;
  2601. struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
  2602. sdev_printk(KERN_INFO, scmd->device,
  2603. "attempting device reset! scmd(%p)\n", scmd);
  2604. _scsih_tm_display_info(ioc, scmd);
  2605. sas_device_priv_data = scmd->device->hostdata;
  2606. if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
  2607. ioc->remove_host) {
  2608. sdev_printk(KERN_INFO, scmd->device,
  2609. "device been deleted! scmd(%p)\n", scmd);
  2610. scmd->result = DID_NO_CONNECT << 16;
  2611. scmd->scsi_done(scmd);
  2612. r = SUCCESS;
  2613. goto out;
  2614. }
  2615. /* for hidden raid components obtain the volume_handle */
  2616. handle = 0;
  2617. if (sas_device_priv_data->sas_target->flags &
  2618. MPT_TARGET_FLAGS_RAID_COMPONENT) {
  2619. sas_device = mpt3sas_get_sdev_from_target(ioc,
  2620. target_priv_data);
  2621. if (sas_device)
  2622. handle = sas_device->volume_handle;
  2623. } else
  2624. handle = sas_device_priv_data->sas_target->handle;
  2625. if (!handle) {
  2626. scmd->result = DID_RESET << 16;
  2627. r = FAILED;
  2628. goto out;
  2629. }
  2630. pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
  2631. if (pcie_device && (!ioc->tm_custom_handling)) {
  2632. tr_timeout = pcie_device->reset_timeout;
  2633. tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
  2634. } else
  2635. tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
  2636. r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
  2637. MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
  2638. tr_timeout, tr_method);
  2639. /* Check for busy commands after reset */
  2640. if (r == SUCCESS && atomic_read(&scmd->device->device_busy))
  2641. r = FAILED;
  2642. out:
  2643. sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
  2644. ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
  2645. if (sas_device)
  2646. sas_device_put(sas_device);
  2647. if (pcie_device)
  2648. pcie_device_put(pcie_device);
  2649. return r;
  2650. }
  2651. /**
  2652. * scsih_target_reset - eh threads main target reset routine
  2653. * @scmd: pointer to scsi command object
  2654. *
2655. * Returns SUCCESS if the target was reset, else FAILED
  2656. */
  2657. static int
  2658. scsih_target_reset(struct scsi_cmnd *scmd)
  2659. {
  2660. struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
  2661. struct MPT3SAS_DEVICE *sas_device_priv_data;
  2662. struct _sas_device *sas_device = NULL;
  2663. struct _pcie_device *pcie_device = NULL;
  2664. u16 handle;
  2665. u8 tr_method = 0;
  2666. u8 tr_timeout = 30;
  2667. int r;
  2668. struct scsi_target *starget = scmd->device->sdev_target;
  2669. struct MPT3SAS_TARGET *target_priv_data = starget->hostdata;
  2670. starget_printk(KERN_INFO, starget, "attempting target reset! scmd(%p)\n",
  2671. scmd);
  2672. _scsih_tm_display_info(ioc, scmd);
  2673. sas_device_priv_data = scmd->device->hostdata;
  2674. if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
  2675. ioc->remove_host) {
  2676. starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n",
  2677. scmd);
  2678. scmd->result = DID_NO_CONNECT << 16;
  2679. scmd->scsi_done(scmd);
  2680. r = SUCCESS;
  2681. goto out;
  2682. }
  2683. /* for hidden raid components obtain the volume_handle */
  2684. handle = 0;
  2685. if (sas_device_priv_data->sas_target->flags &
  2686. MPT_TARGET_FLAGS_RAID_COMPONENT) {
  2687. sas_device = mpt3sas_get_sdev_from_target(ioc,
  2688. target_priv_data);
  2689. if (sas_device)
  2690. handle = sas_device->volume_handle;
  2691. } else
  2692. handle = sas_device_priv_data->sas_target->handle;
  2693. if (!handle) {
  2694. scmd->result = DID_RESET << 16;
  2695. r = FAILED;
  2696. goto out;
  2697. }
  2698. pcie_device = mpt3sas_get_pdev_by_handle(ioc, handle);
  2699. if (pcie_device && (!ioc->tm_custom_handling)) {
  2700. tr_timeout = pcie_device->reset_timeout;
  2701. tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
  2702. } else
  2703. tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
  2704. r = mpt3sas_scsih_issue_locked_tm(ioc, handle, 0,
  2705. MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
  2706. tr_timeout, tr_method);
  2707. /* Check for busy commands after reset */
  2708. if (r == SUCCESS && atomic_read(&starget->target_busy))
  2709. r = FAILED;
  2710. out:
  2711. starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
  2712. ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
  2713. if (sas_device)
  2714. sas_device_put(sas_device);
  2715. if (pcie_device)
  2716. pcie_device_put(pcie_device);
  2717. return r;
  2718. }
  2719. /**
  2720. * scsih_host_reset - eh threads main host reset routine
  2721. * @scmd: pointer to scsi command object
  2722. *
2723. * Returns SUCCESS if the host was reset, else FAILED
  2724. */
  2725. static int
  2726. scsih_host_reset(struct scsi_cmnd *scmd)
  2727. {
  2728. struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
  2729. int r, retval;
  2730. pr_info(MPT3SAS_FMT "attempting host reset! scmd(%p)\n",
  2731. ioc->name, scmd);
  2732. scsi_print_command(scmd);
  2733. if (ioc->is_driver_loading || ioc->remove_host) {
  2734. pr_info(MPT3SAS_FMT "Blocking the host reset\n",
  2735. ioc->name);
  2736. r = FAILED;
  2737. goto out;
  2738. }
  2739. retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
  2740. r = (retval < 0) ? FAILED : SUCCESS;
  2741. out:
  2742. pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n",
  2743. ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
  2744. return r;
  2745. }
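/*
 * The scsih_* entry points above are not called directly; they are plugged
 * into the host template registered with the SCSI midlayer. A minimal
 * sketch, assuming the usual scsi_host_template layout (the template's
 * real name and full field list live elsewhere in this driver):
 *
 *	static struct scsi_host_template mpt3sas_driver_template = {
 *		.module			= THIS_MODULE,
 *		.slave_configure	= scsih_slave_configure,
 *		.bios_param		= scsih_bios_param,
 *		.change_queue_depth	= scsih_change_queue_depth,
 *		.eh_abort_handler	= scsih_abort,
 *		.eh_device_reset_handler = scsih_dev_reset,
 *		.eh_target_reset_handler = scsih_target_reset,
 *		.eh_host_reset_handler	= scsih_host_reset,
 *	};
 */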
  2746. /**
  2747. * _scsih_fw_event_add - insert and queue up fw_event
  2748. * @ioc: per adapter object
  2749. * @fw_event: object describing the event
  2750. * Context: This function will acquire ioc->fw_event_lock.
  2751. *
2752. * This adds the firmware event object to the linked list, then queues it up to
  2753. * be processed from user context.
  2754. *
  2755. * Return nothing.
  2756. */
  2757. static void
  2758. _scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
  2759. {
  2760. unsigned long flags;
  2761. if (ioc->firmware_event_thread == NULL)
  2762. return;
  2763. spin_lock_irqsave(&ioc->fw_event_lock, flags);
  2764. fw_event_work_get(fw_event);
  2765. INIT_LIST_HEAD(&fw_event->list);
  2766. list_add_tail(&fw_event->list, &ioc->fw_event_list);
  2767. INIT_WORK(&fw_event->work, _firmware_event_work);
  2768. fw_event_work_get(fw_event);
  2769. queue_work(ioc->firmware_event_thread, &fw_event->work);
  2770. spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
  2771. }
  2772. /**
  2773. * _scsih_fw_event_del_from_list - delete fw_event from the list
  2774. * @ioc: per adapter object
  2775. * @fw_event: object describing the event
  2776. * Context: This function will acquire ioc->fw_event_lock.
  2777. *
  2778. * If the fw_event is on the fw_event_list, remove it and do a put.
  2779. *
  2780. * Return nothing.
  2781. */
  2782. static void
  2783. _scsih_fw_event_del_from_list(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
  2784. *fw_event)
  2785. {
  2786. unsigned long flags;
  2787. spin_lock_irqsave(&ioc->fw_event_lock, flags);
  2788. if (!list_empty(&fw_event->list)) {
  2789. list_del_init(&fw_event->list);
  2790. fw_event_work_put(fw_event);
  2791. }
  2792. spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
  2793. }
  2794. /**
  2795. * mpt3sas_send_trigger_data_event - send event for processing trigger data
  2796. * @ioc: per adapter object
  2797. * @event_data: trigger event data
  2798. *
  2799. * Return nothing.
  2800. */
  2801. void
  2802. mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
  2803. struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
  2804. {
  2805. struct fw_event_work *fw_event;
  2806. u16 sz;
  2807. if (ioc->is_driver_loading)
  2808. return;
  2809. sz = sizeof(*event_data);
  2810. fw_event = alloc_fw_event_work(sz);
  2811. if (!fw_event)
  2812. return;
  2813. fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
  2814. fw_event->ioc = ioc;
  2815. memcpy(fw_event->event_data, event_data, sizeof(*event_data));
  2816. _scsih_fw_event_add(ioc, fw_event);
  2817. fw_event_work_put(fw_event);
  2818. }
  2819. /**
  2820. * _scsih_error_recovery_delete_devices - remove devices not responding
  2821. * @ioc: per adapter object
  2822. *
  2823. * Return nothing.
  2824. */
  2825. static void
  2826. _scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
  2827. {
  2828. struct fw_event_work *fw_event;
  2829. if (ioc->is_driver_loading)
  2830. return;
  2831. fw_event = alloc_fw_event_work(0);
  2832. if (!fw_event)
  2833. return;
  2834. fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
  2835. fw_event->ioc = ioc;
  2836. _scsih_fw_event_add(ioc, fw_event);
  2837. fw_event_work_put(fw_event);
  2838. }
  2839. /**
  2840. * mpt3sas_port_enable_complete - port enable completed (fake event)
  2841. * @ioc: per adapter object
  2842. *
  2843. * Return nothing.
  2844. */
  2845. void
  2846. mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
  2847. {
  2848. struct fw_event_work *fw_event;
  2849. fw_event = alloc_fw_event_work(0);
  2850. if (!fw_event)
  2851. return;
  2852. fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
  2853. fw_event->ioc = ioc;
  2854. _scsih_fw_event_add(ioc, fw_event);
  2855. fw_event_work_put(fw_event);
  2856. }
  2857. static struct fw_event_work *dequeue_next_fw_event(struct MPT3SAS_ADAPTER *ioc)
  2858. {
  2859. unsigned long flags;
  2860. struct fw_event_work *fw_event = NULL;
  2861. spin_lock_irqsave(&ioc->fw_event_lock, flags);
  2862. if (!list_empty(&ioc->fw_event_list)) {
  2863. fw_event = list_first_entry(&ioc->fw_event_list,
  2864. struct fw_event_work, list);
  2865. list_del_init(&fw_event->list);
  2866. }
  2867. spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
  2868. return fw_event;
  2869. }
  2870. /**
  2871. * _scsih_fw_event_cleanup_queue - cleanup event queue
  2872. * @ioc: per adapter object
  2873. *
  2874. * Walk the firmware event queue, either killing timers, or waiting
  2875. * for outstanding events to complete
  2876. *
  2877. * Return nothing.
  2878. */
  2879. static void
  2880. _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
  2881. {
  2882. struct fw_event_work *fw_event;
  2883. if (list_empty(&ioc->fw_event_list) ||
  2884. !ioc->firmware_event_thread || in_interrupt())
  2885. return;
  2886. while ((fw_event = dequeue_next_fw_event(ioc))) {
  2887. /*
  2888. * Wait on the fw_event to complete. If this returns 1, then
  2889. * the event was never executed, and we need a put for the
  2890. * reference the work had on the fw_event.
  2891. *
  2892. * If it did execute, we wait for it to finish, and the put will
  2893. * happen from _firmware_event_work()
  2894. */
  2895. if (cancel_work_sync(&fw_event->work))
  2896. fw_event_work_put(fw_event);
  2897. fw_event_work_put(fw_event);
  2898. }
  2899. }
  2900. /**
  2901. * _scsih_internal_device_block - block the sdev device
  2902. * @sdev: per device object
2903. * @sas_device_priv_data: per device driver private data
2904. *
2905. * Make sure the device is blocked without error; if not,
2906. * print an error.
  2907. */
  2908. static void
  2909. _scsih_internal_device_block(struct scsi_device *sdev,
  2910. struct MPT3SAS_DEVICE *sas_device_priv_data)
  2911. {
  2912. int r = 0;
  2913. sdev_printk(KERN_INFO, sdev, "device_block, handle(0x%04x)\n",
  2914. sas_device_priv_data->sas_target->handle);
  2915. sas_device_priv_data->block = 1;
  2916. r = scsi_internal_device_block_nowait(sdev);
  2917. if (r == -EINVAL)
  2918. sdev_printk(KERN_WARNING, sdev,
  2919. "device_block failed with return(%d) for handle(0x%04x)\n",
  2920. r, sas_device_priv_data->sas_target->handle);
  2921. }
  2922. /**
  2923. * _scsih_internal_device_unblock - unblock the sdev device
  2924. * @sdev: per device object
2925. * @sas_device_priv_data: per device driver private data
2926. * Make sure the device is unblocked without error; if not, retry
2927. * by blocking and then unblocking.
  2928. */
  2929. static void
  2930. _scsih_internal_device_unblock(struct scsi_device *sdev,
  2931. struct MPT3SAS_DEVICE *sas_device_priv_data)
  2932. {
  2933. int r = 0;
  2934. sdev_printk(KERN_WARNING, sdev, "device_unblock and setting to running, "
  2935. "handle(0x%04x)\n", sas_device_priv_data->sas_target->handle);
  2936. sas_device_priv_data->block = 0;
  2937. r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
  2938. if (r == -EINVAL) {
  2939. /* The device has been set to SDEV_RUNNING by SD layer during
  2940. * device addition but the request queue is still stopped by
  2941. * our earlier block call. We need to perform a block again
  2942. * to get the device to SDEV_BLOCK and then to SDEV_RUNNING */
  2943. sdev_printk(KERN_WARNING, sdev,
  2944. "device_unblock failed with return(%d) for handle(0x%04x) "
  2945. "performing a block followed by an unblock\n",
  2946. r, sas_device_priv_data->sas_target->handle);
  2947. sas_device_priv_data->block = 1;
  2948. r = scsi_internal_device_block_nowait(sdev);
  2949. if (r)
  2950. sdev_printk(KERN_WARNING, sdev, "retried device_block "
  2951. "failed with return(%d) for handle(0x%04x)\n",
  2952. r, sas_device_priv_data->sas_target->handle);
  2953. sas_device_priv_data->block = 0;
  2954. r = scsi_internal_device_unblock_nowait(sdev, SDEV_RUNNING);
  2955. if (r)
  2956. sdev_printk(KERN_WARNING, sdev, "retried device_unblock"
  2957. " failed with return(%d) for handle(0x%04x)\n",
  2958. r, sas_device_priv_data->sas_target->handle);
  2959. }
  2960. }
  2961. /**
  2962. * _scsih_ublock_io_all_device - unblock every device
  2963. * @ioc: per adapter object
  2964. *
2965. * Change the device state from blocked back to running.
  2966. */
  2967. static void
  2968. _scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
  2969. {
  2970. struct MPT3SAS_DEVICE *sas_device_priv_data;
  2971. struct scsi_device *sdev;
  2972. shost_for_each_device(sdev, ioc->shost) {
  2973. sas_device_priv_data = sdev->hostdata;
  2974. if (!sas_device_priv_data)
  2975. continue;
  2976. if (!sas_device_priv_data->block)
  2977. continue;
  2978. dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
  2979. "device_running, handle(0x%04x)\n",
  2980. sas_device_priv_data->sas_target->handle));
  2981. _scsih_internal_device_unblock(sdev, sas_device_priv_data);
  2982. }
  2983. }
  2984. /**
2985. * _scsih_ublock_io_device - unblock devices matching a sas address
2986. * @ioc: per adapter object
2987. * @sas_address: sas address
2988. *
2989. * Change the matching devices' state from blocked back to running.
  2990. */
  2991. static void
  2992. _scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
  2993. {
  2994. struct MPT3SAS_DEVICE *sas_device_priv_data;
  2995. struct scsi_device *sdev;
  2996. shost_for_each_device(sdev, ioc->shost) {
  2997. sas_device_priv_data = sdev->hostdata;
  2998. if (!sas_device_priv_data)
  2999. continue;
  3000. if (sas_device_priv_data->sas_target->sas_address
  3001. != sas_address)
  3002. continue;
  3003. if (sas_device_priv_data->block)
  3004. _scsih_internal_device_unblock(sdev,
  3005. sas_device_priv_data);
  3006. }
  3007. }
  3008. /**
3009. * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
3010. * @ioc: per adapter object
  3012. *
  3013. * During device pull we need to appropriately set the sdev state.
  3014. */
  3015. static void
  3016. _scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
  3017. {
  3018. struct MPT3SAS_DEVICE *sas_device_priv_data;
  3019. struct scsi_device *sdev;
  3020. shost_for_each_device(sdev, ioc->shost) {
  3021. sas_device_priv_data = sdev->hostdata;
  3022. if (!sas_device_priv_data)
  3023. continue;
  3024. if (sas_device_priv_data->block)
  3025. continue;
  3026. if (sas_device_priv_data->ignore_delay_remove) {
  3027. sdev_printk(KERN_INFO, sdev,
  3028. "%s skip device_block for SES handle(0x%04x)\n",
  3029. __func__, sas_device_priv_data->sas_target->handle);
  3030. continue;
  3031. }
  3032. _scsih_internal_device_block(sdev, sas_device_priv_data);
  3033. }
  3034. }
  3035. /**
  3036. * _scsih_block_io_device - set the device state to SDEV_BLOCK
  3037. * @ioc: per adapter object
  3038. * @handle: device handle
  3039. *
  3040. * During device pull we need to appropriately set the sdev state.
  3041. */
  3042. static void
  3043. _scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  3044. {
  3045. struct MPT3SAS_DEVICE *sas_device_priv_data;
  3046. struct scsi_device *sdev;
  3047. struct _sas_device *sas_device;
  3048. sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
  3049. shost_for_each_device(sdev, ioc->shost) {
  3050. sas_device_priv_data = sdev->hostdata;
  3051. if (!sas_device_priv_data)
  3052. continue;
  3053. if (sas_device_priv_data->sas_target->handle != handle)
  3054. continue;
  3055. if (sas_device_priv_data->block)
  3056. continue;
  3057. if (sas_device && sas_device->pend_sas_rphy_add)
  3058. continue;
  3059. if (sas_device_priv_data->ignore_delay_remove) {
  3060. sdev_printk(KERN_INFO, sdev,
  3061. "%s skip device_block for SES handle(0x%04x)\n",
  3062. __func__, sas_device_priv_data->sas_target->handle);
  3063. continue;
  3064. }
  3065. _scsih_internal_device_block(sdev, sas_device_priv_data);
  3066. }
  3067. if (sas_device)
  3068. sas_device_put(sas_device);
  3069. }
  3070. /**
3071. * _scsih_block_io_to_children_attached_to_ex - block children of an expander
3072. * @ioc: per adapter object
3073. * @sas_expander: the sas_node object
3074. *
3075. * This routine sets the sdev state to SDEV_BLOCK for all devices
3076. * attached to this expander. This function is called when the expander
3077. * is pulled.
  3078. */
  3079. static void
  3080. _scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
  3081. struct _sas_node *sas_expander)
  3082. {
  3083. struct _sas_port *mpt3sas_port;
  3084. struct _sas_device *sas_device;
  3085. struct _sas_node *expander_sibling;
  3086. unsigned long flags;
  3087. if (!sas_expander)
  3088. return;
  3089. list_for_each_entry(mpt3sas_port,
  3090. &sas_expander->sas_port_list, port_list) {
  3091. if (mpt3sas_port->remote_identify.device_type ==
  3092. SAS_END_DEVICE) {
  3093. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  3094. sas_device = __mpt3sas_get_sdev_by_addr(ioc,
  3095. mpt3sas_port->remote_identify.sas_address);
  3096. if (sas_device) {
  3097. set_bit(sas_device->handle,
  3098. ioc->blocking_handles);
  3099. sas_device_put(sas_device);
  3100. }
  3101. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  3102. }
  3103. }
  3104. list_for_each_entry(mpt3sas_port,
  3105. &sas_expander->sas_port_list, port_list) {
  3106. if (mpt3sas_port->remote_identify.device_type ==
  3107. SAS_EDGE_EXPANDER_DEVICE ||
  3108. mpt3sas_port->remote_identify.device_type ==
  3109. SAS_FANOUT_EXPANDER_DEVICE) {
  3110. expander_sibling =
  3111. mpt3sas_scsih_expander_find_by_sas_address(
  3112. ioc, mpt3sas_port->remote_identify.sas_address);
  3113. _scsih_block_io_to_children_attached_to_ex(ioc,
  3114. expander_sibling);
  3115. }
  3116. }
  3117. }
  3118. /**
3119. * _scsih_block_io_to_children_attached_directly - block directly attached children
3120. * @ioc: per adapter object
3121. * @event_data: topology change event data
3122. *
3123. * This routine sets the sdev state to SDEV_BLOCK for all directly
3124. * attached devices during device pull.
  3125. */
  3126. static void
  3127. _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
  3128. Mpi2EventDataSasTopologyChangeList_t *event_data)
  3129. {
  3130. int i;
  3131. u16 handle;
  3132. u16 reason_code;
  3133. for (i = 0; i < event_data->NumEntries; i++) {
  3134. handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
  3135. if (!handle)
  3136. continue;
  3137. reason_code = event_data->PHY[i].PhyStatus &
  3138. MPI2_EVENT_SAS_TOPO_RC_MASK;
  3139. if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
  3140. _scsih_block_io_device(ioc, handle);
  3141. }
  3142. }
  3143. /**
3144. * _scsih_block_io_to_pcie_children_attached_directly - block directly attached PCIe children
3145. * @ioc: per adapter object
3146. * @event_data: topology change event data
3147. *
3148. * This routine sets the sdev state to SDEV_BLOCK for all directly
3149. * attached devices during device pull/reconnect.
  3150. */
  3151. static void
  3152. _scsih_block_io_to_pcie_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
  3153. Mpi26EventDataPCIeTopologyChangeList_t *event_data)
  3154. {
  3155. int i;
  3156. u16 handle;
  3157. u16 reason_code;
  3158. for (i = 0; i < event_data->NumEntries; i++) {
  3159. handle =
  3160. le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
  3161. if (!handle)
  3162. continue;
  3163. reason_code = event_data->PortEntry[i].PortStatus;
  3164. if (reason_code ==
  3165. MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING)
  3166. _scsih_block_io_device(ioc, handle);
  3167. }
  3168. }
  3169. /**
  3170. * _scsih_tm_tr_send - send task management request
  3171. * @ioc: per adapter object
  3172. * @handle: device handle
  3173. * Context: interrupt time.
  3174. *
3175. * This code initiates the device removal handshake protocol
3176. * with the controller firmware. This function issues a target reset
3177. * using the high priority request queue, and a sas iounit
3178. * control request (MPI2_SAS_OP_REMOVE_DEVICE) is sent from its completion.
  3179. *
3180. * This is designed to send multiple task management requests at the same
3181. * time to the FIFO. If the FIFO is full, we will append the request
3182. * and process it in a future completion.
  3183. */
  3184. static void
  3185. _scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  3186. {
  3187. Mpi2SCSITaskManagementRequest_t *mpi_request;
  3188. u16 smid;
  3189. struct _sas_device *sas_device = NULL;
  3190. struct _pcie_device *pcie_device = NULL;
  3191. struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
  3192. u64 sas_address = 0;
  3193. unsigned long flags;
  3194. struct _tr_list *delayed_tr;
  3195. u32 ioc_state;
  3196. u8 tr_method = 0;
  3197. if (ioc->pci_error_recovery) {
  3198. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  3199. "%s: host in pci error recovery: handle(0x%04x)\n",
  3200. __func__, ioc->name,
  3201. handle));
  3202. return;
  3203. }
  3204. ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
  3205. if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
  3206. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  3207. "%s: host is not operational: handle(0x%04x)\n",
  3208. __func__, ioc->name,
  3209. handle));
  3210. return;
  3211. }
  3212. /* if PD, then return */
  3213. if (test_bit(handle, ioc->pd_handles))
  3214. return;
  3215. clear_bit(handle, ioc->pend_os_device_add);
  3216. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  3217. sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
  3218. if (sas_device && sas_device->starget &&
  3219. sas_device->starget->hostdata) {
  3220. sas_target_priv_data = sas_device->starget->hostdata;
  3221. sas_target_priv_data->deleted = 1;
  3222. sas_address = sas_device->sas_address;
  3223. }
  3224. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  3225. if (!sas_device) {
  3226. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  3227. pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
  3228. if (pcie_device && pcie_device->starget &&
  3229. pcie_device->starget->hostdata) {
  3230. sas_target_priv_data = pcie_device->starget->hostdata;
  3231. sas_target_priv_data->deleted = 1;
  3232. sas_address = pcie_device->wwid;
  3233. }
  3234. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  3235. if (pcie_device && (!ioc->tm_custom_handling))
  3236. tr_method =
  3237. MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
  3238. else
  3239. tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
  3240. }
  3241. if (sas_target_priv_data) {
  3242. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  3243. "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
  3244. ioc->name, handle,
  3245. (unsigned long long)sas_address));
  3246. if (sas_device) {
  3247. if (sas_device->enclosure_handle != 0)
  3248. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  3249. "setting delete flag:enclosure logical "
  3250. "id(0x%016llx), slot(%d)\n", ioc->name,
  3251. (unsigned long long)
  3252. sas_device->enclosure_logical_id,
  3253. sas_device->slot));
  3254. if (sas_device->connector_name[0] != '\0')
  3255. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  3256. "setting delete flag: enclosure "
  3257. "level(0x%04x), connector name( %s)\n",
  3258. ioc->name, sas_device->enclosure_level,
  3259. sas_device->connector_name));
  3260. } else if (pcie_device) {
  3261. if (pcie_device->enclosure_handle != 0)
  3262. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  3263. "setting delete flag: logical "
  3264. "id(0x%016llx), slot(%d)\n", ioc->name,
  3265. (unsigned long long)
  3266. pcie_device->enclosure_logical_id,
  3267. pcie_device->slot));
  3268. if (pcie_device->connector_name[0] != '\0')
  3269. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  3270. "setting delete flag:, enclosure "
  3271. "level(0x%04x), "
  3272. "connector name( %s)\n", ioc->name,
  3273. pcie_device->enclosure_level,
  3274. pcie_device->connector_name));
  3275. }
  3276. _scsih_ublock_io_device(ioc, sas_address);
  3277. sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
  3278. }
  3279. smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
  3280. if (!smid) {
  3281. delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
  3282. if (!delayed_tr)
  3283. goto out;
  3284. INIT_LIST_HEAD(&delayed_tr->list);
  3285. delayed_tr->handle = handle;
  3286. list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
  3287. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  3288. "DELAYED:tr:handle(0x%04x), (open)\n",
  3289. ioc->name, handle));
  3290. goto out;
  3291. }
  3292. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  3293. "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
  3294. ioc->name, handle, smid,
  3295. ioc->tm_tr_cb_idx));
  3296. mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
  3297. memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
  3298. mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
  3299. mpi_request->DevHandle = cpu_to_le16(handle);
  3300. mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
  3301. mpi_request->MsgFlags = tr_method;
  3302. set_bit(handle, ioc->device_remove_in_progress);
  3303. mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
  3304. mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
  3305. out:
  3306. if (sas_device)
  3307. sas_device_put(sas_device);
  3308. if (pcie_device)
  3309. pcie_device_put(pcie_device);
  3310. }
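/*
 * When no high-priority smid is available, the handle is parked on
 * ioc->delayed_tr_list above. A sketch of how such an entry is later
 * re-driven once a TM completes and frees an smid (the real flushing logic
 * lives in _scsih_check_for_pending_tm(), called from the completion
 * routines below; details here are approximate):
 *
 *	if (!list_empty(&ioc->delayed_tr_list)) {
 *		delayed_tr = list_entry(ioc->delayed_tr_list.next,
 *		    struct _tr_list, list);
 *		mpt3sas_base_free_smid(ioc, smid);
 *		_scsih_tm_tr_send(ioc, delayed_tr->handle);
 *		list_del(&delayed_tr->list);
 *		kfree(delayed_tr);
 *	}
 */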
  3311. /**
3312. * _scsih_tm_tr_complete - target reset completion routine
  3313. * @ioc: per adapter object
  3314. * @smid: system request message index
  3315. * @msix_index: MSIX table index supplied by the OS
  3316. * @reply: reply message frame(lower 32bit addr)
  3317. * Context: interrupt time.
  3318. *
  3319. * This is the target reset completion routine.
  3320. * This code is part of the code to initiate the device removal
  3321. * handshake protocol with controller firmware.
  3322. * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
  3323. *
  3324. * Return 1 meaning mf should be freed from _base_interrupt
  3325. * 0 means the mf is freed from this function.
  3326. */
  3327. static u8
  3328. _scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
  3329. u32 reply)
  3330. {
  3331. u16 handle;
  3332. Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
  3333. Mpi2SCSITaskManagementReply_t *mpi_reply =
  3334. mpt3sas_base_get_reply_virt_addr(ioc, reply);
  3335. Mpi2SasIoUnitControlRequest_t *mpi_request;
  3336. u16 smid_sas_ctrl;
  3337. u32 ioc_state;
  3338. struct _sc_list *delayed_sc;
	if (ioc->pci_error_recovery) {
		dewtprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: host in pci error recovery\n", ioc->name,
		    __func__));
		return 1;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: host is not operational\n", ioc->name, __func__));
		return 1;
	}
  3351. if (unlikely(!mpi_reply)) {
  3352. pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
  3353. ioc->name, __FILE__, __LINE__, __func__);
  3354. return 1;
  3355. }
  3356. mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
  3357. handle = le16_to_cpu(mpi_request_tm->DevHandle);
  3358. if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
  3359. dewtprintk(ioc, pr_err(MPT3SAS_FMT
  3360. "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
  3361. ioc->name, handle,
  3362. le16_to_cpu(mpi_reply->DevHandle), smid));
  3363. return 0;
  3364. }
  3365. mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
  3366. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  3367. "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
  3368. "loginfo(0x%08x), completed(%d)\n", ioc->name,
  3369. handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
  3370. le32_to_cpu(mpi_reply->IOCLogInfo),
  3371. le32_to_cpu(mpi_reply->TerminationCount)));
  3372. smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
  3373. if (!smid_sas_ctrl) {
  3374. delayed_sc = kzalloc(sizeof(*delayed_sc), GFP_ATOMIC);
  3375. if (!delayed_sc)
  3376. return _scsih_check_for_pending_tm(ioc, smid);
  3377. INIT_LIST_HEAD(&delayed_sc->list);
  3378. delayed_sc->handle = le16_to_cpu(mpi_request_tm->DevHandle);
  3379. list_add_tail(&delayed_sc->list, &ioc->delayed_sc_list);
  3380. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  3381. "DELAYED:sc:handle(0x%04x), (open)\n",
  3382. ioc->name, handle));
  3383. return _scsih_check_for_pending_tm(ioc, smid);
  3384. }
  3385. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  3386. "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
  3387. ioc->name, handle, smid_sas_ctrl,
  3388. ioc->tm_sas_control_cb_idx));
  3389. mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
  3390. memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
  3391. mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
  3392. mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
  3393. mpi_request->DevHandle = mpi_request_tm->DevHandle;
  3394. mpt3sas_base_put_smid_default(ioc, smid_sas_ctrl);
  3395. return _scsih_check_for_pending_tm(ioc, smid);
  3396. }
  3397. /**
  3398. * _scsih_sas_control_complete - completion routine
  3399. * @ioc: per adapter object
  3400. * @smid: system request message index
  3401. * @msix_index: MSIX table index supplied by the OS
  3402. * @reply: reply message frame(lower 32bit addr)
  3403. * Context: interrupt time.
  3404. *
  3405. * This is the sas iounit control completion routine.
  3406. * This code is part of the code to initiate the device removal
  3407. * handshake protocol with controller firmware.
  3408. *
  3409. * Return 1 meaning mf should be freed from _base_interrupt
  3410. * 0 means the mf is freed from this function.
  3411. */
  3412. static u8
  3413. _scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
  3414. u8 msix_index, u32 reply)
  3415. {
  3416. Mpi2SasIoUnitControlReply_t *mpi_reply =
  3417. mpt3sas_base_get_reply_virt_addr(ioc, reply);
  3418. if (likely(mpi_reply)) {
  3419. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  3420. "sc_complete:handle(0x%04x), (open) "
  3421. "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
  3422. ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
  3423. le16_to_cpu(mpi_reply->IOCStatus),
  3424. le32_to_cpu(mpi_reply->IOCLogInfo)));
  3425. if (le16_to_cpu(mpi_reply->IOCStatus) ==
  3426. MPI2_IOCSTATUS_SUCCESS) {
  3427. clear_bit(le16_to_cpu(mpi_reply->DevHandle),
  3428. ioc->device_remove_in_progress);
  3429. }
  3430. } else {
  3431. pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
  3432. ioc->name, __FILE__, __LINE__, __func__);
  3433. }
  3434. return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
  3435. }
/**
 * _scsih_tm_tr_volume_send - send target reset request for volumes
 * @ioc: per adapter object
 * @handle: device handle
 * Context: interrupt time.
 *
 * This is designed to send multiple task management requests at the
 * same time to the fifo. If the fifo is full, we will append the
 * request, and process it in a future completion.
 */
  3446. static void
  3447. _scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  3448. {
  3449. Mpi2SCSITaskManagementRequest_t *mpi_request;
  3450. u16 smid;
  3451. struct _tr_list *delayed_tr;
	if (ioc->pci_error_recovery) {
		dewtprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: host reset in progress!\n",
		    ioc->name, __func__));
		return;
	}
  3458. smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
  3459. if (!smid) {
  3460. delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
  3461. if (!delayed_tr)
  3462. return;
  3463. INIT_LIST_HEAD(&delayed_tr->list);
  3464. delayed_tr->handle = handle;
  3465. list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
  3466. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  3467. "DELAYED:tr:handle(0x%04x), (open)\n",
  3468. ioc->name, handle));
  3469. return;
  3470. }
  3471. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  3472. "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
  3473. ioc->name, handle, smid,
  3474. ioc->tm_tr_volume_cb_idx));
  3475. mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
  3476. memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
  3477. mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
  3478. mpi_request->DevHandle = cpu_to_le16(handle);
  3479. mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
  3480. mpt3sas_base_put_smid_hi_priority(ioc, smid, 0);
  3481. }
  3482. /**
  3483. * _scsih_tm_volume_tr_complete - target reset completion
  3484. * @ioc: per adapter object
  3485. * @smid: system request message index
  3486. * @msix_index: MSIX table index supplied by the OS
  3487. * @reply: reply message frame(lower 32bit addr)
  3488. * Context: interrupt time.
  3489. *
  3490. * Return 1 meaning mf should be freed from _base_interrupt
  3491. * 0 means the mf is freed from this function.
  3492. */
  3493. static u8
  3494. _scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
  3495. u8 msix_index, u32 reply)
  3496. {
  3497. u16 handle;
  3498. Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
  3499. Mpi2SCSITaskManagementReply_t *mpi_reply =
  3500. mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (ioc->shost_recovery || ioc->pci_error_recovery) {
		dewtprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: host reset in progress!\n",
		    ioc->name, __func__));
		return 1;
	}
  3507. if (unlikely(!mpi_reply)) {
  3508. pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
  3509. ioc->name, __FILE__, __LINE__, __func__);
  3510. return 1;
  3511. }
  3512. mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
  3513. handle = le16_to_cpu(mpi_request_tm->DevHandle);
  3514. if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
  3515. dewtprintk(ioc, pr_err(MPT3SAS_FMT
  3516. "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
  3517. ioc->name, handle,
  3518. le16_to_cpu(mpi_reply->DevHandle), smid));
  3519. return 0;
  3520. }
  3521. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  3522. "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
  3523. "loginfo(0x%08x), completed(%d)\n", ioc->name,
  3524. handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
  3525. le32_to_cpu(mpi_reply->IOCLogInfo),
  3526. le32_to_cpu(mpi_reply->TerminationCount)));
  3527. return _scsih_check_for_pending_tm(ioc, smid);
  3528. }
  3529. /**
  3530. * _scsih_issue_delayed_event_ack - issue delayed Event ACK messages
  3531. * @ioc: per adapter object
  3532. * @smid: system request message index
  3533. * @event: Event ID
  3534. * @event_context: used to track events uniquely
  3535. *
  3536. * Context - processed in interrupt context.
  3537. */
  3538. static void
  3539. _scsih_issue_delayed_event_ack(struct MPT3SAS_ADAPTER *ioc, u16 smid, U16 event,
  3540. U32 event_context)
  3541. {
  3542. Mpi2EventAckRequest_t *ack_request;
  3543. int i = smid - ioc->internal_smid;
  3544. unsigned long flags;
  3545. /* Without releasing the smid just update the
  3546. * call back index and reuse the same smid for
  3547. * processing this delayed request
  3548. */
  3549. spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
  3550. ioc->internal_lookup[i].cb_idx = ioc->base_cb_idx;
  3551. spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  3552. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  3553. "EVENT ACK: event(0x%04x), smid(%d), cb(%d)\n",
  3554. ioc->name, le16_to_cpu(event), smid,
  3555. ioc->base_cb_idx));
  3556. ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
  3557. memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
  3558. ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
  3559. ack_request->Event = event;
  3560. ack_request->EventContext = event_context;
  3561. ack_request->VF_ID = 0; /* TODO */
  3562. ack_request->VP_ID = 0;
  3563. mpt3sas_base_put_smid_default(ioc, smid);
  3564. }
  3565. /**
  3566. * _scsih_issue_delayed_sas_io_unit_ctrl - issue delayed
  3567. * sas_io_unit_ctrl messages
  3568. * @ioc: per adapter object
  3569. * @smid: system request message index
  3570. * @handle: device handle
  3571. *
  3572. * Context - processed in interrupt context.
  3573. */
  3574. static void
  3575. _scsih_issue_delayed_sas_io_unit_ctrl(struct MPT3SAS_ADAPTER *ioc,
  3576. u16 smid, u16 handle)
  3577. {
  3578. Mpi2SasIoUnitControlRequest_t *mpi_request;
  3579. u32 ioc_state;
  3580. int i = smid - ioc->internal_smid;
  3581. unsigned long flags;
	if (ioc->remove_host) {
		dewtprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: host has been removed\n",
		    ioc->name, __func__));
		return;
	} else if (ioc->pci_error_recovery) {
		dewtprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: host in pci error recovery\n",
		    ioc->name, __func__));
		return;
	}
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		dewtprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: host is not operational\n",
		    ioc->name, __func__));
		return;
	}
  3600. /* Without releasing the smid just update the
  3601. * call back index and reuse the same smid for
  3602. * processing this delayed request
  3603. */
  3604. spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
  3605. ioc->internal_lookup[i].cb_idx = ioc->tm_sas_control_cb_idx;
  3606. spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  3607. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  3608. "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
  3609. ioc->name, handle, smid,
  3610. ioc->tm_sas_control_cb_idx));
  3611. mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
  3612. memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
  3613. mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
  3614. mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
  3615. mpi_request->DevHandle = cpu_to_le16(handle);
  3616. mpt3sas_base_put_smid_default(ioc, smid);
  3617. }
  3618. /**
 * mpt3sas_check_for_pending_internal_cmds - check for pending internal messages
  3620. * @ioc: per adapter object
  3621. * @smid: system request message index
  3622. *
  3623. * Context: Executed in interrupt context
  3624. *
  3625. * This will check delayed internal messages list, and process the
  3626. * next request.
  3627. *
  3628. * Return 1 meaning mf should be freed from _base_interrupt
  3629. * 0 means the mf is freed from this function.
  3630. */
  3631. u8
  3632. mpt3sas_check_for_pending_internal_cmds(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  3633. {
  3634. struct _sc_list *delayed_sc;
  3635. struct _event_ack_list *delayed_event_ack;
  3636. if (!list_empty(&ioc->delayed_event_ack_list)) {
  3637. delayed_event_ack = list_entry(ioc->delayed_event_ack_list.next,
  3638. struct _event_ack_list, list);
  3639. _scsih_issue_delayed_event_ack(ioc, smid,
  3640. delayed_event_ack->Event, delayed_event_ack->EventContext);
  3641. list_del(&delayed_event_ack->list);
  3642. kfree(delayed_event_ack);
  3643. return 0;
  3644. }
  3645. if (!list_empty(&ioc->delayed_sc_list)) {
  3646. delayed_sc = list_entry(ioc->delayed_sc_list.next,
  3647. struct _sc_list, list);
  3648. _scsih_issue_delayed_sas_io_unit_ctrl(ioc, smid,
  3649. delayed_sc->handle);
  3650. list_del(&delayed_sc->list);
  3651. kfree(delayed_sc);
  3652. return 0;
  3653. }
  3654. return 1;
  3655. }
  3656. /**
  3657. * _scsih_check_for_pending_tm - check for pending task management
  3658. * @ioc: per adapter object
  3659. * @smid: system request message index
  3660. *
 * This will check the delayed target reset list, and feed the
 * next request.
  3663. *
  3664. * Return 1 meaning mf should be freed from _base_interrupt
  3665. * 0 means the mf is freed from this function.
  3666. */
  3667. static u8
  3668. _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  3669. {
  3670. struct _tr_list *delayed_tr;
  3671. if (!list_empty(&ioc->delayed_tr_volume_list)) {
  3672. delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
  3673. struct _tr_list, list);
  3674. mpt3sas_base_free_smid(ioc, smid);
  3675. _scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
  3676. list_del(&delayed_tr->list);
  3677. kfree(delayed_tr);
  3678. return 0;
  3679. }
  3680. if (!list_empty(&ioc->delayed_tr_list)) {
  3681. delayed_tr = list_entry(ioc->delayed_tr_list.next,
  3682. struct _tr_list, list);
  3683. mpt3sas_base_free_smid(ioc, smid);
  3684. _scsih_tm_tr_send(ioc, delayed_tr->handle);
  3685. list_del(&delayed_tr->list);
  3686. kfree(delayed_tr);
  3687. return 0;
  3688. }
  3689. return 1;
  3690. }
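/*
 * Ordering note: the routine above drains delayed_tr_volume_list before
 * delayed_tr_list, so a queued volume target reset is replayed ahead of any
 * device target resets that were parked behind it by
 * _scsih_check_ir_config_unhide_events().
 */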
  3691. /**
  3692. * _scsih_check_topo_delete_events - sanity check on topo events
  3693. * @ioc: per adapter object
  3694. * @event_data: the event data payload
  3695. *
 * This routine was added to better handle cable breaker events.
  3697. *
  3698. * This handles the case where driver receives multiple expander
  3699. * add and delete events in a single shot. When there is a delete event
  3700. * the routine will void any pending add events waiting in the event queue.
  3701. *
  3702. * Return nothing.
  3703. */
  3704. static void
  3705. _scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
  3706. Mpi2EventDataSasTopologyChangeList_t *event_data)
  3707. {
  3708. struct fw_event_work *fw_event;
  3709. Mpi2EventDataSasTopologyChangeList_t *local_event_data;
  3710. u16 expander_handle;
  3711. struct _sas_node *sas_expander;
  3712. unsigned long flags;
  3713. int i, reason_code;
  3714. u16 handle;
  3715. for (i = 0 ; i < event_data->NumEntries; i++) {
  3716. handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
  3717. if (!handle)
  3718. continue;
  3719. reason_code = event_data->PHY[i].PhyStatus &
  3720. MPI2_EVENT_SAS_TOPO_RC_MASK;
  3721. if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
  3722. _scsih_tm_tr_send(ioc, handle);
  3723. }
  3724. expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
  3725. if (expander_handle < ioc->sas_hba.num_phys) {
  3726. _scsih_block_io_to_children_attached_directly(ioc, event_data);
  3727. return;
  3728. }
  3729. if (event_data->ExpStatus ==
  3730. MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
  3731. /* put expander attached devices into blocking state */
  3732. spin_lock_irqsave(&ioc->sas_node_lock, flags);
  3733. sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
  3734. expander_handle);
  3735. _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
  3736. spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
  3737. do {
  3738. handle = find_first_bit(ioc->blocking_handles,
  3739. ioc->facts.MaxDevHandle);
  3740. if (handle < ioc->facts.MaxDevHandle)
  3741. _scsih_block_io_device(ioc, handle);
  3742. } while (test_and_clear_bit(handle, ioc->blocking_handles));
  3743. } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
  3744. _scsih_block_io_to_children_attached_directly(ioc, event_data);
  3745. if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
  3746. return;
  3747. /* mark ignore flag for pending events */
  3748. spin_lock_irqsave(&ioc->fw_event_lock, flags);
  3749. list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
  3750. if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
  3751. fw_event->ignore)
  3752. continue;
  3753. local_event_data = (Mpi2EventDataSasTopologyChangeList_t *)
  3754. fw_event->event_data;
  3755. if (local_event_data->ExpStatus ==
  3756. MPI2_EVENT_SAS_TOPO_ES_ADDED ||
  3757. local_event_data->ExpStatus ==
  3758. MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
  3759. if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
  3760. expander_handle) {
  3761. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  3762. "setting ignoring flag\n", ioc->name));
  3763. fw_event->ignore = 1;
  3764. }
  3765. }
  3766. }
  3767. spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
  3768. }
  3769. /**
  3770. * _scsih_check_pcie_topo_remove_events - sanity check on topo
  3771. * events
  3772. * @ioc: per adapter object
  3773. * @event_data: the event data payload
  3774. *
  3775. * This handles the case where driver receives multiple switch
  3776. * or device add and delete events in a single shot. When there
  3777. * is a delete event the routine will void any pending add
  3778. * events waiting in the event queue.
  3779. *
  3780. * Return nothing.
  3781. */
  3782. static void
  3783. _scsih_check_pcie_topo_remove_events(struct MPT3SAS_ADAPTER *ioc,
  3784. Mpi26EventDataPCIeTopologyChangeList_t *event_data)
  3785. {
  3786. struct fw_event_work *fw_event;
  3787. Mpi26EventDataPCIeTopologyChangeList_t *local_event_data;
  3788. unsigned long flags;
  3789. int i, reason_code;
  3790. u16 handle, switch_handle;
  3791. for (i = 0; i < event_data->NumEntries; i++) {
  3792. handle =
  3793. le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
  3794. if (!handle)
  3795. continue;
  3796. reason_code = event_data->PortEntry[i].PortStatus;
  3797. if (reason_code == MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)
  3798. _scsih_tm_tr_send(ioc, handle);
  3799. }
  3800. switch_handle = le16_to_cpu(event_data->SwitchDevHandle);
  3801. if (!switch_handle) {
  3802. _scsih_block_io_to_pcie_children_attached_directly(
  3803. ioc, event_data);
  3804. return;
  3805. }
  3806. /* TODO We are not supporting cascaded PCIe Switch removal yet*/
  3807. if ((event_data->SwitchStatus
  3808. == MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING) ||
  3809. (event_data->SwitchStatus ==
  3810. MPI26_EVENT_PCIE_TOPO_SS_RESPONDING))
  3811. _scsih_block_io_to_pcie_children_attached_directly(
  3812. ioc, event_data);
  3813. if (event_data->SwitchStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
  3814. return;
  3815. /* mark ignore flag for pending events */
  3816. spin_lock_irqsave(&ioc->fw_event_lock, flags);
  3817. list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
  3818. if (fw_event->event != MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST ||
  3819. fw_event->ignore)
  3820. continue;
  3821. local_event_data =
  3822. (Mpi26EventDataPCIeTopologyChangeList_t *)
  3823. fw_event->event_data;
  3824. if (local_event_data->SwitchStatus ==
  3825. MPI2_EVENT_SAS_TOPO_ES_ADDED ||
  3826. local_event_data->SwitchStatus ==
  3827. MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
  3828. if (le16_to_cpu(local_event_data->SwitchDevHandle) ==
  3829. switch_handle) {
  3830. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  3831. "setting ignoring flag for switch event\n",
  3832. ioc->name));
  3833. fw_event->ignore = 1;
  3834. }
  3835. }
  3836. }
  3837. spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
  3838. }
  3839. /**
  3840. * _scsih_set_volume_delete_flag - setting volume delete flag
  3841. * @ioc: per adapter object
  3842. * @handle: device handle
  3843. *
  3844. * This returns nothing.
  3845. */
  3846. static void
  3847. _scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  3848. {
  3849. struct _raid_device *raid_device;
  3850. struct MPT3SAS_TARGET *sas_target_priv_data;
  3851. unsigned long flags;
  3852. spin_lock_irqsave(&ioc->raid_device_lock, flags);
  3853. raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
  3854. if (raid_device && raid_device->starget &&
  3855. raid_device->starget->hostdata) {
  3856. sas_target_priv_data =
  3857. raid_device->starget->hostdata;
  3858. sas_target_priv_data->deleted = 1;
  3859. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  3860. "setting delete flag: handle(0x%04x), "
  3861. "wwid(0x%016llx)\n", ioc->name, handle,
  3862. (unsigned long long) raid_device->wwid));
  3863. }
  3864. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  3865. }
  3866. /**
  3867. * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
  3868. * @handle: input handle
  3869. * @a: handle for volume a
  3870. * @b: handle for volume b
  3871. *
 * IR firmware only supports two raid volumes. The purpose of this
 * routine is to record the given volume handle in either a or b, provided
 * the handle is non-zero and has not already been recorded.
  3875. */
  3876. static void
  3877. _scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
  3878. {
  3879. if (!handle || handle == *a || handle == *b)
  3880. return;
  3881. if (!*a)
  3882. *a = handle;
  3883. else if (!*b)
  3884. *b = handle;
  3885. }
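/*
 * Example: with *a == 0 and *b == 0, the first non-zero volume handle is
 * stored in a, a second distinct non-zero handle is stored in b, and zero
 * or already-recorded handles are ignored.
 */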
  3886. /**
  3887. * _scsih_check_ir_config_unhide_events - check for UNHIDE events
  3888. * @ioc: per adapter object
  3889. * @event_data: the event data payload
  3890. * Context: interrupt time.
  3891. *
 * This routine will send a target reset to the volume, followed by target
 * resets to the PDs. This is called when a PD has been removed, or a
 * volume has been deleted or removed. When the target reset is sent
 * to the volume, the PD target resets need to be queued to start upon
 * completion of the volume target reset.
  3897. *
  3898. * Return nothing.
  3899. */
  3900. static void
  3901. _scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
  3902. Mpi2EventDataIrConfigChangeList_t *event_data)
  3903. {
  3904. Mpi2EventIrConfigElement_t *element;
  3905. int i;
  3906. u16 handle, volume_handle, a, b;
  3907. struct _tr_list *delayed_tr;
  3908. a = 0;
  3909. b = 0;
  3910. if (ioc->is_warpdrive)
  3911. return;
  3912. /* Volume Resets for Deleted or Removed */
  3913. element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
  3914. for (i = 0; i < event_data->NumElements; i++, element++) {
  3915. if (le32_to_cpu(event_data->Flags) &
  3916. MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
  3917. continue;
  3918. if (element->ReasonCode ==
  3919. MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
  3920. element->ReasonCode ==
  3921. MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
  3922. volume_handle = le16_to_cpu(element->VolDevHandle);
  3923. _scsih_set_volume_delete_flag(ioc, volume_handle);
  3924. _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
  3925. }
  3926. }
  3927. /* Volume Resets for UNHIDE events */
  3928. element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
  3929. for (i = 0; i < event_data->NumElements; i++, element++) {
  3930. if (le32_to_cpu(event_data->Flags) &
  3931. MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
  3932. continue;
  3933. if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
  3934. volume_handle = le16_to_cpu(element->VolDevHandle);
  3935. _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
  3936. }
  3937. }
  3938. if (a)
  3939. _scsih_tm_tr_volume_send(ioc, a);
  3940. if (b)
  3941. _scsih_tm_tr_volume_send(ioc, b);
  3942. /* PD target resets */
  3943. element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
  3944. for (i = 0; i < event_data->NumElements; i++, element++) {
  3945. if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
  3946. continue;
  3947. handle = le16_to_cpu(element->PhysDiskDevHandle);
  3948. volume_handle = le16_to_cpu(element->VolDevHandle);
  3949. clear_bit(handle, ioc->pd_handles);
  3950. if (!volume_handle)
  3951. _scsih_tm_tr_send(ioc, handle);
  3952. else if (volume_handle == a || volume_handle == b) {
  3953. delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
  3954. BUG_ON(!delayed_tr);
  3955. INIT_LIST_HEAD(&delayed_tr->list);
  3956. delayed_tr->handle = handle;
  3957. list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
  3958. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  3959. "DELAYED:tr:handle(0x%04x), (open)\n", ioc->name,
  3960. handle));
  3961. } else
  3962. _scsih_tm_tr_send(ioc, handle);
  3963. }
  3964. }
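/*
 * The routine above makes three passes over the config change list: flag
 * deleted/removed volumes and note their handles, note the volume handles
 * referenced by UNHIDE elements, then (after the volume target resets have
 * been sent) issue PD target resets, queueing a PD reset on delayed_tr_list
 * when its volume is one of those being reset so it runs after the volume
 * reset completes.
 */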
  3965. /**
  3966. * _scsih_check_volume_delete_events - set delete flag for volumes
  3967. * @ioc: per adapter object
  3968. * @event_data: the event data payload
  3969. * Context: interrupt time.
  3970. *
 * This will handle the case when the cable connected to the entire volume is
  3972. * pulled. We will take care of setting the deleted flag so normal IO will
  3973. * not be sent.
  3974. *
  3975. * Return nothing.
  3976. */
  3977. static void
  3978. _scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
  3979. Mpi2EventDataIrVolume_t *event_data)
  3980. {
  3981. u32 state;
  3982. if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
  3983. return;
  3984. state = le32_to_cpu(event_data->NewValue);
  3985. if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
  3986. MPI2_RAID_VOL_STATE_FAILED)
  3987. _scsih_set_volume_delete_flag(ioc,
  3988. le16_to_cpu(event_data->VolDevHandle));
  3989. }
  3990. /**
  3991. * _scsih_temp_threshold_events - display temperature threshold exceeded events
  3992. * @ioc: per adapter object
  3993. * @event_data: the temp threshold event data
  3994. * Context: interrupt time.
  3995. *
  3996. * Return nothing.
  3997. */
  3998. static void
  3999. _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
  4000. Mpi2EventDataTemperature_t *event_data)
  4001. {
  4002. if (ioc->temp_sensors_count >= event_data->SensorNum) {
  4003. pr_err(MPT3SAS_FMT "Temperature Threshold flags %s%s%s%s"
  4004. " exceeded for Sensor: %d !!!\n", ioc->name,
  4005. ((le16_to_cpu(event_data->Status) & 0x1) == 1) ? "0 " : " ",
  4006. ((le16_to_cpu(event_data->Status) & 0x2) == 2) ? "1 " : " ",
  4007. ((le16_to_cpu(event_data->Status) & 0x4) == 4) ? "2 " : " ",
  4008. ((le16_to_cpu(event_data->Status) & 0x8) == 8) ? "3 " : " ",
  4009. event_data->SensorNum);
  4010. pr_err(MPT3SAS_FMT "Current Temp In Celsius: %d\n",
  4011. ioc->name, event_data->CurrentTemperature);
  4012. }
  4013. }
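/*
 * _scsih_set_satl_pending() below serializes ATA pass-through commands
 * (ATA_12/ATA_16 CDBs) per device: with pending == true it test-and-sets
 * the per-device flag and returns non-zero when another ATA pass-through
 * is already outstanding; scsih_qcmd() loops on that return value and
 * completes the command with SAM_STAT_BUSY while a previous ATA command
 * is still pending.  With pending == false the flag is simply cleared.
 */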
  4014. static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
  4015. {
  4016. struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
  4017. if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
  4018. return 0;
  4019. if (pending)
  4020. return test_and_set_bit(0, &priv->ata_command_pending);
  4021. clear_bit(0, &priv->ata_command_pending);
  4022. return 0;
  4023. }
  4024. /**
  4025. * _scsih_flush_running_cmds - completing outstanding commands.
  4026. * @ioc: per adapter object
  4027. *
 * Flush out all outstanding scmd commands following a host reset; all
 * pending IO is dropped to the floor.
  4030. *
  4031. * Return nothing.
  4032. */
  4033. static void
  4034. _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
  4035. {
  4036. struct scsi_cmnd *scmd;
  4037. struct scsiio_tracker *st;
  4038. u16 smid;
  4039. int count = 0;
  4040. for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
  4041. scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
  4042. if (!scmd)
  4043. continue;
  4044. count++;
  4045. _scsih_set_satl_pending(scmd, false);
  4046. st = scsi_cmd_priv(scmd);
  4047. mpt3sas_base_clear_st(ioc, st);
  4048. scsi_dma_unmap(scmd);
  4049. if (ioc->pci_error_recovery || ioc->remove_host)
  4050. scmd->result = DID_NO_CONNECT << 16;
  4051. else
  4052. scmd->result = DID_RESET << 16;
  4053. scmd->scsi_done(scmd);
  4054. }
  4055. dtmprintk(ioc, pr_info(MPT3SAS_FMT "completing %d cmds\n",
  4056. ioc->name, count));
  4057. }
  4058. /**
  4059. * _scsih_setup_eedp - setup MPI request for EEDP transfer
  4060. * @ioc: per adapter object
  4061. * @scmd: pointer to scsi command object
  4062. * @mpi_request: pointer to the SCSI_IO request message frame
  4063. *
 * Supports protection types 1, 2 and 3.
  4065. *
  4066. * Returns nothing
  4067. */
  4068. static void
  4069. _scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
  4070. Mpi25SCSIIORequest_t *mpi_request)
  4071. {
  4072. u16 eedp_flags;
  4073. unsigned char prot_op = scsi_get_prot_op(scmd);
  4074. unsigned char prot_type = scsi_get_prot_type(scmd);
  4075. Mpi25SCSIIORequest_t *mpi_request_3v =
  4076. (Mpi25SCSIIORequest_t *)mpi_request;
  4077. if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
  4078. return;
  4079. if (prot_op == SCSI_PROT_READ_STRIP)
  4080. eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
  4081. else if (prot_op == SCSI_PROT_WRITE_INSERT)
  4082. eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
  4083. else
  4084. return;
  4085. switch (prot_type) {
  4086. case SCSI_PROT_DIF_TYPE1:
  4087. case SCSI_PROT_DIF_TYPE2:
  4088. /*
  4089. * enable ref/guard checking
  4090. * auto increment ref tag
  4091. */
  4092. eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
  4093. MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
  4094. MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
  4095. mpi_request->CDB.EEDP32.PrimaryReferenceTag =
  4096. cpu_to_be32(t10_pi_ref_tag(scmd->request));
  4097. break;
  4098. case SCSI_PROT_DIF_TYPE3:
  4099. /*
  4100. * enable guard checking
  4101. */
  4102. eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
  4103. break;
  4104. }
  4105. mpi_request_3v->EEDPBlockSize =
  4106. cpu_to_le16(scmd->device->sector_size);
  4107. if (ioc->is_gen35_ioc)
  4108. eedp_flags |= MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE;
  4109. mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
  4110. }
  4111. /**
  4112. * _scsih_eedp_error_handling - return sense code for EEDP errors
  4113. * @scmd: pointer to scsi command object
  4114. * @ioc_status: ioc status
  4115. *
  4116. * Returns nothing
  4117. */
  4118. static void
  4119. _scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
  4120. {
  4121. u8 ascq;
  4122. switch (ioc_status) {
  4123. case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
  4124. ascq = 0x01;
  4125. break;
  4126. case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
  4127. ascq = 0x02;
  4128. break;
  4129. case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
  4130. ascq = 0x03;
  4131. break;
  4132. default:
  4133. ascq = 0x00;
  4134. break;
  4135. }
  4136. scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
  4137. ascq);
  4138. scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
  4139. SAM_STAT_CHECK_CONDITION;
  4140. }
  4141. /**
  4142. * scsih_qcmd - main scsi request entry point
 * @shost: pointer to the SCSI host object
 * @scmd: pointer to scsi command object
  4145. *
  4146. * The callback index is set inside `ioc->scsi_io_cb_idx`.
  4147. *
  4148. * Returns 0 on success. If there's a failure, return either:
  4149. * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
  4150. * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
  4151. */
  4152. static int
  4153. scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
  4154. {
  4155. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  4156. struct MPT3SAS_DEVICE *sas_device_priv_data;
  4157. struct MPT3SAS_TARGET *sas_target_priv_data;
  4158. struct _raid_device *raid_device;
  4159. struct request *rq = scmd->request;
  4160. int class;
  4161. Mpi25SCSIIORequest_t *mpi_request;
  4162. struct _pcie_device *pcie_device = NULL;
  4163. u32 mpi_control;
  4164. u16 smid;
  4165. u16 handle;
  4166. if (ioc->logging_level & MPT_DEBUG_SCSI)
  4167. scsi_print_command(scmd);
  4168. sas_device_priv_data = scmd->device->hostdata;
  4169. if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
  4170. scmd->result = DID_NO_CONNECT << 16;
  4171. scmd->scsi_done(scmd);
  4172. return 0;
  4173. }
  4174. if (ioc->pci_error_recovery || ioc->remove_host) {
  4175. scmd->result = DID_NO_CONNECT << 16;
  4176. scmd->scsi_done(scmd);
  4177. return 0;
  4178. }
  4179. sas_target_priv_data = sas_device_priv_data->sas_target;
  4180. /* invalid device handle */
  4181. handle = sas_target_priv_data->handle;
  4182. if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
  4183. scmd->result = DID_NO_CONNECT << 16;
  4184. scmd->scsi_done(scmd);
  4185. return 0;
  4186. }
  4187. /* host recovery or link resets sent via IOCTLs */
  4188. if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
  4189. return SCSI_MLQUEUE_HOST_BUSY;
  4190. /* device has been deleted */
  4191. else if (sas_target_priv_data->deleted) {
  4192. scmd->result = DID_NO_CONNECT << 16;
  4193. scmd->scsi_done(scmd);
  4194. return 0;
  4195. /* device busy with task management */
  4196. } else if (sas_target_priv_data->tm_busy ||
  4197. sas_device_priv_data->block)
  4198. return SCSI_MLQUEUE_DEVICE_BUSY;
  4199. /*
  4200. * Bug work around for firmware SATL handling. The loop
  4201. * is based on atomic operations and ensures consistency
  4202. * since we're lockless at this point
  4203. */
  4204. do {
  4205. if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
  4206. scmd->result = SAM_STAT_BUSY;
  4207. scmd->scsi_done(scmd);
  4208. return 0;
  4209. }
  4210. } while (_scsih_set_satl_pending(scmd, true));
  4211. if (scmd->sc_data_direction == DMA_FROM_DEVICE)
  4212. mpi_control = MPI2_SCSIIO_CONTROL_READ;
  4213. else if (scmd->sc_data_direction == DMA_TO_DEVICE)
  4214. mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
  4215. else
  4216. mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
  4217. /* set tags */
  4218. mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
  4219. /* NCQ Prio supported, make sure control indicated high priority */
  4220. if (sas_device_priv_data->ncq_prio_enable) {
  4221. class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
  4222. if (class == IOPRIO_CLASS_RT)
  4223. mpi_control |= 1 << MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT;
  4224. }
  4225. /* Make sure Device is not raid volume.
  4226. * We do not expose raid functionality to upper layer for warpdrive.
  4227. */
  4228. if (((!ioc->is_warpdrive && !scsih_is_raid(&scmd->device->sdev_gendev))
  4229. && !scsih_is_nvme(&scmd->device->sdev_gendev))
  4230. && sas_is_tlr_enabled(scmd->device) && scmd->cmd_len != 32)
  4231. mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
  4232. smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
  4233. if (!smid) {
  4234. pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
  4235. ioc->name, __func__);
  4236. _scsih_set_satl_pending(scmd, false);
  4237. goto out;
  4238. }
  4239. mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
  4240. memset(mpi_request, 0, ioc->request_sz);
  4241. _scsih_setup_eedp(ioc, scmd, mpi_request);
  4242. if (scmd->cmd_len == 32)
  4243. mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
	if (sas_device_priv_data->sas_target->flags &
	    MPT_TARGET_FLAGS_RAID_COMPONENT)
		mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
	else
		mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
  4250. mpi_request->DevHandle = cpu_to_le16(handle);
  4251. mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
  4252. mpi_request->Control = cpu_to_le32(mpi_control);
  4253. mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
  4254. mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
  4255. mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
  4256. mpi_request->SenseBufferLowAddress =
  4257. mpt3sas_base_get_sense_buffer_dma(ioc, smid);
  4258. mpi_request->SGLOffset0 = offsetof(Mpi25SCSIIORequest_t, SGL) / 4;
  4259. int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
  4260. mpi_request->LUN);
  4261. memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
  4262. if (mpi_request->DataLength) {
  4263. pcie_device = sas_target_priv_data->pcie_dev;
  4264. if (ioc->build_sg_scmd(ioc, scmd, smid, pcie_device)) {
  4265. mpt3sas_base_free_smid(ioc, smid);
  4266. _scsih_set_satl_pending(scmd, false);
  4267. goto out;
  4268. }
  4269. } else
  4270. ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
  4271. raid_device = sas_target_priv_data->raid_device;
  4272. if (raid_device && raid_device->direct_io_enabled)
  4273. mpt3sas_setup_direct_io(ioc, scmd,
  4274. raid_device, mpi_request);
  4275. if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
  4276. if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
  4277. mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
  4278. MPI25_SCSIIO_IOFLAGS_FAST_PATH);
  4279. mpt3sas_base_put_smid_fast_path(ioc, smid, handle);
  4280. } else
  4281. ioc->put_smid_scsi_io(ioc, smid,
  4282. le16_to_cpu(mpi_request->DevHandle));
  4283. } else
  4284. mpt3sas_base_put_smid_default(ioc, smid);
  4285. return 0;
  4286. out:
  4287. return SCSI_MLQUEUE_HOST_BUSY;
  4288. }
  4289. /**
  4290. * _scsih_normalize_sense - normalize descriptor and fixed format sense data
  4291. * @sense_buffer: sense data returned by target
  4292. * @data: normalized skey/asc/ascq
  4293. *
  4294. * Return nothing.
  4295. */
  4296. static void
  4297. _scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
  4298. {
  4299. if ((sense_buffer[0] & 0x7F) >= 0x72) {
  4300. /* descriptor format */
  4301. data->skey = sense_buffer[1] & 0x0F;
  4302. data->asc = sense_buffer[2];
  4303. data->ascq = sense_buffer[3];
  4304. } else {
  4305. /* fixed format */
  4306. data->skey = sense_buffer[2] & 0x0F;
  4307. data->asc = sense_buffer[12];
  4308. data->ascq = sense_buffer[13];
  4309. }
  4310. }
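/*
 * The format check above follows SPC: sense response codes 0x72/0x73 denote
 * descriptor-format sense data and 0x70/0x71 denote fixed-format, hence the
 * comparison against 0x72 after masking off the VALID bit.
 */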
  4311. /**
 * _scsih_scsi_ioc_info - translate non-successful SCSI_IO request
 * @ioc: per adapter object
 * @scmd: pointer to scsi command object
 * @mpi_reply: reply mf payload returned from firmware
 * @smid: system request message index
  4316. *
  4317. * scsi_status - SCSI Status code returned from target device
  4318. * scsi_state - state info associated with SCSI_IO determined by ioc
  4319. * ioc_status - ioc supplied status info
  4320. *
  4321. * Return nothing.
  4322. */
  4323. static void
  4324. _scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
  4325. Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
  4326. {
  4327. u32 response_info;
  4328. u8 *response_bytes;
  4329. u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
  4330. MPI2_IOCSTATUS_MASK;
  4331. u8 scsi_state = mpi_reply->SCSIState;
  4332. u8 scsi_status = mpi_reply->SCSIStatus;
  4333. char *desc_ioc_state = NULL;
  4334. char *desc_scsi_status = NULL;
  4335. char *desc_scsi_state = ioc->tmp_string;
  4336. u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
  4337. struct _sas_device *sas_device = NULL;
  4338. struct _pcie_device *pcie_device = NULL;
  4339. struct scsi_target *starget = scmd->device->sdev_target;
  4340. struct MPT3SAS_TARGET *priv_target = starget->hostdata;
  4341. char *device_str = NULL;
  4342. if (!priv_target)
  4343. return;
  4344. if (ioc->hide_ir_msg)
  4345. device_str = "WarpDrive";
  4346. else
  4347. device_str = "volume";
  4348. if (log_info == 0x31170000)
  4349. return;
  4350. switch (ioc_status) {
  4351. case MPI2_IOCSTATUS_SUCCESS:
  4352. desc_ioc_state = "success";
  4353. break;
  4354. case MPI2_IOCSTATUS_INVALID_FUNCTION:
  4355. desc_ioc_state = "invalid function";
  4356. break;
  4357. case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
  4358. desc_ioc_state = "scsi recovered error";
  4359. break;
  4360. case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
  4361. desc_ioc_state = "scsi invalid dev handle";
  4362. break;
  4363. case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
  4364. desc_ioc_state = "scsi device not there";
  4365. break;
  4366. case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
  4367. desc_ioc_state = "scsi data overrun";
  4368. break;
  4369. case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
  4370. desc_ioc_state = "scsi data underrun";
  4371. break;
  4372. case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
  4373. desc_ioc_state = "scsi io data error";
  4374. break;
  4375. case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
  4376. desc_ioc_state = "scsi protocol error";
  4377. break;
  4378. case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
  4379. desc_ioc_state = "scsi task terminated";
  4380. break;
  4381. case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
  4382. desc_ioc_state = "scsi residual mismatch";
  4383. break;
  4384. case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
  4385. desc_ioc_state = "scsi task mgmt failed";
  4386. break;
  4387. case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
  4388. desc_ioc_state = "scsi ioc terminated";
  4389. break;
  4390. case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
  4391. desc_ioc_state = "scsi ext terminated";
  4392. break;
  4393. case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
  4394. desc_ioc_state = "eedp guard error";
  4395. break;
  4396. case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
  4397. desc_ioc_state = "eedp ref tag error";
  4398. break;
  4399. case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
  4400. desc_ioc_state = "eedp app tag error";
  4401. break;
  4402. case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
  4403. desc_ioc_state = "insufficient power";
  4404. break;
  4405. default:
  4406. desc_ioc_state = "unknown";
  4407. break;
  4408. }
  4409. switch (scsi_status) {
  4410. case MPI2_SCSI_STATUS_GOOD:
  4411. desc_scsi_status = "good";
  4412. break;
  4413. case MPI2_SCSI_STATUS_CHECK_CONDITION:
  4414. desc_scsi_status = "check condition";
  4415. break;
  4416. case MPI2_SCSI_STATUS_CONDITION_MET:
  4417. desc_scsi_status = "condition met";
  4418. break;
  4419. case MPI2_SCSI_STATUS_BUSY:
  4420. desc_scsi_status = "busy";
  4421. break;
  4422. case MPI2_SCSI_STATUS_INTERMEDIATE:
  4423. desc_scsi_status = "intermediate";
  4424. break;
  4425. case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
  4426. desc_scsi_status = "intermediate condmet";
  4427. break;
  4428. case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
  4429. desc_scsi_status = "reservation conflict";
  4430. break;
  4431. case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
  4432. desc_scsi_status = "command terminated";
  4433. break;
  4434. case MPI2_SCSI_STATUS_TASK_SET_FULL:
  4435. desc_scsi_status = "task set full";
  4436. break;
  4437. case MPI2_SCSI_STATUS_ACA_ACTIVE:
  4438. desc_scsi_status = "aca active";
  4439. break;
  4440. case MPI2_SCSI_STATUS_TASK_ABORTED:
  4441. desc_scsi_status = "task aborted";
  4442. break;
  4443. default:
  4444. desc_scsi_status = "unknown";
  4445. break;
  4446. }
  4447. desc_scsi_state[0] = '\0';
  4448. if (!scsi_state)
  4449. desc_scsi_state = " ";
  4450. if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
  4451. strcat(desc_scsi_state, "response info ");
  4452. if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
  4453. strcat(desc_scsi_state, "state terminated ");
  4454. if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
  4455. strcat(desc_scsi_state, "no status ");
  4456. if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
  4457. strcat(desc_scsi_state, "autosense failed ");
  4458. if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
  4459. strcat(desc_scsi_state, "autosense valid ");
  4460. scsi_print_command(scmd);
  4461. if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
  4462. pr_warn(MPT3SAS_FMT "\t%s wwid(0x%016llx)\n", ioc->name,
  4463. device_str, (unsigned long long)priv_target->sas_address);
  4464. } else if (priv_target->flags & MPT_TARGET_FLAGS_PCIE_DEVICE) {
  4465. pcie_device = mpt3sas_get_pdev_from_target(ioc, priv_target);
  4466. if (pcie_device) {
  4467. pr_info(MPT3SAS_FMT "\twwid(0x%016llx), port(%d)\n",
  4468. ioc->name,
  4469. (unsigned long long)pcie_device->wwid,
  4470. pcie_device->port_num);
  4471. if (pcie_device->enclosure_handle != 0)
  4472. pr_info(MPT3SAS_FMT
  4473. "\tenclosure logical id(0x%016llx), "
  4474. "slot(%d)\n", ioc->name,
  4475. (unsigned long long)
  4476. pcie_device->enclosure_logical_id,
  4477. pcie_device->slot);
  4478. if (pcie_device->connector_name[0])
  4479. pr_info(MPT3SAS_FMT
  4480. "\tenclosure level(0x%04x),"
  4481. "connector name( %s)\n",
  4482. ioc->name, pcie_device->enclosure_level,
  4483. pcie_device->connector_name);
  4484. pcie_device_put(pcie_device);
  4485. }
  4486. } else {
  4487. sas_device = mpt3sas_get_sdev_from_target(ioc, priv_target);
  4488. if (sas_device) {
  4489. pr_warn(MPT3SAS_FMT
  4490. "\tsas_address(0x%016llx), phy(%d)\n",
  4491. ioc->name, (unsigned long long)
  4492. sas_device->sas_address, sas_device->phy);
  4493. _scsih_display_enclosure_chassis_info(ioc, sas_device,
  4494. NULL, NULL);
  4495. sas_device_put(sas_device);
  4496. }
  4497. }
  4498. pr_warn(MPT3SAS_FMT
  4499. "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
  4500. ioc->name, le16_to_cpu(mpi_reply->DevHandle),
  4501. desc_ioc_state, ioc_status, smid);
  4502. pr_warn(MPT3SAS_FMT
  4503. "\trequest_len(%d), underflow(%d), resid(%d)\n",
  4504. ioc->name, scsi_bufflen(scmd), scmd->underflow,
  4505. scsi_get_resid(scmd));
  4506. pr_warn(MPT3SAS_FMT
  4507. "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
  4508. ioc->name, le16_to_cpu(mpi_reply->TaskTag),
  4509. le32_to_cpu(mpi_reply->TransferCount), scmd->result);
  4510. pr_warn(MPT3SAS_FMT
  4511. "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
  4512. ioc->name, desc_scsi_status,
  4513. scsi_status, desc_scsi_state, scsi_state);
  4514. if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
  4515. struct sense_info data;
  4516. _scsih_normalize_sense(scmd->sense_buffer, &data);
  4517. pr_warn(MPT3SAS_FMT
  4518. "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
  4519. ioc->name, data.skey,
  4520. data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount));
  4521. }
  4522. if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
  4523. response_info = le32_to_cpu(mpi_reply->ResponseInfo);
  4524. response_bytes = (u8 *)&response_info;
  4525. _scsih_response_code(ioc, response_bytes[0]);
  4526. }
  4527. }
  4528. /**
  4529. * _scsih_turn_on_pfa_led - illuminate PFA LED
  4530. * @ioc: per adapter object
  4531. * @handle: device handle
  4532. * Context: process
  4533. *
  4534. * Return nothing.
  4535. */
  4536. static void
  4537. _scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  4538. {
  4539. Mpi2SepReply_t mpi_reply;
  4540. Mpi2SepRequest_t mpi_request;
  4541. struct _sas_device *sas_device;
  4542. sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
  4543. if (!sas_device)
  4544. return;
  4545. memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
  4546. mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
  4547. mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
  4548. mpi_request.SlotStatus =
  4549. cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
  4550. mpi_request.DevHandle = cpu_to_le16(handle);
  4551. mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
  4552. if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
  4553. &mpi_request)) != 0) {
  4554. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
  4555. __FILE__, __LINE__, __func__);
  4556. goto out;
  4557. }
  4558. sas_device->pfa_led_on = 1;
  4559. if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
  4560. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  4561. "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
  4562. ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
  4563. le32_to_cpu(mpi_reply.IOCLogInfo)));
  4564. goto out;
  4565. }
  4566. out:
  4567. sas_device_put(sas_device);
  4568. }
  4569. /**
  4570. * _scsih_turn_off_pfa_led - turn off Fault LED
  4571. * @ioc: per adapter object
 * @sas_device: sas device whose PFA LED has to be turned off
  4573. * Context: process
  4574. *
  4575. * Return nothing.
  4576. */
  4577. static void
  4578. _scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
  4579. struct _sas_device *sas_device)
  4580. {
  4581. Mpi2SepReply_t mpi_reply;
  4582. Mpi2SepRequest_t mpi_request;
  4583. memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
  4584. mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
  4585. mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
  4586. mpi_request.SlotStatus = 0;
  4587. mpi_request.Slot = cpu_to_le16(sas_device->slot);
  4588. mpi_request.DevHandle = 0;
  4589. mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
  4590. mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
  4591. if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
  4592. &mpi_request)) != 0) {
  4593. printk(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
  4594. __FILE__, __LINE__, __func__);
  4595. return;
  4596. }
  4597. if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
  4598. dewtprintk(ioc, printk(MPT3SAS_FMT
  4599. "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
  4600. ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
  4601. le32_to_cpu(mpi_reply.IOCLogInfo)));
  4602. return;
  4603. }
  4604. }
  4605. /**
  4606. * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
  4607. * @ioc: per adapter object
  4608. * @handle: device handle
  4609. * Context: interrupt.
  4610. *
  4611. * Return nothing.
  4612. */
  4613. static void
  4614. _scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  4615. {
  4616. struct fw_event_work *fw_event;
  4617. fw_event = alloc_fw_event_work(0);
  4618. if (!fw_event)
  4619. return;
  4620. fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
  4621. fw_event->device_handle = handle;
  4622. fw_event->ioc = ioc;
  4623. _scsih_fw_event_add(ioc, fw_event);
  4624. fw_event_work_put(fw_event);
  4625. }
  4626. /**
  4627. * _scsih_smart_predicted_fault - process smart errors
  4628. * @ioc: per adapter object
  4629. * @handle: device handle
  4630. * Context: interrupt.
  4631. *
  4632. * Return nothing.
  4633. */
  4634. static void
  4635. _scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  4636. {
  4637. struct scsi_target *starget;
  4638. struct MPT3SAS_TARGET *sas_target_priv_data;
  4639. Mpi2EventNotificationReply_t *event_reply;
  4640. Mpi2EventDataSasDeviceStatusChange_t *event_data;
  4641. struct _sas_device *sas_device;
  4642. ssize_t sz;
  4643. unsigned long flags;
  4644. /* only handle non-raid devices */
  4645. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  4646. sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
  4647. if (!sas_device)
  4648. goto out_unlock;
  4649. starget = sas_device->starget;
  4650. sas_target_priv_data = starget->hostdata;
  4651. if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
  4652. ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)))
  4653. goto out_unlock;
  4654. _scsih_display_enclosure_chassis_info(NULL, sas_device, NULL, starget);
  4655. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  4656. if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
  4657. _scsih_send_event_to_turn_on_pfa_led(ioc, handle);
  4658. /* insert into event log */
  4659. sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
  4660. sizeof(Mpi2EventDataSasDeviceStatusChange_t);
  4661. event_reply = kzalloc(sz, GFP_KERNEL);
  4662. if (!event_reply) {
  4663. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  4664. ioc->name, __FILE__, __LINE__, __func__);
  4665. goto out;
  4666. }
  4667. event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
  4668. event_reply->Event =
  4669. cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
  4670. event_reply->MsgLength = sz/4;
  4671. event_reply->EventDataLength =
  4672. cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
  4673. event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
  4674. event_reply->EventData;
  4675. event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
  4676. event_data->ASC = 0x5D;
  4677. event_data->DevHandle = cpu_to_le16(handle);
  4678. event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
  4679. mpt3sas_ctl_add_to_event_log(ioc, event_reply);
  4680. kfree(event_reply);
  4681. out:
  4682. if (sas_device)
  4683. sas_device_put(sas_device);
  4684. return;
  4685. out_unlock:
  4686. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  4687. goto out;
  4688. }
  4689. /**
  4690. * _scsih_io_done - scsi request callback
  4691. * @ioc: per adapter object
  4692. * @smid: system request message index
  4693. * @msix_index: MSIX table index supplied by the OS
  4694. * @reply: reply message frame(lower 32bit addr)
  4695. *
 * Callback handler when using scsih_qcmd.
  4697. *
  4698. * Return 1 meaning mf should be freed from _base_interrupt
  4699. * 0 means the mf is freed from this function.
  4700. */
  4701. static u8
  4702. _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
  4703. {
  4704. Mpi25SCSIIORequest_t *mpi_request;
  4705. Mpi2SCSIIOReply_t *mpi_reply;
  4706. struct scsi_cmnd *scmd;
  4707. struct scsiio_tracker *st;
  4708. u16 ioc_status;
  4709. u32 xfer_cnt;
  4710. u8 scsi_state;
  4711. u8 scsi_status;
  4712. u32 log_info;
  4713. struct MPT3SAS_DEVICE *sas_device_priv_data;
  4714. u32 response_code = 0;
  4715. mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
  4716. scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
  4717. if (scmd == NULL)
  4718. return 1;
  4719. _scsih_set_satl_pending(scmd, false);
  4720. mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
  4721. if (mpi_reply == NULL) {
  4722. scmd->result = DID_OK << 16;
  4723. goto out;
  4724. }
  4725. sas_device_priv_data = scmd->device->hostdata;
  4726. if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
  4727. sas_device_priv_data->sas_target->deleted) {
  4728. scmd->result = DID_NO_CONNECT << 16;
  4729. goto out;
  4730. }
  4731. ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
  4732. /*
  4733. * WARPDRIVE: If direct_io is set then it is directIO,
  4734. * the failed direct I/O should be redirected to volume
  4735. */
  4736. st = scsi_cmd_priv(scmd);
  4737. if (st->direct_io &&
  4738. ((ioc_status & MPI2_IOCSTATUS_MASK)
  4739. != MPI2_IOCSTATUS_SCSI_TASK_TERMINATED)) {
  4740. st->direct_io = 0;
  4741. memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
  4742. mpi_request->DevHandle =
  4743. cpu_to_le16(sas_device_priv_data->sas_target->handle);
  4744. ioc->put_smid_scsi_io(ioc, smid,
  4745. sas_device_priv_data->sas_target->handle);
  4746. return 0;
  4747. }
  4748. /* turning off TLR */
  4749. scsi_state = mpi_reply->SCSIState;
  4750. if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
  4751. response_code =
  4752. le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
  4753. if (!sas_device_priv_data->tlr_snoop_check) {
  4754. sas_device_priv_data->tlr_snoop_check++;
  4755. if ((!ioc->is_warpdrive &&
  4756. !scsih_is_raid(&scmd->device->sdev_gendev) &&
  4757. !scsih_is_nvme(&scmd->device->sdev_gendev))
  4758. && sas_is_tlr_enabled(scmd->device) &&
  4759. response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME) {
  4760. sas_disable_tlr(scmd->device);
  4761. sdev_printk(KERN_INFO, scmd->device, "TLR disabled\n");
  4762. }
  4763. }
  4764. xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
  4765. scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
  4766. if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
  4767. log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
  4768. else
  4769. log_info = 0;
  4770. ioc_status &= MPI2_IOCSTATUS_MASK;
  4771. scsi_status = mpi_reply->SCSIStatus;
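/* a zero-byte underrun carrying BUSY, RESERVATION CONFLICT or TASK SET FULL is treated as a clean transport below, so only the SCSI status is reported */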
  4772. if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
  4773. (scsi_status == MPI2_SCSI_STATUS_BUSY ||
  4774. scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
  4775. scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
  4776. ioc_status = MPI2_IOCSTATUS_SUCCESS;
  4777. }
  4778. if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
  4779. struct sense_info data;
  4780. const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
  4781. smid);
  4782. u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
  4783. le32_to_cpu(mpi_reply->SenseCount));
  4784. memcpy(scmd->sense_buffer, sense_data, sz);
  4785. _scsih_normalize_sense(scmd->sense_buffer, &data);
  4786. /* failure prediction threshold exceeded */
  4787. if (data.asc == 0x5D)
  4788. _scsih_smart_predicted_fault(ioc,
  4789. le16_to_cpu(mpi_reply->DevHandle));
  4790. mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
  4791. if ((ioc->logging_level & MPT_DEBUG_REPLY) &&
  4792. ((scmd->sense_buffer[2] == UNIT_ATTENTION) ||
  4793. (scmd->sense_buffer[2] == MEDIUM_ERROR) ||
  4794. (scmd->sense_buffer[2] == HARDWARE_ERROR)))
  4795. _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
  4796. }
  4797. switch (ioc_status) {
  4798. case MPI2_IOCSTATUS_BUSY:
  4799. case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
  4800. scmd->result = SAM_STAT_BUSY;
  4801. break;
  4802. case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
  4803. scmd->result = DID_NO_CONNECT << 16;
  4804. break;
  4805. case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
  4806. if (sas_device_priv_data->block) {
  4807. scmd->result = DID_TRANSPORT_DISRUPTED << 16;
  4808. goto out;
  4809. }
  4810. if (log_info == 0x31110630) {
  4811. if (scmd->retries > 2) {
  4812. scmd->result = DID_NO_CONNECT << 16;
  4813. scsi_device_set_state(scmd->device,
  4814. SDEV_OFFLINE);
  4815. } else {
  4816. scmd->result = DID_SOFT_ERROR << 16;
  4817. scmd->device->expecting_cc_ua = 1;
  4818. }
  4819. break;
  4820. } else if (log_info == VIRTUAL_IO_FAILED_RETRY) {
  4821. scmd->result = DID_RESET << 16;
  4822. break;
  4823. } else if ((scmd->device->channel == RAID_CHANNEL) &&
  4824. (scsi_state == (MPI2_SCSI_STATE_TERMINATED |
  4825. MPI2_SCSI_STATE_NO_SCSI_STATUS))) {
  4826. scmd->result = DID_RESET << 16;
  4827. break;
  4828. }
  4829. scmd->result = DID_SOFT_ERROR << 16;
  4830. break;
  4831. case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
  4832. case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
  4833. scmd->result = DID_RESET << 16;
  4834. break;
  4835. case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
  4836. if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
  4837. scmd->result = DID_SOFT_ERROR << 16;
  4838. else
  4839. scmd->result = (DID_OK << 16) | scsi_status;
  4840. break;
  4841. case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
  4842. scmd->result = (DID_OK << 16) | scsi_status;
  4843. if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
  4844. break;
  4845. if (xfer_cnt < scmd->underflow) {
  4846. if (scsi_status == SAM_STAT_BUSY)
  4847. scmd->result = SAM_STAT_BUSY;
  4848. else
  4849. scmd->result = DID_SOFT_ERROR << 16;
  4850. } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
  4851. MPI2_SCSI_STATE_NO_SCSI_STATUS))
  4852. scmd->result = DID_SOFT_ERROR << 16;
  4853. else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
  4854. scmd->result = DID_RESET << 16;
  4855. else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
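/* REPORT LUNS completed with no data: fake a CHECK CONDITION with fixed-format ILLEGAL REQUEST / INVALID COMMAND OPERATION CODE (0x20/0x00) sense data */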
  4856. mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
  4857. mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
  4858. scmd->result = (DRIVER_SENSE << 24) |
  4859. SAM_STAT_CHECK_CONDITION;
  4860. scmd->sense_buffer[0] = 0x70;
  4861. scmd->sense_buffer[2] = ILLEGAL_REQUEST;
  4862. scmd->sense_buffer[12] = 0x20;
  4863. scmd->sense_buffer[13] = 0;
  4864. }
  4865. break;
  4866. case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
  4867. scsi_set_resid(scmd, 0);
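/* fall through */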
  4868. case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
  4869. case MPI2_IOCSTATUS_SUCCESS:
  4870. scmd->result = (DID_OK << 16) | scsi_status;
  4871. if (response_code ==
  4872. MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
  4873. (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
  4874. MPI2_SCSI_STATE_NO_SCSI_STATUS)))
  4875. scmd->result = DID_SOFT_ERROR << 16;
  4876. else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
  4877. scmd->result = DID_RESET << 16;
  4878. break;
  4879. case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
  4880. case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
  4881. case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
  4882. _scsih_eedp_error_handling(scmd, ioc_status);
  4883. break;
  4884. case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
  4885. case MPI2_IOCSTATUS_INVALID_FUNCTION:
  4886. case MPI2_IOCSTATUS_INVALID_SGL:
  4887. case MPI2_IOCSTATUS_INTERNAL_ERROR:
  4888. case MPI2_IOCSTATUS_INVALID_FIELD:
  4889. case MPI2_IOCSTATUS_INVALID_STATE:
  4890. case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
  4891. case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
  4892. case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
  4893. default:
  4894. scmd->result = DID_SOFT_ERROR << 16;
  4895. break;
  4896. }
  4897. if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
4898. _scsih_scsi_ioc_info(ioc, scmd, mpi_reply, smid);
  4899. out:
  4900. scsi_dma_unmap(scmd);
  4901. mpt3sas_base_free_smid(ioc, smid);
  4902. scmd->scsi_done(scmd);
  4903. return 0;
  4904. }
  4905. /**
  4906. * _scsih_sas_host_refresh - refreshing sas host object contents
  4907. * @ioc: per adapter object
  4908. * Context: user
  4909. *
4910. * During port enable, the firmware sends topology events for every device. It is
4911. * possible that the handles have changed from the previous setting, so this
4912. * code keeps the handles updated when they change.
  4913. *
  4914. * Return nothing.
  4915. */
  4916. static void
  4917. _scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
  4918. {
  4919. u16 sz;
  4920. u16 ioc_status;
  4921. int i;
  4922. Mpi2ConfigReply_t mpi_reply;
  4923. Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
  4924. u16 attached_handle;
  4925. u8 link_rate;
  4926. dtmprintk(ioc, pr_info(MPT3SAS_FMT
  4927. "updating handles for sas_host(0x%016llx)\n",
  4928. ioc->name, (unsigned long long)ioc->sas_hba.sas_address));
  4929. sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
  4930. * sizeof(Mpi2SasIOUnit0PhyData_t));
  4931. sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
  4932. if (!sas_iounit_pg0) {
  4933. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  4934. ioc->name, __FILE__, __LINE__, __func__);
  4935. return;
  4936. }
  4937. if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
  4938. sas_iounit_pg0, sz)) != 0)
  4939. goto out;
  4940. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
  4941. if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
  4942. goto out;
  4943. for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
  4944. link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
  4945. if (i == 0)
  4946. ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
  4947. PhyData[0].ControllerDevHandle);
  4948. ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
  4949. attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
  4950. AttachedDevHandle);
  4951. if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
  4952. link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
  4953. mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
  4954. attached_handle, i, link_rate);
  4955. }
  4956. out:
  4957. kfree(sas_iounit_pg0);
  4958. }
  4959. /**
  4960. * _scsih_sas_host_add - create sas host object
  4961. * @ioc: per adapter object
  4962. *
  4963. * Creating host side data object, stored in ioc->sas_hba
  4964. *
  4965. * Return nothing.
  4966. */
  4967. static void
  4968. _scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
  4969. {
  4970. int i;
  4971. Mpi2ConfigReply_t mpi_reply;
  4972. Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
  4973. Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
  4974. Mpi2SasPhyPage0_t phy_pg0;
  4975. Mpi2SasDevicePage0_t sas_device_pg0;
  4976. Mpi2SasEnclosurePage0_t enclosure_pg0;
  4977. u16 ioc_status;
  4978. u16 sz;
  4979. u8 device_missing_delay;
  4980. u8 num_phys;
  4981. mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
  4982. if (!num_phys) {
  4983. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  4984. ioc->name, __FILE__, __LINE__, __func__);
  4985. return;
  4986. }
  4987. ioc->sas_hba.phy = kcalloc(num_phys,
  4988. sizeof(struct _sas_phy), GFP_KERNEL);
  4989. if (!ioc->sas_hba.phy) {
  4990. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  4991. ioc->name, __FILE__, __LINE__, __func__);
  4992. goto out;
  4993. }
  4994. ioc->sas_hba.num_phys = num_phys;
  4995. /* sas_iounit page 0 */
  4996. sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
  4997. sizeof(Mpi2SasIOUnit0PhyData_t));
  4998. sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
  4999. if (!sas_iounit_pg0) {
  5000. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  5001. ioc->name, __FILE__, __LINE__, __func__);
  5002. return;
  5003. }
  5004. if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
  5005. sas_iounit_pg0, sz))) {
  5006. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  5007. ioc->name, __FILE__, __LINE__, __func__);
  5008. goto out;
  5009. }
  5010. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  5011. MPI2_IOCSTATUS_MASK;
  5012. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  5013. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  5014. ioc->name, __FILE__, __LINE__, __func__);
  5015. goto out;
  5016. }
  5017. /* sas_iounit page 1 */
  5018. sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
  5019. sizeof(Mpi2SasIOUnit1PhyData_t));
  5020. sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
  5021. if (!sas_iounit_pg1) {
  5022. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  5023. ioc->name, __FILE__, __LINE__, __func__);
  5024. goto out;
  5025. }
  5026. if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
  5027. sas_iounit_pg1, sz))) {
  5028. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  5029. ioc->name, __FILE__, __LINE__, __func__);
  5030. goto out;
  5031. }
  5032. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  5033. MPI2_IOCSTATUS_MASK;
  5034. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  5035. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  5036. ioc->name, __FILE__, __LINE__, __func__);
  5037. goto out;
  5038. }
  5039. ioc->io_missing_delay =
  5040. sas_iounit_pg1->IODeviceMissingDelay;
  5041. device_missing_delay =
  5042. sas_iounit_pg1->ReportDeviceMissingDelay;
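/* when the UNIT_16 flag is set, the reported delay is in units of 16 seconds */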
  5043. if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
  5044. ioc->device_missing_delay = (device_missing_delay &
  5045. MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
  5046. else
  5047. ioc->device_missing_delay = device_missing_delay &
  5048. MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
  5049. ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
  5050. for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
  5051. if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
  5052. i))) {
  5053. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  5054. ioc->name, __FILE__, __LINE__, __func__);
  5055. goto out;
  5056. }
  5057. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  5058. MPI2_IOCSTATUS_MASK;
  5059. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  5060. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  5061. ioc->name, __FILE__, __LINE__, __func__);
  5062. goto out;
  5063. }
  5064. if (i == 0)
  5065. ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
  5066. PhyData[0].ControllerDevHandle);
  5067. ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
  5068. ioc->sas_hba.phy[i].phy_id = i;
  5069. mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
  5070. phy_pg0, ioc->sas_hba.parent_dev);
  5071. }
  5072. if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
  5073. MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
  5074. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  5075. ioc->name, __FILE__, __LINE__, __func__);
  5076. goto out;
  5077. }
  5078. ioc->sas_hba.enclosure_handle =
  5079. le16_to_cpu(sas_device_pg0.EnclosureHandle);
  5080. ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
  5081. pr_info(MPT3SAS_FMT
  5082. "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
  5083. ioc->name, ioc->sas_hba.handle,
  5084. (unsigned long long) ioc->sas_hba.sas_address,
5085. ioc->sas_hba.num_phys);
  5086. if (ioc->sas_hba.enclosure_handle) {
  5087. if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
  5088. &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
  5089. ioc->sas_hba.enclosure_handle)))
  5090. ioc->sas_hba.enclosure_logical_id =
  5091. le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
  5092. }
  5093. out:
  5094. kfree(sas_iounit_pg1);
  5095. kfree(sas_iounit_pg0);
  5096. }
  5097. /**
  5098. * _scsih_expander_add - creating expander object
  5099. * @ioc: per adapter object
  5100. * @handle: expander handle
  5101. *
  5102. * Creating expander object, stored in ioc->sas_expander_list.
  5103. *
  5104. * Return 0 for success, else error.
  5105. */
  5106. static int
  5107. _scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  5108. {
  5109. struct _sas_node *sas_expander;
  5110. struct _enclosure_node *enclosure_dev;
  5111. Mpi2ConfigReply_t mpi_reply;
  5112. Mpi2ExpanderPage0_t expander_pg0;
  5113. Mpi2ExpanderPage1_t expander_pg1;
  5114. u32 ioc_status;
  5115. u16 parent_handle;
  5116. u64 sas_address, sas_address_parent = 0;
  5117. int i;
  5118. unsigned long flags;
  5119. struct _sas_port *mpt3sas_port = NULL;
  5120. int rc = 0;
  5121. if (!handle)
  5122. return -1;
  5123. if (ioc->shost_recovery || ioc->pci_error_recovery)
  5124. return -1;
  5125. if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
  5126. MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
  5127. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  5128. ioc->name, __FILE__, __LINE__, __func__);
  5129. return -1;
  5130. }
  5131. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  5132. MPI2_IOCSTATUS_MASK;
  5133. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  5134. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  5135. ioc->name, __FILE__, __LINE__, __func__);
  5136. return -1;
  5137. }
  5138. /* handle out of order topology events */
  5139. parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
  5140. if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
  5141. != 0) {
  5142. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  5143. ioc->name, __FILE__, __LINE__, __func__);
  5144. return -1;
  5145. }
  5146. if (sas_address_parent != ioc->sas_hba.sas_address) {
  5147. spin_lock_irqsave(&ioc->sas_node_lock, flags);
  5148. sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
  5149. sas_address_parent);
  5150. spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
  5151. if (!sas_expander) {
  5152. rc = _scsih_expander_add(ioc, parent_handle);
  5153. if (rc != 0)
  5154. return rc;
  5155. }
  5156. }
  5157. spin_lock_irqsave(&ioc->sas_node_lock, flags);
  5158. sas_address = le64_to_cpu(expander_pg0.SASAddress);
  5159. sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
  5160. sas_address);
  5161. spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
  5162. if (sas_expander)
  5163. return 0;
  5164. sas_expander = kzalloc(sizeof(struct _sas_node),
  5165. GFP_KERNEL);
  5166. if (!sas_expander) {
  5167. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  5168. ioc->name, __FILE__, __LINE__, __func__);
  5169. return -1;
  5170. }
  5171. sas_expander->handle = handle;
  5172. sas_expander->num_phys = expander_pg0.NumPhys;
  5173. sas_expander->sas_address_parent = sas_address_parent;
  5174. sas_expander->sas_address = sas_address;
  5175. pr_info(MPT3SAS_FMT "expander_add: handle(0x%04x)," \
  5176. " parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name,
  5177. handle, parent_handle, (unsigned long long)
  5178. sas_expander->sas_address, sas_expander->num_phys);
  5179. if (!sas_expander->num_phys)
  5180. goto out_fail;
  5181. sas_expander->phy = kcalloc(sas_expander->num_phys,
  5182. sizeof(struct _sas_phy), GFP_KERNEL);
  5183. if (!sas_expander->phy) {
  5184. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  5185. ioc->name, __FILE__, __LINE__, __func__);
  5186. rc = -1;
  5187. goto out_fail;
  5188. }
  5189. INIT_LIST_HEAD(&sas_expander->sas_port_list);
  5190. mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
  5191. sas_address_parent);
  5192. if (!mpt3sas_port) {
  5193. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  5194. ioc->name, __FILE__, __LINE__, __func__);
  5195. rc = -1;
  5196. goto out_fail;
  5197. }
  5198. sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
  5199. for (i = 0 ; i < sas_expander->num_phys ; i++) {
  5200. if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
  5201. &expander_pg1, i, handle))) {
  5202. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  5203. ioc->name, __FILE__, __LINE__, __func__);
  5204. rc = -1;
  5205. goto out_fail;
  5206. }
  5207. sas_expander->phy[i].handle = handle;
  5208. sas_expander->phy[i].phy_id = i;
  5209. if ((mpt3sas_transport_add_expander_phy(ioc,
  5210. &sas_expander->phy[i], expander_pg1,
  5211. sas_expander->parent_dev))) {
  5212. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  5213. ioc->name, __FILE__, __LINE__, __func__);
  5214. rc = -1;
  5215. goto out_fail;
  5216. }
  5217. }
  5218. if (sas_expander->enclosure_handle) {
  5219. enclosure_dev =
  5220. mpt3sas_scsih_enclosure_find_by_handle(ioc,
  5221. sas_expander->enclosure_handle);
  5222. if (enclosure_dev)
  5223. sas_expander->enclosure_logical_id =
  5224. le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
  5225. }
  5226. _scsih_expander_node_add(ioc, sas_expander);
  5227. return 0;
  5228. out_fail:
  5229. if (mpt3sas_port)
  5230. mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
  5231. sas_address_parent);
  5232. kfree(sas_expander);
  5233. return rc;
  5234. }
  5235. /**
  5236. * mpt3sas_expander_remove - removing expander object
  5237. * @ioc: per adapter object
  5238. * @sas_address: expander sas_address
  5239. *
  5240. * Return nothing.
  5241. */
  5242. void
  5243. mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
  5244. {
  5245. struct _sas_node *sas_expander;
  5246. unsigned long flags;
  5247. if (ioc->shost_recovery)
  5248. return;
  5249. spin_lock_irqsave(&ioc->sas_node_lock, flags);
  5250. sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
  5251. sas_address);
  5252. spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
  5253. if (sas_expander)
  5254. _scsih_expander_node_remove(ioc, sas_expander);
  5255. }
  5256. /**
  5257. * _scsih_done - internal SCSI_IO callback handler.
  5258. * @ioc: per adapter object
  5259. * @smid: system request message index
  5260. * @msix_index: MSIX table index supplied by the OS
  5261. * @reply: reply message frame(lower 32bit addr)
  5262. *
5263. * Callback handler when sending internally generated SCSI_IO.
  5264. * The callback index passed is `ioc->scsih_cb_idx`
  5265. *
  5266. * Return 1 meaning mf should be freed from _base_interrupt
  5267. * 0 means the mf is freed from this function.
  5268. */
  5269. static u8
  5270. _scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
  5271. {
  5272. MPI2DefaultReply_t *mpi_reply;
  5273. mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
  5274. if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
  5275. return 1;
  5276. if (ioc->scsih_cmds.smid != smid)
  5277. return 1;
  5278. ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
  5279. if (mpi_reply) {
  5280. memcpy(ioc->scsih_cmds.reply, mpi_reply,
  5281. mpi_reply->MsgLength*4);
  5282. ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
  5283. }
  5284. ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
  5285. complete(&ioc->scsih_cmds.done);
  5286. return 1;
  5287. }
  5288. #define MPT3_MAX_LUNS (255)
  5289. /**
  5290. * _scsih_check_access_status - check access flags
  5291. * @ioc: per adapter object
  5292. * @sas_address: sas address
  5293. * @handle: sas device handle
5294. * @access_status: errors returned during discovery of the device
  5295. *
  5296. * Return 0 for success, else failure
  5297. */
  5298. static u8
  5299. _scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
  5300. u16 handle, u8 access_status)
  5301. {
  5302. u8 rc = 1;
  5303. char *desc = NULL;
  5304. switch (access_status) {
  5305. case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
  5306. case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
  5307. rc = 0;
  5308. break;
  5309. case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
  5310. desc = "sata capability failed";
  5311. break;
  5312. case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
  5313. desc = "sata affiliation conflict";
  5314. break;
  5315. case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
  5316. desc = "route not addressable";
  5317. break;
  5318. case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
  5319. desc = "smp error not addressable";
  5320. break;
  5321. case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
  5322. desc = "device blocked";
  5323. break;
  5324. case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
  5325. case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
  5326. case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
  5327. case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
  5328. case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
  5329. case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
  5330. case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
  5331. case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
  5332. case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
  5333. case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
  5334. case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
  5335. case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
  5336. desc = "sata initialization failed";
  5337. break;
  5338. default:
  5339. desc = "unknown";
  5340. break;
  5341. }
  5342. if (!rc)
  5343. return 0;
  5344. pr_err(MPT3SAS_FMT
  5345. "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
  5346. ioc->name, desc, (unsigned long long)sas_address, handle);
  5347. return rc;
  5348. }
  5349. /**
  5350. * _scsih_check_device - checking device responsiveness
  5351. * @ioc: per adapter object
  5352. * @parent_sas_address: sas address of parent expander or sas host
  5353. * @handle: attached device handle
5354. * @phy_number: phy number
  5355. * @link_rate: new link rate
  5356. *
  5357. * Returns nothing.
  5358. */
  5359. static void
  5360. _scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
  5361. u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
  5362. {
  5363. Mpi2ConfigReply_t mpi_reply;
  5364. Mpi2SasDevicePage0_t sas_device_pg0;
  5365. struct _sas_device *sas_device;
  5366. struct _enclosure_node *enclosure_dev = NULL;
  5367. u32 ioc_status;
  5368. unsigned long flags;
  5369. u64 sas_address;
  5370. struct scsi_target *starget;
  5371. struct MPT3SAS_TARGET *sas_target_priv_data;
  5372. u32 device_info;
  5373. if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
  5374. MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
  5375. return;
  5376. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
  5377. if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
  5378. return;
5379. /* wide port handling: we only need to handle the device once, for the phy
5380. * that is matched in sas device page zero
  5381. */
  5382. if (phy_number != sas_device_pg0.PhyNum)
  5383. return;
  5384. /* check if this is end device */
  5385. device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
  5386. if (!(_scsih_is_end_device(device_info)))
  5387. return;
  5388. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  5389. sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
  5390. sas_device = __mpt3sas_get_sdev_by_addr(ioc,
  5391. sas_address);
  5392. if (!sas_device)
  5393. goto out_unlock;
  5394. if (unlikely(sas_device->handle != handle)) {
  5395. starget = sas_device->starget;
  5396. sas_target_priv_data = starget->hostdata;
  5397. starget_printk(KERN_INFO, starget,
  5398. "handle changed from(0x%04x) to (0x%04x)!!!\n",
  5399. sas_device->handle, handle);
  5400. sas_target_priv_data->handle = handle;
  5401. sas_device->handle = handle;
  5402. if (le16_to_cpu(sas_device_pg0.Flags) &
  5403. MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
  5404. sas_device->enclosure_level =
  5405. sas_device_pg0.EnclosureLevel;
  5406. memcpy(sas_device->connector_name,
  5407. sas_device_pg0.ConnectorName, 4);
  5408. sas_device->connector_name[4] = '\0';
  5409. } else {
  5410. sas_device->enclosure_level = 0;
  5411. sas_device->connector_name[0] = '\0';
  5412. }
  5413. sas_device->enclosure_handle =
  5414. le16_to_cpu(sas_device_pg0.EnclosureHandle);
  5415. sas_device->is_chassis_slot_valid = 0;
  5416. enclosure_dev = mpt3sas_scsih_enclosure_find_by_handle(ioc,
  5417. sas_device->enclosure_handle);
  5418. if (enclosure_dev) {
  5419. sas_device->enclosure_logical_id =
  5420. le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
  5421. if (le16_to_cpu(enclosure_dev->pg0.Flags) &
  5422. MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
  5423. sas_device->is_chassis_slot_valid = 1;
  5424. sas_device->chassis_slot =
  5425. enclosure_dev->pg0.ChassisSlot;
  5426. }
  5427. }
  5428. }
  5429. /* check if device is present */
  5430. if (!(le16_to_cpu(sas_device_pg0.Flags) &
  5431. MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
  5432. pr_err(MPT3SAS_FMT
  5433. "device is not present handle(0x%04x), flags!!!\n",
  5434. ioc->name, handle);
  5435. goto out_unlock;
  5436. }
  5437. /* check if there were any issues with discovery */
  5438. if (_scsih_check_access_status(ioc, sas_address, handle,
  5439. sas_device_pg0.AccessStatus))
  5440. goto out_unlock;
  5441. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  5442. _scsih_ublock_io_device(ioc, sas_address);
  5443. if (sas_device)
  5444. sas_device_put(sas_device);
  5445. return;
  5446. out_unlock:
  5447. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  5448. if (sas_device)
  5449. sas_device_put(sas_device);
  5450. }
  5451. /**
  5452. * _scsih_add_device - creating sas device object
  5453. * @ioc: per adapter object
  5454. * @handle: sas device handle
  5455. * @phy_num: phy number end device attached to
  5456. * @is_pd: is this hidden raid component
  5457. *
  5458. * Creating end device object, stored in ioc->sas_device_list.
  5459. *
  5460. * Returns 0 for success, non-zero for failure.
  5461. */
  5462. static int
  5463. _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
  5464. u8 is_pd)
  5465. {
  5466. Mpi2ConfigReply_t mpi_reply;
  5467. Mpi2SasDevicePage0_t sas_device_pg0;
  5468. struct _sas_device *sas_device;
  5469. struct _enclosure_node *enclosure_dev = NULL;
  5470. u32 ioc_status;
  5471. u64 sas_address;
  5472. u32 device_info;
  5473. if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
  5474. MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
  5475. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  5476. ioc->name, __FILE__, __LINE__, __func__);
  5477. return -1;
  5478. }
  5479. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  5480. MPI2_IOCSTATUS_MASK;
  5481. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  5482. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  5483. ioc->name, __FILE__, __LINE__, __func__);
  5484. return -1;
  5485. }
  5486. /* check if this is end device */
  5487. device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
  5488. if (!(_scsih_is_end_device(device_info)))
  5489. return -1;
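/* remember that this handle is pending an OS device add; the bit is cleared below if the device already exists */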
  5490. set_bit(handle, ioc->pend_os_device_add);
  5491. sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
  5492. /* check if device is present */
  5493. if (!(le16_to_cpu(sas_device_pg0.Flags) &
  5494. MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
5495. pr_err(MPT3SAS_FMT "device is not present handle(0x%04x)!!!\n",
  5496. ioc->name, handle);
  5497. return -1;
  5498. }
  5499. /* check if there were any issues with discovery */
  5500. if (_scsih_check_access_status(ioc, sas_address, handle,
  5501. sas_device_pg0.AccessStatus))
  5502. return -1;
  5503. sas_device = mpt3sas_get_sdev_by_addr(ioc,
  5504. sas_address);
  5505. if (sas_device) {
  5506. clear_bit(handle, ioc->pend_os_device_add);
  5507. sas_device_put(sas_device);
  5508. return -1;
  5509. }
  5510. if (sas_device_pg0.EnclosureHandle) {
  5511. enclosure_dev =
  5512. mpt3sas_scsih_enclosure_find_by_handle(ioc,
  5513. le16_to_cpu(sas_device_pg0.EnclosureHandle));
  5514. if (enclosure_dev == NULL)
5515. pr_info(MPT3SAS_FMT "Enclosure handle(0x%04x) "
5516. "doesn't match with enclosure device!\n",
5517. ioc->name, le16_to_cpu(sas_device_pg0.EnclosureHandle));
  5518. }
  5519. sas_device = kzalloc(sizeof(struct _sas_device),
  5520. GFP_KERNEL);
  5521. if (!sas_device) {
  5522. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  5523. ioc->name, __FILE__, __LINE__, __func__);
  5524. return 0;
  5525. }
  5526. kref_init(&sas_device->refcount);
  5527. sas_device->handle = handle;
  5528. if (_scsih_get_sas_address(ioc,
  5529. le16_to_cpu(sas_device_pg0.ParentDevHandle),
  5530. &sas_device->sas_address_parent) != 0)
  5531. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  5532. ioc->name, __FILE__, __LINE__, __func__);
  5533. sas_device->enclosure_handle =
  5534. le16_to_cpu(sas_device_pg0.EnclosureHandle);
  5535. if (sas_device->enclosure_handle != 0)
  5536. sas_device->slot =
  5537. le16_to_cpu(sas_device_pg0.Slot);
  5538. sas_device->device_info = device_info;
  5539. sas_device->sas_address = sas_address;
  5540. sas_device->phy = sas_device_pg0.PhyNum;
  5541. sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
  5542. MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
  5543. if (le16_to_cpu(sas_device_pg0.Flags)
  5544. & MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
  5545. sas_device->enclosure_level =
  5546. sas_device_pg0.EnclosureLevel;
  5547. memcpy(sas_device->connector_name,
  5548. sas_device_pg0.ConnectorName, 4);
  5549. sas_device->connector_name[4] = '\0';
  5550. } else {
  5551. sas_device->enclosure_level = 0;
  5552. sas_device->connector_name[0] = '\0';
  5553. }
  5554. /* get enclosure_logical_id & chassis_slot*/
  5555. sas_device->is_chassis_slot_valid = 0;
  5556. if (enclosure_dev) {
  5557. sas_device->enclosure_logical_id =
  5558. le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
  5559. if (le16_to_cpu(enclosure_dev->pg0.Flags) &
  5560. MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
  5561. sas_device->is_chassis_slot_valid = 1;
  5562. sas_device->chassis_slot =
  5563. enclosure_dev->pg0.ChassisSlot;
  5564. }
  5565. }
  5566. /* get device name */
  5567. sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
  5568. if (ioc->wait_for_discovery_to_complete)
  5569. _scsih_sas_device_init_add(ioc, sas_device);
  5570. else
  5571. _scsih_sas_device_add(ioc, sas_device);
  5572. sas_device_put(sas_device);
  5573. return 0;
  5574. }
  5575. /**
  5576. * _scsih_remove_device - removing sas device object
  5577. * @ioc: per adapter object
5578. * @sas_device: the sas_device object
  5579. *
  5580. * Return nothing.
  5581. */
  5582. static void
  5583. _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
  5584. struct _sas_device *sas_device)
  5585. {
  5586. struct MPT3SAS_TARGET *sas_target_priv_data;
  5587. if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
  5588. (sas_device->pfa_led_on)) {
  5589. _scsih_turn_off_pfa_led(ioc, sas_device);
  5590. sas_device->pfa_led_on = 0;
  5591. }
  5592. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  5593. "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
  5594. ioc->name, __func__,
  5595. sas_device->handle, (unsigned long long)
  5596. sas_device->sas_address));
  5597. dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
  5598. NULL, NULL));
  5599. if (sas_device->starget && sas_device->starget->hostdata) {
  5600. sas_target_priv_data = sas_device->starget->hostdata;
  5601. sas_target_priv_data->deleted = 1;
  5602. _scsih_ublock_io_device(ioc, sas_device->sas_address);
  5603. sas_target_priv_data->handle =
  5604. MPT3SAS_INVALID_DEVICE_HANDLE;
  5605. }
  5606. if (!ioc->hide_drives)
  5607. mpt3sas_transport_port_remove(ioc,
  5608. sas_device->sas_address,
  5609. sas_device->sas_address_parent);
  5610. pr_info(MPT3SAS_FMT
  5611. "removing handle(0x%04x), sas_addr(0x%016llx)\n",
  5612. ioc->name, sas_device->handle,
  5613. (unsigned long long) sas_device->sas_address);
  5614. _scsih_display_enclosure_chassis_info(ioc, sas_device, NULL, NULL);
  5615. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  5616. "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
  5617. ioc->name, __func__,
  5618. sas_device->handle, (unsigned long long)
  5619. sas_device->sas_address));
  5620. dewtprintk(ioc, _scsih_display_enclosure_chassis_info(ioc, sas_device,
  5621. NULL, NULL));
  5622. }
  5623. /**
  5624. * _scsih_sas_topology_change_event_debug - debug for topology event
  5625. * @ioc: per adapter object
  5626. * @event_data: event data payload
  5627. * Context: user.
  5628. */
  5629. static void
  5630. _scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
  5631. Mpi2EventDataSasTopologyChangeList_t *event_data)
  5632. {
  5633. int i;
  5634. u16 handle;
  5635. u16 reason_code;
  5636. u8 phy_number;
  5637. char *status_str = NULL;
  5638. u8 link_rate, prev_link_rate;
  5639. switch (event_data->ExpStatus) {
  5640. case MPI2_EVENT_SAS_TOPO_ES_ADDED:
  5641. status_str = "add";
  5642. break;
  5643. case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
  5644. status_str = "remove";
  5645. break;
  5646. case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
  5647. case 0:
  5648. status_str = "responding";
  5649. break;
  5650. case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
  5651. status_str = "remove delay";
  5652. break;
  5653. default:
  5654. status_str = "unknown status";
  5655. break;
  5656. }
  5657. pr_info(MPT3SAS_FMT "sas topology change: (%s)\n",
  5658. ioc->name, status_str);
  5659. pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
  5660. "start_phy(%02d), count(%d)\n",
  5661. le16_to_cpu(event_data->ExpanderDevHandle),
  5662. le16_to_cpu(event_data->EnclosureHandle),
  5663. event_data->StartPhyNum, event_data->NumEntries);
  5664. for (i = 0; i < event_data->NumEntries; i++) {
  5665. handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
  5666. if (!handle)
  5667. continue;
  5668. phy_number = event_data->StartPhyNum + i;
  5669. reason_code = event_data->PHY[i].PhyStatus &
  5670. MPI2_EVENT_SAS_TOPO_RC_MASK;
  5671. switch (reason_code) {
  5672. case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
  5673. status_str = "target add";
  5674. break;
  5675. case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
  5676. status_str = "target remove";
  5677. break;
  5678. case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
  5679. status_str = "delay target remove";
  5680. break;
  5681. case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
  5682. status_str = "link rate change";
  5683. break;
  5684. case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
  5685. status_str = "target responding";
  5686. break;
  5687. default:
  5688. status_str = "unknown";
  5689. break;
  5690. }
  5691. link_rate = event_data->PHY[i].LinkRate >> 4;
  5692. prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
  5693. pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
  5694. " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
  5695. handle, status_str, link_rate, prev_link_rate);
  5696. }
  5697. }
  5698. /**
  5699. * _scsih_sas_topology_change_event - handle topology changes
  5700. * @ioc: per adapter object
  5701. * @fw_event: The fw_event_work object
  5702. * Context: user.
  5703. *
  5704. */
  5705. static int
  5706. _scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
  5707. struct fw_event_work *fw_event)
  5708. {
  5709. int i;
  5710. u16 parent_handle, handle;
  5711. u16 reason_code;
  5712. u8 phy_number, max_phys;
  5713. struct _sas_node *sas_expander;
  5714. u64 sas_address;
  5715. unsigned long flags;
  5716. u8 link_rate, prev_link_rate;
  5717. Mpi2EventDataSasTopologyChangeList_t *event_data =
  5718. (Mpi2EventDataSasTopologyChangeList_t *)
  5719. fw_event->event_data;
  5720. if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
  5721. _scsih_sas_topology_change_event_debug(ioc, event_data);
  5722. if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
  5723. return 0;
  5724. if (!ioc->sas_hba.num_phys)
  5725. _scsih_sas_host_add(ioc);
  5726. else
  5727. _scsih_sas_host_refresh(ioc);
  5728. if (fw_event->ignore) {
  5729. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  5730. "ignoring expander event\n", ioc->name));
  5731. return 0;
  5732. }
  5733. parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
  5734. /* handle expander add */
  5735. if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
  5736. if (_scsih_expander_add(ioc, parent_handle) != 0)
  5737. return 0;
  5738. spin_lock_irqsave(&ioc->sas_node_lock, flags);
  5739. sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
  5740. parent_handle);
  5741. if (sas_expander) {
  5742. sas_address = sas_expander->sas_address;
  5743. max_phys = sas_expander->num_phys;
  5744. } else if (parent_handle < ioc->sas_hba.num_phys) {
  5745. sas_address = ioc->sas_hba.sas_address;
  5746. max_phys = ioc->sas_hba.num_phys;
  5747. } else {
  5748. spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
  5749. return 0;
  5750. }
  5751. spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
  5752. /* handle siblings events */
  5753. for (i = 0; i < event_data->NumEntries; i++) {
  5754. if (fw_event->ignore) {
  5755. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  5756. "ignoring expander event\n", ioc->name));
  5757. return 0;
  5758. }
  5759. if (ioc->remove_host || ioc->pci_error_recovery)
  5760. return 0;
  5761. phy_number = event_data->StartPhyNum + i;
  5762. if (phy_number >= max_phys)
  5763. continue;
  5764. reason_code = event_data->PHY[i].PhyStatus &
  5765. MPI2_EVENT_SAS_TOPO_RC_MASK;
  5766. if ((event_data->PHY[i].PhyStatus &
  5767. MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
  5768. MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
  5769. continue;
  5770. handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
  5771. if (!handle)
  5772. continue;
  5773. link_rate = event_data->PHY[i].LinkRate >> 4;
  5774. prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
  5775. switch (reason_code) {
  5776. case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
  5777. if (ioc->shost_recovery)
  5778. break;
  5779. if (link_rate == prev_link_rate)
  5780. break;
  5781. mpt3sas_transport_update_links(ioc, sas_address,
  5782. handle, phy_number, link_rate);
  5783. if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
  5784. break;
  5785. _scsih_check_device(ioc, sas_address, handle,
  5786. phy_number, link_rate);
  5787. if (!test_bit(handle, ioc->pend_os_device_add))
  5788. break;
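/* fall through: the device is still pending an OS add, so handle it as a target add */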
  5789. case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
  5790. if (ioc->shost_recovery)
  5791. break;
  5792. mpt3sas_transport_update_links(ioc, sas_address,
  5793. handle, phy_number, link_rate);
  5794. _scsih_add_device(ioc, handle, phy_number, 0);
  5795. break;
  5796. case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
  5797. _scsih_device_remove_by_handle(ioc, handle);
  5798. break;
  5799. }
  5800. }
  5801. /* handle expander removal */
  5802. if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
  5803. sas_expander)
  5804. mpt3sas_expander_remove(ioc, sas_address);
  5805. return 0;
  5806. }
  5807. /**
  5808. * _scsih_sas_device_status_change_event_debug - debug for device event
5809. * @ioc: per adapter object
* @event_data: event data payload
  5810. * Context: user.
  5811. *
  5812. * Return nothing.
  5813. */
  5814. static void
  5815. _scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
  5816. Mpi2EventDataSasDeviceStatusChange_t *event_data)
  5817. {
  5818. char *reason_str = NULL;
  5819. switch (event_data->ReasonCode) {
  5820. case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
  5821. reason_str = "smart data";
  5822. break;
  5823. case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
  5824. reason_str = "unsupported device discovered";
  5825. break;
  5826. case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
  5827. reason_str = "internal device reset";
  5828. break;
  5829. case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
  5830. reason_str = "internal task abort";
  5831. break;
  5832. case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
  5833. reason_str = "internal task abort set";
  5834. break;
  5835. case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
  5836. reason_str = "internal clear task set";
  5837. break;
  5838. case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
  5839. reason_str = "internal query task";
  5840. break;
  5841. case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
  5842. reason_str = "sata init failure";
  5843. break;
  5844. case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
  5845. reason_str = "internal device reset complete";
  5846. break;
  5847. case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
  5848. reason_str = "internal task abort complete";
  5849. break;
  5850. case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
  5851. reason_str = "internal async notification";
  5852. break;
  5853. case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
  5854. reason_str = "expander reduced functionality";
  5855. break;
  5856. case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
  5857. reason_str = "expander reduced functionality complete";
  5858. break;
  5859. default:
  5860. reason_str = "unknown reason";
  5861. break;
  5862. }
  5863. pr_info(MPT3SAS_FMT "device status change: (%s)\n"
  5864. "\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
  5865. ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
  5866. (unsigned long long)le64_to_cpu(event_data->SASAddress),
  5867. le16_to_cpu(event_data->TaskTag));
  5868. if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
  5869. pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
  5870. event_data->ASC, event_data->ASCQ);
  5871. pr_info("\n");
  5872. }
  5873. /**
  5874. * _scsih_sas_device_status_change_event - handle device status change
  5875. * @ioc: per adapter object
  5876. * @fw_event: The fw_event_work object
  5877. * Context: user.
  5878. *
  5879. * Return nothing.
  5880. */
  5881. static void
  5882. _scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
  5883. struct fw_event_work *fw_event)
  5884. {
  5885. struct MPT3SAS_TARGET *target_priv_data;
  5886. struct _sas_device *sas_device;
  5887. u64 sas_address;
  5888. unsigned long flags;
  5889. Mpi2EventDataSasDeviceStatusChange_t *event_data =
  5890. (Mpi2EventDataSasDeviceStatusChange_t *)
  5891. fw_event->event_data;
  5892. if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
  5893. _scsih_sas_device_status_change_event_debug(ioc,
  5894. event_data);
  5895. /* In MPI Revision K (0xC), the internal device reset complete was
  5896. * implemented, so avoid setting tm_busy flag for older firmware.
  5897. */
  5898. if ((ioc->facts.HeaderVersion >> 8) < 0xC)
  5899. return;
  5900. if (event_data->ReasonCode !=
  5901. MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
  5902. event_data->ReasonCode !=
  5903. MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
  5904. return;
  5905. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  5906. sas_address = le64_to_cpu(event_data->SASAddress);
  5907. sas_device = __mpt3sas_get_sdev_by_addr(ioc,
  5908. sas_address);
  5909. if (!sas_device || !sas_device->starget)
  5910. goto out;
  5911. target_priv_data = sas_device->starget->hostdata;
  5912. if (!target_priv_data)
  5913. goto out;
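/* tm_busy holds off new I/O to the target while the firmware's internal device reset is outstanding */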
  5914. if (event_data->ReasonCode ==
  5915. MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
  5916. target_priv_data->tm_busy = 1;
  5917. else
  5918. target_priv_data->tm_busy = 0;
  5919. out:
  5920. if (sas_device)
  5921. sas_device_put(sas_device);
  5922. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  5923. }
  5924. /**
  5925. * _scsih_check_pcie_access_status - check access flags
  5926. * @ioc: per adapter object
  5927. * @wwid: wwid
5928. * @handle: pcie device handle
5929. * @access_status: errors returned during discovery of the device
  5930. *
  5931. * Return 0 for success, else failure
  5932. */
  5933. static u8
  5934. _scsih_check_pcie_access_status(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
  5935. u16 handle, u8 access_status)
  5936. {
  5937. u8 rc = 1;
  5938. char *desc = NULL;
  5939. switch (access_status) {
  5940. case MPI26_PCIEDEV0_ASTATUS_NO_ERRORS:
  5941. case MPI26_PCIEDEV0_ASTATUS_NEEDS_INITIALIZATION:
  5942. rc = 0;
  5943. break;
  5944. case MPI26_PCIEDEV0_ASTATUS_CAPABILITY_FAILED:
  5945. desc = "PCIe device capability failed";
  5946. break;
  5947. case MPI26_PCIEDEV0_ASTATUS_DEVICE_BLOCKED:
  5948. desc = "PCIe device blocked";
  5949. break;
  5950. case MPI26_PCIEDEV0_ASTATUS_MEMORY_SPACE_ACCESS_FAILED:
  5951. desc = "PCIe device mem space access failed";
  5952. break;
  5953. case MPI26_PCIEDEV0_ASTATUS_UNSUPPORTED_DEVICE:
  5954. desc = "PCIe device unsupported";
  5955. break;
  5956. case MPI26_PCIEDEV0_ASTATUS_MSIX_REQUIRED:
  5957. desc = "PCIe device MSIx Required";
  5958. break;
  5959. case MPI26_PCIEDEV0_ASTATUS_INIT_FAIL_MAX:
  5960. desc = "PCIe device init fail max";
  5961. break;
  5962. case MPI26_PCIEDEV0_ASTATUS_UNKNOWN:
  5963. desc = "PCIe device status unknown";
  5964. break;
  5965. case MPI26_PCIEDEV0_ASTATUS_NVME_READY_TIMEOUT:
  5966. desc = "nvme ready timeout";
  5967. break;
  5968. case MPI26_PCIEDEV0_ASTATUS_NVME_DEVCFG_UNSUPPORTED:
  5969. desc = "nvme device configuration unsupported";
  5970. break;
  5971. case MPI26_PCIEDEV0_ASTATUS_NVME_IDENTIFY_FAILED:
  5972. desc = "nvme identify failed";
  5973. break;
  5974. case MPI26_PCIEDEV0_ASTATUS_NVME_QCONFIG_FAILED:
  5975. desc = "nvme qconfig failed";
  5976. break;
  5977. case MPI26_PCIEDEV0_ASTATUS_NVME_QCREATION_FAILED:
  5978. desc = "nvme qcreation failed";
  5979. break;
  5980. case MPI26_PCIEDEV0_ASTATUS_NVME_EVENTCFG_FAILED:
  5981. desc = "nvme eventcfg failed";
  5982. break;
  5983. case MPI26_PCIEDEV0_ASTATUS_NVME_GET_FEATURE_STAT_FAILED:
  5984. desc = "nvme get feature stat failed";
  5985. break;
  5986. case MPI26_PCIEDEV0_ASTATUS_NVME_IDLE_TIMEOUT:
  5987. desc = "nvme idle timeout";
  5988. break;
  5989. case MPI26_PCIEDEV0_ASTATUS_NVME_FAILURE_STATUS:
  5990. desc = "nvme failure status";
  5991. break;
  5992. default:
  5993. pr_err(MPT3SAS_FMT
  5994. " NVMe discovery error(0x%02x): wwid(0x%016llx),"
  5995. "handle(0x%04x)\n", ioc->name, access_status,
  5996. (unsigned long long)wwid, handle);
  5997. return rc;
  5998. }
  5999. if (!rc)
  6000. return rc;
  6001. pr_info(MPT3SAS_FMT
  6002. "NVMe discovery error(%s): wwid(0x%016llx), handle(0x%04x)\n",
  6003. ioc->name, desc,
  6004. (unsigned long long)wwid, handle);
  6005. return rc;
  6006. }
  6007. /**
  6008. * _scsih_pcie_device_remove_from_sml - removing pcie device
6009. * from SML and freeing up associated memory
  6010. * @ioc: per adapter object
  6011. * @pcie_device: the pcie_device object
  6012. *
  6013. * Return nothing.
  6014. */
  6015. static void
  6016. _scsih_pcie_device_remove_from_sml(struct MPT3SAS_ADAPTER *ioc,
  6017. struct _pcie_device *pcie_device)
  6018. {
  6019. struct MPT3SAS_TARGET *sas_target_priv_data;
  6020. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  6021. "%s: enter: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
  6022. pcie_device->handle, (unsigned long long)
  6023. pcie_device->wwid));
  6024. if (pcie_device->enclosure_handle != 0)
  6025. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  6026. "%s: enter: enclosure logical id(0x%016llx), slot(%d)\n",
  6027. ioc->name, __func__,
  6028. (unsigned long long)pcie_device->enclosure_logical_id,
  6029. pcie_device->slot));
  6030. if (pcie_device->connector_name[0] != '\0')
  6031. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  6032. "%s: enter: enclosure level(0x%04x), connector name( %s)\n",
  6033. ioc->name, __func__,
  6034. pcie_device->enclosure_level,
  6035. pcie_device->connector_name));
  6036. if (pcie_device->starget && pcie_device->starget->hostdata) {
  6037. sas_target_priv_data = pcie_device->starget->hostdata;
  6038. sas_target_priv_data->deleted = 1;
  6039. _scsih_ublock_io_device(ioc, pcie_device->wwid);
  6040. sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
  6041. }
  6042. pr_info(MPT3SAS_FMT
  6043. "removing handle(0x%04x), wwid (0x%016llx)\n",
  6044. ioc->name, pcie_device->handle,
  6045. (unsigned long long) pcie_device->wwid);
  6046. if (pcie_device->enclosure_handle != 0)
  6047. pr_info(MPT3SAS_FMT
  6048. "removing : enclosure logical id(0x%016llx), slot(%d)\n",
  6049. ioc->name,
  6050. (unsigned long long)pcie_device->enclosure_logical_id,
  6051. pcie_device->slot);
  6052. if (pcie_device->connector_name[0] != '\0')
  6053. pr_info(MPT3SAS_FMT
  6054. "removing: enclosure level(0x%04x), connector name( %s)\n",
  6055. ioc->name, pcie_device->enclosure_level,
  6056. pcie_device->connector_name);
  6057. if (pcie_device->starget)
  6058. scsi_remove_target(&pcie_device->starget->dev);
  6059. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  6060. "%s: exit: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
  6061. pcie_device->handle, (unsigned long long)
  6062. pcie_device->wwid));
  6063. if (pcie_device->enclosure_handle != 0)
  6064. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  6065. "%s: exit: enclosure logical id(0x%016llx), slot(%d)\n",
  6066. ioc->name, __func__,
  6067. (unsigned long long)pcie_device->enclosure_logical_id,
  6068. pcie_device->slot));
  6069. if (pcie_device->connector_name[0] != '\0')
  6070. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  6071. "%s: exit: enclosure level(0x%04x), connector name( %s)\n",
  6072. ioc->name, __func__, pcie_device->enclosure_level,
  6073. pcie_device->connector_name));
  6074. kfree(pcie_device->serial_number);
  6075. }
  6076. /**
  6077. * _scsih_pcie_check_device - checking device responsiveness
  6078. * @ioc: per adapter object
  6079. * @handle: attached device handle
  6080. *
  6081. * Returns nothing.
  6082. */
  6083. static void
  6084. _scsih_pcie_check_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  6085. {
  6086. Mpi2ConfigReply_t mpi_reply;
  6087. Mpi26PCIeDevicePage0_t pcie_device_pg0;
  6088. u32 ioc_status;
  6089. struct _pcie_device *pcie_device;
  6090. u64 wwid;
  6091. unsigned long flags;
  6092. struct scsi_target *starget;
  6093. struct MPT3SAS_TARGET *sas_target_priv_data;
  6094. u32 device_info;
  6095. if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
  6096. &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle)))
  6097. return;
  6098. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
  6099. if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
  6100. return;
  6101. /* check if this is end device */
  6102. device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
  6103. if (!(_scsih_is_nvme_device(device_info)))
  6104. return;
  6105. wwid = le64_to_cpu(pcie_device_pg0.WWID);
  6106. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  6107. pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
  6108. if (!pcie_device) {
  6109. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  6110. return;
  6111. }
  6112. if (unlikely(pcie_device->handle != handle)) {
  6113. starget = pcie_device->starget;
  6114. sas_target_priv_data = starget->hostdata;
  6115. starget_printk(KERN_INFO, starget,
  6116. "handle changed from(0x%04x) to (0x%04x)!!!\n",
  6117. pcie_device->handle, handle);
  6118. sas_target_priv_data->handle = handle;
  6119. pcie_device->handle = handle;
  6120. if (le32_to_cpu(pcie_device_pg0.Flags) &
  6121. MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
  6122. pcie_device->enclosure_level =
  6123. pcie_device_pg0.EnclosureLevel;
  6124. memcpy(&pcie_device->connector_name[0],
  6125. &pcie_device_pg0.ConnectorName[0], 4);
  6126. } else {
  6127. pcie_device->enclosure_level = 0;
  6128. pcie_device->connector_name[0] = '\0';
  6129. }
  6130. }
  6131. /* check if device is present */
  6132. if (!(le32_to_cpu(pcie_device_pg0.Flags) &
  6133. MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
  6134. pr_info(MPT3SAS_FMT
  6135. "device is not present handle(0x%04x), flags!!!\n",
  6136. ioc->name, handle);
  6137. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  6138. pcie_device_put(pcie_device);
  6139. return;
  6140. }
  6141. /* check if there were any issues with discovery */
  6142. if (_scsih_check_pcie_access_status(ioc, wwid, handle,
  6143. pcie_device_pg0.AccessStatus)) {
  6144. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  6145. pcie_device_put(pcie_device);
  6146. return;
  6147. }
  6148. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  6149. pcie_device_put(pcie_device);
  6150. _scsih_ublock_io_device(ioc, wwid);
  6151. return;
  6152. }
  6153. /**
  6154. * _scsih_pcie_add_device - creating pcie device object
  6155. * @ioc: per adapter object
  6156. * @handle: pcie device handle
  6157. *
  6158. * Creating end device object, stored in ioc->pcie_device_list.
  6159. *
  6160. * Return 1 means queue the event later, 0 means complete the event
  6161. */
  6162. static int
  6163. _scsih_pcie_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  6164. {
  6165. Mpi26PCIeDevicePage0_t pcie_device_pg0;
  6166. Mpi26PCIeDevicePage2_t pcie_device_pg2;
  6167. Mpi2ConfigReply_t mpi_reply;
  6168. struct _pcie_device *pcie_device;
  6169. struct _enclosure_node *enclosure_dev;
  6170. u32 pcie_device_type;
  6171. u32 ioc_status;
  6172. u64 wwid;
  6173. if ((mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
  6174. &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, handle))) {
  6175. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  6176. ioc->name, __FILE__, __LINE__, __func__);
  6177. return 0;
  6178. }
  6179. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  6180. MPI2_IOCSTATUS_MASK;
  6181. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  6182. pr_err(MPT3SAS_FMT
  6183. "failure at %s:%d/%s()!\n",
  6184. ioc->name, __FILE__, __LINE__, __func__);
  6185. return 0;
  6186. }
  6187. set_bit(handle, ioc->pend_os_device_add);
  6188. wwid = le64_to_cpu(pcie_device_pg0.WWID);
  6189. /* check if device is present */
  6190. if (!(le32_to_cpu(pcie_device_pg0.Flags) &
  6191. MPI26_PCIEDEV0_FLAGS_DEVICE_PRESENT)) {
  6192. pr_err(MPT3SAS_FMT
  6193. "device is not present handle(0x04%x)!!!\n",
  6194. ioc->name, handle);
  6195. return 0;
  6196. }
  6197. /* check if there were any issues with discovery */
  6198. if (_scsih_check_pcie_access_status(ioc, wwid, handle,
  6199. pcie_device_pg0.AccessStatus))
  6200. return 0;
  6201. if (!(_scsih_is_nvme_device(le32_to_cpu(pcie_device_pg0.DeviceInfo))))
  6202. return 0;
  6203. pcie_device = mpt3sas_get_pdev_by_wwid(ioc, wwid);
  6204. if (pcie_device) {
  6205. clear_bit(handle, ioc->pend_os_device_add);
  6206. pcie_device_put(pcie_device);
  6207. return 0;
  6208. }
  6209. pcie_device = kzalloc(sizeof(struct _pcie_device), GFP_KERNEL);
  6210. if (!pcie_device) {
  6211. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  6212. ioc->name, __FILE__, __LINE__, __func__);
  6213. return 0;
  6214. }
  6215. kref_init(&pcie_device->refcount);
  6216. pcie_device->id = ioc->pcie_target_id++;
  6217. pcie_device->channel = PCIE_CHANNEL;
  6218. pcie_device->handle = handle;
  6219. pcie_device->device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
  6220. pcie_device->wwid = wwid;
  6221. pcie_device->port_num = pcie_device_pg0.PortNum;
  6222. pcie_device->fast_path = (le32_to_cpu(pcie_device_pg0.Flags) &
  6223. MPI26_PCIEDEV0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
  6224. pcie_device_type = pcie_device->device_info &
  6225. MPI26_PCIE_DEVINFO_MASK_DEVICE_TYPE;
  6226. pcie_device->enclosure_handle =
  6227. le16_to_cpu(pcie_device_pg0.EnclosureHandle);
  6228. if (pcie_device->enclosure_handle != 0)
  6229. pcie_device->slot = le16_to_cpu(pcie_device_pg0.Slot);
  6230. if (le32_to_cpu(pcie_device_pg0.Flags) &
  6231. MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) {
  6232. pcie_device->enclosure_level = pcie_device_pg0.EnclosureLevel;
  6233. memcpy(&pcie_device->connector_name[0],
  6234. &pcie_device_pg0.ConnectorName[0], 4);
  6235. } else {
  6236. pcie_device->enclosure_level = 0;
  6237. pcie_device->connector_name[0] = '\0';
  6238. }
  6239. /* get enclosure_logical_id */
  6240. if (pcie_device->enclosure_handle) {
  6241. enclosure_dev =
  6242. mpt3sas_scsih_enclosure_find_by_handle(ioc,
  6243. pcie_device->enclosure_handle);
  6244. if (enclosure_dev)
  6245. pcie_device->enclosure_logical_id =
  6246. le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
  6247. }
  6248. /* TODO -- Add device name once FW supports it */
  6249. if (mpt3sas_config_get_pcie_device_pg2(ioc, &mpi_reply,
  6250. &pcie_device_pg2, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)) {
  6251. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  6252. ioc->name, __FILE__, __LINE__, __func__);
  6253. kfree(pcie_device);
  6254. return 0;
  6255. }
  6256. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
  6257. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  6258. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  6259. ioc->name, __FILE__, __LINE__, __func__);
  6260. kfree(pcie_device);
  6261. return 0;
  6262. }
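/* Cache the NVMe maximum data transfer size from PCIe Device Page 2 and
 * use its ControllerResetTO if provided, otherwise fall back to a
 * 30 second reset timeout.
 */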
  6263. pcie_device->nvme_mdts =
  6264. le32_to_cpu(pcie_device_pg2.MaximumDataTransferSize);
  6265. if (pcie_device_pg2.ControllerResetTO)
  6266. pcie_device->reset_timeout =
  6267. pcie_device_pg2.ControllerResetTO;
  6268. else
  6269. pcie_device->reset_timeout = 30;
  6270. if (ioc->wait_for_discovery_to_complete)
  6271. _scsih_pcie_device_init_add(ioc, pcie_device);
  6272. else
  6273. _scsih_pcie_device_add(ioc, pcie_device);
  6274. pcie_device_put(pcie_device);
  6275. return 0;
  6276. }
  6277. /**
  6278. * _scsih_pcie_topology_change_event_debug - debug for topology
  6279. * event
  6280. * @ioc: per adapter object
  6281. * @event_data: event data payload
  6282. * Context: user.
  6283. */
  6284. static void
  6285. _scsih_pcie_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
  6286. Mpi26EventDataPCIeTopologyChangeList_t *event_data)
  6287. {
  6288. int i;
  6289. u16 handle;
  6290. u16 reason_code;
  6291. u8 port_number;
  6292. char *status_str = NULL;
  6293. u8 link_rate, prev_link_rate;
  6294. switch (event_data->SwitchStatus) {
  6295. case MPI26_EVENT_PCIE_TOPO_SS_ADDED:
  6296. status_str = "add";
  6297. break;
  6298. case MPI26_EVENT_PCIE_TOPO_SS_NOT_RESPONDING:
  6299. status_str = "remove";
  6300. break;
  6301. case MPI26_EVENT_PCIE_TOPO_SS_RESPONDING:
  6302. case 0:
  6303. status_str = "responding";
  6304. break;
  6305. case MPI26_EVENT_PCIE_TOPO_SS_DELAY_NOT_RESPONDING:
  6306. status_str = "remove delay";
  6307. break;
  6308. default:
  6309. status_str = "unknown status";
  6310. break;
  6311. }
  6312. pr_info(MPT3SAS_FMT "pcie topology change: (%s)\n",
  6313. ioc->name, status_str);
  6314. pr_info("\tswitch_handle(0x%04x), enclosure_handle(0x%04x)"
  6315. "start_port(%02d), count(%d)\n",
  6316. le16_to_cpu(event_data->SwitchDevHandle),
  6317. le16_to_cpu(event_data->EnclosureHandle),
  6318. event_data->StartPortNum, event_data->NumEntries);
  6319. for (i = 0; i < event_data->NumEntries; i++) {
  6320. handle =
  6321. le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
  6322. if (!handle)
  6323. continue;
  6324. port_number = event_data->StartPortNum + i;
  6325. reason_code = event_data->PortEntry[i].PortStatus;
  6326. switch (reason_code) {
  6327. case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
  6328. status_str = "target add";
  6329. break;
  6330. case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
  6331. status_str = "target remove";
  6332. break;
  6333. case MPI26_EVENT_PCIE_TOPO_PS_DELAY_NOT_RESPONDING:
  6334. status_str = "delay target remove";
  6335. break;
  6336. case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
  6337. status_str = "link rate change";
  6338. break;
  6339. case MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE:
  6340. status_str = "target responding";
  6341. break;
  6342. default:
  6343. status_str = "unknown";
  6344. break;
  6345. }
  6346. link_rate = event_data->PortEntry[i].CurrentPortInfo &
  6347. MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
  6348. prev_link_rate = event_data->PortEntry[i].PreviousPortInfo &
  6349. MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
  6350. pr_info("\tport(%02d), attached_handle(0x%04x): %s:"
  6351. " link rate: new(0x%02x), old(0x%02x)\n", port_number,
  6352. handle, status_str, link_rate, prev_link_rate);
  6353. }
  6354. }
  6355. /**
  6356. * _scsih_pcie_topology_change_event - handle PCIe topology
  6357. * changes
  6358. * @ioc: per adapter object
  6359. * @fw_event: The fw_event_work object
  6360. * Context: user.
  6361. *
  6362. */
  6363. static void
  6364. _scsih_pcie_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
  6365. struct fw_event_work *fw_event)
  6366. {
  6367. int i;
  6368. u16 handle;
  6369. u16 reason_code;
  6370. u8 link_rate, prev_link_rate;
  6371. unsigned long flags;
  6372. int rc;
  6373. Mpi26EventDataPCIeTopologyChangeList_t *event_data =
  6374. (Mpi26EventDataPCIeTopologyChangeList_t *) fw_event->event_data;
  6375. struct _pcie_device *pcie_device;
  6376. if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
  6377. _scsih_pcie_topology_change_event_debug(ioc, event_data);
  6378. if (ioc->shost_recovery || ioc->remove_host ||
  6379. ioc->pci_error_recovery)
  6380. return;
  6381. if (fw_event->ignore) {
  6382. dewtprintk(ioc, pr_info(MPT3SAS_FMT "ignoring switch event\n",
  6383. ioc->name));
  6384. return;
  6385. }
  6386. /* handle siblings events */
  6387. for (i = 0; i < event_data->NumEntries; i++) {
  6388. if (fw_event->ignore) {
  6389. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  6390. "ignoring switch event\n", ioc->name));
  6391. return;
  6392. }
  6393. if (ioc->remove_host || ioc->pci_error_recovery)
  6394. return;
  6395. reason_code = event_data->PortEntry[i].PortStatus;
  6396. handle =
  6397. le16_to_cpu(event_data->PortEntry[i].AttachedDevHandle);
  6398. if (!handle)
  6399. continue;
  6400. link_rate = event_data->PortEntry[i].CurrentPortInfo
  6401. & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
  6402. prev_link_rate = event_data->PortEntry[i].PreviousPortInfo
  6403. & MPI26_EVENT_PCIE_TOPO_PI_RATE_MASK;
  6404. switch (reason_code) {
  6405. case MPI26_EVENT_PCIE_TOPO_PS_PORT_CHANGED:
  6406. if (ioc->shost_recovery)
  6407. break;
  6408. if (link_rate == prev_link_rate)
  6409. break;
  6410. if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
  6411. break;
  6412. _scsih_pcie_check_device(ioc, handle);
  6413. /* This code after this point handles the test case
  6414. * where a device has been added, however its returning
  6415. * BUSY for sometime. Then before the Device Missing
  6416. * Delay expires and the device becomes READY, the
  6417. * device is removed and added back.
  6418. */
  6419. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  6420. pcie_device = __mpt3sas_get_pdev_by_handle(ioc, handle);
  6421. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  6422. if (pcie_device) {
  6423. pcie_device_put(pcie_device);
  6424. break;
  6425. }
  6426. if (!test_bit(handle, ioc->pend_os_device_add))
  6427. break;
  6428. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  6429. "handle(0x%04x) device not found: convert "
  6430. "event to a device add\n", ioc->name, handle));
  6431. event_data->PortEntry[i].PortStatus &= 0xF0;
  6432. event_data->PortEntry[i].PortStatus |=
  6433. MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED;
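/* Intentional fallthrough: the entry was rewritten above as a
 * device-add, so let the DEV_ADDED handling below pick it up.
 */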
  6434. case MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED:
  6435. if (ioc->shost_recovery)
  6436. break;
  6437. if (link_rate < MPI26_EVENT_PCIE_TOPO_PI_RATE_2_5)
  6438. break;
  6439. rc = _scsih_pcie_add_device(ioc, handle);
  6440. if (!rc) {
  6441. /* mark entry vacant */
  6442. /* TODO This needs to be reviewed and fixed,
  6443. * we dont have an entry
  6444. * to make an event void like vacant
  6445. */
  6446. event_data->PortEntry[i].PortStatus |=
  6447. MPI26_EVENT_PCIE_TOPO_PS_NO_CHANGE;
  6448. }
  6449. break;
  6450. case MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING:
  6451. _scsih_pcie_device_remove_by_handle(ioc, handle);
  6452. break;
  6453. }
  6454. }
  6455. }
  6456. /**
  6457. * _scsih_pcie_device_status_change_event_debug - debug for
  6458. * device event
6459. * @ioc: per adapter object
 * @event_data: event data payload
  6460. * Context: user.
  6461. *
  6462. * Return nothing.
  6463. */
  6464. static void
  6465. _scsih_pcie_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
  6466. Mpi26EventDataPCIeDeviceStatusChange_t *event_data)
  6467. {
  6468. char *reason_str = NULL;
  6469. switch (event_data->ReasonCode) {
  6470. case MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA:
  6471. reason_str = "smart data";
  6472. break;
  6473. case MPI26_EVENT_PCIDEV_STAT_RC_UNSUPPORTED:
  6474. reason_str = "unsupported device discovered";
  6475. break;
  6476. case MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET:
  6477. reason_str = "internal device reset";
  6478. break;
  6479. case MPI26_EVENT_PCIDEV_STAT_RC_TASK_ABORT_INTERNAL:
  6480. reason_str = "internal task abort";
  6481. break;
  6482. case MPI26_EVENT_PCIDEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
  6483. reason_str = "internal task abort set";
  6484. break;
  6485. case MPI26_EVENT_PCIDEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
  6486. reason_str = "internal clear task set";
  6487. break;
  6488. case MPI26_EVENT_PCIDEV_STAT_RC_QUERY_TASK_INTERNAL:
  6489. reason_str = "internal query task";
  6490. break;
  6491. case MPI26_EVENT_PCIDEV_STAT_RC_DEV_INIT_FAILURE:
  6492. reason_str = "device init failure";
  6493. break;
  6494. case MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
  6495. reason_str = "internal device reset complete";
  6496. break;
  6497. case MPI26_EVENT_PCIDEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
  6498. reason_str = "internal task abort complete";
  6499. break;
  6500. case MPI26_EVENT_PCIDEV_STAT_RC_ASYNC_NOTIFICATION:
  6501. reason_str = "internal async notification";
  6502. break;
  6503. case MPI26_EVENT_PCIDEV_STAT_RC_PCIE_HOT_RESET_FAILED:
  6504. reason_str = "pcie hot reset failed";
  6505. break;
  6506. default:
  6507. reason_str = "unknown reason";
  6508. break;
  6509. }
  6510. pr_info(MPT3SAS_FMT "PCIE device status change: (%s)\n"
  6511. "\thandle(0x%04x), WWID(0x%016llx), tag(%d)",
  6512. ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
  6513. (unsigned long long)le64_to_cpu(event_data->WWID),
  6514. le16_to_cpu(event_data->TaskTag));
6515. if (event_data->ReasonCode == MPI26_EVENT_PCIDEV_STAT_RC_SMART_DATA)
6516. pr_cont(", ASC(0x%x), ASCQ(0x%x)",
6517. event_data->ASC, event_data->ASCQ);
6518. pr_cont("\n");
  6519. }
  6520. /**
  6521. * _scsih_pcie_device_status_change_event - handle device status
  6522. * change
  6523. * @ioc: per adapter object
  6524. * @fw_event: The fw_event_work object
  6525. * Context: user.
  6526. *
  6527. * Return nothing.
  6528. */
  6529. static void
  6530. _scsih_pcie_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
  6531. struct fw_event_work *fw_event)
  6532. {
  6533. struct MPT3SAS_TARGET *target_priv_data;
  6534. struct _pcie_device *pcie_device;
  6535. u64 wwid;
  6536. unsigned long flags;
  6537. Mpi26EventDataPCIeDeviceStatusChange_t *event_data =
  6538. (Mpi26EventDataPCIeDeviceStatusChange_t *)fw_event->event_data;
  6539. if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
  6540. _scsih_pcie_device_status_change_event_debug(ioc,
  6541. event_data);
  6542. if (event_data->ReasonCode !=
  6543. MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET &&
  6544. event_data->ReasonCode !=
  6545. MPI26_EVENT_PCIDEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
  6546. return;
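/* Only the internal device reset start/complete notifications reach
 * this point; tm_busy is presumably held while the firmware resets the
 * device so that no new task management is issued to it, and cleared
 * once the reset completes.
 */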
  6547. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  6548. wwid = le64_to_cpu(event_data->WWID);
  6549. pcie_device = __mpt3sas_get_pdev_by_wwid(ioc, wwid);
  6550. if (!pcie_device || !pcie_device->starget)
  6551. goto out;
  6552. target_priv_data = pcie_device->starget->hostdata;
  6553. if (!target_priv_data)
  6554. goto out;
  6555. if (event_data->ReasonCode ==
  6556. MPI26_EVENT_PCIDEV_STAT_RC_INTERNAL_DEVICE_RESET)
  6557. target_priv_data->tm_busy = 1;
  6558. else
  6559. target_priv_data->tm_busy = 0;
  6560. out:
  6561. if (pcie_device)
  6562. pcie_device_put(pcie_device);
  6563. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  6564. }
  6565. /**
  6566. * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
  6567. * event
  6568. * @ioc: per adapter object
  6569. * @event_data: event data payload
  6570. * Context: user.
  6571. *
  6572. * Return nothing.
  6573. */
  6574. static void
  6575. _scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
  6576. Mpi2EventDataSasEnclDevStatusChange_t *event_data)
  6577. {
  6578. char *reason_str = NULL;
  6579. switch (event_data->ReasonCode) {
  6580. case MPI2_EVENT_SAS_ENCL_RC_ADDED:
  6581. reason_str = "enclosure add";
  6582. break;
  6583. case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
  6584. reason_str = "enclosure remove";
  6585. break;
  6586. default:
  6587. reason_str = "unknown reason";
  6588. break;
  6589. }
  6590. pr_info(MPT3SAS_FMT "enclosure status change: (%s)\n"
  6591. "\thandle(0x%04x), enclosure logical id(0x%016llx)"
  6592. " number slots(%d)\n", ioc->name, reason_str,
  6593. le16_to_cpu(event_data->EnclosureHandle),
  6594. (unsigned long long)le64_to_cpu(event_data->EnclosureLogicalID),
  6595. le16_to_cpu(event_data->StartSlot));
  6596. }
  6597. /**
  6598. * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
  6599. * @ioc: per adapter object
  6600. * @fw_event: The fw_event_work object
  6601. * Context: user.
  6602. *
  6603. * Return nothing.
  6604. */
  6605. static void
  6606. _scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
  6607. struct fw_event_work *fw_event)
  6608. {
  6609. Mpi2ConfigReply_t mpi_reply;
  6610. struct _enclosure_node *enclosure_dev = NULL;
  6611. Mpi2EventDataSasEnclDevStatusChange_t *event_data =
  6612. (Mpi2EventDataSasEnclDevStatusChange_t *)fw_event->event_data;
  6613. int rc;
  6614. u16 enclosure_handle = le16_to_cpu(event_data->EnclosureHandle);
  6615. if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
  6616. _scsih_sas_enclosure_dev_status_change_event_debug(ioc,
  6617. (Mpi2EventDataSasEnclDevStatusChange_t *)
  6618. fw_event->event_data);
  6619. if (ioc->shost_recovery)
  6620. return;
  6621. if (enclosure_handle)
  6622. enclosure_dev =
  6623. mpt3sas_scsih_enclosure_find_by_handle(ioc,
  6624. enclosure_handle);
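/* On an add, read Enclosure Page 0 and cache it on ioc->enclosure_list
 * if this enclosure is not already known; on a not-responding event,
 * drop the cached entry.
 */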
  6625. switch (event_data->ReasonCode) {
  6626. case MPI2_EVENT_SAS_ENCL_RC_ADDED:
  6627. if (!enclosure_dev) {
  6628. enclosure_dev =
  6629. kzalloc(sizeof(struct _enclosure_node),
  6630. GFP_KERNEL);
  6631. if (!enclosure_dev) {
  6632. pr_info(MPT3SAS_FMT
  6633. "failure at %s:%d/%s()!\n", ioc->name,
  6634. __FILE__, __LINE__, __func__);
  6635. return;
  6636. }
  6637. rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
  6638. &enclosure_dev->pg0,
  6639. MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
  6640. enclosure_handle);
  6641. if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
  6642. MPI2_IOCSTATUS_MASK)) {
  6643. kfree(enclosure_dev);
  6644. return;
  6645. }
  6646. list_add_tail(&enclosure_dev->list,
  6647. &ioc->enclosure_list);
  6648. }
  6649. break;
  6650. case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
  6651. if (enclosure_dev) {
  6652. list_del(&enclosure_dev->list);
  6653. kfree(enclosure_dev);
  6654. }
  6655. break;
  6656. default:
  6657. break;
  6658. }
  6659. }
  6660. /**
  6661. * _scsih_sas_broadcast_primitive_event - handle broadcast events
  6662. * @ioc: per adapter object
  6663. * @fw_event: The fw_event_work object
  6664. * Context: user.
  6665. *
  6666. * Return nothing.
  6667. */
  6668. static void
  6669. _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
  6670. struct fw_event_work *fw_event)
  6671. {
  6672. struct scsi_cmnd *scmd;
  6673. struct scsi_device *sdev;
  6674. struct scsiio_tracker *st;
  6675. u16 smid, handle;
  6676. u32 lun;
  6677. struct MPT3SAS_DEVICE *sas_device_priv_data;
  6678. u32 termination_count;
  6679. u32 query_count;
  6680. Mpi2SCSITaskManagementReply_t *mpi_reply;
  6681. Mpi2EventDataSasBroadcastPrimitive_t *event_data =
  6682. (Mpi2EventDataSasBroadcastPrimitive_t *)
  6683. fw_event->event_data;
  6684. u16 ioc_status;
  6685. unsigned long flags;
  6686. int r;
  6687. u8 max_retries = 0;
  6688. u8 task_abort_retries;
  6689. mutex_lock(&ioc->tm_cmds.mutex);
  6690. pr_info(MPT3SAS_FMT
  6691. "%s: enter: phy number(%d), width(%d)\n",
  6692. ioc->name, __func__, event_data->PhyNum,
  6693. event_data->PortWidth);
  6694. _scsih_block_io_all_device(ioc);
  6695. spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
  6696. mpi_reply = ioc->tm_cmds.reply;
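/* Walk every outstanding SCSI I/O and issue a QUERY_TASK TM for it; if
 * the firmware no longer owns the I/O, abort it with ABORT_TASK.  The
 * whole pass is retried (up to five times) when a TM fails or another
 * broadcast AEN arrives while the loop is in progress.
 */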
  6697. broadcast_aen_retry:
  6698. /* sanity checks for retrying this loop */
  6699. if (max_retries++ == 5) {
  6700. dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: giving up\n",
  6701. ioc->name, __func__));
  6702. goto out;
  6703. } else if (max_retries > 1)
  6704. dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: %d retry\n",
  6705. ioc->name, __func__, max_retries - 1));
  6706. termination_count = 0;
  6707. query_count = 0;
  6708. for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
  6709. if (ioc->shost_recovery)
  6710. goto out;
  6711. scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
  6712. if (!scmd)
  6713. continue;
  6714. st = scsi_cmd_priv(scmd);
  6715. sdev = scmd->device;
  6716. sas_device_priv_data = sdev->hostdata;
  6717. if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
  6718. continue;
  6719. /* skip hidden raid components */
  6720. if (sas_device_priv_data->sas_target->flags &
  6721. MPT_TARGET_FLAGS_RAID_COMPONENT)
  6722. continue;
  6723. /* skip volumes */
  6724. if (sas_device_priv_data->sas_target->flags &
  6725. MPT_TARGET_FLAGS_VOLUME)
  6726. continue;
  6727. handle = sas_device_priv_data->sas_target->handle;
  6728. lun = sas_device_priv_data->lun;
  6729. query_count++;
  6730. if (ioc->shost_recovery)
  6731. goto out;
  6732. spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  6733. r = mpt3sas_scsih_issue_tm(ioc, handle, lun,
  6734. MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
  6735. st->msix_io, 30, 0);
  6736. if (r == FAILED) {
  6737. sdev_printk(KERN_WARNING, sdev,
  6738. "mpt3sas_scsih_issue_tm: FAILED when sending "
  6739. "QUERY_TASK: scmd(%p)\n", scmd);
  6740. spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
  6741. goto broadcast_aen_retry;
  6742. }
  6743. ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
  6744. & MPI2_IOCSTATUS_MASK;
  6745. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  6746. sdev_printk(KERN_WARNING, sdev,
  6747. "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
  6748. ioc_status, scmd);
  6749. spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
  6750. goto broadcast_aen_retry;
  6751. }
  6752. /* see if IO is still owned by IOC and target */
  6753. if (mpi_reply->ResponseCode ==
  6754. MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
  6755. mpi_reply->ResponseCode ==
  6756. MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
  6757. spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
  6758. continue;
  6759. }
  6760. task_abort_retries = 0;
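/* Keep retrying ABORT_TASK while the TM fails or the command is still
 * outstanding; cb_idx appears to stay valid (not 0xFF) until the I/O
 * actually completes.
 */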
  6761. tm_retry:
  6762. if (task_abort_retries++ == 60) {
  6763. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  6764. "%s: ABORT_TASK: giving up\n", ioc->name,
  6765. __func__));
  6766. spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
  6767. goto broadcast_aen_retry;
  6768. }
  6769. if (ioc->shost_recovery)
  6770. goto out_no_lock;
  6771. r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->lun,
  6772. MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid,
  6773. st->msix_io, 30, 0);
  6774. if (r == FAILED || st->cb_idx != 0xFF) {
  6775. sdev_printk(KERN_WARNING, sdev,
  6776. "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
  6777. "scmd(%p)\n", scmd);
  6778. goto tm_retry;
  6779. }
  6780. if (task_abort_retries > 1)
  6781. sdev_printk(KERN_WARNING, sdev,
  6782. "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
  6783. " scmd(%p)\n",
  6784. task_abort_retries - 1, scmd);
  6785. termination_count += le32_to_cpu(mpi_reply->TerminationCount);
  6786. spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
  6787. }
  6788. if (ioc->broadcast_aen_pending) {
  6789. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  6790. "%s: loop back due to pending AEN\n",
  6791. ioc->name, __func__));
  6792. ioc->broadcast_aen_pending = 0;
  6793. goto broadcast_aen_retry;
  6794. }
  6795. out:
  6796. spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  6797. out_no_lock:
  6798. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  6799. "%s - exit, query_count = %d termination_count = %d\n",
  6800. ioc->name, __func__, query_count, termination_count));
  6801. ioc->broadcast_aen_busy = 0;
  6802. if (!ioc->shost_recovery)
  6803. _scsih_ublock_io_all_device(ioc);
  6804. mutex_unlock(&ioc->tm_cmds.mutex);
  6805. }
  6806. /**
  6807. * _scsih_sas_discovery_event - handle discovery events
  6808. * @ioc: per adapter object
  6809. * @fw_event: The fw_event_work object
  6810. * Context: user.
  6811. *
  6812. * Return nothing.
  6813. */
  6814. static void
  6815. _scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
  6816. struct fw_event_work *fw_event)
  6817. {
  6818. Mpi2EventDataSasDiscovery_t *event_data =
  6819. (Mpi2EventDataSasDiscovery_t *) fw_event->event_data;
  6820. if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
  6821. pr_info(MPT3SAS_FMT "discovery event: (%s)", ioc->name,
  6822. (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
  6823. "start" : "stop");
  6824. if (event_data->DiscoveryStatus)
  6825. pr_info("discovery_status(0x%08x)",
  6826. le32_to_cpu(event_data->DiscoveryStatus));
  6827. pr_info("\n");
  6828. }
  6829. if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
  6830. !ioc->sas_hba.num_phys) {
  6831. if (disable_discovery > 0 && ioc->shost_recovery) {
  6832. /* Wait for the reset to complete */
  6833. while (ioc->shost_recovery)
  6834. ssleep(1);
  6835. }
  6836. _scsih_sas_host_add(ioc);
  6837. }
  6838. }
  6839. /**
  6840. * _scsih_sas_device_discovery_error_event - display SAS device discovery error
  6841. * events
  6842. * @ioc: per adapter object
  6843. * @fw_event: The fw_event_work object
  6844. * Context: user.
  6845. *
  6846. * Return nothing.
  6847. */
  6848. static void
  6849. _scsih_sas_device_discovery_error_event(struct MPT3SAS_ADAPTER *ioc,
  6850. struct fw_event_work *fw_event)
  6851. {
  6852. Mpi25EventDataSasDeviceDiscoveryError_t *event_data =
  6853. (Mpi25EventDataSasDeviceDiscoveryError_t *)fw_event->event_data;
  6854. switch (event_data->ReasonCode) {
  6855. case MPI25_EVENT_SAS_DISC_ERR_SMP_FAILED:
6856. pr_warn(MPT3SAS_FMT "SMP command sent to the expander "
6857. "(handle:0x%04x, sas_address:0x%016llx, "
6858. "physical_port:0x%02x) has failed\n",
  6859. ioc->name, le16_to_cpu(event_data->DevHandle),
  6860. (unsigned long long)le64_to_cpu(event_data->SASAddress),
  6861. event_data->PhysicalPort);
  6862. break;
  6863. case MPI25_EVENT_SAS_DISC_ERR_SMP_TIMEOUT:
6864. pr_warn(MPT3SAS_FMT "SMP command sent to the expander "
6865. "(handle:0x%04x, sas_address:0x%016llx, "
6866. "physical_port:0x%02x) has timed out\n",
  6867. ioc->name, le16_to_cpu(event_data->DevHandle),
  6868. (unsigned long long)le64_to_cpu(event_data->SASAddress),
  6869. event_data->PhysicalPort);
  6870. break;
  6871. default:
  6872. break;
  6873. }
  6874. }
  6875. /**
  6876. * _scsih_pcie_enumeration_event - handle enumeration events
  6877. * @ioc: per adapter object
  6878. * @fw_event: The fw_event_work object
  6879. * Context: user.
  6880. *
  6881. * Return nothing.
  6882. */
  6883. static void
  6884. _scsih_pcie_enumeration_event(struct MPT3SAS_ADAPTER *ioc,
  6885. struct fw_event_work *fw_event)
  6886. {
  6887. Mpi26EventDataPCIeEnumeration_t *event_data =
  6888. (Mpi26EventDataPCIeEnumeration_t *)fw_event->event_data;
  6889. if (!(ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK))
  6890. return;
  6891. pr_info(MPT3SAS_FMT "pcie enumeration event: (%s) Flag 0x%02x",
  6892. ioc->name,
  6893. (event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
  6894. "started" : "completed",
  6895. event_data->Flags);
  6896. if (event_data->EnumerationStatus)
  6897. pr_cont("enumeration_status(0x%08x)",
  6898. le32_to_cpu(event_data->EnumerationStatus));
  6899. pr_cont("\n");
  6900. }
  6901. /**
  6902. * _scsih_ir_fastpath - turn on fastpath for IR physdisk
  6903. * @ioc: per adapter object
  6904. * @handle: device handle for physical disk
  6905. * @phys_disk_num: physical disk number
  6906. *
  6907. * Return 0 for success, else failure.
  6908. */
  6909. static int
  6910. _scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
  6911. {
  6912. Mpi2RaidActionRequest_t *mpi_request;
  6913. Mpi2RaidActionReply_t *mpi_reply;
  6914. u16 smid;
  6915. u8 issue_reset = 0;
  6916. int rc = 0;
  6917. u16 ioc_status;
  6918. u32 log_info;
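/* The PHYSDISK_HIDDEN RAID action issued below is presumably not
 * available on MPI2 (SAS2) controllers, so those return success without
 * doing anything.
 */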
  6919. if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
  6920. return rc;
  6921. mutex_lock(&ioc->scsih_cmds.mutex);
  6922. if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
  6923. pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
  6924. ioc->name, __func__);
  6925. rc = -EAGAIN;
  6926. goto out;
  6927. }
  6928. ioc->scsih_cmds.status = MPT3_CMD_PENDING;
  6929. smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
  6930. if (!smid) {
  6931. pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
  6932. ioc->name, __func__);
  6933. ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
  6934. rc = -EAGAIN;
  6935. goto out;
  6936. }
  6937. mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
  6938. ioc->scsih_cmds.smid = smid;
  6939. memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
  6940. mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
  6941. mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
  6942. mpi_request->PhysDiskNum = phys_disk_num;
  6943. dewtprintk(ioc, pr_info(MPT3SAS_FMT "IR RAID_ACTION: turning fast "\
  6944. "path on for handle(0x%04x), phys_disk_num (0x%02x)\n", ioc->name,
  6945. handle, phys_disk_num));
  6946. init_completion(&ioc->scsih_cmds.done);
  6947. mpt3sas_base_put_smid_default(ioc, smid);
  6948. wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
  6949. if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
  6950. pr_err(MPT3SAS_FMT "%s: timeout\n",
  6951. ioc->name, __func__);
  6952. if (!(ioc->scsih_cmds.status & MPT3_CMD_RESET))
  6953. issue_reset = 1;
  6954. rc = -EFAULT;
  6955. goto out;
  6956. }
  6957. if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
  6958. mpi_reply = ioc->scsih_cmds.reply;
  6959. ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
  6960. if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
  6961. log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
  6962. else
  6963. log_info = 0;
  6964. ioc_status &= MPI2_IOCSTATUS_MASK;
  6965. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  6966. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  6967. "IR RAID_ACTION: failed: ioc_status(0x%04x), "
  6968. "loginfo(0x%08x)!!!\n", ioc->name, ioc_status,
  6969. log_info));
  6970. rc = -EFAULT;
  6971. } else
  6972. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  6973. "IR RAID_ACTION: completed successfully\n",
  6974. ioc->name));
  6975. }
  6976. out:
  6977. ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
  6978. mutex_unlock(&ioc->scsih_cmds.mutex);
  6979. if (issue_reset)
  6980. mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
  6981. return rc;
  6982. }
  6983. /**
  6984. * _scsih_reprobe_lun - reprobing lun
  6985. * @sdev: scsi device struct
  6986. * @no_uld_attach: sdev->no_uld_attach flag setting
  6987. *
  6988. **/
  6989. static void
  6990. _scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
  6991. {
  6992. sdev->no_uld_attach = no_uld_attach ? 1 : 0;
  6993. sdev_printk(KERN_INFO, sdev, "%s raid component\n",
  6994. sdev->no_uld_attach ? "hiding" : "exposing");
  6995. WARN_ON(scsi_device_reprobe(sdev));
  6996. }
  6997. /**
  6998. * _scsih_sas_volume_add - add new volume
  6999. * @ioc: per adapter object
  7000. * @element: IR config element data
  7001. * Context: user.
  7002. *
  7003. * Return nothing.
  7004. */
  7005. static void
  7006. _scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
  7007. Mpi2EventIrConfigElement_t *element)
  7008. {
  7009. struct _raid_device *raid_device;
  7010. unsigned long flags;
  7011. u64 wwid;
  7012. u16 handle = le16_to_cpu(element->VolDevHandle);
  7013. int rc;
  7014. mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
  7015. if (!wwid) {
  7016. pr_err(MPT3SAS_FMT
  7017. "failure at %s:%d/%s()!\n", ioc->name,
  7018. __FILE__, __LINE__, __func__);
  7019. return;
  7020. }
  7021. spin_lock_irqsave(&ioc->raid_device_lock, flags);
  7022. raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
  7023. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  7024. if (raid_device)
  7025. return;
  7026. raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
  7027. if (!raid_device) {
  7028. pr_err(MPT3SAS_FMT
  7029. "failure at %s:%d/%s()!\n", ioc->name,
  7030. __FILE__, __LINE__, __func__);
  7031. return;
  7032. }
  7033. raid_device->id = ioc->sas_id++;
  7034. raid_device->channel = RAID_CHANNEL;
  7035. raid_device->handle = handle;
  7036. raid_device->wwid = wwid;
  7037. _scsih_raid_device_add(ioc, raid_device);
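/* During the initial discovery pass the volume is only evaluated as a
 * potential boot device; otherwise it is registered with the SCSI
 * midlayer immediately and torn down again if scsi_add_device() fails.
 */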
  7038. if (!ioc->wait_for_discovery_to_complete) {
  7039. rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
  7040. raid_device->id, 0);
  7041. if (rc)
  7042. _scsih_raid_device_remove(ioc, raid_device);
  7043. } else {
  7044. spin_lock_irqsave(&ioc->raid_device_lock, flags);
  7045. _scsih_determine_boot_device(ioc, raid_device, 1);
  7046. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  7047. }
  7048. }
  7049. /**
  7050. * _scsih_sas_volume_delete - delete volume
  7051. * @ioc: per adapter object
  7052. * @handle: volume device handle
  7053. * Context: user.
  7054. *
  7055. * Return nothing.
  7056. */
  7057. static void
  7058. _scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  7059. {
  7060. struct _raid_device *raid_device;
  7061. unsigned long flags;
  7062. struct MPT3SAS_TARGET *sas_target_priv_data;
  7063. struct scsi_target *starget = NULL;
  7064. spin_lock_irqsave(&ioc->raid_device_lock, flags);
  7065. raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
  7066. if (raid_device) {
  7067. if (raid_device->starget) {
  7068. starget = raid_device->starget;
  7069. sas_target_priv_data = starget->hostdata;
  7070. sas_target_priv_data->deleted = 1;
  7071. }
  7072. pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n",
  7073. ioc->name, raid_device->handle,
  7074. (unsigned long long) raid_device->wwid);
  7075. list_del(&raid_device->list);
  7076. kfree(raid_device);
  7077. }
  7078. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  7079. if (starget)
  7080. scsi_remove_target(&starget->dev);
  7081. }
  7082. /**
  7083. * _scsih_sas_pd_expose - expose pd component to /dev/sdX
  7084. * @ioc: per adapter object
  7085. * @element: IR config element data
  7086. * Context: user.
  7087. *
  7088. * Return nothing.
  7089. */
  7090. static void
  7091. _scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
  7092. Mpi2EventIrConfigElement_t *element)
  7093. {
  7094. struct _sas_device *sas_device;
  7095. struct scsi_target *starget = NULL;
  7096. struct MPT3SAS_TARGET *sas_target_priv_data;
  7097. unsigned long flags;
  7098. u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
  7099. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  7100. sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
  7101. if (sas_device) {
  7102. sas_device->volume_handle = 0;
  7103. sas_device->volume_wwid = 0;
  7104. clear_bit(handle, ioc->pd_handles);
  7105. if (sas_device->starget && sas_device->starget->hostdata) {
  7106. starget = sas_device->starget;
  7107. sas_target_priv_data = starget->hostdata;
  7108. sas_target_priv_data->flags &=
  7109. ~MPT_TARGET_FLAGS_RAID_COMPONENT;
  7110. }
  7111. }
  7112. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  7113. if (!sas_device)
  7114. return;
  7115. /* exposing raid component */
  7116. if (starget)
  7117. starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
  7118. sas_device_put(sas_device);
  7119. }
  7120. /**
  7121. * _scsih_sas_pd_hide - hide pd component from /dev/sdX
  7122. * @ioc: per adapter object
  7123. * @element: IR config element data
  7124. * Context: user.
  7125. *
  7126. * Return nothing.
  7127. */
  7128. static void
  7129. _scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
  7130. Mpi2EventIrConfigElement_t *element)
  7131. {
  7132. struct _sas_device *sas_device;
  7133. struct scsi_target *starget = NULL;
  7134. struct MPT3SAS_TARGET *sas_target_priv_data;
  7135. unsigned long flags;
  7136. u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
  7137. u16 volume_handle = 0;
  7138. u64 volume_wwid = 0;
  7139. mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
  7140. if (volume_handle)
  7141. mpt3sas_config_get_volume_wwid(ioc, volume_handle,
  7142. &volume_wwid);
  7143. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  7144. sas_device = __mpt3sas_get_sdev_by_handle(ioc, handle);
  7145. if (sas_device) {
  7146. set_bit(handle, ioc->pd_handles);
  7147. if (sas_device->starget && sas_device->starget->hostdata) {
  7148. starget = sas_device->starget;
  7149. sas_target_priv_data = starget->hostdata;
  7150. sas_target_priv_data->flags |=
  7151. MPT_TARGET_FLAGS_RAID_COMPONENT;
  7152. sas_device->volume_handle = volume_handle;
  7153. sas_device->volume_wwid = volume_wwid;
  7154. }
  7155. }
  7156. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  7157. if (!sas_device)
  7158. return;
  7159. /* hiding raid component */
  7160. _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
  7161. if (starget)
  7162. starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
  7163. sas_device_put(sas_device);
  7164. }
  7165. /**
  7166. * _scsih_sas_pd_delete - delete pd component
  7167. * @ioc: per adapter object
  7168. * @element: IR config element data
  7169. * Context: user.
  7170. *
  7171. * Return nothing.
  7172. */
  7173. static void
  7174. _scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
  7175. Mpi2EventIrConfigElement_t *element)
  7176. {
  7177. u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
  7178. _scsih_device_remove_by_handle(ioc, handle);
  7179. }
  7180. /**
7181. * _scsih_sas_pd_add - add pd component
  7182. * @ioc: per adapter object
  7183. * @element: IR config element data
  7184. * Context: user.
  7185. *
  7186. * Return nothing.
  7187. */
  7188. static void
  7189. _scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
  7190. Mpi2EventIrConfigElement_t *element)
  7191. {
  7192. struct _sas_device *sas_device;
  7193. u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
  7194. Mpi2ConfigReply_t mpi_reply;
  7195. Mpi2SasDevicePage0_t sas_device_pg0;
  7196. u32 ioc_status;
  7197. u64 sas_address;
  7198. u16 parent_handle;
  7199. set_bit(handle, ioc->pd_handles);
  7200. sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
  7201. if (sas_device) {
  7202. _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
  7203. sas_device_put(sas_device);
  7204. return;
  7205. }
  7206. if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
  7207. MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
  7208. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  7209. ioc->name, __FILE__, __LINE__, __func__);
  7210. return;
  7211. }
  7212. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  7213. MPI2_IOCSTATUS_MASK;
  7214. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  7215. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  7216. ioc->name, __FILE__, __LINE__, __func__);
  7217. return;
  7218. }
  7219. parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
  7220. if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
  7221. mpt3sas_transport_update_links(ioc, sas_address, handle,
  7222. sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
  7223. _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
  7224. _scsih_add_device(ioc, handle, 0, 1);
  7225. }
  7226. /**
  7227. * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
  7228. * @ioc: per adapter object
  7229. * @event_data: event data payload
  7230. * Context: user.
  7231. *
  7232. * Return nothing.
  7233. */
  7234. static void
  7235. _scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
  7236. Mpi2EventDataIrConfigChangeList_t *event_data)
  7237. {
  7238. Mpi2EventIrConfigElement_t *element;
  7239. u8 element_type;
  7240. int i;
  7241. char *reason_str = NULL, *element_str = NULL;
  7242. element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
  7243. pr_info(MPT3SAS_FMT "raid config change: (%s), elements(%d)\n",
  7244. ioc->name, (le32_to_cpu(event_data->Flags) &
  7245. MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ?
  7246. "foreign" : "native", event_data->NumElements);
  7247. for (i = 0; i < event_data->NumElements; i++, element++) {
  7248. switch (element->ReasonCode) {
  7249. case MPI2_EVENT_IR_CHANGE_RC_ADDED:
  7250. reason_str = "add";
  7251. break;
  7252. case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
  7253. reason_str = "remove";
  7254. break;
  7255. case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
  7256. reason_str = "no change";
  7257. break;
  7258. case MPI2_EVENT_IR_CHANGE_RC_HIDE:
  7259. reason_str = "hide";
  7260. break;
  7261. case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
  7262. reason_str = "unhide";
  7263. break;
  7264. case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
  7265. reason_str = "volume_created";
  7266. break;
  7267. case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
  7268. reason_str = "volume_deleted";
  7269. break;
  7270. case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
  7271. reason_str = "pd_created";
  7272. break;
  7273. case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
  7274. reason_str = "pd_deleted";
  7275. break;
  7276. default:
  7277. reason_str = "unknown reason";
  7278. break;
  7279. }
  7280. element_type = le16_to_cpu(element->ElementFlags) &
  7281. MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
  7282. switch (element_type) {
  7283. case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
  7284. element_str = "volume";
  7285. break;
  7286. case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
  7287. element_str = "phys disk";
  7288. break;
  7289. case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
  7290. element_str = "hot spare";
  7291. break;
  7292. default:
  7293. element_str = "unknown element";
  7294. break;
  7295. }
  7296. pr_info("\t(%s:%s), vol handle(0x%04x), " \
  7297. "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
  7298. reason_str, le16_to_cpu(element->VolDevHandle),
  7299. le16_to_cpu(element->PhysDiskDevHandle),
  7300. element->PhysDiskNum);
  7301. }
  7302. }
  7303. /**
  7304. * _scsih_sas_ir_config_change_event - handle ir configuration change events
  7305. * @ioc: per adapter object
  7306. * @fw_event: The fw_event_work object
  7307. * Context: user.
  7308. *
  7309. * Return nothing.
  7310. */
  7311. static void
  7312. _scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
  7313. struct fw_event_work *fw_event)
  7314. {
  7315. Mpi2EventIrConfigElement_t *element;
  7316. int i;
  7317. u8 foreign_config;
  7318. Mpi2EventDataIrConfigChangeList_t *event_data =
  7319. (Mpi2EventDataIrConfigChangeList_t *)
  7320. fw_event->event_data;
  7321. if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
  7322. (!ioc->hide_ir_msg))
  7323. _scsih_sas_ir_config_change_event_debug(ioc, event_data);
  7324. foreign_config = (le32_to_cpu(event_data->Flags) &
  7325. MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
  7326. element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
  7327. if (ioc->shost_recovery &&
  7328. ioc->hba_mpi_version_belonged != MPI2_VERSION) {
  7329. for (i = 0; i < event_data->NumElements; i++, element++) {
  7330. if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
  7331. _scsih_ir_fastpath(ioc,
  7332. le16_to_cpu(element->PhysDiskDevHandle),
  7333. element->PhysDiskNum);
  7334. }
  7335. return;
  7336. }
  7337. for (i = 0; i < event_data->NumElements; i++, element++) {
  7338. switch (element->ReasonCode) {
  7339. case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
  7340. case MPI2_EVENT_IR_CHANGE_RC_ADDED:
  7341. if (!foreign_config)
  7342. _scsih_sas_volume_add(ioc, element);
  7343. break;
  7344. case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
  7345. case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
  7346. if (!foreign_config)
  7347. _scsih_sas_volume_delete(ioc,
  7348. le16_to_cpu(element->VolDevHandle));
  7349. break;
  7350. case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
  7351. if (!ioc->is_warpdrive)
  7352. _scsih_sas_pd_hide(ioc, element);
  7353. break;
  7354. case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
  7355. if (!ioc->is_warpdrive)
  7356. _scsih_sas_pd_expose(ioc, element);
  7357. break;
  7358. case MPI2_EVENT_IR_CHANGE_RC_HIDE:
  7359. if (!ioc->is_warpdrive)
  7360. _scsih_sas_pd_add(ioc, element);
  7361. break;
  7362. case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
  7363. if (!ioc->is_warpdrive)
  7364. _scsih_sas_pd_delete(ioc, element);
  7365. break;
  7366. }
  7367. }
  7368. }
  7369. /**
  7370. * _scsih_sas_ir_volume_event - IR volume event
  7371. * @ioc: per adapter object
  7372. * @fw_event: The fw_event_work object
  7373. * Context: user.
  7374. *
  7375. * Return nothing.
  7376. */
  7377. static void
  7378. _scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
  7379. struct fw_event_work *fw_event)
  7380. {
  7381. u64 wwid;
  7382. unsigned long flags;
  7383. struct _raid_device *raid_device;
  7384. u16 handle;
  7385. u32 state;
  7386. int rc;
  7387. Mpi2EventDataIrVolume_t *event_data =
  7388. (Mpi2EventDataIrVolume_t *) fw_event->event_data;
  7389. if (ioc->shost_recovery)
  7390. return;
  7391. if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
  7392. return;
  7393. handle = le16_to_cpu(event_data->VolDevHandle);
  7394. state = le32_to_cpu(event_data->NewValue);
  7395. if (!ioc->hide_ir_msg)
  7396. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  7397. "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
  7398. ioc->name, __func__, handle,
  7399. le32_to_cpu(event_data->PreviousValue), state));
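/* MISSING and FAILED tear the volume down; ONLINE, DEGRADED and OPTIMAL
 * create the volume here if it is not already known to the driver.
 */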
  7400. switch (state) {
  7401. case MPI2_RAID_VOL_STATE_MISSING:
  7402. case MPI2_RAID_VOL_STATE_FAILED:
  7403. _scsih_sas_volume_delete(ioc, handle);
  7404. break;
  7405. case MPI2_RAID_VOL_STATE_ONLINE:
  7406. case MPI2_RAID_VOL_STATE_DEGRADED:
  7407. case MPI2_RAID_VOL_STATE_OPTIMAL:
  7408. spin_lock_irqsave(&ioc->raid_device_lock, flags);
  7409. raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
  7410. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  7411. if (raid_device)
  7412. break;
  7413. mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
  7414. if (!wwid) {
  7415. pr_err(MPT3SAS_FMT
  7416. "failure at %s:%d/%s()!\n", ioc->name,
  7417. __FILE__, __LINE__, __func__);
  7418. break;
  7419. }
  7420. raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
  7421. if (!raid_device) {
  7422. pr_err(MPT3SAS_FMT
  7423. "failure at %s:%d/%s()!\n", ioc->name,
  7424. __FILE__, __LINE__, __func__);
  7425. break;
  7426. }
  7427. raid_device->id = ioc->sas_id++;
  7428. raid_device->channel = RAID_CHANNEL;
  7429. raid_device->handle = handle;
  7430. raid_device->wwid = wwid;
  7431. _scsih_raid_device_add(ioc, raid_device);
  7432. rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
  7433. raid_device->id, 0);
  7434. if (rc)
  7435. _scsih_raid_device_remove(ioc, raid_device);
  7436. break;
  7437. case MPI2_RAID_VOL_STATE_INITIALIZING:
  7438. default:
  7439. break;
  7440. }
  7441. }
  7442. /**
  7443. * _scsih_sas_ir_physical_disk_event - PD event
  7444. * @ioc: per adapter object
  7445. * @fw_event: The fw_event_work object
  7446. * Context: user.
  7447. *
  7448. * Return nothing.
  7449. */
  7450. static void
  7451. _scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
  7452. struct fw_event_work *fw_event)
  7453. {
  7454. u16 handle, parent_handle;
  7455. u32 state;
  7456. struct _sas_device *sas_device;
  7457. Mpi2ConfigReply_t mpi_reply;
  7458. Mpi2SasDevicePage0_t sas_device_pg0;
  7459. u32 ioc_status;
  7460. Mpi2EventDataIrPhysicalDisk_t *event_data =
  7461. (Mpi2EventDataIrPhysicalDisk_t *) fw_event->event_data;
  7462. u64 sas_address;
  7463. if (ioc->shost_recovery)
  7464. return;
  7465. if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
  7466. return;
  7467. handle = le16_to_cpu(event_data->PhysDiskDevHandle);
  7468. state = le32_to_cpu(event_data->NewValue);
  7469. if (!ioc->hide_ir_msg)
  7470. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  7471. "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
  7472. ioc->name, __func__, handle,
  7473. le32_to_cpu(event_data->PreviousValue), state));
  7474. switch (state) {
  7475. case MPI2_RAID_PD_STATE_ONLINE:
  7476. case MPI2_RAID_PD_STATE_DEGRADED:
  7477. case MPI2_RAID_PD_STATE_REBUILDING:
  7478. case MPI2_RAID_PD_STATE_OPTIMAL:
  7479. case MPI2_RAID_PD_STATE_HOT_SPARE:
  7480. if (!ioc->is_warpdrive)
  7481. set_bit(handle, ioc->pd_handles);
  7482. sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
  7483. if (sas_device) {
  7484. sas_device_put(sas_device);
  7485. return;
  7486. }
  7487. if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
  7488. &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
  7489. handle))) {
  7490. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  7491. ioc->name, __FILE__, __LINE__, __func__);
  7492. return;
  7493. }
  7494. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  7495. MPI2_IOCSTATUS_MASK;
  7496. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  7497. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  7498. ioc->name, __FILE__, __LINE__, __func__);
  7499. return;
  7500. }
  7501. parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
  7502. if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
  7503. mpt3sas_transport_update_links(ioc, sas_address, handle,
  7504. sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
  7505. _scsih_add_device(ioc, handle, 0, 1);
  7506. break;
  7507. case MPI2_RAID_PD_STATE_OFFLINE:
  7508. case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
  7509. case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
  7510. default:
  7511. break;
  7512. }
  7513. }
  7514. /**
  7515. * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
  7516. * @ioc: per adapter object
  7517. * @event_data: event data payload
  7518. * Context: user.
  7519. *
  7520. * Return nothing.
  7521. */
  7522. static void
  7523. _scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
  7524. Mpi2EventDataIrOperationStatus_t *event_data)
  7525. {
  7526. char *reason_str = NULL;
  7527. switch (event_data->RAIDOperation) {
  7528. case MPI2_EVENT_IR_RAIDOP_RESYNC:
  7529. reason_str = "resync";
  7530. break;
  7531. case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
  7532. reason_str = "online capacity expansion";
  7533. break;
  7534. case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
  7535. reason_str = "consistency check";
  7536. break;
  7537. case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
  7538. reason_str = "background init";
  7539. break;
  7540. case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
  7541. reason_str = "make data consistent";
  7542. break;
  7543. }
  7544. if (!reason_str)
  7545. return;
  7546. pr_info(MPT3SAS_FMT "raid operational status: (%s)" \
  7547. "\thandle(0x%04x), percent complete(%d)\n",
  7548. ioc->name, reason_str,
  7549. le16_to_cpu(event_data->VolDevHandle),
  7550. event_data->PercentComplete);
  7551. }
  7552. /**
  7553. * _scsih_sas_ir_operation_status_event - handle RAID operation events
  7554. * @ioc: per adapter object
  7555. * @fw_event: The fw_event_work object
  7556. * Context: user.
  7557. *
  7558. * Return nothing.
  7559. */
  7560. static void
  7561. _scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
  7562. struct fw_event_work *fw_event)
  7563. {
  7564. Mpi2EventDataIrOperationStatus_t *event_data =
  7565. (Mpi2EventDataIrOperationStatus_t *)
  7566. fw_event->event_data;
  7567. static struct _raid_device *raid_device;
  7568. unsigned long flags;
  7569. u16 handle;
  7570. if ((ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) &&
  7571. (!ioc->hide_ir_msg))
  7572. _scsih_sas_ir_operation_status_event_debug(ioc,
  7573. event_data);
  7574. /* code added for raid transport support */
  7575. if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
  7576. spin_lock_irqsave(&ioc->raid_device_lock, flags);
  7577. handle = le16_to_cpu(event_data->VolDevHandle);
  7578. raid_device = mpt3sas_raid_device_find_by_handle(ioc, handle);
  7579. if (raid_device)
  7580. raid_device->percent_complete =
  7581. event_data->PercentComplete;
  7582. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  7583. }
  7584. }
  7585. /**
  7586. * _scsih_prep_device_scan - initialize parameters prior to device scan
  7587. * @ioc: per adapter object
  7588. *
  7589. * Set the deleted flag prior to device scan. If the device is found during
  7590. * the scan, then we clear the deleted flag.
  7591. */
  7592. static void
  7593. _scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
  7594. {
  7595. struct MPT3SAS_DEVICE *sas_device_priv_data;
  7596. struct scsi_device *sdev;
  7597. shost_for_each_device(sdev, ioc->shost) {
  7598. sas_device_priv_data = sdev->hostdata;
  7599. if (sas_device_priv_data && sas_device_priv_data->sas_target)
  7600. sas_device_priv_data->sas_target->deleted = 1;
  7601. }
  7602. }
  7603. /**
7604. * _scsih_mark_responding_sas_device - mark a sas_device as responding
  7605. * @ioc: per adapter object
  7606. * @sas_device_pg0: SAS Device page 0
  7607. *
  7608. * After host reset, find out whether devices are still responding.
7609. * Used in _scsih_remove_unresponding_devices.
  7610. *
  7611. * Return nothing.
  7612. */
  7613. static void
  7614. _scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc,
  7615. Mpi2SasDevicePage0_t *sas_device_pg0)
  7616. {
  7617. struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
  7618. struct scsi_target *starget;
  7619. struct _sas_device *sas_device = NULL;
  7620. struct _enclosure_node *enclosure_dev = NULL;
  7621. unsigned long flags;
  7622. if (sas_device_pg0->EnclosureHandle) {
  7623. enclosure_dev =
  7624. mpt3sas_scsih_enclosure_find_by_handle(ioc,
  7625. le16_to_cpu(sas_device_pg0->EnclosureHandle));
  7626. if (enclosure_dev == NULL)
7627. pr_info(MPT3SAS_FMT "Enclosure handle(0x%04x) "
7628. "doesn't match with enclosure device!\n",
7629. ioc->name, le16_to_cpu(sas_device_pg0->EnclosureHandle));
  7630. }
  7631. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  7632. list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
  7633. if ((sas_device->sas_address == le64_to_cpu(
  7634. sas_device_pg0->SASAddress)) && (sas_device->slot ==
  7635. le16_to_cpu(sas_device_pg0->Slot))) {
  7636. sas_device->responding = 1;
  7637. starget = sas_device->starget;
  7638. if (starget && starget->hostdata) {
  7639. sas_target_priv_data = starget->hostdata;
  7640. sas_target_priv_data->tm_busy = 0;
  7641. sas_target_priv_data->deleted = 0;
  7642. } else
  7643. sas_target_priv_data = NULL;
  7644. if (starget) {
  7645. starget_printk(KERN_INFO, starget,
  7646. "handle(0x%04x), sas_addr(0x%016llx)\n",
  7647. le16_to_cpu(sas_device_pg0->DevHandle),
  7648. (unsigned long long)
  7649. sas_device->sas_address);
  7650. if (sas_device->enclosure_handle != 0)
  7651. starget_printk(KERN_INFO, starget,
  7652. "enclosure logical id(0x%016llx),"
  7653. " slot(%d)\n",
  7654. (unsigned long long)
  7655. sas_device->enclosure_logical_id,
  7656. sas_device->slot);
  7657. }
  7658. if (le16_to_cpu(sas_device_pg0->Flags) &
  7659. MPI2_SAS_DEVICE0_FLAGS_ENCL_LEVEL_VALID) {
  7660. sas_device->enclosure_level =
  7661. sas_device_pg0->EnclosureLevel;
  7662. memcpy(&sas_device->connector_name[0],
  7663. &sas_device_pg0->ConnectorName[0], 4);
  7664. } else {
  7665. sas_device->enclosure_level = 0;
  7666. sas_device->connector_name[0] = '\0';
  7667. }
  7668. sas_device->enclosure_handle =
  7669. le16_to_cpu(sas_device_pg0->EnclosureHandle);
  7670. sas_device->is_chassis_slot_valid = 0;
  7671. if (enclosure_dev) {
  7672. sas_device->enclosure_logical_id = le64_to_cpu(
  7673. enclosure_dev->pg0.EnclosureLogicalID);
  7674. if (le16_to_cpu(enclosure_dev->pg0.Flags) &
  7675. MPI2_SAS_ENCLS0_FLAGS_CHASSIS_SLOT_VALID) {
  7676. sas_device->is_chassis_slot_valid = 1;
  7677. sas_device->chassis_slot =
  7678. enclosure_dev->pg0.ChassisSlot;
  7679. }
  7680. }
  7681. if (sas_device->handle == le16_to_cpu(
  7682. sas_device_pg0->DevHandle))
  7683. goto out;
  7684. pr_info("\thandle changed from(0x%04x)!!!\n",
  7685. sas_device->handle);
  7686. sas_device->handle = le16_to_cpu(
  7687. sas_device_pg0->DevHandle);
  7688. if (sas_target_priv_data)
  7689. sas_target_priv_data->handle =
  7690. le16_to_cpu(sas_device_pg0->DevHandle);
  7691. goto out;
  7692. }
  7693. }
  7694. out:
  7695. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  7696. }
  7697. /**
7698. * _scsih_create_enclosure_list_after_reset - free the existing enclosure
7699. * list and rebuild it by reading all Enclosure Page(0)s
  7700. * @ioc: per adapter object
  7701. *
  7702. * Return nothing.
  7703. */
  7704. static void
  7705. _scsih_create_enclosure_list_after_reset(struct MPT3SAS_ADAPTER *ioc)
  7706. {
  7707. struct _enclosure_node *enclosure_dev;
  7708. Mpi2ConfigReply_t mpi_reply;
  7709. u16 enclosure_handle;
  7710. int rc;
  7711. /* Free existing enclosure list */
  7712. mpt3sas_free_enclosure_list(ioc);
7713. /* Reconstruct the enclosure list after reset */
  7714. enclosure_handle = 0xFFFF;
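/* Iterate over every Enclosure Page 0 using the GET_NEXT_HANDLE form;
 * starting from 0xFFFF appears to make the firmware return the first
 * enclosure, and the loop ends once the config request fails.
 */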
  7715. do {
  7716. enclosure_dev =
  7717. kzalloc(sizeof(struct _enclosure_node), GFP_KERNEL);
  7718. if (!enclosure_dev) {
  7719. pr_err(MPT3SAS_FMT
  7720. "failure at %s:%d/%s()!\n", ioc->name,
  7721. __FILE__, __LINE__, __func__);
  7722. return;
  7723. }
  7724. rc = mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
  7725. &enclosure_dev->pg0,
  7726. MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE,
  7727. enclosure_handle);
  7728. if (rc || (le16_to_cpu(mpi_reply.IOCStatus) &
  7729. MPI2_IOCSTATUS_MASK)) {
  7730. kfree(enclosure_dev);
  7731. return;
  7732. }
  7733. list_add_tail(&enclosure_dev->list,
  7734. &ioc->enclosure_list);
  7735. enclosure_handle =
  7736. le16_to_cpu(enclosure_dev->pg0.EnclosureHandle);
  7737. } while (1);
  7738. }
  7739. /**
7740. * _scsih_search_responding_sas_devices - search for responding SAS end devices
  7741. * @ioc: per adapter object
  7742. *
  7743. * After host reset, find out whether devices are still responding.
  7744. * If not remove.
  7745. *
  7746. * Return nothing.
  7747. */
  7748. static void
  7749. _scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
  7750. {
  7751. Mpi2SasDevicePage0_t sas_device_pg0;
  7752. Mpi2ConfigReply_t mpi_reply;
  7753. u16 ioc_status;
  7754. u16 handle;
  7755. u32 device_info;
  7756. pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name);
  7757. if (list_empty(&ioc->sas_device_list))
  7758. goto out;
  7759. handle = 0xFFFF;
  7760. while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
  7761. &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
  7762. handle))) {
  7763. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  7764. MPI2_IOCSTATUS_MASK;
  7765. if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
  7766. break;
  7767. handle = le16_to_cpu(sas_device_pg0.DevHandle);
  7768. device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
  7769. if (!(_scsih_is_end_device(device_info)))
  7770. continue;
  7771. _scsih_mark_responding_sas_device(ioc, &sas_device_pg0);
  7772. }
  7773. out:
  7774. pr_info(MPT3SAS_FMT "search for end-devices: complete\n",
  7775. ioc->name);
  7776. }
  7777. /**
  7778. * _scsih_mark_responding_pcie_device - mark a pcie_device as responding
  7779. * @ioc: per adapter object
  7780. * @pcie_device_pg0: PCIe Device page 0
  7781. *
  7782. * After host reset, find out whether devices are still responding.
  7783. * Used in _scsih_remove_unresponding_devices.
  7784. *
  7785. * Return nothing.
  7786. */
  7787. static void
  7788. _scsih_mark_responding_pcie_device(struct MPT3SAS_ADAPTER *ioc,
  7789. Mpi26PCIeDevicePage0_t *pcie_device_pg0)
  7790. {
  7791. struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
  7792. struct scsi_target *starget;
  7793. struct _pcie_device *pcie_device;
  7794. unsigned long flags;
  7795. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  7796. list_for_each_entry(pcie_device, &ioc->pcie_device_list, list) {
  7797. if ((pcie_device->wwid == le64_to_cpu(pcie_device_pg0->WWID))
  7798. && (pcie_device->slot == le16_to_cpu(
  7799. pcie_device_pg0->Slot))) {
  7800. pcie_device->responding = 1;
  7801. starget = pcie_device->starget;
  7802. if (starget && starget->hostdata) {
  7803. sas_target_priv_data = starget->hostdata;
  7804. sas_target_priv_data->tm_busy = 0;
  7805. sas_target_priv_data->deleted = 0;
  7806. } else
  7807. sas_target_priv_data = NULL;
  7808. if (starget) {
  7809. starget_printk(KERN_INFO, starget,
  7810. "handle(0x%04x), wwid(0x%016llx) ",
  7811. pcie_device->handle,
  7812. (unsigned long long)pcie_device->wwid);
  7813. if (pcie_device->enclosure_handle != 0)
  7814. starget_printk(KERN_INFO, starget,
  7815. "enclosure logical id(0x%016llx), "
  7816. "slot(%d)\n",
  7817. (unsigned long long)
  7818. pcie_device->enclosure_logical_id,
  7819. pcie_device->slot);
  7820. }
  7821. if (((le32_to_cpu(pcie_device_pg0->Flags)) &
  7822. MPI26_PCIEDEV0_FLAGS_ENCL_LEVEL_VALID) &&
  7823. (ioc->hba_mpi_version_belonged != MPI2_VERSION)) {
  7824. pcie_device->enclosure_level =
  7825. pcie_device_pg0->EnclosureLevel;
  7826. memcpy(&pcie_device->connector_name[0],
  7827. &pcie_device_pg0->ConnectorName[0], 4);
  7828. } else {
  7829. pcie_device->enclosure_level = 0;
  7830. pcie_device->connector_name[0] = '\0';
  7831. }
  7832. if (pcie_device->handle == le16_to_cpu(
  7833. pcie_device_pg0->DevHandle))
  7834. goto out;
  7835. pr_info("\thandle changed from(0x%04x)!!!\n",
  7836. pcie_device->handle);
  7837. pcie_device->handle = le16_to_cpu(
  7838. pcie_device_pg0->DevHandle);
  7839. if (sas_target_priv_data)
  7840. sas_target_priv_data->handle =
  7841. le16_to_cpu(pcie_device_pg0->DevHandle);
  7842. goto out;
  7843. }
  7844. }
  7845. out:
  7846. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  7847. }
/**
 * _scsih_search_responding_pcie_devices - mark responding PCIe end devices
 * @ioc: per adapter object
 *
 * After host reset, find out whether PCIe end devices are still responding;
 * devices that are not are removed by _scsih_remove_unresponding_devices.
 *
 * Return nothing.
 */
  7857. static void
  7858. _scsih_search_responding_pcie_devices(struct MPT3SAS_ADAPTER *ioc)
  7859. {
  7860. Mpi26PCIeDevicePage0_t pcie_device_pg0;
  7861. Mpi2ConfigReply_t mpi_reply;
  7862. u16 ioc_status;
  7863. u16 handle;
  7864. u32 device_info;
  7865. pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name);
  7866. if (list_empty(&ioc->pcie_device_list))
  7867. goto out;
  7868. handle = 0xFFFF;
  7869. while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
  7870. &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
  7871. handle))) {
  7872. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  7873. MPI2_IOCSTATUS_MASK;
  7874. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  7875. pr_info(MPT3SAS_FMT "\tbreak from %s: "
  7876. "ioc_status(0x%04x), loginfo(0x%08x)\n", ioc->name,
  7877. __func__, ioc_status,
  7878. le32_to_cpu(mpi_reply.IOCLogInfo));
  7879. break;
  7880. }
  7881. handle = le16_to_cpu(pcie_device_pg0.DevHandle);
  7882. device_info = le32_to_cpu(pcie_device_pg0.DeviceInfo);
  7883. if (!(_scsih_is_nvme_device(device_info)))
  7884. continue;
  7885. _scsih_mark_responding_pcie_device(ioc, &pcie_device_pg0);
  7886. }
  7887. out:
  7888. pr_info(MPT3SAS_FMT "search for PCIe end-devices: complete\n",
  7889. ioc->name);
  7890. }
  7891. /**
  7892. * _scsih_mark_responding_raid_device - mark a raid_device as responding
  7893. * @ioc: per adapter object
  7894. * @wwid: world wide identifier for raid volume
  7895. * @handle: device handle
  7896. *
  7897. * After host reset, find out whether devices are still responding.
 * Used in _scsih_remove_unresponding_devices.
  7899. *
  7900. * Return nothing.
  7901. */
  7902. static void
  7903. _scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
  7904. u16 handle)
  7905. {
  7906. struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
  7907. struct scsi_target *starget;
  7908. struct _raid_device *raid_device;
  7909. unsigned long flags;
  7910. spin_lock_irqsave(&ioc->raid_device_lock, flags);
  7911. list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
  7912. if (raid_device->wwid == wwid && raid_device->starget) {
  7913. starget = raid_device->starget;
  7914. if (starget && starget->hostdata) {
  7915. sas_target_priv_data = starget->hostdata;
  7916. sas_target_priv_data->deleted = 0;
  7917. } else
  7918. sas_target_priv_data = NULL;
  7919. raid_device->responding = 1;
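/*
 * The lock is dropped across the messages and the call to
 * mpt3sas_init_warpdrive_properties() below, which issues config
 * page requests that may sleep; it is re-acquired before the
 * handle is updated.
 */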
  7920. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  7921. starget_printk(KERN_INFO, raid_device->starget,
  7922. "handle(0x%04x), wwid(0x%016llx)\n", handle,
  7923. (unsigned long long)raid_device->wwid);
  7924. /*
  7925. * WARPDRIVE: The handles of the PDs might have changed
  7926. * across the host reset so re-initialize the
  7927. * required data for Direct IO
  7928. */
  7929. mpt3sas_init_warpdrive_properties(ioc, raid_device);
  7930. spin_lock_irqsave(&ioc->raid_device_lock, flags);
  7931. if (raid_device->handle == handle) {
  7932. spin_unlock_irqrestore(&ioc->raid_device_lock,
  7933. flags);
  7934. return;
  7935. }
  7936. pr_info("\thandle changed from(0x%04x)!!!\n",
  7937. raid_device->handle);
  7938. raid_device->handle = handle;
  7939. if (sas_target_priv_data)
  7940. sas_target_priv_data->handle = handle;
  7941. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  7942. return;
  7943. }
  7944. }
  7945. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  7946. }
/**
 * _scsih_search_responding_raid_devices - mark responding raid volumes
 * @ioc: per adapter object
 *
 * After host reset, find out whether raid volumes are still responding;
 * volumes that are not are deleted by _scsih_remove_unresponding_devices.
 *
 * Return nothing.
 */
  7956. static void
  7957. _scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
  7958. {
  7959. Mpi2RaidVolPage1_t volume_pg1;
  7960. Mpi2RaidVolPage0_t volume_pg0;
  7961. Mpi2RaidPhysDiskPage0_t pd_pg0;
  7962. Mpi2ConfigReply_t mpi_reply;
  7963. u16 ioc_status;
  7964. u16 handle;
  7965. u8 phys_disk_num;
  7966. if (!ioc->ir_firmware)
  7967. return;
  7968. pr_info(MPT3SAS_FMT "search for raid volumes: start\n",
  7969. ioc->name);
  7970. if (list_empty(&ioc->raid_device_list))
  7971. goto out;
  7972. handle = 0xFFFF;
  7973. while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
  7974. &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
  7975. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  7976. MPI2_IOCSTATUS_MASK;
  7977. if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
  7978. break;
  7979. handle = le16_to_cpu(volume_pg1.DevHandle);
  7980. if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
  7981. &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
  7982. sizeof(Mpi2RaidVolPage0_t)))
  7983. continue;
  7984. if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
  7985. volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
  7986. volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
  7987. _scsih_mark_responding_raid_device(ioc,
  7988. le64_to_cpu(volume_pg1.WWID), handle);
  7989. }
  7990. /* refresh the pd_handles */
  7991. if (!ioc->is_warpdrive) {
  7992. phys_disk_num = 0xFF;
  7993. memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
  7994. while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
  7995. &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
  7996. phys_disk_num))) {
  7997. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  7998. MPI2_IOCSTATUS_MASK;
  7999. if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
  8000. break;
  8001. phys_disk_num = pd_pg0.PhysDiskNum;
  8002. handle = le16_to_cpu(pd_pg0.DevHandle);
  8003. set_bit(handle, ioc->pd_handles);
  8004. }
  8005. }
  8006. out:
  8007. pr_info(MPT3SAS_FMT "search for responding raid volumes: complete\n",
  8008. ioc->name);
  8009. }
/**
 * _scsih_mark_responding_expander - mark an expander as responding
 * @ioc: per adapter object
 * @expander_pg0: SAS Expander Config Page0
 *
 * After host reset, find out whether expanders are still responding.
 * Used in _scsih_remove_unresponding_devices.
 *
 * Return nothing.
 */
  8020. static void
  8021. _scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc,
  8022. Mpi2ExpanderPage0_t *expander_pg0)
  8023. {
  8024. struct _sas_node *sas_expander = NULL;
  8025. unsigned long flags;
  8026. int i;
  8027. struct _enclosure_node *enclosure_dev = NULL;
  8028. u16 handle = le16_to_cpu(expander_pg0->DevHandle);
  8029. u16 enclosure_handle = le16_to_cpu(expander_pg0->EnclosureHandle);
  8030. u64 sas_address = le64_to_cpu(expander_pg0->SASAddress);
  8031. if (enclosure_handle)
  8032. enclosure_dev =
  8033. mpt3sas_scsih_enclosure_find_by_handle(ioc,
  8034. enclosure_handle);
  8035. spin_lock_irqsave(&ioc->sas_node_lock, flags);
  8036. list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
  8037. if (sas_expander->sas_address != sas_address)
  8038. continue;
  8039. sas_expander->responding = 1;
  8040. if (enclosure_dev) {
  8041. sas_expander->enclosure_logical_id =
  8042. le64_to_cpu(enclosure_dev->pg0.EnclosureLogicalID);
  8043. sas_expander->enclosure_handle =
  8044. le16_to_cpu(expander_pg0->EnclosureHandle);
  8045. }
  8046. if (sas_expander->handle == handle)
  8047. goto out;
  8048. pr_info("\texpander(0x%016llx): handle changed" \
  8049. " from(0x%04x) to (0x%04x)!!!\n",
  8050. (unsigned long long)sas_expander->sas_address,
  8051. sas_expander->handle, handle);
  8052. sas_expander->handle = handle;
  8053. for (i = 0 ; i < sas_expander->num_phys ; i++)
  8054. sas_expander->phy[i].handle = handle;
  8055. goto out;
  8056. }
  8057. out:
  8058. spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
  8059. }
/**
 * _scsih_search_responding_expanders - mark responding expanders
 * @ioc: per adapter object
 *
 * After host reset, find out whether expanders are still responding;
 * expanders that are not are removed by _scsih_remove_unresponding_devices.
 *
 * Return nothing.
 */
  8069. static void
  8070. _scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
  8071. {
  8072. Mpi2ExpanderPage0_t expander_pg0;
  8073. Mpi2ConfigReply_t mpi_reply;
  8074. u16 ioc_status;
  8075. u64 sas_address;
  8076. u16 handle;
  8077. pr_info(MPT3SAS_FMT "search for expanders: start\n", ioc->name);
  8078. if (list_empty(&ioc->sas_expander_list))
  8079. goto out;
  8080. handle = 0xFFFF;
  8081. while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
  8082. MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
  8083. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  8084. MPI2_IOCSTATUS_MASK;
  8085. if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
  8086. break;
  8087. handle = le16_to_cpu(expander_pg0.DevHandle);
  8088. sas_address = le64_to_cpu(expander_pg0.SASAddress);
  8089. pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n",
  8090. handle,
  8091. (unsigned long long)sas_address);
  8092. _scsih_mark_responding_expander(ioc, &expander_pg0);
  8093. }
  8094. out:
  8095. pr_info(MPT3SAS_FMT "search for expanders: complete\n", ioc->name);
  8096. }
  8097. /**
  8098. * _scsih_remove_unresponding_devices - removing unresponding devices
  8099. * @ioc: per adapter object
  8100. *
  8101. * Return nothing.
  8102. */
  8103. static void
  8104. _scsih_remove_unresponding_devices(struct MPT3SAS_ADAPTER *ioc)
  8105. {
  8106. struct _sas_device *sas_device, *sas_device_next;
  8107. struct _sas_node *sas_expander, *sas_expander_next;
  8108. struct _raid_device *raid_device, *raid_device_next;
  8109. struct _pcie_device *pcie_device, *pcie_device_next;
  8110. struct list_head tmp_list;
  8111. unsigned long flags;
  8112. LIST_HEAD(head);
  8113. pr_info(MPT3SAS_FMT "removing unresponding devices: start\n",
  8114. ioc->name);
  8115. /* removing unresponding end devices */
  8116. pr_info(MPT3SAS_FMT "removing unresponding devices: end-devices\n",
  8117. ioc->name);
  8118. /*
  8119. * Iterate, pulling off devices marked as non-responding. We become the
  8120. * owner for the reference the list had on any object we prune.
  8121. */
  8122. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  8123. list_for_each_entry_safe(sas_device, sas_device_next,
  8124. &ioc->sas_device_list, list) {
  8125. if (!sas_device->responding)
  8126. list_move_tail(&sas_device->list, &head);
  8127. else
  8128. sas_device->responding = 0;
  8129. }
  8130. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  8131. /*
  8132. * Now, uninitialize and remove the unresponding devices we pruned.
  8133. */
  8134. list_for_each_entry_safe(sas_device, sas_device_next, &head, list) {
  8135. _scsih_remove_device(ioc, sas_device);
  8136. list_del_init(&sas_device->list);
  8137. sas_device_put(sas_device);
  8138. }
pr_info(MPT3SAS_FMT
"removing unresponding devices: pcie end-devices\n",
ioc->name);
  8142. INIT_LIST_HEAD(&head);
  8143. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  8144. list_for_each_entry_safe(pcie_device, pcie_device_next,
  8145. &ioc->pcie_device_list, list) {
  8146. if (!pcie_device->responding)
  8147. list_move_tail(&pcie_device->list, &head);
  8148. else
  8149. pcie_device->responding = 0;
  8150. }
  8151. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  8152. list_for_each_entry_safe(pcie_device, pcie_device_next, &head, list) {
  8153. _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
  8154. list_del_init(&pcie_device->list);
  8155. pcie_device_put(pcie_device);
  8156. }
  8157. /* removing unresponding volumes */
  8158. if (ioc->ir_firmware) {
  8159. pr_info(MPT3SAS_FMT "removing unresponding devices: volumes\n",
  8160. ioc->name);
  8161. list_for_each_entry_safe(raid_device, raid_device_next,
  8162. &ioc->raid_device_list, list) {
  8163. if (!raid_device->responding)
  8164. _scsih_sas_volume_delete(ioc,
  8165. raid_device->handle);
  8166. else
  8167. raid_device->responding = 0;
  8168. }
  8169. }
  8170. /* removing unresponding expanders */
  8171. pr_info(MPT3SAS_FMT "removing unresponding devices: expanders\n",
  8172. ioc->name);
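/*
 * As with the end devices above, move non-responding expanders onto
 * a temporary list under the lock, then tear them down outside it.
 */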
  8173. spin_lock_irqsave(&ioc->sas_node_lock, flags);
  8174. INIT_LIST_HEAD(&tmp_list);
  8175. list_for_each_entry_safe(sas_expander, sas_expander_next,
  8176. &ioc->sas_expander_list, list) {
  8177. if (!sas_expander->responding)
  8178. list_move_tail(&sas_expander->list, &tmp_list);
  8179. else
  8180. sas_expander->responding = 0;
  8181. }
  8182. spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
  8183. list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
  8184. list) {
  8185. _scsih_expander_node_remove(ioc, sas_expander);
  8186. }
  8187. pr_info(MPT3SAS_FMT "removing unresponding devices: complete\n",
  8188. ioc->name);
  8189. /* unblock devices */
  8190. _scsih_ublock_io_all_device(ioc);
  8191. }
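/**
 * _scsih_refresh_expander_links - refresh the phy links of an expander
 * @ioc: per adapter object
 * @sas_expander: the sas_node object of the expander
 * @handle: expander device handle
 *
 * Reads Expander Page 1 for each phy and updates the transport layer
 * with the attached device handle and negotiated link rate.
 *
 * Return nothing.
 */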
  8192. static void
  8193. _scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
  8194. struct _sas_node *sas_expander, u16 handle)
  8195. {
  8196. Mpi2ExpanderPage1_t expander_pg1;
  8197. Mpi2ConfigReply_t mpi_reply;
  8198. int i;
  8199. for (i = 0 ; i < sas_expander->num_phys ; i++) {
  8200. if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
  8201. &expander_pg1, i, handle))) {
  8202. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  8203. ioc->name, __FILE__, __LINE__, __func__);
  8204. return;
  8205. }
  8206. mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
  8207. le16_to_cpu(expander_pg1.AttachedDevHandle), i,
  8208. expander_pg1.NegotiatedLinkRate >> 4);
  8209. }
  8210. }
  8211. /**
  8212. * _scsih_scan_for_devices_after_reset - scan for devices after host reset
  8213. * @ioc: per adapter object
  8214. *
  8215. * Return nothing.
  8216. */
  8217. static void
  8218. _scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
  8219. {
  8220. Mpi2ExpanderPage0_t expander_pg0;
  8221. Mpi2SasDevicePage0_t sas_device_pg0;
  8222. Mpi26PCIeDevicePage0_t pcie_device_pg0;
  8223. Mpi2RaidVolPage1_t volume_pg1;
  8224. Mpi2RaidVolPage0_t volume_pg0;
  8225. Mpi2RaidPhysDiskPage0_t pd_pg0;
  8226. Mpi2EventIrConfigElement_t element;
  8227. Mpi2ConfigReply_t mpi_reply;
  8228. u8 phys_disk_num;
  8229. u16 ioc_status;
  8230. u16 handle, parent_handle;
  8231. u64 sas_address;
  8232. struct _sas_device *sas_device;
  8233. struct _pcie_device *pcie_device;
  8234. struct _sas_node *expander_device;
  8235. static struct _raid_device *raid_device;
  8236. u8 retry_count;
  8237. unsigned long flags;
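/*
 * Rediscover in dependency order: expanders first, then (when IR
 * firmware is present) RAID phys disks and volumes, and finally SAS
 * and PCIe end devices, so parent ports exist before end devices are
 * added back.
 */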
  8238. pr_info(MPT3SAS_FMT "scan devices: start\n", ioc->name);
  8239. _scsih_sas_host_refresh(ioc);
  8240. pr_info(MPT3SAS_FMT "\tscan devices: expanders start\n", ioc->name);
  8241. /* expanders */
  8242. handle = 0xFFFF;
  8243. while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
  8244. MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
  8245. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  8246. MPI2_IOCSTATUS_MASK;
  8247. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  8248. pr_info(MPT3SAS_FMT "\tbreak from expander scan: " \
  8249. "ioc_status(0x%04x), loginfo(0x%08x)\n",
  8250. ioc->name, ioc_status,
  8251. le32_to_cpu(mpi_reply.IOCLogInfo));
  8252. break;
  8253. }
  8254. handle = le16_to_cpu(expander_pg0.DevHandle);
  8255. spin_lock_irqsave(&ioc->sas_node_lock, flags);
  8256. expander_device = mpt3sas_scsih_expander_find_by_sas_address(
  8257. ioc, le64_to_cpu(expander_pg0.SASAddress));
  8258. spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
  8259. if (expander_device)
  8260. _scsih_refresh_expander_links(ioc, expander_device,
  8261. handle);
  8262. else {
  8263. pr_info(MPT3SAS_FMT "\tBEFORE adding expander: " \
  8264. "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
  8265. handle, (unsigned long long)
  8266. le64_to_cpu(expander_pg0.SASAddress));
  8267. _scsih_expander_add(ioc, handle);
  8268. pr_info(MPT3SAS_FMT "\tAFTER adding expander: " \
  8269. "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
  8270. handle, (unsigned long long)
  8271. le64_to_cpu(expander_pg0.SASAddress));
  8272. }
  8273. }
  8274. pr_info(MPT3SAS_FMT "\tscan devices: expanders complete\n",
  8275. ioc->name);
  8276. if (!ioc->ir_firmware)
  8277. goto skip_to_sas;
  8278. pr_info(MPT3SAS_FMT "\tscan devices: phys disk start\n", ioc->name);
  8279. /* phys disk */
  8280. phys_disk_num = 0xFF;
  8281. while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
  8282. &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
  8283. phys_disk_num))) {
  8284. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  8285. MPI2_IOCSTATUS_MASK;
  8286. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  8287. pr_info(MPT3SAS_FMT "\tbreak from phys disk scan: "\
  8288. "ioc_status(0x%04x), loginfo(0x%08x)\n",
  8289. ioc->name, ioc_status,
  8290. le32_to_cpu(mpi_reply.IOCLogInfo));
  8291. break;
  8292. }
  8293. phys_disk_num = pd_pg0.PhysDiskNum;
  8294. handle = le16_to_cpu(pd_pg0.DevHandle);
  8295. sas_device = mpt3sas_get_sdev_by_handle(ioc, handle);
  8296. if (sas_device) {
  8297. sas_device_put(sas_device);
  8298. continue;
  8299. }
  8300. if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
  8301. &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
  8302. handle) != 0)
  8303. continue;
  8304. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  8305. MPI2_IOCSTATUS_MASK;
  8306. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  8307. pr_info(MPT3SAS_FMT "\tbreak from phys disk scan " \
  8308. "ioc_status(0x%04x), loginfo(0x%08x)\n",
  8309. ioc->name, ioc_status,
  8310. le32_to_cpu(mpi_reply.IOCLogInfo));
  8311. break;
  8312. }
  8313. parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
  8314. if (!_scsih_get_sas_address(ioc, parent_handle,
  8315. &sas_address)) {
  8316. pr_info(MPT3SAS_FMT "\tBEFORE adding phys disk: " \
  8317. " handle (0x%04x), sas_addr(0x%016llx)\n",
  8318. ioc->name, handle, (unsigned long long)
  8319. le64_to_cpu(sas_device_pg0.SASAddress));
  8320. mpt3sas_transport_update_links(ioc, sas_address,
  8321. handle, sas_device_pg0.PhyNum,
  8322. MPI2_SAS_NEG_LINK_RATE_1_5);
  8323. set_bit(handle, ioc->pd_handles);
  8324. retry_count = 0;
  8325. /* This will retry adding the end device.
  8326. * _scsih_add_device() will decide on retries and
  8327. * return "1" when it should be retried
  8328. */
  8329. while (_scsih_add_device(ioc, handle, retry_count++,
  8330. 1)) {
  8331. ssleep(1);
  8332. }
  8333. pr_info(MPT3SAS_FMT "\tAFTER adding phys disk: " \
  8334. " handle (0x%04x), sas_addr(0x%016llx)\n",
  8335. ioc->name, handle, (unsigned long long)
  8336. le64_to_cpu(sas_device_pg0.SASAddress));
  8337. }
  8338. }
  8339. pr_info(MPT3SAS_FMT "\tscan devices: phys disk complete\n",
  8340. ioc->name);
  8341. pr_info(MPT3SAS_FMT "\tscan devices: volumes start\n", ioc->name);
  8342. /* volumes */
  8343. handle = 0xFFFF;
  8344. while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
  8345. &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
  8346. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  8347. MPI2_IOCSTATUS_MASK;
  8348. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  8349. pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
  8350. "ioc_status(0x%04x), loginfo(0x%08x)\n",
  8351. ioc->name, ioc_status,
  8352. le32_to_cpu(mpi_reply.IOCLogInfo));
  8353. break;
  8354. }
  8355. handle = le16_to_cpu(volume_pg1.DevHandle);
  8356. spin_lock_irqsave(&ioc->raid_device_lock, flags);
  8357. raid_device = _scsih_raid_device_find_by_wwid(ioc,
  8358. le64_to_cpu(volume_pg1.WWID));
  8359. spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
  8360. if (raid_device)
  8361. continue;
  8362. if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
  8363. &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
  8364. sizeof(Mpi2RaidVolPage0_t)))
  8365. continue;
  8366. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  8367. MPI2_IOCSTATUS_MASK;
  8368. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  8369. pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
  8370. "ioc_status(0x%04x), loginfo(0x%08x)\n",
  8371. ioc->name, ioc_status,
  8372. le32_to_cpu(mpi_reply.IOCLogInfo));
  8373. break;
  8374. }
  8375. if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
  8376. volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
  8377. volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
  8378. memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
  8379. element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
  8380. element.VolDevHandle = volume_pg1.DevHandle;
  8381. pr_info(MPT3SAS_FMT
  8382. "\tBEFORE adding volume: handle (0x%04x)\n",
  8383. ioc->name, volume_pg1.DevHandle);
  8384. _scsih_sas_volume_add(ioc, &element);
  8385. pr_info(MPT3SAS_FMT
  8386. "\tAFTER adding volume: handle (0x%04x)\n",
  8387. ioc->name, volume_pg1.DevHandle);
  8388. }
  8389. }
  8390. pr_info(MPT3SAS_FMT "\tscan devices: volumes complete\n",
  8391. ioc->name);
  8392. skip_to_sas:
  8393. pr_info(MPT3SAS_FMT "\tscan devices: end devices start\n",
  8394. ioc->name);
  8395. /* sas devices */
  8396. handle = 0xFFFF;
  8397. while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
  8398. &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
  8399. handle))) {
  8400. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  8401. MPI2_IOCSTATUS_MASK;
  8402. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  8403. pr_info(MPT3SAS_FMT "\tbreak from end device scan:"\
  8404. " ioc_status(0x%04x), loginfo(0x%08x)\n",
  8405. ioc->name, ioc_status,
  8406. le32_to_cpu(mpi_reply.IOCLogInfo));
  8407. break;
  8408. }
  8409. handle = le16_to_cpu(sas_device_pg0.DevHandle);
  8410. if (!(_scsih_is_end_device(
  8411. le32_to_cpu(sas_device_pg0.DeviceInfo))))
  8412. continue;
  8413. sas_device = mpt3sas_get_sdev_by_addr(ioc,
  8414. le64_to_cpu(sas_device_pg0.SASAddress));
  8415. if (sas_device) {
  8416. sas_device_put(sas_device);
  8417. continue;
  8418. }
  8419. parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
  8420. if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
  8421. pr_info(MPT3SAS_FMT "\tBEFORE adding end device: " \
  8422. "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
  8423. handle, (unsigned long long)
  8424. le64_to_cpu(sas_device_pg0.SASAddress));
  8425. mpt3sas_transport_update_links(ioc, sas_address, handle,
  8426. sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
  8427. retry_count = 0;
  8428. /* This will retry adding the end device.
  8429. * _scsih_add_device() will decide on retries and
  8430. * return "1" when it should be retried
  8431. */
  8432. while (_scsih_add_device(ioc, handle, retry_count++,
  8433. 0)) {
  8434. ssleep(1);
  8435. }
  8436. pr_info(MPT3SAS_FMT "\tAFTER adding end device: " \
  8437. "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
  8438. handle, (unsigned long long)
  8439. le64_to_cpu(sas_device_pg0.SASAddress));
  8440. }
  8441. }
  8442. pr_info(MPT3SAS_FMT "\tscan devices: end devices complete\n",
  8443. ioc->name);
  8444. pr_info(MPT3SAS_FMT "\tscan devices: pcie end devices start\n",
  8445. ioc->name);
  8446. /* pcie devices */
  8447. handle = 0xFFFF;
  8448. while (!(mpt3sas_config_get_pcie_device_pg0(ioc, &mpi_reply,
  8449. &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
  8450. handle))) {
  8451. ioc_status = le16_to_cpu(mpi_reply.IOCStatus)
  8452. & MPI2_IOCSTATUS_MASK;
  8453. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  8454. pr_info(MPT3SAS_FMT "\tbreak from pcie end device"
  8455. " scan: ioc_status(0x%04x), loginfo(0x%08x)\n",
  8456. ioc->name, ioc_status,
  8457. le32_to_cpu(mpi_reply.IOCLogInfo));
  8458. break;
  8459. }
  8460. handle = le16_to_cpu(pcie_device_pg0.DevHandle);
  8461. if (!(_scsih_is_nvme_device(
  8462. le32_to_cpu(pcie_device_pg0.DeviceInfo))))
  8463. continue;
  8464. pcie_device = mpt3sas_get_pdev_by_wwid(ioc,
  8465. le64_to_cpu(pcie_device_pg0.WWID));
  8466. if (pcie_device) {
  8467. pcie_device_put(pcie_device);
  8468. continue;
  8469. }
  8470. retry_count = 0;
  8471. parent_handle = le16_to_cpu(pcie_device_pg0.ParentDevHandle);
  8472. _scsih_pcie_add_device(ioc, handle);
  8473. pr_info(MPT3SAS_FMT "\tAFTER adding pcie end device: "
  8474. "handle (0x%04x), wwid(0x%016llx)\n", ioc->name,
  8475. handle,
  8476. (unsigned long long) le64_to_cpu(pcie_device_pg0.WWID));
  8477. }
pr_info(MPT3SAS_FMT "\tscan devices: pcie end devices complete\n",
  8479. ioc->name);
  8480. pr_info(MPT3SAS_FMT "scan devices: complete\n", ioc->name);
  8481. }
  8482. /**
  8483. * mpt3sas_scsih_reset_handler - reset callback handler (for scsih)
  8484. * @ioc: per adapter object
  8485. * @reset_phase: phase
  8486. *
  8487. * The handler for doing any required cleanup or initialization.
  8488. *
  8489. * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
  8490. * MPT3_IOC_DONE_RESET
  8491. *
  8492. * Return nothing.
  8493. */
  8494. void
  8495. mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
  8496. {
  8497. switch (reset_phase) {
  8498. case MPT3_IOC_PRE_RESET:
  8499. dtmprintk(ioc, pr_info(MPT3SAS_FMT
  8500. "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
  8501. break;
  8502. case MPT3_IOC_AFTER_RESET:
  8503. dtmprintk(ioc, pr_info(MPT3SAS_FMT
  8504. "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
  8505. if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
  8506. ioc->scsih_cmds.status |= MPT3_CMD_RESET;
  8507. mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
  8508. complete(&ioc->scsih_cmds.done);
  8509. }
  8510. if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
  8511. ioc->tm_cmds.status |= MPT3_CMD_RESET;
  8512. mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
  8513. complete(&ioc->tm_cmds.done);
  8514. }
  8515. memset(ioc->pend_os_device_add, 0, ioc->pend_os_device_add_sz);
  8516. memset(ioc->device_remove_in_progress, 0,
  8517. ioc->device_remove_in_progress_sz);
  8518. _scsih_fw_event_cleanup_queue(ioc);
  8519. _scsih_flush_running_cmds(ioc);
  8520. break;
  8521. case MPT3_IOC_DONE_RESET:
  8522. dtmprintk(ioc, pr_info(MPT3SAS_FMT
  8523. "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
  8524. if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
  8525. !ioc->sas_hba.num_phys)) {
  8526. _scsih_prep_device_scan(ioc);
  8527. _scsih_create_enclosure_list_after_reset(ioc);
  8528. _scsih_search_responding_sas_devices(ioc);
  8529. _scsih_search_responding_pcie_devices(ioc);
  8530. _scsih_search_responding_raid_devices(ioc);
  8531. _scsih_search_responding_expanders(ioc);
  8532. _scsih_error_recovery_delete_devices(ioc);
  8533. }
  8534. break;
  8535. }
  8536. }
  8537. /**
  8538. * _mpt3sas_fw_work - delayed task for processing firmware events
  8539. * @ioc: per adapter object
  8540. * @fw_event: The fw_event_work object
  8541. * Context: user.
  8542. *
  8543. * Return nothing.
  8544. */
  8545. static void
  8546. _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
  8547. {
  8548. _scsih_fw_event_del_from_list(ioc, fw_event);
  8549. /* the queue is being flushed so ignore this event */
  8550. if (ioc->remove_host || ioc->pci_error_recovery) {
  8551. fw_event_work_put(fw_event);
  8552. return;
  8553. }
  8554. switch (fw_event->event) {
  8555. case MPT3SAS_PROCESS_TRIGGER_DIAG:
  8556. mpt3sas_process_trigger_data(ioc,
  8557. (struct SL_WH_TRIGGERS_EVENT_DATA_T *)
  8558. fw_event->event_data);
  8559. break;
  8560. case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
  8561. while (scsi_host_in_recovery(ioc->shost) ||
  8562. ioc->shost_recovery) {
  8563. /*
  8564. * If we're unloading, bail. Otherwise, this can become
  8565. * an infinite loop.
  8566. */
  8567. if (ioc->remove_host)
  8568. goto out;
  8569. ssleep(1);
  8570. }
  8571. _scsih_remove_unresponding_devices(ioc);
  8572. _scsih_scan_for_devices_after_reset(ioc);
  8573. break;
  8574. case MPT3SAS_PORT_ENABLE_COMPLETE:
  8575. ioc->start_scan = 0;
  8576. if (missing_delay[0] != -1 && missing_delay[1] != -1)
  8577. mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
  8578. missing_delay[1]);
  8579. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  8580. "port enable: complete from worker thread\n",
  8581. ioc->name));
  8582. break;
  8583. case MPT3SAS_TURN_ON_PFA_LED:
  8584. _scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
  8585. break;
  8586. case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
  8587. _scsih_sas_topology_change_event(ioc, fw_event);
  8588. break;
  8589. case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
  8590. _scsih_sas_device_status_change_event(ioc, fw_event);
  8591. break;
  8592. case MPI2_EVENT_SAS_DISCOVERY:
  8593. _scsih_sas_discovery_event(ioc, fw_event);
  8594. break;
  8595. case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
  8596. _scsih_sas_device_discovery_error_event(ioc, fw_event);
  8597. break;
  8598. case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
  8599. _scsih_sas_broadcast_primitive_event(ioc, fw_event);
  8600. break;
  8601. case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
  8602. _scsih_sas_enclosure_dev_status_change_event(ioc,
  8603. fw_event);
  8604. break;
  8605. case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
  8606. _scsih_sas_ir_config_change_event(ioc, fw_event);
  8607. break;
  8608. case MPI2_EVENT_IR_VOLUME:
  8609. _scsih_sas_ir_volume_event(ioc, fw_event);
  8610. break;
  8611. case MPI2_EVENT_IR_PHYSICAL_DISK:
  8612. _scsih_sas_ir_physical_disk_event(ioc, fw_event);
  8613. break;
  8614. case MPI2_EVENT_IR_OPERATION_STATUS:
  8615. _scsih_sas_ir_operation_status_event(ioc, fw_event);
  8616. break;
  8617. case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
  8618. _scsih_pcie_device_status_change_event(ioc, fw_event);
  8619. break;
  8620. case MPI2_EVENT_PCIE_ENUMERATION:
  8621. _scsih_pcie_enumeration_event(ioc, fw_event);
  8622. break;
  8623. case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
  8624. _scsih_pcie_topology_change_event(ioc, fw_event);
  8625. return;
  8627. }
  8628. out:
  8629. fw_event_work_put(fw_event);
  8630. }
/**
 * _firmware_event_work - work thread wrapper for firmware events
 * @work: The fw_event_work object
 * Context: user.
 *
 * wrapper for the work thread handling firmware events
 *
 * Return nothing.
 */
  8641. static void
  8642. _firmware_event_work(struct work_struct *work)
  8643. {
  8644. struct fw_event_work *fw_event = container_of(work,
  8645. struct fw_event_work, work);
  8646. _mpt3sas_fw_work(fw_event->ioc, fw_event);
  8647. }
  8648. /**
  8649. * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
  8650. * @ioc: per adapter object
  8651. * @msix_index: MSIX table index supplied by the OS
  8652. * @reply: reply message frame(lower 32bit addr)
  8653. * Context: interrupt.
  8654. *
  8655. * This function merely adds a new work task into ioc->firmware_event_thread.
  8656. * The tasks are worked from _firmware_event_work in user context.
  8657. *
  8658. * Return 1 meaning mf should be freed from _base_interrupt
  8659. * 0 means the mf is freed from this function.
  8660. */
  8661. u8
  8662. mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
  8663. u32 reply)
  8664. {
  8665. struct fw_event_work *fw_event;
  8666. Mpi2EventNotificationReply_t *mpi_reply;
  8667. u16 event;
  8668. u16 sz;
  8669. Mpi26EventDataActiveCableExcept_t *ActiveCableEventData;
  8670. /* events turned off due to host reset */
  8671. if (ioc->pci_error_recovery)
  8672. return 1;
  8673. mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
  8674. if (unlikely(!mpi_reply)) {
  8675. pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
  8676. ioc->name, __FILE__, __LINE__, __func__);
  8677. return 1;
  8678. }
  8679. event = le16_to_cpu(mpi_reply->Event);
  8680. if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
  8681. mpt3sas_trigger_event(ioc, event, 0);
  8682. switch (event) {
  8683. /* handle these */
  8684. case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
  8685. {
  8686. Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
  8687. (Mpi2EventDataSasBroadcastPrimitive_t *)
  8688. mpi_reply->EventData;
  8689. if (baen_data->Primitive !=
  8690. MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
  8691. return 1;
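/*
 * Only one broadcast AEN is processed at a time; if one is already
 * in progress just count it as pending and let the interrupt return.
 */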
  8692. if (ioc->broadcast_aen_busy) {
  8693. ioc->broadcast_aen_pending++;
  8694. return 1;
  8695. } else
  8696. ioc->broadcast_aen_busy = 1;
  8697. break;
  8698. }
  8699. case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
  8700. _scsih_check_topo_delete_events(ioc,
  8701. (Mpi2EventDataSasTopologyChangeList_t *)
  8702. mpi_reply->EventData);
  8703. break;
  8704. case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
  8705. _scsih_check_pcie_topo_remove_events(ioc,
  8706. (Mpi26EventDataPCIeTopologyChangeList_t *)
  8707. mpi_reply->EventData);
  8708. break;
  8709. case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
  8710. _scsih_check_ir_config_unhide_events(ioc,
  8711. (Mpi2EventDataIrConfigChangeList_t *)
  8712. mpi_reply->EventData);
  8713. break;
  8714. case MPI2_EVENT_IR_VOLUME:
  8715. _scsih_check_volume_delete_events(ioc,
  8716. (Mpi2EventDataIrVolume_t *)
  8717. mpi_reply->EventData);
  8718. break;
  8719. case MPI2_EVENT_LOG_ENTRY_ADDED:
  8720. {
  8721. Mpi2EventDataLogEntryAdded_t *log_entry;
  8722. u32 *log_code;
  8723. if (!ioc->is_warpdrive)
  8724. break;
  8725. log_entry = (Mpi2EventDataLogEntryAdded_t *)
  8726. mpi_reply->EventData;
  8727. log_code = (u32 *)log_entry->LogData;
  8728. if (le16_to_cpu(log_entry->LogEntryQualifier)
  8729. != MPT2_WARPDRIVE_LOGENTRY)
  8730. break;
  8731. switch (le32_to_cpu(*log_code)) {
  8732. case MPT2_WARPDRIVE_LC_SSDT:
  8733. pr_warn(MPT3SAS_FMT "WarpDrive Warning: "
  8734. "IO Throttling has occurred in the WarpDrive "
  8735. "subsystem. Check WarpDrive documentation for "
  8736. "additional details.\n", ioc->name);
  8737. break;
  8738. case MPT2_WARPDRIVE_LC_SSDLW:
  8739. pr_warn(MPT3SAS_FMT "WarpDrive Warning: "
  8740. "Program/Erase Cycles for the WarpDrive subsystem "
  8741. "in degraded range. Check WarpDrive documentation "
  8742. "for additional details.\n", ioc->name);
  8743. break;
  8744. case MPT2_WARPDRIVE_LC_SSDLF:
  8745. pr_err(MPT3SAS_FMT "WarpDrive Fatal Error: "
  8746. "There are no Program/Erase Cycles for the "
  8747. "WarpDrive subsystem. The storage device will be "
  8748. "in read-only mode. Check WarpDrive documentation "
  8749. "for additional details.\n", ioc->name);
  8750. break;
  8751. case MPT2_WARPDRIVE_LC_BRMF:
  8752. pr_err(MPT3SAS_FMT "WarpDrive Fatal Error: "
  8753. "The Backup Rail Monitor has failed on the "
  8754. "WarpDrive subsystem. Check WarpDrive "
  8755. "documentation for additional details.\n",
  8756. ioc->name);
  8757. break;
  8758. }
  8759. break;
  8760. }
  8761. case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
  8762. case MPI2_EVENT_IR_OPERATION_STATUS:
  8763. case MPI2_EVENT_SAS_DISCOVERY:
  8764. case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
  8765. case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
  8766. case MPI2_EVENT_IR_PHYSICAL_DISK:
  8767. case MPI2_EVENT_PCIE_ENUMERATION:
  8768. case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
  8769. break;
  8770. case MPI2_EVENT_TEMP_THRESHOLD:
  8771. _scsih_temp_threshold_events(ioc,
  8772. (Mpi2EventDataTemperature_t *)
  8773. mpi_reply->EventData);
  8774. break;
  8775. case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
  8776. ActiveCableEventData =
  8777. (Mpi26EventDataActiveCableExcept_t *) mpi_reply->EventData;
  8778. switch (ActiveCableEventData->ReasonCode) {
  8779. case MPI26_EVENT_ACTIVE_CABLE_INSUFFICIENT_POWER:
  8780. pr_notice(MPT3SAS_FMT
  8781. "Currently an active cable with ReceptacleID %d\n",
  8782. ioc->name, ActiveCableEventData->ReceptacleID);
  8783. pr_notice("cannot be powered and devices connected\n");
  8784. pr_notice("to this active cable will not be seen\n");
  8785. pr_notice("This active cable requires %d mW of power\n",
  8786. ActiveCableEventData->ActiveCablePowerRequirement);
  8787. break;
  8788. case MPI26_EVENT_ACTIVE_CABLE_DEGRADED:
  8789. pr_notice(MPT3SAS_FMT
  8790. "Currently a cable with ReceptacleID %d\n",
  8791. ioc->name, ActiveCableEventData->ReceptacleID);
  8792. pr_notice(
  8793. "is not running at optimal speed(12 Gb/s rate)\n");
  8794. break;
  8795. }
  8796. break;
  8797. default: /* ignore the rest */
  8798. return 1;
  8799. }
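/* EventDataLength is in 32-bit words; convert to bytes for the copy */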
  8800. sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
  8801. fw_event = alloc_fw_event_work(sz);
  8802. if (!fw_event) {
  8803. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  8804. ioc->name, __FILE__, __LINE__, __func__);
  8805. return 1;
  8806. }
  8807. memcpy(fw_event->event_data, mpi_reply->EventData, sz);
  8808. fw_event->ioc = ioc;
  8809. fw_event->VF_ID = mpi_reply->VF_ID;
  8810. fw_event->VP_ID = mpi_reply->VP_ID;
  8811. fw_event->event = event;
  8812. _scsih_fw_event_add(ioc, fw_event);
  8813. fw_event_work_put(fw_event);
  8814. return 1;
  8815. }
  8816. /**
  8817. * _scsih_expander_node_remove - removing expander device from list.
  8818. * @ioc: per adapter object
 * @sas_expander: the sas_node object of the expander being removed
  8820. *
  8821. * Removing object and freeing associated memory from the
  8822. * ioc->sas_expander_list.
  8823. *
  8824. * Return nothing.
  8825. */
  8826. static void
  8827. _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
  8828. struct _sas_node *sas_expander)
  8829. {
  8830. struct _sas_port *mpt3sas_port, *next;
  8831. unsigned long flags;
  8832. /* remove sibling ports attached to this expander */
  8833. list_for_each_entry_safe(mpt3sas_port, next,
  8834. &sas_expander->sas_port_list, port_list) {
  8835. if (ioc->shost_recovery)
  8836. return;
  8837. if (mpt3sas_port->remote_identify.device_type ==
  8838. SAS_END_DEVICE)
  8839. mpt3sas_device_remove_by_sas_address(ioc,
  8840. mpt3sas_port->remote_identify.sas_address);
  8841. else if (mpt3sas_port->remote_identify.device_type ==
  8842. SAS_EDGE_EXPANDER_DEVICE ||
  8843. mpt3sas_port->remote_identify.device_type ==
  8844. SAS_FANOUT_EXPANDER_DEVICE)
  8845. mpt3sas_expander_remove(ioc,
  8846. mpt3sas_port->remote_identify.sas_address);
  8847. }
  8848. mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
  8849. sas_expander->sas_address_parent);
  8850. pr_info(MPT3SAS_FMT
  8851. "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
  8852. ioc->name,
  8853. sas_expander->handle, (unsigned long long)
  8854. sas_expander->sas_address);
  8855. spin_lock_irqsave(&ioc->sas_node_lock, flags);
  8856. list_del(&sas_expander->list);
  8857. spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
  8858. kfree(sas_expander->phy);
  8859. kfree(sas_expander);
  8860. }
  8861. /**
  8862. * _scsih_ir_shutdown - IR shutdown notification
  8863. * @ioc: per adapter object
  8864. *
  8865. * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
  8866. * the host system is shutting down.
  8867. *
  8868. * Return nothing.
  8869. */
  8870. static void
  8871. _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
  8872. {
  8873. Mpi2RaidActionRequest_t *mpi_request;
  8874. Mpi2RaidActionReply_t *mpi_reply;
  8875. u16 smid;
  8876. /* is IR firmware build loaded ? */
  8877. if (!ioc->ir_firmware)
  8878. return;
  8879. /* are there any volumes ? */
  8880. if (list_empty(&ioc->raid_device_list))
  8881. return;
  8882. mutex_lock(&ioc->scsih_cmds.mutex);
  8883. if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
  8884. pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
  8885. ioc->name, __func__);
  8886. goto out;
  8887. }
  8888. ioc->scsih_cmds.status = MPT3_CMD_PENDING;
  8889. smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
  8890. if (!smid) {
  8891. pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
  8892. ioc->name, __func__);
  8893. ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
  8894. goto out;
  8895. }
  8896. mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
  8897. ioc->scsih_cmds.smid = smid;
  8898. memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
  8899. mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
  8900. mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
  8901. if (!ioc->hide_ir_msg)
  8902. pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name);
  8903. init_completion(&ioc->scsih_cmds.done);
  8904. mpt3sas_base_put_smid_default(ioc, smid);
  8905. wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
  8906. if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
  8907. pr_err(MPT3SAS_FMT "%s: timeout\n",
  8908. ioc->name, __func__);
  8909. goto out;
  8910. }
  8911. if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
  8912. mpi_reply = ioc->scsih_cmds.reply;
  8913. if (!ioc->hide_ir_msg)
  8914. pr_info(MPT3SAS_FMT "IR shutdown "
  8915. "(complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
  8916. ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
  8917. le32_to_cpu(mpi_reply->IOCLogInfo));
  8918. }
  8919. out:
  8920. ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
  8921. mutex_unlock(&ioc->scsih_cmds.mutex);
  8922. }
  8923. /**
 * scsih_remove - detach and remove the scsi host
  8925. * @pdev: PCI device struct
  8926. *
  8927. * Routine called when unloading the driver.
  8928. * Return nothing.
  8929. */
  8930. static void scsih_remove(struct pci_dev *pdev)
  8931. {
  8932. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  8933. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  8934. struct _sas_port *mpt3sas_port, *next_port;
  8935. struct _raid_device *raid_device, *next;
  8936. struct MPT3SAS_TARGET *sas_target_priv_data;
  8937. struct _pcie_device *pcie_device, *pcienext;
  8938. struct workqueue_struct *wq;
  8939. unsigned long flags;
  8940. ioc->remove_host = 1;
  8941. mpt3sas_wait_for_commands_to_complete(ioc);
  8942. _scsih_flush_running_cmds(ioc);
  8943. _scsih_fw_event_cleanup_queue(ioc);
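/*
 * Detach the firmware event workqueue under fw_event_lock so no new
 * events can be queued, then destroy it outside the lock.
 */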
  8944. spin_lock_irqsave(&ioc->fw_event_lock, flags);
  8945. wq = ioc->firmware_event_thread;
  8946. ioc->firmware_event_thread = NULL;
  8947. spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
  8948. if (wq)
  8949. destroy_workqueue(wq);
  8950. /* release all the volumes */
  8951. _scsih_ir_shutdown(ioc);
  8952. list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
  8953. list) {
  8954. if (raid_device->starget) {
  8955. sas_target_priv_data =
  8956. raid_device->starget->hostdata;
  8957. sas_target_priv_data->deleted = 1;
  8958. scsi_remove_target(&raid_device->starget->dev);
  8959. }
  8960. pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n",
  8961. ioc->name, raid_device->handle,
  8962. (unsigned long long) raid_device->wwid);
  8963. _scsih_raid_device_remove(ioc, raid_device);
  8964. }
  8965. list_for_each_entry_safe(pcie_device, pcienext, &ioc->pcie_device_list,
  8966. list) {
  8967. _scsih_pcie_device_remove_from_sml(ioc, pcie_device);
  8968. list_del_init(&pcie_device->list);
  8969. pcie_device_put(pcie_device);
  8970. }
  8971. /* free ports attached to the sas_host */
  8972. list_for_each_entry_safe(mpt3sas_port, next_port,
  8973. &ioc->sas_hba.sas_port_list, port_list) {
  8974. if (mpt3sas_port->remote_identify.device_type ==
  8975. SAS_END_DEVICE)
  8976. mpt3sas_device_remove_by_sas_address(ioc,
  8977. mpt3sas_port->remote_identify.sas_address);
  8978. else if (mpt3sas_port->remote_identify.device_type ==
  8979. SAS_EDGE_EXPANDER_DEVICE ||
  8980. mpt3sas_port->remote_identify.device_type ==
  8981. SAS_FANOUT_EXPANDER_DEVICE)
  8982. mpt3sas_expander_remove(ioc,
  8983. mpt3sas_port->remote_identify.sas_address);
  8984. }
  8985. /* free phys attached to the sas_host */
  8986. if (ioc->sas_hba.num_phys) {
  8987. kfree(ioc->sas_hba.phy);
  8988. ioc->sas_hba.phy = NULL;
  8989. ioc->sas_hba.num_phys = 0;
  8990. }
  8991. sas_remove_host(shost);
  8992. mpt3sas_base_detach(ioc);
  8993. spin_lock(&gioc_lock);
  8994. list_del(&ioc->list);
  8995. spin_unlock(&gioc_lock);
  8996. scsi_host_put(shost);
  8997. }
  8998. /**
 * scsih_shutdown - routine called during system shutdown
  9000. * @pdev: PCI device struct
  9001. *
  9002. * Return nothing.
  9003. */
  9004. static void
  9005. scsih_shutdown(struct pci_dev *pdev)
  9006. {
  9007. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  9008. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  9009. struct workqueue_struct *wq;
  9010. unsigned long flags;
  9011. ioc->remove_host = 1;
  9012. mpt3sas_wait_for_commands_to_complete(ioc);
  9013. _scsih_flush_running_cmds(ioc);
  9014. _scsih_fw_event_cleanup_queue(ioc);
  9015. spin_lock_irqsave(&ioc->fw_event_lock, flags);
  9016. wq = ioc->firmware_event_thread;
  9017. ioc->firmware_event_thread = NULL;
  9018. spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
  9019. if (wq)
  9020. destroy_workqueue(wq);
  9021. _scsih_ir_shutdown(ioc);
  9022. mpt3sas_base_detach(ioc);
  9023. }
  9024. /**
  9025. * _scsih_probe_boot_devices - reports 1st device
  9026. * @ioc: per adapter object
  9027. *
 * If specified in bios page 2, this routine reports the 1st
 * device to scsi-ml or the sas transport for persistent boot
 * device purposes. Please refer to function _scsih_determine_boot_device().
  9031. */
  9032. static void
  9033. _scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
  9034. {
  9035. u32 channel;
  9036. void *device;
  9037. struct _sas_device *sas_device;
  9038. struct _raid_device *raid_device;
  9039. struct _pcie_device *pcie_device;
  9040. u16 handle;
  9041. u64 sas_address_parent;
  9042. u64 sas_address;
  9043. unsigned long flags;
  9044. int rc;
  9045. int tid;
  9046. /* no Bios, return immediately */
  9047. if (!ioc->bios_pg3.BiosVersion)
  9048. return;
  9049. device = NULL;
  9050. if (ioc->req_boot_device.device) {
  9051. device = ioc->req_boot_device.device;
  9052. channel = ioc->req_boot_device.channel;
  9053. } else if (ioc->req_alt_boot_device.device) {
  9054. device = ioc->req_alt_boot_device.device;
  9055. channel = ioc->req_alt_boot_device.channel;
  9056. } else if (ioc->current_boot_device.device) {
  9057. device = ioc->current_boot_device.device;
  9058. channel = ioc->current_boot_device.channel;
  9059. }
  9060. if (!device)
  9061. return;
  9062. if (channel == RAID_CHANNEL) {
  9063. raid_device = device;
  9064. rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
  9065. raid_device->id, 0);
  9066. if (rc)
  9067. _scsih_raid_device_remove(ioc, raid_device);
  9068. } else if (channel == PCIE_CHANNEL) {
  9069. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  9070. pcie_device = device;
  9071. tid = pcie_device->id;
  9072. list_move_tail(&pcie_device->list, &ioc->pcie_device_list);
  9073. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  9074. rc = scsi_add_device(ioc->shost, PCIE_CHANNEL, tid, 0);
  9075. if (rc)
  9076. _scsih_pcie_device_remove(ioc, pcie_device);
  9077. } else {
  9078. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  9079. sas_device = device;
  9080. handle = sas_device->handle;
  9081. sas_address_parent = sas_device->sas_address_parent;
  9082. sas_address = sas_device->sas_address;
  9083. list_move_tail(&sas_device->list, &ioc->sas_device_list);
  9084. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  9085. if (ioc->hide_drives)
  9086. return;
  9087. if (!mpt3sas_transport_port_add(ioc, handle,
  9088. sas_address_parent)) {
  9089. _scsih_sas_device_remove(ioc, sas_device);
  9090. } else if (!sas_device->starget) {
  9091. if (!ioc->is_driver_loading) {
  9092. mpt3sas_transport_port_remove(ioc,
  9093. sas_address,
  9094. sas_address_parent);
  9095. _scsih_sas_device_remove(ioc, sas_device);
  9096. }
  9097. }
  9098. }
  9099. }
  9100. /**
  9101. * _scsih_probe_raid - reporting raid volumes to scsi-ml
  9102. * @ioc: per adapter object
  9103. *
  9104. * Called during initial loading of the driver.
  9105. */
  9106. static void
  9107. _scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
  9108. {
  9109. struct _raid_device *raid_device, *raid_next;
  9110. int rc;
  9111. list_for_each_entry_safe(raid_device, raid_next,
  9112. &ioc->raid_device_list, list) {
  9113. if (raid_device->starget)
  9114. continue;
  9115. rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
  9116. raid_device->id, 0);
  9117. if (rc)
  9118. _scsih_raid_device_remove(ioc, raid_device);
  9119. }
  9120. }
  9121. static struct _sas_device *get_next_sas_device(struct MPT3SAS_ADAPTER *ioc)
  9122. {
  9123. struct _sas_device *sas_device = NULL;
  9124. unsigned long flags;
  9125. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  9126. if (!list_empty(&ioc->sas_device_init_list)) {
  9127. sas_device = list_first_entry(&ioc->sas_device_init_list,
  9128. struct _sas_device, list);
  9129. sas_device_get(sas_device);
  9130. }
  9131. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  9132. return sas_device;
  9133. }
  9134. static void sas_device_make_active(struct MPT3SAS_ADAPTER *ioc,
  9135. struct _sas_device *sas_device)
  9136. {
  9137. unsigned long flags;
  9138. spin_lock_irqsave(&ioc->sas_device_lock, flags);
  9139. /*
  9140. * Since we dropped the lock during the call to port_add(), we need to
  9141. * be careful here that somebody else didn't move or delete this item
  9142. * while we were busy with other things.
  9143. *
  9144. * If it was on the list, we need a put() for the reference the list
  9145. * had. Either way, we need a get() for the destination list.
  9146. */
  9147. if (!list_empty(&sas_device->list)) {
  9148. list_del_init(&sas_device->list);
  9149. sas_device_put(sas_device);
  9150. }
  9151. sas_device_get(sas_device);
  9152. list_add_tail(&sas_device->list, &ioc->sas_device_list);
  9153. spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
  9154. }
  9155. /**
  9156. * _scsih_probe_sas - reporting sas devices to sas transport
  9157. * @ioc: per adapter object
  9158. *
  9159. * Called during initial loading of the driver.
  9160. */
  9161. static void
  9162. _scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
  9163. {
  9164. struct _sas_device *sas_device;
  9165. if (ioc->hide_drives)
  9166. return;
  9167. while ((sas_device = get_next_sas_device(ioc))) {
  9168. if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
  9169. sas_device->sas_address_parent)) {
  9170. _scsih_sas_device_remove(ioc, sas_device);
  9171. sas_device_put(sas_device);
  9172. continue;
  9173. } else if (!sas_device->starget) {
  9174. /*
 * When async scanning is enabled, it's not possible to
  9176. * remove devices while scanning is turned on due to an
  9177. * oops in scsi_sysfs_add_sdev()->add_device()->
  9178. * sysfs_addrm_start()
  9179. */
  9180. if (!ioc->is_driver_loading) {
  9181. mpt3sas_transport_port_remove(ioc,
  9182. sas_device->sas_address,
  9183. sas_device->sas_address_parent);
  9184. _scsih_sas_device_remove(ioc, sas_device);
  9185. sas_device_put(sas_device);
  9186. continue;
  9187. }
  9188. }
  9189. sas_device_make_active(ioc, sas_device);
  9190. sas_device_put(sas_device);
  9191. }
  9192. }
  9193. /**
  9194. * get_next_pcie_device - Get the next pcie device
  9195. * @ioc: per adapter object
  9196. *
 * Get the next pcie device from the pcie_device_init_list list.
 *
 * Returns a pcie device structure if the pcie_device_init_list is not
 * empty, otherwise returns NULL.
  9201. */
  9202. static struct _pcie_device *get_next_pcie_device(struct MPT3SAS_ADAPTER *ioc)
  9203. {
  9204. struct _pcie_device *pcie_device = NULL;
  9205. unsigned long flags;
  9206. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  9207. if (!list_empty(&ioc->pcie_device_init_list)) {
  9208. pcie_device = list_first_entry(&ioc->pcie_device_init_list,
  9209. struct _pcie_device, list);
  9210. pcie_device_get(pcie_device);
  9211. }
  9212. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  9213. return pcie_device;
  9214. }
  9215. /**
  9216. * pcie_device_make_active - Add pcie device to pcie_device_list list
  9217. * @ioc: per adapter object
  9218. * @pcie_device: pcie device object
  9219. *
 * Add the pcie device which has been registered with the SCSI Transport
 * Layer to the pcie_device_list list.
  9222. */
  9223. static void pcie_device_make_active(struct MPT3SAS_ADAPTER *ioc,
  9224. struct _pcie_device *pcie_device)
  9225. {
  9226. unsigned long flags;
  9227. spin_lock_irqsave(&ioc->pcie_device_lock, flags);
  9228. if (!list_empty(&pcie_device->list)) {
  9229. list_del_init(&pcie_device->list);
  9230. pcie_device_put(pcie_device);
  9231. }
  9232. pcie_device_get(pcie_device);
  9233. list_add_tail(&pcie_device->list, &ioc->pcie_device_list);
  9234. spin_unlock_irqrestore(&ioc->pcie_device_lock, flags);
  9235. }
  9236. /**
  9237. * _scsih_probe_pcie - reporting PCIe devices to scsi-ml
  9238. * @ioc: per adapter object
  9239. *
  9240. * Called during initial loading of the driver.
  9241. */
  9242. static void
  9243. _scsih_probe_pcie(struct MPT3SAS_ADAPTER *ioc)
  9244. {
  9245. struct _pcie_device *pcie_device;
  9246. int rc;
  9247. /* PCIe Device List */
  9248. while ((pcie_device = get_next_pcie_device(ioc))) {
  9249. if (pcie_device->starget) {
  9250. pcie_device_put(pcie_device);
  9251. continue;
  9252. }
  9253. rc = scsi_add_device(ioc->shost, PCIE_CHANNEL,
  9254. pcie_device->id, 0);
  9255. if (rc) {
  9256. _scsih_pcie_device_remove(ioc, pcie_device);
  9257. pcie_device_put(pcie_device);
  9258. continue;
  9259. } else if (!pcie_device->starget) {
/*
 * When async scanning is enabled, it's not possible to
 * remove devices while scanning is turned on due to an
 * oops in scsi_sysfs_add_sdev()->add_device()->
 * sysfs_addrm_start()
 */
  9266. if (!ioc->is_driver_loading) {
  9267. /* TODO-- Need to find out whether this condition will
  9268. * occur or not
  9269. */
  9270. _scsih_pcie_device_remove(ioc, pcie_device);
  9271. pcie_device_put(pcie_device);
  9272. continue;
  9273. }
  9274. }
  9275. pcie_device_make_active(ioc, pcie_device);
  9276. pcie_device_put(pcie_device);
  9277. }
  9278. }
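/*
 * Note: get_next_sas_device()/get_next_pcie_device() return the device with
 * a reference held; every path in the probe loops above drops that reference
 * with the matching *_put() once the device has been made active or removed.
 */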
  9279. /**
  9280. * _scsih_probe_devices - probing for devices
  9281. * @ioc: per adapter object
  9282. *
  9283. * Called during initial loading of the driver.
  9284. */
  9285. static void
  9286. _scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
  9287. {
  9288. u16 volume_mapping_flags;
  9289. if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
  9290. return; /* return when IOC doesn't support initiator mode */
  9291. _scsih_probe_boot_devices(ioc);
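/*
 * With IR firmware, the volume mapping mode in IOC page 8 decides whether
 * RAID volumes or bare SAS devices are reported first, so that the order
 * in which devices are exposed matches the firmware's mapping.
 */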
  9292. if (ioc->ir_firmware) {
  9293. volume_mapping_flags =
  9294. le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
  9295. MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
  9296. if (volume_mapping_flags ==
  9297. MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
  9298. _scsih_probe_raid(ioc);
  9299. _scsih_probe_sas(ioc);
  9300. } else {
  9301. _scsih_probe_sas(ioc);
  9302. _scsih_probe_raid(ioc);
  9303. }
  9304. } else {
  9305. _scsih_probe_sas(ioc);
  9306. _scsih_probe_pcie(ioc);
  9307. }
  9308. }
/**
 * scsih_scan_start - scsi lld callback for .scan_start
 * @shost: SCSI host pointer
 *
 * The shost has the ability to discover targets on its own instead
 * of scanning the entire bus. In our implementation, we will kick off
 * firmware discovery.
 */
  9317. static void
  9318. scsih_scan_start(struct Scsi_Host *shost)
  9319. {
  9320. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  9321. int rc;
  9322. if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
  9323. mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
  9324. if (disable_discovery > 0)
  9325. return;
  9326. ioc->start_scan = 1;
  9327. rc = mpt3sas_port_enable(ioc);
  9328. if (rc != 0)
  9329. pr_info(MPT3SAS_FMT "port enable: FAILED\n", ioc->name);
  9330. }
/**
 * scsih_scan_finished - scsi lld callback for .scan_finished
 * @shost: SCSI host pointer
 * @time: elapsed time of the scan in jiffies
 *
 * This function will be called periodically until it returns 1 with the
 * scsi_host and the elapsed time of the scan in jiffies. In our
 * implementation, we wait for firmware discovery to complete, then return 1.
 */
  9340. static int
  9341. scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
  9342. {
  9343. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  9344. if (disable_discovery > 0) {
  9345. ioc->is_driver_loading = 0;
  9346. ioc->wait_for_discovery_to_complete = 0;
  9347. return 1;
  9348. }
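/* Give up if port enable has not completed within 300 seconds. */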
  9349. if (time >= (300 * HZ)) {
  9350. ioc->base_cmds.status = MPT3_CMD_NOT_USED;
  9351. pr_info(MPT3SAS_FMT
  9352. "port enable: FAILED with timeout (timeout=300s)\n",
  9353. ioc->name);
  9354. ioc->is_driver_loading = 0;
  9355. return 1;
  9356. }
  9357. if (ioc->start_scan)
  9358. return 0;
  9359. if (ioc->start_scan_failed) {
  9360. pr_info(MPT3SAS_FMT
  9361. "port enable: FAILED with (ioc_status=0x%08x)\n",
  9362. ioc->name, ioc->start_scan_failed);
  9363. ioc->is_driver_loading = 0;
  9364. ioc->wait_for_discovery_to_complete = 0;
  9365. ioc->remove_host = 1;
  9366. return 1;
  9367. }
  9368. pr_info(MPT3SAS_FMT "port enable: SUCCESS\n", ioc->name);
  9369. ioc->base_cmds.status = MPT3_CMD_NOT_USED;
  9370. if (ioc->wait_for_discovery_to_complete) {
  9371. ioc->wait_for_discovery_to_complete = 0;
  9372. _scsih_probe_devices(ioc);
  9373. }
  9374. mpt3sas_base_start_watchdog(ioc);
  9375. ioc->is_driver_loading = 0;
  9376. return 1;
  9377. }
  9378. /* shost template for SAS 2.0 HBA devices */
  9379. static struct scsi_host_template mpt2sas_driver_template = {
  9380. .module = THIS_MODULE,
  9381. .name = "Fusion MPT SAS Host",
  9382. .proc_name = MPT2SAS_DRIVER_NAME,
  9383. .queuecommand = scsih_qcmd,
  9384. .target_alloc = scsih_target_alloc,
  9385. .slave_alloc = scsih_slave_alloc,
  9386. .slave_configure = scsih_slave_configure,
  9387. .target_destroy = scsih_target_destroy,
  9388. .slave_destroy = scsih_slave_destroy,
  9389. .scan_finished = scsih_scan_finished,
  9390. .scan_start = scsih_scan_start,
  9391. .change_queue_depth = scsih_change_queue_depth,
  9392. .eh_abort_handler = scsih_abort,
  9393. .eh_device_reset_handler = scsih_dev_reset,
  9394. .eh_target_reset_handler = scsih_target_reset,
  9395. .eh_host_reset_handler = scsih_host_reset,
  9396. .bios_param = scsih_bios_param,
  9397. .can_queue = 1,
  9398. .this_id = -1,
  9399. .sg_tablesize = MPT2SAS_SG_DEPTH,
  9400. .max_sectors = 32767,
  9401. .cmd_per_lun = 7,
  9402. .use_clustering = ENABLE_CLUSTERING,
  9403. .shost_attrs = mpt3sas_host_attrs,
  9404. .sdev_attrs = mpt3sas_dev_attrs,
  9405. .track_queue_depth = 1,
  9406. .cmd_size = sizeof(struct scsiio_tracker),
  9407. };
  9408. /* raid transport support for SAS 2.0 HBA devices */
  9409. static struct raid_function_template mpt2sas_raid_functions = {
  9410. .cookie = &mpt2sas_driver_template,
  9411. .is_raid = scsih_is_raid,
  9412. .get_resync = scsih_get_resync,
  9413. .get_state = scsih_get_state,
  9414. };
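/*
 * The SAS 3.0 shost and raid templates below mirror the SAS 2.0 ones;
 * in the shost template only .proc_name and .sg_tablesize differ.
 */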
  9415. /* shost template for SAS 3.0 HBA devices */
  9416. static struct scsi_host_template mpt3sas_driver_template = {
  9417. .module = THIS_MODULE,
  9418. .name = "Fusion MPT SAS Host",
  9419. .proc_name = MPT3SAS_DRIVER_NAME,
  9420. .queuecommand = scsih_qcmd,
  9421. .target_alloc = scsih_target_alloc,
  9422. .slave_alloc = scsih_slave_alloc,
  9423. .slave_configure = scsih_slave_configure,
  9424. .target_destroy = scsih_target_destroy,
  9425. .slave_destroy = scsih_slave_destroy,
  9426. .scan_finished = scsih_scan_finished,
  9427. .scan_start = scsih_scan_start,
  9428. .change_queue_depth = scsih_change_queue_depth,
  9429. .eh_abort_handler = scsih_abort,
  9430. .eh_device_reset_handler = scsih_dev_reset,
  9431. .eh_target_reset_handler = scsih_target_reset,
  9432. .eh_host_reset_handler = scsih_host_reset,
  9433. .bios_param = scsih_bios_param,
  9434. .can_queue = 1,
  9435. .this_id = -1,
  9436. .sg_tablesize = MPT3SAS_SG_DEPTH,
  9437. .max_sectors = 32767,
  9438. .cmd_per_lun = 7,
  9439. .use_clustering = ENABLE_CLUSTERING,
  9440. .shost_attrs = mpt3sas_host_attrs,
  9441. .sdev_attrs = mpt3sas_dev_attrs,
  9442. .track_queue_depth = 1,
  9443. .cmd_size = sizeof(struct scsiio_tracker),
  9444. };
  9445. /* raid transport support for SAS 3.0 HBA devices */
  9446. static struct raid_function_template mpt3sas_raid_functions = {
  9447. .cookie = &mpt3sas_driver_template,
  9448. .is_raid = scsih_is_raid,
  9449. .get_resync = scsih_get_resync,
  9450. .get_state = scsih_get_state,
  9451. };
/**
 * _scsih_determine_hba_mpi_version - determine which MPI version class
 * this device belongs to.
 * @pdev: PCI device struct
 *
 * Returns MPI2_VERSION for SAS 2.0 HBA devices,
 * MPI25_VERSION for SAS 3.0 HBA devices, and
 * MPI26_VERSION for Cutlass & Invader SAS 3.0 HBA devices.
 */
  9461. static u16
  9462. _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
  9463. {
  9464. switch (pdev->device) {
  9465. case MPI2_MFGPAGE_DEVID_SSS6200:
  9466. case MPI2_MFGPAGE_DEVID_SAS2004:
  9467. case MPI2_MFGPAGE_DEVID_SAS2008:
  9468. case MPI2_MFGPAGE_DEVID_SAS2108_1:
  9469. case MPI2_MFGPAGE_DEVID_SAS2108_2:
  9470. case MPI2_MFGPAGE_DEVID_SAS2108_3:
  9471. case MPI2_MFGPAGE_DEVID_SAS2116_1:
  9472. case MPI2_MFGPAGE_DEVID_SAS2116_2:
  9473. case MPI2_MFGPAGE_DEVID_SAS2208_1:
  9474. case MPI2_MFGPAGE_DEVID_SAS2208_2:
  9475. case MPI2_MFGPAGE_DEVID_SAS2208_3:
  9476. case MPI2_MFGPAGE_DEVID_SAS2208_4:
  9477. case MPI2_MFGPAGE_DEVID_SAS2208_5:
  9478. case MPI2_MFGPAGE_DEVID_SAS2208_6:
  9479. case MPI2_MFGPAGE_DEVID_SAS2308_1:
  9480. case MPI2_MFGPAGE_DEVID_SAS2308_2:
  9481. case MPI2_MFGPAGE_DEVID_SAS2308_3:
  9482. case MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP:
  9483. return MPI2_VERSION;
  9484. case MPI25_MFGPAGE_DEVID_SAS3004:
  9485. case MPI25_MFGPAGE_DEVID_SAS3008:
  9486. case MPI25_MFGPAGE_DEVID_SAS3108_1:
  9487. case MPI25_MFGPAGE_DEVID_SAS3108_2:
  9488. case MPI25_MFGPAGE_DEVID_SAS3108_5:
  9489. case MPI25_MFGPAGE_DEVID_SAS3108_6:
  9490. return MPI25_VERSION;
  9491. case MPI26_MFGPAGE_DEVID_SAS3216:
  9492. case MPI26_MFGPAGE_DEVID_SAS3224:
  9493. case MPI26_MFGPAGE_DEVID_SAS3316_1:
  9494. case MPI26_MFGPAGE_DEVID_SAS3316_2:
  9495. case MPI26_MFGPAGE_DEVID_SAS3316_3:
  9496. case MPI26_MFGPAGE_DEVID_SAS3316_4:
  9497. case MPI26_MFGPAGE_DEVID_SAS3324_1:
  9498. case MPI26_MFGPAGE_DEVID_SAS3324_2:
  9499. case MPI26_MFGPAGE_DEVID_SAS3324_3:
  9500. case MPI26_MFGPAGE_DEVID_SAS3324_4:
  9501. case MPI26_MFGPAGE_DEVID_SAS3508:
  9502. case MPI26_MFGPAGE_DEVID_SAS3508_1:
  9503. case MPI26_MFGPAGE_DEVID_SAS3408:
  9504. case MPI26_MFGPAGE_DEVID_SAS3516:
  9505. case MPI26_MFGPAGE_DEVID_SAS3516_1:
  9506. case MPI26_MFGPAGE_DEVID_SAS3416:
  9507. case MPI26_MFGPAGE_DEVID_SAS3616:
  9508. return MPI26_VERSION;
  9509. }
  9510. return 0;
  9511. }
  9512. /**
  9513. * _scsih_probe - attach and add scsi host
  9514. * @pdev: PCI device struct
  9515. * @id: pci device id
  9516. *
  9517. * Returns 0 success, anything else error.
  9518. */
  9519. static int
  9520. _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  9521. {
  9522. struct MPT3SAS_ADAPTER *ioc;
  9523. struct Scsi_Host *shost = NULL;
  9524. int rv;
  9525. u16 hba_mpi_version;
  9526. /* Determine in which MPI version class this pci device belongs */
  9527. hba_mpi_version = _scsih_determine_hba_mpi_version(pdev);
  9528. if (hba_mpi_version == 0)
  9529. return -ENODEV;
/* Enumerate only SAS 2.0 HBAs if hbas_to_enumerate is one;
 * for other generation HBAs return -ENODEV.
 */
  9533. if ((hbas_to_enumerate == 1) && (hba_mpi_version != MPI2_VERSION))
  9534. return -ENODEV;
/* Enumerate only SAS 3.0 HBAs if hbas_to_enumerate is two;
 * for other generation HBAs return -ENODEV.
 */
  9538. if ((hbas_to_enumerate == 2) && (!(hba_mpi_version == MPI25_VERSION
  9539. || hba_mpi_version == MPI26_VERSION)))
  9540. return -ENODEV;
  9541. switch (hba_mpi_version) {
  9542. case MPI2_VERSION:
  9543. pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
  9544. PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
  9545. /* Use mpt2sas driver host template for SAS 2.0 HBA's */
  9546. shost = scsi_host_alloc(&mpt2sas_driver_template,
  9547. sizeof(struct MPT3SAS_ADAPTER));
  9548. if (!shost)
  9549. return -ENODEV;
  9550. ioc = shost_priv(shost);
  9551. memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
  9552. ioc->hba_mpi_version_belonged = hba_mpi_version;
  9553. ioc->id = mpt2_ids++;
  9554. sprintf(ioc->driver_name, "%s", MPT2SAS_DRIVER_NAME);
  9555. switch (pdev->device) {
  9556. case MPI2_MFGPAGE_DEVID_SSS6200:
  9557. ioc->is_warpdrive = 1;
  9558. ioc->hide_ir_msg = 1;
  9559. break;
  9560. case MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP:
  9561. ioc->is_mcpu_endpoint = 1;
  9562. break;
  9563. default:
  9564. ioc->mfg_pg10_hide_flag = MFG_PAGE10_EXPOSE_ALL_DISKS;
  9565. break;
  9566. }
  9567. break;
  9568. case MPI25_VERSION:
  9569. case MPI26_VERSION:
  9570. /* Use mpt3sas driver host template for SAS 3.0 HBA's */
  9571. shost = scsi_host_alloc(&mpt3sas_driver_template,
  9572. sizeof(struct MPT3SAS_ADAPTER));
  9573. if (!shost)
  9574. return -ENODEV;
  9575. ioc = shost_priv(shost);
  9576. memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
  9577. ioc->hba_mpi_version_belonged = hba_mpi_version;
  9578. ioc->id = mpt3_ids++;
  9579. sprintf(ioc->driver_name, "%s", MPT3SAS_DRIVER_NAME);
  9580. switch (pdev->device) {
  9581. case MPI26_MFGPAGE_DEVID_SAS3508:
  9582. case MPI26_MFGPAGE_DEVID_SAS3508_1:
  9583. case MPI26_MFGPAGE_DEVID_SAS3408:
  9584. case MPI26_MFGPAGE_DEVID_SAS3516:
  9585. case MPI26_MFGPAGE_DEVID_SAS3516_1:
  9586. case MPI26_MFGPAGE_DEVID_SAS3416:
  9587. case MPI26_MFGPAGE_DEVID_SAS3616:
  9588. ioc->is_gen35_ioc = 1;
  9589. break;
  9590. default:
  9591. ioc->is_gen35_ioc = 0;
  9592. }
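/*
 * MPI 2.5 IOCs at revision C0 or later, and all MPI 2.6 IOCs, use combined
 * reply queues; the number of reply post host index registers differs
 * between Gen3 and Gen3.5 controllers.
 */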
  9593. if ((ioc->hba_mpi_version_belonged == MPI25_VERSION &&
  9594. pdev->revision >= SAS3_PCI_DEVICE_C0_REVISION) ||
  9595. (ioc->hba_mpi_version_belonged == MPI26_VERSION)) {
  9596. ioc->combined_reply_queue = 1;
  9597. if (ioc->is_gen35_ioc)
  9598. ioc->combined_reply_index_count =
  9599. MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G35;
  9600. else
  9601. ioc->combined_reply_index_count =
  9602. MPT3_SUP_REPLY_POST_HOST_INDEX_REG_COUNT_G3;
  9603. }
  9604. break;
  9605. default:
  9606. return -ENODEV;
  9607. }
  9608. INIT_LIST_HEAD(&ioc->list);
  9609. spin_lock(&gioc_lock);
  9610. list_add_tail(&ioc->list, &mpt3sas_ioc_list);
  9611. spin_unlock(&gioc_lock);
  9612. ioc->shost = shost;
  9613. ioc->pdev = pdev;
  9614. ioc->scsi_io_cb_idx = scsi_io_cb_idx;
  9615. ioc->tm_cb_idx = tm_cb_idx;
  9616. ioc->ctl_cb_idx = ctl_cb_idx;
  9617. ioc->base_cb_idx = base_cb_idx;
  9618. ioc->port_enable_cb_idx = port_enable_cb_idx;
  9619. ioc->transport_cb_idx = transport_cb_idx;
  9620. ioc->scsih_cb_idx = scsih_cb_idx;
  9621. ioc->config_cb_idx = config_cb_idx;
  9622. ioc->tm_tr_cb_idx = tm_tr_cb_idx;
  9623. ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
  9624. ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
  9625. ioc->logging_level = logging_level;
  9626. ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
  9627. /* misc semaphores and spin locks */
  9628. mutex_init(&ioc->reset_in_progress_mutex);
  9629. /* initializing pci_access_mutex lock */
  9630. mutex_init(&ioc->pci_access_mutex);
  9631. spin_lock_init(&ioc->ioc_reset_in_progress_lock);
  9632. spin_lock_init(&ioc->scsi_lookup_lock);
  9633. spin_lock_init(&ioc->sas_device_lock);
  9634. spin_lock_init(&ioc->sas_node_lock);
  9635. spin_lock_init(&ioc->fw_event_lock);
  9636. spin_lock_init(&ioc->raid_device_lock);
  9637. spin_lock_init(&ioc->pcie_device_lock);
  9638. spin_lock_init(&ioc->diag_trigger_lock);
  9639. INIT_LIST_HEAD(&ioc->sas_device_list);
  9640. INIT_LIST_HEAD(&ioc->sas_device_init_list);
  9641. INIT_LIST_HEAD(&ioc->sas_expander_list);
  9642. INIT_LIST_HEAD(&ioc->enclosure_list);
  9643. INIT_LIST_HEAD(&ioc->pcie_device_list);
  9644. INIT_LIST_HEAD(&ioc->pcie_device_init_list);
  9645. INIT_LIST_HEAD(&ioc->fw_event_list);
  9646. INIT_LIST_HEAD(&ioc->raid_device_list);
  9647. INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
  9648. INIT_LIST_HEAD(&ioc->delayed_tr_list);
  9649. INIT_LIST_HEAD(&ioc->delayed_sc_list);
  9650. INIT_LIST_HEAD(&ioc->delayed_event_ack_list);
  9651. INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
  9652. INIT_LIST_HEAD(&ioc->reply_queue_list);
  9653. sprintf(ioc->name, "%s_cm%d", ioc->driver_name, ioc->id);
  9654. /* init shost parameters */
  9655. shost->max_cmd_len = 32;
  9656. shost->max_lun = max_lun;
  9657. shost->transportt = mpt3sas_transport_template;
  9658. shost->unique_id = ioc->id;
  9659. if (ioc->is_mcpu_endpoint) {
/* mCPU MPI supports 64K max IO */
  9661. shost->max_sectors = 128;
  9662. pr_info(MPT3SAS_FMT
  9663. "The max_sectors value is set to %d\n",
  9664. ioc->name, shost->max_sectors);
  9665. } else {
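/*
 * Honor the max_sectors module parameter: clamp it to the 64..32767
 * range and round it down to an even value.
 */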
  9666. if (max_sectors != 0xFFFF) {
  9667. if (max_sectors < 64) {
  9668. shost->max_sectors = 64;
  9669. pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
  9670. "for max_sectors, range is 64 to 32767. " \
  9671. "Assigning value of 64.\n", \
  9672. ioc->name, max_sectors);
  9673. } else if (max_sectors > 32767) {
  9674. shost->max_sectors = 32767;
pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
"for max_sectors, range is 64 to 32767. " \
"Assigning default value of 32767.\n", \
ioc->name, max_sectors);
  9679. } else {
  9680. shost->max_sectors = max_sectors & 0xFFFE;
  9681. pr_info(MPT3SAS_FMT
  9682. "The max_sectors value is set to %d\n",
  9683. ioc->name, shost->max_sectors);
  9684. }
  9685. }
  9686. }
  9687. /* register EEDP capabilities with SCSI layer */
  9688. if (prot_mask > 0)
  9689. scsi_host_set_prot(shost, prot_mask);
  9690. else
  9691. scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
  9692. | SHOST_DIF_TYPE2_PROTECTION
  9693. | SHOST_DIF_TYPE3_PROTECTION);
  9694. scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
  9695. /* event thread */
  9696. snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
  9697. "fw_event_%s%d", ioc->driver_name, ioc->id);
  9698. ioc->firmware_event_thread = alloc_ordered_workqueue(
  9699. ioc->firmware_event_name, 0);
  9700. if (!ioc->firmware_event_thread) {
  9701. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  9702. ioc->name, __FILE__, __LINE__, __func__);
  9703. rv = -ENODEV;
  9704. goto out_thread_fail;
  9705. }
  9706. ioc->is_driver_loading = 1;
  9707. if ((mpt3sas_base_attach(ioc))) {
  9708. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  9709. ioc->name, __FILE__, __LINE__, __func__);
  9710. rv = -ENODEV;
  9711. goto out_attach_fail;
  9712. }
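/*
 * For WarpDrive, decide whether bare drives are hidden from the OS based
 * on the manufacturing page 10 setting (or the presence of volumes).
 */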
  9713. if (ioc->is_warpdrive) {
  9714. if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_EXPOSE_ALL_DISKS)
  9715. ioc->hide_drives = 0;
  9716. else if (ioc->mfg_pg10_hide_flag == MFG_PAGE10_HIDE_ALL_DISKS)
  9717. ioc->hide_drives = 1;
  9718. else {
  9719. if (mpt3sas_get_num_volumes(ioc))
  9720. ioc->hide_drives = 1;
  9721. else
  9722. ioc->hide_drives = 0;
  9723. }
  9724. } else
  9725. ioc->hide_drives = 0;
  9726. rv = scsi_add_host(shost, &pdev->dev);
  9727. if (rv) {
  9728. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  9729. ioc->name, __FILE__, __LINE__, __func__);
  9730. goto out_add_shost_fail;
  9731. }
  9732. scsi_scan_host(shost);
  9733. return 0;
  9734. out_add_shost_fail:
  9735. mpt3sas_base_detach(ioc);
  9736. out_attach_fail:
  9737. destroy_workqueue(ioc->firmware_event_thread);
  9738. out_thread_fail:
  9739. spin_lock(&gioc_lock);
  9740. list_del(&ioc->list);
  9741. spin_unlock(&gioc_lock);
  9742. scsi_host_put(shost);
  9743. return rv;
  9744. }
  9745. #ifdef CONFIG_PM
  9746. /**
  9747. * scsih_suspend - power management suspend main entry point
  9748. * @pdev: PCI device struct
  9749. * @state: PM state change to (usually PCI_D3)
  9750. *
  9751. * Returns 0 success, anything else error.
  9752. */
  9753. static int
  9754. scsih_suspend(struct pci_dev *pdev, pm_message_t state)
  9755. {
  9756. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  9757. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  9758. pci_power_t device_state;
  9759. mpt3sas_base_stop_watchdog(ioc);
  9760. flush_scheduled_work();
  9761. scsi_block_requests(shost);
  9762. device_state = pci_choose_state(pdev, state);
  9763. pr_info(MPT3SAS_FMT
  9764. "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
  9765. ioc->name, pdev, pci_name(pdev), device_state);
  9766. pci_save_state(pdev);
  9767. mpt3sas_base_free_resources(ioc);
  9768. pci_set_power_state(pdev, device_state);
  9769. return 0;
  9770. }
  9771. /**
  9772. * scsih_resume - power management resume main entry point
  9773. * @pdev: PCI device struct
  9774. *
  9775. * Returns 0 success, anything else error.
  9776. */
  9777. static int
  9778. scsih_resume(struct pci_dev *pdev)
  9779. {
  9780. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  9781. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  9782. pci_power_t device_state = pdev->current_state;
  9783. int r;
  9784. pr_info(MPT3SAS_FMT
  9785. "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
  9786. ioc->name, pdev, pci_name(pdev), device_state);
  9787. pci_set_power_state(pdev, PCI_D0);
  9788. pci_enable_wake(pdev, PCI_D0, 0);
  9789. pci_restore_state(pdev);
  9790. ioc->pdev = pdev;
  9791. r = mpt3sas_base_map_resources(ioc);
  9792. if (r)
  9793. return r;
  9794. mpt3sas_base_hard_reset_handler(ioc, SOFT_RESET);
  9795. scsi_unblock_requests(shost);
  9796. mpt3sas_base_start_watchdog(ioc);
  9797. return 0;
  9798. }
  9799. #endif /* CONFIG_PM */
/**
 * scsih_pci_error_detected - Called when a PCI error is detected.
 * @pdev: PCI device struct
 * @state: PCI channel state
 *
 * Description: Called when a PCI error is detected.
 *
 * Return value:
 * PCI_ERS_RESULT_CAN_RECOVER, PCI_ERS_RESULT_NEED_RESET or
 * PCI_ERS_RESULT_DISCONNECT
 */
  9810. static pci_ers_result_t
  9811. scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
  9812. {
  9813. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  9814. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  9815. pr_info(MPT3SAS_FMT "PCI error: detected callback, state(%d)!!\n",
  9816. ioc->name, state);
  9817. switch (state) {
  9818. case pci_channel_io_normal:
  9819. return PCI_ERS_RESULT_CAN_RECOVER;
  9820. case pci_channel_io_frozen:
  9821. /* Fatal error, prepare for slot reset */
  9822. ioc->pci_error_recovery = 1;
  9823. scsi_block_requests(ioc->shost);
  9824. mpt3sas_base_stop_watchdog(ioc);
  9825. mpt3sas_base_free_resources(ioc);
  9826. return PCI_ERS_RESULT_NEED_RESET;
  9827. case pci_channel_io_perm_failure:
  9828. /* Permanent error, prepare for device removal */
  9829. ioc->pci_error_recovery = 1;
  9830. mpt3sas_base_stop_watchdog(ioc);
  9831. _scsih_flush_running_cmds(ioc);
  9832. return PCI_ERS_RESULT_DISCONNECT;
  9833. }
  9834. return PCI_ERS_RESULT_NEED_RESET;
  9835. }
  9836. /**
  9837. * scsih_pci_slot_reset - Called when PCI slot has been reset.
  9838. * @pdev: PCI device struct
  9839. *
  9840. * Description: This routine is called by the pci error recovery
  9841. * code after the PCI slot has been reset, just before we
  9842. * should resume normal operations.
  9843. */
  9844. static pci_ers_result_t
  9845. scsih_pci_slot_reset(struct pci_dev *pdev)
  9846. {
  9847. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  9848. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  9849. int rc;
  9850. pr_info(MPT3SAS_FMT "PCI error: slot reset callback!!\n",
  9851. ioc->name);
  9852. ioc->pci_error_recovery = 0;
  9853. ioc->pdev = pdev;
  9854. pci_restore_state(pdev);
  9855. rc = mpt3sas_base_map_resources(ioc);
  9856. if (rc)
  9857. return PCI_ERS_RESULT_DISCONNECT;
  9858. rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
  9859. pr_warn(MPT3SAS_FMT "hard reset: %s\n", ioc->name,
  9860. (rc == 0) ? "success" : "failed");
  9861. if (!rc)
  9862. return PCI_ERS_RESULT_RECOVERED;
  9863. else
  9864. return PCI_ERS_RESULT_DISCONNECT;
  9865. }
/**
 * scsih_pci_resume() - resume normal ops after PCI reset
 * @pdev: pointer to PCI device
 *
 * Called when the error recovery driver tells us that it's
 * OK to resume normal operation. Use completion to allow
 * halted scsi ops to resume.
 */
  9874. static void
  9875. scsih_pci_resume(struct pci_dev *pdev)
  9876. {
  9877. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  9878. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  9879. pr_info(MPT3SAS_FMT "PCI error: resume callback!!\n", ioc->name);
  9880. pci_cleanup_aer_uncorrect_error_status(pdev);
  9881. mpt3sas_base_start_watchdog(ioc);
  9882. scsi_unblock_requests(ioc->shost);
  9883. }
  9884. /**
  9885. * scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
  9886. * @pdev: pointer to PCI device
  9887. */
  9888. static pci_ers_result_t
  9889. scsih_pci_mmio_enabled(struct pci_dev *pdev)
  9890. {
  9891. struct Scsi_Host *shost = pci_get_drvdata(pdev);
  9892. struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
  9893. pr_info(MPT3SAS_FMT "PCI error: mmio enabled callback!!\n",
  9894. ioc->name);
  9895. /* TODO - dump whatever for debugging purposes */
/* This is called only if scsih_pci_error_detected returns
 * PCI_ERS_RESULT_CAN_RECOVER. Read/write to the device still
 * works, so there is no need to reset the slot.
 */
  9900. return PCI_ERS_RESULT_RECOVERED;
  9901. }
/**
 * scsih_ncq_prio_supp - Check for NCQ command priority support
 * @sdev: scsi device struct
 *
 * This is called when a user indicates they would like to enable
 * NCQ command priorities. This works only on SATA devices.
 */
  9909. bool scsih_ncq_prio_supp(struct scsi_device *sdev)
  9910. {
  9911. unsigned char *buf;
  9912. bool ncq_prio_supp = false;
  9913. if (!scsi_device_supports_vpd(sdev))
  9914. return ncq_prio_supp;
  9915. buf = kmalloc(SCSI_VPD_PG_LEN, GFP_KERNEL);
  9916. if (!buf)
  9917. return ncq_prio_supp;
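/*
 * VPD page 0x89 (ATA Information) embeds the ATA IDENTIFY DEVICE data
 * starting at byte 60; byte 213 bit 4 is IDENTIFY word 76 bit 12, the
 * NCQ priority support bit.
 */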
  9918. if (!scsi_get_vpd_page(sdev, 0x89, buf, SCSI_VPD_PG_LEN))
  9919. ncq_prio_supp = (buf[213] >> 4) & 1;
  9920. kfree(buf);
  9921. return ncq_prio_supp;
  9922. }
  9923. /*
  9924. * The pci device ids are defined in mpi/mpi2_cnfg.h.
  9925. */
  9926. static const struct pci_device_id mpt3sas_pci_table[] = {
  9927. /* Spitfire ~ 2004 */
  9928. { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004,
  9929. PCI_ANY_ID, PCI_ANY_ID },
  9930. /* Falcon ~ 2008 */
  9931. { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008,
  9932. PCI_ANY_ID, PCI_ANY_ID },
  9933. /* Liberator ~ 2108 */
  9934. { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1,
  9935. PCI_ANY_ID, PCI_ANY_ID },
  9936. { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2,
  9937. PCI_ANY_ID, PCI_ANY_ID },
  9938. { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3,
  9939. PCI_ANY_ID, PCI_ANY_ID },
  9940. /* Meteor ~ 2116 */
  9941. { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1,
  9942. PCI_ANY_ID, PCI_ANY_ID },
  9943. { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2,
  9944. PCI_ANY_ID, PCI_ANY_ID },
  9945. /* Thunderbolt ~ 2208 */
  9946. { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1,
  9947. PCI_ANY_ID, PCI_ANY_ID },
  9948. { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2,
  9949. PCI_ANY_ID, PCI_ANY_ID },
  9950. { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3,
  9951. PCI_ANY_ID, PCI_ANY_ID },
  9952. { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4,
  9953. PCI_ANY_ID, PCI_ANY_ID },
  9954. { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5,
  9955. PCI_ANY_ID, PCI_ANY_ID },
  9956. { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6,
  9957. PCI_ANY_ID, PCI_ANY_ID },
  9958. /* Mustang ~ 2308 */
  9959. { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1,
  9960. PCI_ANY_ID, PCI_ANY_ID },
  9961. { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2,
  9962. PCI_ANY_ID, PCI_ANY_ID },
  9963. { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3,
  9964. PCI_ANY_ID, PCI_ANY_ID },
  9965. { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_MPI_EP,
  9966. PCI_ANY_ID, PCI_ANY_ID },
  9967. /* SSS6200 */
  9968. { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200,
  9969. PCI_ANY_ID, PCI_ANY_ID },
  9970. /* Fury ~ 3004 and 3008 */
  9971. { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
  9972. PCI_ANY_ID, PCI_ANY_ID },
  9973. { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
  9974. PCI_ANY_ID, PCI_ANY_ID },
  9975. /* Invader ~ 3108 */
  9976. { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
  9977. PCI_ANY_ID, PCI_ANY_ID },
  9978. { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
  9979. PCI_ANY_ID, PCI_ANY_ID },
  9980. { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
  9981. PCI_ANY_ID, PCI_ANY_ID },
  9982. { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
  9983. PCI_ANY_ID, PCI_ANY_ID },
  9984. /* Cutlass ~ 3216 and 3224 */
  9985. { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216,
  9986. PCI_ANY_ID, PCI_ANY_ID },
  9987. { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224,
  9988. PCI_ANY_ID, PCI_ANY_ID },
  9989. /* Intruder ~ 3316 and 3324 */
  9990. { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1,
  9991. PCI_ANY_ID, PCI_ANY_ID },
  9992. { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2,
  9993. PCI_ANY_ID, PCI_ANY_ID },
  9994. { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_3,
  9995. PCI_ANY_ID, PCI_ANY_ID },
  9996. { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_4,
  9997. PCI_ANY_ID, PCI_ANY_ID },
  9998. { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1,
  9999. PCI_ANY_ID, PCI_ANY_ID },
  10000. { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2,
  10001. PCI_ANY_ID, PCI_ANY_ID },
  10002. { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_3,
  10003. PCI_ANY_ID, PCI_ANY_ID },
  10004. { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_4,
  10005. PCI_ANY_ID, PCI_ANY_ID },
  10006. /* Ventura, Crusader, Harpoon & Tomcat ~ 3516, 3416, 3508 & 3408*/
  10007. { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508,
  10008. PCI_ANY_ID, PCI_ANY_ID },
  10009. { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1,
  10010. PCI_ANY_ID, PCI_ANY_ID },
  10011. { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408,
  10012. PCI_ANY_ID, PCI_ANY_ID },
  10013. { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516,
  10014. PCI_ANY_ID, PCI_ANY_ID },
  10015. { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1,
  10016. PCI_ANY_ID, PCI_ANY_ID },
  10017. { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416,
  10018. PCI_ANY_ID, PCI_ANY_ID },
  10019. /* Mercator ~ 3616*/
  10020. { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616,
  10021. PCI_ANY_ID, PCI_ANY_ID },
  10022. {0} /* Terminating entry */
  10023. };
  10024. MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
  10025. static struct pci_error_handlers _mpt3sas_err_handler = {
  10026. .error_detected = scsih_pci_error_detected,
  10027. .mmio_enabled = scsih_pci_mmio_enabled,
  10028. .slot_reset = scsih_pci_slot_reset,
  10029. .resume = scsih_pci_resume,
  10030. };
  10031. static struct pci_driver mpt3sas_driver = {
  10032. .name = MPT3SAS_DRIVER_NAME,
  10033. .id_table = mpt3sas_pci_table,
  10034. .probe = _scsih_probe,
  10035. .remove = scsih_remove,
  10036. .shutdown = scsih_shutdown,
  10037. .err_handler = &_mpt3sas_err_handler,
  10038. #ifdef CONFIG_PM
  10039. .suspend = scsih_suspend,
  10040. .resume = scsih_resume,
  10041. #endif
  10042. };
  10043. /**
  10044. * scsih_init - main entry point for this driver.
  10045. *
  10046. * Returns 0 success, anything else error.
  10047. */
  10048. static int
  10049. scsih_init(void)
  10050. {
  10051. mpt2_ids = 0;
  10052. mpt3_ids = 0;
  10053. mpt3sas_base_initialize_callback_handler();
/* queuecommand callback handler */
  10055. scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
  10056. /* task management callback handler */
  10057. tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
  10058. /* base internal commands callback handler */
  10059. base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
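/* port enable internal commands callback handler */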
  10060. port_enable_cb_idx = mpt3sas_base_register_callback_handler(
  10061. mpt3sas_port_enable_done);
  10062. /* transport internal commands callback handler */
  10063. transport_cb_idx = mpt3sas_base_register_callback_handler(
  10064. mpt3sas_transport_done);
  10065. /* scsih internal commands callback handler */
  10066. scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
  10067. /* configuration page API internal commands callback handler */
  10068. config_cb_idx = mpt3sas_base_register_callback_handler(
  10069. mpt3sas_config_done);
  10070. /* ctl module callback handler */
  10071. ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
  10072. tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
  10073. _scsih_tm_tr_complete);
  10074. tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
  10075. _scsih_tm_volume_tr_complete);
  10076. tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
  10077. _scsih_sas_control_complete);
  10078. return 0;
  10079. }
/**
 * scsih_exit - exit point for this driver (when it is a module).
 */
  10085. static void
  10086. scsih_exit(void)
  10087. {
  10088. mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
  10089. mpt3sas_base_release_callback_handler(tm_cb_idx);
  10090. mpt3sas_base_release_callback_handler(base_cb_idx);
  10091. mpt3sas_base_release_callback_handler(port_enable_cb_idx);
  10092. mpt3sas_base_release_callback_handler(transport_cb_idx);
  10093. mpt3sas_base_release_callback_handler(scsih_cb_idx);
  10094. mpt3sas_base_release_callback_handler(config_cb_idx);
  10095. mpt3sas_base_release_callback_handler(ctl_cb_idx);
  10096. mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
  10097. mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
  10098. mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
  10099. /* raid transport support */
  10100. if (hbas_to_enumerate != 1)
  10101. raid_class_release(mpt3sas_raid_template);
  10102. if (hbas_to_enumerate != 2)
  10103. raid_class_release(mpt2sas_raid_template);
  10104. sas_release_transport(mpt3sas_transport_template);
  10105. }
  10106. /**
  10107. * _mpt3sas_init - main entry point for this driver.
  10108. *
  10109. * Returns 0 success, anything else error.
  10110. */
  10111. static int __init
  10112. _mpt3sas_init(void)
  10113. {
  10114. int error;
  10115. pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
  10116. MPT3SAS_DRIVER_VERSION);
  10117. mpt3sas_transport_template =
  10118. sas_attach_transport(&mpt3sas_transport_functions);
  10119. if (!mpt3sas_transport_template)
  10120. return -ENODEV;
/* No need to attach the mpt3sas raid functions template
 * if the hbas_to_enumerate value is one.
 */
  10124. if (hbas_to_enumerate != 1) {
  10125. mpt3sas_raid_template =
  10126. raid_class_attach(&mpt3sas_raid_functions);
  10127. if (!mpt3sas_raid_template) {
  10128. sas_release_transport(mpt3sas_transport_template);
  10129. return -ENODEV;
  10130. }
  10131. }
/* No need to attach the mpt2sas raid functions template
 * if the hbas_to_enumerate value is two.
 */
  10135. if (hbas_to_enumerate != 2) {
  10136. mpt2sas_raid_template =
  10137. raid_class_attach(&mpt2sas_raid_functions);
  10138. if (!mpt2sas_raid_template) {
  10139. sas_release_transport(mpt3sas_transport_template);
  10140. return -ENODEV;
  10141. }
  10142. }
  10143. error = scsih_init();
  10144. if (error) {
  10145. scsih_exit();
  10146. return error;
  10147. }
  10148. mpt3sas_ctl_init(hbas_to_enumerate);
  10149. error = pci_register_driver(&mpt3sas_driver);
  10150. if (error)
  10151. scsih_exit();
  10152. return error;
  10153. }
  10154. /**
  10155. * _mpt3sas_exit - exit point for this driver (when it is a module).
  10156. *
  10157. */
  10158. static void __exit
  10159. _mpt3sas_exit(void)
  10160. {
  10161. pr_info("mpt3sas version %s unloading\n",
  10162. MPT3SAS_DRIVER_VERSION);
  10163. mpt3sas_ctl_exit(hbas_to_enumerate);
  10164. pci_unregister_driver(&mpt3sas_driver);
  10165. scsih_exit();
  10166. }
  10167. module_init(_mpt3sas_init);
  10168. module_exit(_mpt3sas_exit);