mpt3sas_base.c

/*
 * This is the Fusion MPT base driver providing common API layer interface
 * for access to MPT (Message Passing Technology) firmware.
 *
 * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
 * Copyright (C) 2012-2014  LSI Corporation
 * Copyright (C) 2013-2014 Avago Technologies
 *  (mailto: MPT-FusionLinux.pdl@avagotech.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * NO WARRANTY
 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
 * solely responsible for determining the appropriateness of using and
 * distributing the Program and assumes all risks associated with its
 * exercise of rights under this Agreement, including but not limited to
 * the risks and costs of program errors, damage to or loss of data,
 * programs or equipment, and unavailability or interruption of operations.
 *
 * DISCLAIMER OF LIABILITY
 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
 * USA.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/time.h>
#include <linux/ktime.h>
#include <linux/kthread.h>
#include <asm/page.h>	/* To get host page size per arch */
#include <linux/aer.h>

#include "mpt3sas_base.h"
static MPT_CALLBACK	mpt_callbacks[MPT_MAX_CALLBACKS];

#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */

/* maximum controller queue depth */
#define MAX_HBA_QUEUE_DEPTH	30000
#define MAX_CHAIN_DEPTH		100000

static int max_queue_depth = -1;
module_param(max_queue_depth, int, 0);
MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");

static int max_sgl_entries = -1;
module_param(max_sgl_entries, int, 0);
MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");

static int msix_disable = -1;
module_param(msix_disable, int, 0);
MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");

static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, S_IRUGO);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");

static int max_msix_vectors = -1;
module_param(max_msix_vectors, int, 0);
MODULE_PARM_DESC(max_msix_vectors, " max msix vectors");

static int mpt3sas_fwfault_debug;
MODULE_PARM_DESC(mpt3sas_fwfault_debug,
	" enable detection of firmware fault and halt firmware - (default=0)");

static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
/**
 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
 * @val: value to set
 * @kp: kernel parameter descriptor
 */
static int
_scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
{
	int ret = param_set_int(val, kp);
	struct MPT3SAS_ADAPTER *ioc;

	if (ret)
		return ret;

	pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
	/* global ioc spinlock to protect controller list on list operations */
	spin_lock(&gioc_lock);
	list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
		ioc->fwfault_debug = mpt3sas_fwfault_debug;
	spin_unlock(&gioc_lock);
	return 0;
}
module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
	param_get_int, &mpt3sas_fwfault_debug, 0644);
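
/*
 * Usage note (illustrative only, not part of the driver): because the
 * parameter is registered with mode 0644 via module_param_call() above,
 * fwfault_debug can be toggled at runtime from user space, e.g.:
 *
 *	echo 1 > /sys/module/mpt3sas/parameters/mpt3sas_fwfault_debug
 *
 * which invokes _scsih_set_fwfault_debug() and propagates the new value
 * to every IOC on mpt3sas_ioc_list.
 */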
/**
 * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
 * @arg: input argument, used to derive ioc
 *
 * Return 0 if controller is removed from pci subsystem.
 * Return -1 otherwise.
 */
static int mpt3sas_remove_dead_ioc_func(void *arg)
{
	struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
	struct pci_dev *pdev;

	if (!ioc)
		return -1;

	pdev = ioc->pdev;
	if (!pdev)
		return -1;
	pci_stop_and_remove_bus_device_locked(pdev);
	return 0;
}
/**
 * _base_fault_reset_work - workq handling ioc fault conditions
 * @work: input argument, used to derive ioc
 * Context: sleep.
 *
 * Return nothing.
 */
static void
_base_fault_reset_work(struct work_struct *work)
{
	struct MPT3SAS_ADAPTER *ioc =
	    container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
	unsigned long flags;
	u32 doorbell;
	int rc;
	struct task_struct *p;

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	if (ioc->shost_recovery || ioc->pci_error_recovery)
		goto rearm_timer;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	doorbell = mpt3sas_base_get_iocstate(ioc, 0);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
		pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
		    ioc->name);

		/* It may be possible that EEH recovery can resolve some of
		 * the pci bus failure issues rather than removing the dead
		 * ioc function by considering the controller to be in a
		 * non-operational state. So here priority is given to EEH
		 * recovery. If it does not resolve the issue, the mpt3sas
		 * driver will consider this controller non-operational and
		 * remove the dead ioc function.
		 */
		if (ioc->non_operational_loop++ < 5) {
			spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
			    flags);
			goto rearm_timer;
		}

		/*
		 * Call _scsih_flush_pending_cmds callback so that we flush all
		 * pending commands back to OS. This call is required to avoid
		 * deadlock at block layer. Dead IOC will fail to do diag reset,
		 * and this call is safe since dead ioc will never return any
		 * command back from HW.
		 */
		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
		/*
		 * Set remove_host flag early since kernel thread will
		 * take some time to execute.
		 */
		ioc->remove_host = 1;
		/* Remove the dead host */
		p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
		    "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
		if (IS_ERR(p))
			pr_err(MPT3SAS_FMT
			    "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
			    ioc->name, __func__);
		else
			pr_err(MPT3SAS_FMT
			    "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
			    ioc->name, __func__);
		return; /* don't rearm timer */
	}

	ioc->non_operational_loop = 0;

	if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
		    __func__, (rc == 0) ? "success" : "failed");
		doorbell = mpt3sas_base_get_iocstate(ioc, 0);
		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
			mpt3sas_base_fault_info(ioc, doorbell &
			    MPI2_DOORBELL_DATA_MASK);
		if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL)
			return; /* don't rearm timer */
	}

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 rearm_timer:
	if (ioc->fault_reset_work_q)
		queue_delayed_work(ioc->fault_reset_work_q,
		    &ioc->fault_reset_work,
		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}
/**
 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
 * @ioc: per adapter object
 * Context: sleep.
 *
 * Return nothing.
 */
void
mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned long flags;

	if (ioc->fault_reset_work_q)
		return;

	/* initialize fault polling */

	INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
	snprintf(ioc->fault_reset_work_q_name,
	    sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
	    ioc->driver_name, ioc->id);
	ioc->fault_reset_work_q =
	    create_singlethread_workqueue(ioc->fault_reset_work_q_name);
	if (!ioc->fault_reset_work_q) {
		pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
		    ioc->name, __func__, __LINE__);
		return;
	}
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	if (ioc->fault_reset_work_q)
		queue_delayed_work(ioc->fault_reset_work_q,
		    &ioc->fault_reset_work,
		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}
/**
 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
 * @ioc: per adapter object
 * Context: sleep.
 *
 * Return nothing.
 */
void
mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned long flags;
	struct workqueue_struct *wq;

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	wq = ioc->fault_reset_work_q;
	ioc->fault_reset_work_q = NULL;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	if (wq) {
		if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
			flush_workqueue(wq);
		destroy_workqueue(wq);
	}
}
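
/*
 * Usage sketch (illustrative only, not part of this file): the watchdog
 * pair brackets the controller's operational lifetime. A typical attach
 * path starts polling once the IOC is operational, and the remove/reset
 * paths stop it before tearing resources down:
 *
 *	mpt3sas_base_start_watchdog(ioc);
 *	...
 *	mpt3sas_base_stop_watchdog(ioc);
 *
 * Note that stop_watchdog clears fault_reset_work_q under the lock first,
 * so a concurrently running _base_fault_reset_work() cannot rearm itself.
 */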
/**
 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
 * @ioc: per adapter object
 * @fault_code: fault code
 *
 * Return nothing.
 */
void
mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
{
	pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n",
	    ioc->name, fault_code);
}
/**
 * mpt3sas_halt_firmware - halts mpt controller firmware
 * @ioc: per adapter object
 *
 * For debugging timeout related issues. Writing 0xC0FFEE00 to the
 * doorbell register will halt controller firmware. The intent is to
 * stop both driver and firmware so that the end user can obtain a
 * ring buffer from the controller UART.
 */
void
mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
{
	u32 doorbell;

	if (!ioc->fwfault_debug)
		return;

	dump_stack();

	doorbell = readl(&ioc->chip->Doorbell);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
		mpt3sas_base_fault_info(ioc, doorbell);
	else {
		writel(0xC0FFEE00, &ioc->chip->Doorbell);
		pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
		    ioc->name);
	}

	if (ioc->fwfault_debug == 2)
		for (;;)
			;
	else
		panic("panic in %s\n", __func__);
}
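
/*
 * Caller sketch (illustrative only, not part of this file): a command
 * timeout path would typically invoke the halt before escalating to a
 * reset, so the firmware state is preserved for debugging when
 * fwfault_debug is set:
 *
 *	if (command_timed_out)
 *		mpt3sas_halt_firmware(ioc);
 *
 * With fwfault_debug == 0 the call is a no-op; == 1 panics the host
 * after halting the firmware; == 2 spins forever so both sides stay
 * frozen for UART inspection.
 */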
/**
 * _base_sas_ioc_info - verbose translation of the ioc status
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @request_hdr: request mf
 *
 * Return nothing.
 */
static void
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
	MPI2RequestHeader_t *request_hdr)
{
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	char *desc = NULL;
	u16 frame_sz;
	char *func_str = NULL;

	/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
		return;

	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return;

	switch (ioc_status) {

/****************************************************************************
*  Common IOCStatus values for all replies
****************************************************************************/

	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc = "invalid function";
		break;
	case MPI2_IOCSTATUS_BUSY:
		desc = "busy";
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		desc = "invalid sgl";
		break;
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
		desc = "internal error";
		break;
	case MPI2_IOCSTATUS_INVALID_VPID:
		desc = "invalid vpid";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		desc = "insufficient resources";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc = "insufficient power";
		break;
	case MPI2_IOCSTATUS_INVALID_FIELD:
		desc = "invalid field";
		break;
	case MPI2_IOCSTATUS_INVALID_STATE:
		desc = "invalid state";
		break;
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
		desc = "op state not supported";
		break;

/****************************************************************************
*  Config IOCStatus values
****************************************************************************/

	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
		desc = "config invalid action";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
		desc = "config invalid type";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
		desc = "config invalid page";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
		desc = "config invalid data";
		break;
	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
		desc = "config no defaults";
		break;
	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
		desc = "config cant commit";
		break;

/****************************************************************************
*  SCSI IO Reply
****************************************************************************/

	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		break;

/****************************************************************************
*  For use by SCSI Initiator and SCSI Target end-to-end data protection
****************************************************************************/

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc = "eedp app tag error";
		break;

/****************************************************************************
*  SCSI Target values
****************************************************************************/

	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
		desc = "target invalid io index";
		break;
	case MPI2_IOCSTATUS_TARGET_ABORTED:
		desc = "target aborted";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
		desc = "target no conn retryable";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
		desc = "target no connection";
		break;
	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
		desc = "target xfer count mismatch";
		break;
	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
		desc = "target data offset error";
		break;
	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
		desc = "target too much write data";
		break;
	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
		desc = "target iu too short";
		break;
	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
		desc = "target ack nak timeout";
		break;
	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
		desc = "target nak received";
		break;

/****************************************************************************
*  Serial Attached SCSI values
****************************************************************************/

	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
		desc = "smp request failed";
		break;
	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
		desc = "smp data overrun";
		break;

/****************************************************************************
*  Diagnostic Buffer Post / Diagnostic Release values
****************************************************************************/

	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
		desc = "diagnostic released";
		break;
	default:
		break;
	}

	if (!desc)
		return;

	switch (request_hdr->Function) {
	case MPI2_FUNCTION_CONFIG:
		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
		func_str = "config_page";
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
		func_str = "task_mgmt";
		break;
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
		func_str = "sas_iounit_ctl";
		break;
	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
		frame_sz = sizeof(Mpi2SepRequest_t);
		func_str = "enclosure";
		break;
	case MPI2_FUNCTION_IOC_INIT:
		frame_sz = sizeof(Mpi2IOCInitRequest_t);
		func_str = "ioc_init";
		break;
	case MPI2_FUNCTION_PORT_ENABLE:
		frame_sz = sizeof(Mpi2PortEnableRequest_t);
		func_str = "port_enable";
		break;
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
		frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
		func_str = "smp_passthru";
		break;
	case MPI2_FUNCTION_NVME_ENCAPSULATED:
		frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
		    ioc->sge_size;
		func_str = "nvme_encapsulated";
		break;
	default:
		frame_sz = 32;
		func_str = "unknown";
		break;
	}

	pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
	    ioc->name, desc, ioc_status, request_hdr, func_str);

	_debug_dump_mf(request_hdr, frame_sz/4);
}
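
/*
 * Sizing note (illustrative only): frame_sz above is computed in bytes,
 * while _debug_dump_mf() consumes a count of 32-bit dwords, hence the
 * frame_sz/4 conversion at the call site. For example, a task management
 * request dumps sizeof(Mpi2SCSITaskManagementRequest_t)/4 dwords; requests
 * that embed an SGE (config, smp_passthru, nvme_encapsulated) add
 * ioc->sge_size before the division.
 */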
/**
 * _base_display_event_data - verbose translation of firmware async events
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 *
 * Return nothing.
 */
static void
_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
	Mpi2EventNotificationReply_t *mpi_reply)
{
	char *desc = NULL;
	u16 event;

	if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
		return;

	event = le16_to_cpu(mpi_reply->Event);

	switch (event) {
	case MPI2_EVENT_LOG_DATA:
		desc = "Log Data";
		break;
	case MPI2_EVENT_STATE_CHANGE:
		desc = "Status Change";
		break;
	case MPI2_EVENT_HARD_RESET_RECEIVED:
		desc = "Hard Reset Received";
		break;
	case MPI2_EVENT_EVENT_CHANGE:
		desc = "Event Change";
		break;
	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
		desc = "Device Status Change";
		break;
	case MPI2_EVENT_IR_OPERATION_STATUS:
		if (!ioc->hide_ir_msg)
			desc = "IR Operation Status";
		break;
	case MPI2_EVENT_SAS_DISCOVERY:
	{
		Mpi2EventDataSasDiscovery_t *event_data =
		    (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
		pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
		    (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
		    "start" : "stop");
		if (event_data->DiscoveryStatus)
			pr_cont(" discovery_status(0x%08x)",
			    le32_to_cpu(event_data->DiscoveryStatus));
		pr_cont("\n");
		return;
	}
	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
		desc = "SAS Broadcast Primitive";
		break;
	case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
		desc = "SAS Init Device Status Change";
		break;
	case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
		desc = "SAS Init Table Overflow";
		break;
	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		desc = "SAS Topology Change List";
		break;
	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		desc = "SAS Enclosure Device Status Change";
		break;
	case MPI2_EVENT_IR_VOLUME:
		if (!ioc->hide_ir_msg)
			desc = "IR Volume";
		break;
	case MPI2_EVENT_IR_PHYSICAL_DISK:
		if (!ioc->hide_ir_msg)
			desc = "IR Physical Disk";
		break;
	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		if (!ioc->hide_ir_msg)
			desc = "IR Configuration Change List";
		break;
	case MPI2_EVENT_LOG_ENTRY_ADDED:
		if (!ioc->hide_ir_msg)
			desc = "Log Entry Added";
		break;
	case MPI2_EVENT_TEMP_THRESHOLD:
		desc = "Temperature Threshold";
		break;
	case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
		desc = "Cable Event";
		break;
	case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
		desc = "PCIE Device Status Change";
		break;
	case MPI2_EVENT_PCIE_ENUMERATION:
	{
		Mpi26EventDataPCIeEnumeration_t *event_data =
		    (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
		pr_info(MPT3SAS_FMT "PCIE Enumeration: (%s)", ioc->name,
		    (event_data->ReasonCode ==
			MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
		    "start" : "stop");
		if (event_data->EnumerationStatus)
			pr_cont(" enumeration_status(0x%08x)",
			    le32_to_cpu(event_data->EnumerationStatus));
		pr_cont("\n");
		return;
	}
	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
		desc = "PCIE Topology Change List";
		break;
	}

	if (!desc)
		return;

	pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
}
/**
 * _base_sas_log_info - verbose translation of firmware log info
 * @ioc: per adapter object
 * @log_info: log info
 *
 * Return nothing.
 */
static void
_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
{
	union loginfo_type {
		u32	loginfo;
		struct {
			u32	subcode:16;
			u32	code:8;
			u32	originator:4;
			u32	bus_type:4;
		} dw;
	};
	union loginfo_type sas_loginfo;
	char *originator_str = NULL;

	sas_loginfo.loginfo = log_info;
	if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
		return;

	/* each nexus loss loginfo */
	if (log_info == 0x31170000)
		return;

	/* eat the loginfos associated with task aborts */
	if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
	    0x31140000 || log_info == 0x31130000))
		return;

	switch (sas_loginfo.dw.originator) {
	case 0:
		originator_str = "IOP";
		break;
	case 1:
		originator_str = "PL";
		break;
	case 2:
		if (!ioc->hide_ir_msg)
			originator_str = "IR";
		else
			originator_str = "WarpDrive";
		break;
	}

	pr_warn(MPT3SAS_FMT
	    "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
	    ioc->name, log_info,
	    originator_str, sas_loginfo.dw.code,
	    sas_loginfo.dw.subcode);
}
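
/*
 * Worked example (illustrative only): decoding log_info 0x31170000 with
 * the loginfo_type union above, assuming the compiler allocates bitfields
 * starting at the least significant bit (as on x86 and other common
 * little-endian Linux targets):
 *
 *	sas_loginfo.loginfo = 0x31170000;
 *	sas_loginfo.dw.subcode    == 0x0000
 *	sas_loginfo.dw.code       == 0x17
 *	sas_loginfo.dw.originator == 0x1	-> "PL"
 *	sas_loginfo.dw.bus_type   == 0x3	-> SAS
 *
 * which is exactly the nexus loss loginfo filtered out above.
 */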
/**
 * _base_display_reply_info - verbose translation of reply frame info
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 *
 * Return nothing.
 */
static void
_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;
	u16 ioc_status;
	u32 loginfo = 0;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (unlikely(!mpi_reply)) {
		pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		return;
	}
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);

	if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
	    (ioc->logging_level & MPT_DEBUG_REPLY)) {
		_base_sas_ioc_info(ioc, mpi_reply,
		    mpt3sas_base_get_msg_frame(ioc, smid));
	}

	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
		_base_sas_log_info(ioc, loginfo);
	}

	if (ioc_status || loginfo) {
		ioc_status &= MPI2_IOCSTATUS_MASK;
		mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
	}
}
/**
 * mpt3sas_base_done - base internal command completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 *
 * Return 1 meaning mf should be freed from _base_interrupt,
 * 0 meaning the mf is freed from this function.
 */
u8
mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
		return mpt3sas_check_for_pending_internal_cmds(ioc, smid);

	if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
		return 1;

	ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
	if (mpi_reply) {
		ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
		memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
	}
	ioc->base_cmds.status &= ~MPT3_CMD_PENDING;

	complete(&ioc->base_cmds.done);
	return 1;
}
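
/*
 * Pairing sketch (illustrative only, not part of this file): the issuing
 * side of an internal base command marks base_cmds pending, posts the
 * request, and then sleeps on the completion that mpt3sas_base_done()
 * signals above. Under those assumptions the pattern looks like:
 *
 *	ioc->base_cmds.status = MPT3_CMD_PENDING;
 *	ioc->put_smid_default(ioc, smid);
 *	wait_for_completion_timeout(&ioc->base_cmds.done, timeout * HZ);
 *	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE))
 *		... handle timeout ...
 */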
/**
 * _base_async_event - main callback handler for firmware async events
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 *
 * Return 1 meaning mf should be freed from _base_interrupt,
 * 0 meaning the mf is freed from this function.
 */
static u8
_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
{
	Mpi2EventNotificationReply_t *mpi_reply;
	Mpi2EventAckRequest_t *ack_request;
	u16 smid;
	struct _event_ack_list *delayed_event_ack;

	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (!mpi_reply)
		return 1;
	if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
		return 1;

	_base_display_event_data(ioc, mpi_reply);

	if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
		goto out;
	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
		    GFP_ATOMIC);
		if (!delayed_event_ack)
			goto out;
		INIT_LIST_HEAD(&delayed_event_ack->list);
		delayed_event_ack->Event = mpi_reply->Event;
		delayed_event_ack->EventContext = mpi_reply->EventContext;
		list_add_tail(&delayed_event_ack->list,
		    &ioc->delayed_event_ack_list);
		dewtprintk(ioc, pr_info(MPT3SAS_FMT
		    "DELAYED: EVENT ACK: event (0x%04x)\n",
		    ioc->name, le16_to_cpu(mpi_reply->Event)));
		goto out;
	}

	ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
	ack_request->Event = mpi_reply->Event;
	ack_request->EventContext = mpi_reply->EventContext;
	ack_request->VF_ID = 0;  /* TODO */
	ack_request->VP_ID = 0;
	ioc->put_smid_default(ioc, smid);

 out:

	/* scsih callback handler */
	mpt3sas_scsih_event_callback(ioc, msix_index, reply);

	/* ctl callback handler */
	mpt3sas_ctl_event_callback(ioc, msix_index, reply);

	return 1;
}
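
/*
 * Flow sketch (illustrative only, the drain path lives outside this
 * section): when no free smid exists, the ack is parked on
 * ioc->delayed_event_ack_list; once an EVENT_ACK completes,
 * mpt3sas_base_done() above hands control to
 * mpt3sas_check_for_pending_internal_cmds(), which can pick a parked
 * entry and send it using the smid that just became available:
 *
 *	event notification -> no smid -> list_add_tail(delayed_event_ack)
 *	EVENT_ACK completes -> check_for_pending_internal_cmds -> resend
 */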
/**
 * _base_get_cb_idx - obtain the callback index
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return callback index.
 */
static u8
_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	int i;
	u8 cb_idx;

	if (smid < ioc->hi_priority_smid) {
		i = smid - 1;
		cb_idx = ioc->scsi_lookup[i].cb_idx;
	} else if (smid < ioc->internal_smid) {
		i = smid - ioc->hi_priority_smid;
		cb_idx = ioc->hpr_lookup[i].cb_idx;
	} else if (smid <= ioc->hba_queue_depth) {
		i = smid - ioc->internal_smid;
		cb_idx = ioc->internal_lookup[i].cb_idx;
	} else
		cb_idx = 0xFF;
	return cb_idx;
}
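
/*
 * Partitioning example (illustrative numbers, not taken from hardware):
 * the smid space is carved into three contiguous lookup tables. With,
 * say, hi_priority_smid == 1001, internal_smid == 1009 and
 * hba_queue_depth == 1013, the function above resolves:
 *
 *	smid    1 .. 1000  ->  scsi_lookup[smid - 1]
 *	smid 1001 .. 1008  ->  hpr_lookup[smid - 1001]
 *	smid 1009 .. 1013  ->  internal_lookup[smid - 1009]
 *	smid      > 1013   ->  0xFF (invalid callback index)
 */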
/**
 * _base_mask_interrupts - disable interrupts
 * @ioc: per adapter object
 *
 * Disabling ResetIRQ, Reply and Doorbell Interrupts
 *
 * Return nothing.
 */
static void
_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
	u32 him_register;

	ioc->mask_interrupts = 1;
	him_register = readl(&ioc->chip->HostInterruptMask);
	him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
	writel(him_register, &ioc->chip->HostInterruptMask);
	readl(&ioc->chip->HostInterruptMask);	/* flush the posted write */
}
/**
 * _base_unmask_interrupts - enable interrupts
 * @ioc: per adapter object
 *
 * Enabling only Reply Interrupts
 *
 * Return nothing.
 */
static void
_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
	u32 him_register;

	him_register = readl(&ioc->chip->HostInterruptMask);
	him_register &= ~MPI2_HIM_RIM;
	writel(him_register, &ioc->chip->HostInterruptMask);
	ioc->mask_interrupts = 0;
}
union reply_descriptor {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
};
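
/*
 * Layout note (illustrative only): the union lets the interrupt handler
 * read one 64-bit reply descriptor and then test its two 32-bit halves
 * separately. A processed slot is written back as ULLONG_MAX (both
 * halves all ones), and the reply loop in _base_interrupt() below exits
 * as soon as either half reads as a sentinel:
 *
 *	rd.word = le64_to_cpu(rpf->Words);
 *	if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
 *		... no more descriptors to process ...
 */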
/**
 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
 * @irq: irq number (not used)
 * @bus_id: bus identifier cookie == pointer to the adapter_reply_queue
 *
 * Return IRQ_HANDLED if processed, else IRQ_NONE.
 */
static irqreturn_t
_base_interrupt(int irq, void *bus_id)
{
	struct adapter_reply_queue *reply_q = bus_id;
	union reply_descriptor rd;
	u32 completed_cmds;
	u8 request_desript_type;
	u16 smid;
	u8 cb_idx;
	u32 reply;
	u8 msix_index = reply_q->msix_index;
	struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
	Mpi2ReplyDescriptorsUnion_t *rpf;
	u8 rc;

	if (ioc->mask_interrupts)
		return IRQ_NONE;

	if (!atomic_add_unless(&reply_q->busy, 1, 1))
		return IRQ_NONE;

	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
	request_desript_type = rpf->Default.ReplyFlags
	    & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
		atomic_dec(&reply_q->busy);
		return IRQ_NONE;
	}
  895. completed_cmds = 0;
  896. cb_idx = 0xFF;
  897. do {
  898. rd.word = le64_to_cpu(rpf->Words);
  899. if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
  900. goto out;
  901. reply = 0;
  902. smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
  903. if (request_desript_type ==
  904. MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
  905. request_desript_type ==
  906. MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
  907. request_desript_type ==
  908. MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
  909. cb_idx = _base_get_cb_idx(ioc, smid);
  910. if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
  911. (likely(mpt_callbacks[cb_idx] != NULL))) {
  912. rc = mpt_callbacks[cb_idx](ioc, smid,
  913. msix_index, 0);
  914. if (rc)
  915. mpt3sas_base_free_smid(ioc, smid);
  916. }
  917. } else if (request_desript_type ==
  918. MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
  919. reply = le32_to_cpu(
  920. rpf->AddressReply.ReplyFrameAddress);
  921. if (reply > ioc->reply_dma_max_address ||
  922. reply < ioc->reply_dma_min_address)
  923. reply = 0;
  924. if (smid) {
  925. cb_idx = _base_get_cb_idx(ioc, smid);
  926. if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
  927. (likely(mpt_callbacks[cb_idx] != NULL))) {
  928. rc = mpt_callbacks[cb_idx](ioc, smid,
  929. msix_index, reply);
  930. if (reply)
  931. _base_display_reply_info(ioc,
  932. smid, msix_index, reply);
  933. if (rc)
  934. mpt3sas_base_free_smid(ioc,
  935. smid);
  936. }
  937. } else {
  938. _base_async_event(ioc, msix_index, reply);
  939. }
  940. /* reply free queue handling */
  941. if (reply) {
  942. ioc->reply_free_host_index =
  943. (ioc->reply_free_host_index ==
  944. (ioc->reply_free_queue_depth - 1)) ?
  945. 0 : ioc->reply_free_host_index + 1;
  946. ioc->reply_free[ioc->reply_free_host_index] =
  947. cpu_to_le32(reply);
  948. writel(ioc->reply_free_host_index,
  949. &ioc->chip->ReplyFreeHostIndex);
  950. }
  951. }
  952. rpf->Words = cpu_to_le64(ULLONG_MAX);
  953. reply_q->reply_post_host_index =
  954. (reply_q->reply_post_host_index ==
  955. (ioc->reply_post_queue_depth - 1)) ? 0 :
  956. reply_q->reply_post_host_index + 1;
  957. request_desript_type =
  958. reply_q->reply_post_free[reply_q->reply_post_host_index].
  959. Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
  960. completed_cmds++;
  961. /* Update the reply post host index after continuously
  962. * processing the threshold number of Reply Descriptors.
  963. * So that FW can find enough entries to post the Reply
  964. * Descriptors in the reply descriptor post queue.
  965. */
  966. if (completed_cmds > ioc->hba_queue_depth/3) {
  967. if (ioc->combined_reply_queue) {
  968. writel(reply_q->reply_post_host_index |
  969. ((msix_index & 7) <<
  970. MPI2_RPHI_MSIX_INDEX_SHIFT),
  971. ioc->replyPostRegisterIndex[msix_index/8]);
  972. } else {
  973. writel(reply_q->reply_post_host_index |
  974. (msix_index <<
  975. MPI2_RPHI_MSIX_INDEX_SHIFT),
  976. &ioc->chip->ReplyPostHostIndex);
  977. }
  978. completed_cmds = 1;
  979. }
  980. if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
  981. goto out;
  982. if (!reply_q->reply_post_host_index)
  983. rpf = reply_q->reply_post_free;
  984. else
  985. rpf++;
  986. } while (1);
  987. out:
  988. if (!completed_cmds) {
  989. atomic_dec(&reply_q->busy);
  990. return IRQ_NONE;
  991. }
  992. if (ioc->is_warpdrive) {
  993. writel(reply_q->reply_post_host_index,
  994. ioc->reply_post_host_index[msix_index]);
  995. atomic_dec(&reply_q->busy);
  996. return IRQ_HANDLED;
  997. }
  998. /* Update Reply Post Host Index.
  999. * For those HBA's which support combined reply queue feature
  1000. * 1. Get the correct Supplemental Reply Post Host Index Register.
  1001. * i.e. (msix_index / 8)th entry from Supplemental Reply Post Host
  1002. * Index Register address bank i.e replyPostRegisterIndex[],
  1003. * 2. Then update this register with new reply host index value
  1004. * in ReplyPostIndex field and the MSIxIndex field with
  1005. * msix_index value reduced to a value between 0 and 7,
  1006. * using a modulo 8 operation. Since each Supplemental Reply Post
  1007. * Host Index Register supports 8 MSI-X vectors.
  1008. *
  1009. * For other HBA's just update the Reply Post Host Index register with
  1010. * new reply host index value in ReplyPostIndex Field and msix_index
  1011. * value in MSIxIndex field.
  1012. */
  1013. if (ioc->combined_reply_queue)
  1014. writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
  1015. MPI2_RPHI_MSIX_INDEX_SHIFT),
  1016. ioc->replyPostRegisterIndex[msix_index/8]);
  1017. else
  1018. writel(reply_q->reply_post_host_index | (msix_index <<
  1019. MPI2_RPHI_MSIX_INDEX_SHIFT),
  1020. &ioc->chip->ReplyPostHostIndex);
  1021. atomic_dec(&reply_q->busy);
  1022. return IRQ_HANDLED;
  1023. }
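
/*
 * Illustrative sketch (not part of the driver): both the reply post index
 * and the reply free index above advance with the same wrap-to-zero
 * pattern. A minimal standalone model of that arithmetic:
 *
 *	static u32 ring_next(u32 idx, u32 depth)
 *	{
 *		return (idx == depth - 1) ? 0 : idx + 1;
 *	}
 *
 * With depth == 4 the sequence is 0, 1, 2, 3, 0, ... which matches the
 * ternary expressions used on the host index fields.
 */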
/**
 * _base_is_controller_msix_enabled - does the controller support multi-reply queues
 * @ioc: per adapter object
 *
 */
static inline int
_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
{
	return (ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
}
/**
 * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
 * @ioc: per adapter object
 * Context: non-ISR context
 *
 * Called when a Task Management request has completed.
 *
 * Return nothing.
 */
void
mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
{
	struct adapter_reply_queue *reply_q;

	/* If MSIX capability is turned off
	 * then multi-queues are not enabled
	 */
	if (!_base_is_controller_msix_enabled(ioc))
		return;

	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (ioc->shost_recovery || ioc->remove_host ||
				ioc->pci_error_recovery)
			return;
		/* TMs are on msix_index == 0 */
		if (reply_q->msix_index == 0)
			continue;
		synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
	}
}
/**
 * mpt3sas_base_release_callback_handler - clear interrupt callback handler
 * @cb_idx: callback index
 *
 * Return nothing.
 */
void
mpt3sas_base_release_callback_handler(u8 cb_idx)
{
	mpt_callbacks[cb_idx] = NULL;
}
/**
 * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
 * @cb_func: callback function
 *
 * Returns the assigned callback index (cb_idx).
 */
u8
mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
{
	u8 cb_idx;

	for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
		if (mpt_callbacks[cb_idx] == NULL)
			break;

	mpt_callbacks[cb_idx] = cb_func;
	return cb_idx;
}
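
/*
 * Illustrative usage (caller-side sketch; names assumed, not from this
 * file): a sub-module registers its completion routine once at init and
 * keeps the returned index so the interrupt handler can route replies to
 * it, then releases it on unload:
 *
 *	my_cb_idx = mpt3sas_base_register_callback_handler(my_io_done);
 *	...
 *	mpt3sas_base_release_callback_handler(my_cb_idx);
 */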
/**
 * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
 *
 * Return nothing.
 */
void
mpt3sas_base_initialize_callback_handler(void)
{
	u8 cb_idx;

	for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
		mpt3sas_base_release_callback_handler(cb_idx);
}
/**
 * _base_build_zero_len_sge - build zero length sg entry
 * @ioc: per adapter object
 * @paddr: virtual address for SGE
 *
 * Create a zero length scatter gather entry to ensure the IOCs hardware has
 * something to use if the target device goes brain dead and tries
 * to send data even when none is asked for.
 *
 * Return nothing.
 */
static void
_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
{
	u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
	    MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
	    MPI2_SGE_FLAGS_SHIFT);

	ioc->base_add_sg_single(paddr, flags_length, -1);
}
/**
 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: Physical address
 *
 * Return nothing.
 */
static void
_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
{
	Mpi2SGESimple32_t *sgel = paddr;

	flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
	sgel->FlagsLength = cpu_to_le32(flags_length);
	sgel->Address = cpu_to_le32(dma_addr);
}
/**
 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: Physical address
 *
 * Return nothing.
 */
static void
_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
{
	Mpi2SGESimple64_t *sgel = paddr;

	flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
	sgel->FlagsLength = cpu_to_le32(flags_length);
	sgel->Address = cpu_to_le64(dma_addr);
}
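
/*
 * Illustrative note: in both helpers above, FlagsLength is one 32-bit
 * word with the SGE flags packed above the transfer length
 * (MPI2_SGE_FLAGS_SHIFT places the flags in the top byte). A minimal
 * model of the packing callers rely on when they OR a length into
 * pre-shifted flags:
 *
 *	u32 flags_length = (flags << MPI2_SGE_FLAGS_SHIFT) | xfer_len;
 */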
/**
 * _base_get_chain_buffer_tracker - obtain chain tracker
 * @ioc: per adapter object
 * @smid: smid associated to an IO request
 *
 * Returns chain tracker (from ioc->free_chain_list)
 */
static struct chain_tracker *
_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	struct chain_tracker *chain_req;
	unsigned long flags;

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	if (list_empty(&ioc->free_chain_list)) {
		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
		dfailprintk(ioc, pr_warn(MPT3SAS_FMT
			"chain buffers not available\n", ioc->name));
		return NULL;
	}
	chain_req = list_entry(ioc->free_chain_list.next,
	    struct chain_tracker, tracker_list);
	list_del_init(&chain_req->tracker_list);
	list_add_tail(&chain_req->tracker_list,
	    &ioc->scsi_lookup[smid - 1].chain_list);
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
	return chain_req;
}
/**
 * _base_build_sg - build generic sg
 * @ioc: per adapter object
 * @psge: virtual address for SGE
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 *
 * Return nothing.
 */
static void
_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
	size_t data_in_sz)
{
	u32 sgl_flags;

	if (!data_out_sz && !data_in_sz) {
		_base_build_zero_len_sge(ioc, psge);
		return;
	}

	if (data_out_sz && data_in_sz) {
		/* WRITE sgel first */
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_out_sz, data_out_dma);

		/* incr sgel */
		psge += ioc->sge_size;

		/* READ sgel last */
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_in_sz, data_in_dma);
	} else if (data_out_sz) /* WRITE */ {
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_out_sz, data_out_dma);
	} else if (data_in_sz) /* READ */ {
		sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
		    MPI2_SGE_FLAGS_END_OF_LIST);
		sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
		ioc->base_add_sg_single(psge, sgl_flags |
		    data_in_sz, data_in_dma);
	}
}
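
/*
 * Illustrative call (sketch; the dma handle is assumed to come from a
 * prior dma mapping): a request that only reads data_in_sz bytes from the
 * IOC passes zero for the write side, which makes _base_build_sg() emit a
 * single READ SGE:
 *
 *	_base_build_sg(ioc, psge, 0, 0, data_in_dma, data_in_sz);
 */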
/* IEEE format sgls */

/**
 * _base_build_nvme_prp - This function is called for NVMe end devices to build
 * a native SGL (NVMe PRP). The native SGL is built starting in the first PRP
 * entry of the NVMe message (PRP1). If the data buffer is small enough to be
 * described entirely using PRP1, then PRP2 is not used. If needed, PRP2 is
 * used to describe a larger data buffer. If the data buffer is too large to
 * describe using the two PRP entries inside the NVMe message, then PRP1
 * describes the first data memory segment, and PRP2 contains a pointer to a
 * PRP list located elsewhere in memory to describe the remaining data memory
 * segments. The PRP list will be contiguous.
 *
 * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP
 * consists of a list of PRP entries to describe a number of noncontiguous
 * physical memory segments as a single memory buffer, just as a SGL does. Note
 * however, that this function is only used by the IOCTL call, so the memory
 * given will be guaranteed to be contiguous. There is no need to translate
 * non-contiguous SGL into a PRP in this case. All PRPs will describe
 * contiguous space that is one page size each.
 *
 * Each NVMe message contains two PRP entries. The first (PRP1) either contains
 * a PRP list pointer or a PRP element, depending upon the command. PRP2
 * contains the second PRP element if the memory being described fits within 2
 * PRP entries, or a PRP list pointer if the PRP spans more than two entries.
 *
 * A PRP list pointer contains the address of a PRP list, structured as a linear
 * array of PRP entries. Each PRP entry in this list describes a segment of
 * physical memory.
 *
 * Each 64-bit PRP entry comprises an address and an offset field. The address
 * always points at the beginning of a 4KB physical memory page, and the offset
 * describes where within that 4KB page the memory segment begins. Only the
 * first element in a PRP list may contain a non-zero offset, implying that all
 * memory segments following the first begin at the start of a 4KB page.
 *
 * Each PRP element normally describes 4KB of physical memory, with exceptions
 * for the first and last elements in the list. If the memory being described
 * by the list begins at a non-zero offset within the first 4KB page, then the
 * first PRP element will contain a non-zero offset indicating where the region
 * begins within the 4KB page. The last memory segment may end before the end
 * of the 4KB segment, depending upon the overall size of the memory being
 * described by the PRP list.
 *
 * Since PRP entries lack any indication of size, the overall data buffer length
 * is used to determine where the end of the data memory buffer is located, and
 * how many PRP entries are required to describe it.
 *
 * @ioc: per adapter object
 * @smid: system request message index for getting associated SGL
 * @nvme_encap_request: the NVMe request msg frame pointer
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 *
 * Returns nothing.
 */
static void
_base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
	size_t data_in_sz)
{
	int prp_size = NVME_PRP_SIZE;
	__le64 *prp_entry, *prp1_entry, *prp2_entry;
	__le64 *prp_page;
	dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
	u32 offset, entry_len;
	u32 page_mask_result, page_mask;
	size_t length;

	/*
	 * Not all commands require a data transfer. If no data, just return
	 * without constructing any PRP.
	 */
	if (!data_in_sz && !data_out_sz)
		return;
	/*
	 * Set pointers to PRP1 and PRP2, which are in the NVMe command.
	 * PRP1 is located at a 24 byte offset from the start of the NVMe
	 * command. Then set the current PRP entry pointer to PRP1.
	 */
	prp1_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
	    NVME_CMD_PRP1_OFFSET);
	prp2_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
	    NVME_CMD_PRP2_OFFSET);
	prp_entry = prp1_entry;
	/*
	 * For the PRP entries, use the specially allocated buffer of
	 * contiguous memory.
	 */
	prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
	prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);

	/*
	 * Check if we are within 1 entry of a page boundary we don't
	 * want our first entry to be a PRP List entry.
	 */
	page_mask = ioc->page_size - 1;
	page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
	if (!page_mask_result) {
		/* Bump up to next page boundary. */
		prp_page = (__le64 *)((u8 *)prp_page + prp_size);
		prp_page_dma = prp_page_dma + prp_size;
	}

	/*
	 * Set PRP physical pointer, which initially points to the current PRP
	 * DMA memory page.
	 */
	prp_entry_dma = prp_page_dma;

	/* Get physical address and length of the data buffer. */
	if (data_in_sz) {
		dma_addr = data_in_dma;
		length = data_in_sz;
	} else {
		dma_addr = data_out_dma;
		length = data_out_sz;
	}

	/* Loop while the length is not zero. */
	while (length) {
		/*
		 * Check if we need to put a list pointer here if we are at
		 * page boundary - prp_size (8 bytes).
		 */
		page_mask_result = (prp_entry_dma + prp_size) & page_mask;
		if (!page_mask_result) {
			/*
			 * This is the last entry in a PRP List, so we need to
			 * put a PRP list pointer here. What this does is:
			 *   - bump the current memory pointer to the next
			 *     address, which will be the next full page.
			 *   - set the PRP Entry to point to that page. This
			 *     is now the PRP List pointer.
			 *   - bump the PRP Entry pointer the start of the
			 *     next page. Since all of this PRP memory is
			 *     contiguous, no need to get a new page - it's
			 *     just the next address.
			 */
			prp_entry_dma++;
			*prp_entry = cpu_to_le64(prp_entry_dma);
			prp_entry++;
		}

		/* Need to handle if entry will be part of a page. */
		offset = dma_addr & page_mask;
		entry_len = ioc->page_size - offset;

		if (prp_entry == prp1_entry) {
			/*
			 * Must fill in the first PRP pointer (PRP1) before
			 * moving on.
			 */
			*prp1_entry = cpu_to_le64(dma_addr);

			/*
			 * Now point to the second PRP entry within the
			 * command (PRP2).
			 */
			prp_entry = prp2_entry;
		} else if (prp_entry == prp2_entry) {
			/*
			 * Should the PRP2 entry be a PRP List pointer or just
			 * a regular PRP pointer? If there is more than one
			 * more page of data, must use a PRP List pointer.
			 */
			if (length > ioc->page_size) {
				/*
				 * PRP2 will contain a PRP List pointer because
				 * more PRP's are needed with this command. The
				 * list will start at the beginning of the
				 * contiguous buffer.
				 */
				*prp2_entry = cpu_to_le64(prp_entry_dma);

				/*
				 * The next PRP Entry will be the start of the
				 * first PRP List.
				 */
				prp_entry = prp_page;
			} else {
				/*
				 * After this, the PRP Entries are complete.
				 * This command uses 2 PRP's and no PRP list.
				 */
				*prp2_entry = cpu_to_le64(dma_addr);
			}
		} else {
			/*
			 * Put entry in list and bump the addresses.
			 *
			 * After PRP1 and PRP2 are filled in, this will fill in
			 * all remaining PRP entries in a PRP List, one per
			 * each time through the loop.
			 */
			*prp_entry = cpu_to_le64(dma_addr);
			prp_entry++;
			prp_entry_dma++;
		}

		/*
		 * Bump the phys address of the command's data buffer by the
		 * entry_len.
		 */
		dma_addr += entry_len;

		/* Decrement length accounting for last partial page. */
		if (entry_len > length)
			length = 0;
		else
			length -= entry_len;
	}
}
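
/*
 * Worked example (page size assumed to be 4KB): a 10240-byte buffer whose
 * dma_addr starts at offset 0x600 within its first page needs three PRP
 * entries:
 *
 *	entry 1: 4096 - 0x600 = 2560 bytes (non-zero offset allowed here)
 *	entry 2: 4096 bytes (page aligned)
 *	entry 3: 3584 bytes (remainder; may end mid-page)
 *
 * which is exactly the entry_len/length bookkeeping in the loop above.
 */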
/**
 * base_make_prp_nvme - prepare PRPs (Physical Region Pages), the SGL form
 * specific to NVMe drives only
 *
 * @ioc: per adapter object
 * @scmd: SCSI command from the mid-layer
 * @mpi_request: mpi request
 * @smid: msg Index
 * @sge_count: scatter gather element count.
 *
 * Return nothing.
 */
static void
base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
	struct scsi_cmnd *scmd,
	Mpi25SCSIIORequest_t *mpi_request,
	u16 smid, int sge_count)
{
	int sge_len, num_prp_in_chain = 0;
	Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
	__le64 *curr_buff;
	dma_addr_t msg_dma, sge_addr, offset;
	u32 page_mask, page_mask_result;
	struct scatterlist *sg_scmd;
	u32 first_prp_len;
	int data_len = scsi_bufflen(scmd);
	u32 nvme_pg_size;

	nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
	/*
	 * NVMe has a very convoluted PRP format. One PRP is required for
	 * each page or partial page. The driver needs to split up OS
	 * sg_list entries if they are longer than one page or cross a page
	 * boundary. The driver also has to insert a PRP list pointer entry
	 * as the last entry in each physical page of the PRP list.
	 *
	 * NOTE: The first PRP "entry" is actually placed in the first
	 * SGL entry in the main message as IEEE 64 format. The 2nd
	 * entry in the main message is the chain element, and the rest
	 * of the PRP entries are built in the contiguous pcie buffer.
	 */
	page_mask = nvme_pg_size - 1;

	/*
	 * Native SGL is needed.
	 * Put a chain element in main message frame that points to the first
	 * chain buffer.
	 *
	 * NOTE: The ChainOffset field must be 0 when using a chain pointer to
	 *       a native SGL.
	 */

	/* Set main message chain element pointer */
	main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
	/*
	 * For NVMe the chain element needs to be the 2nd SG entry in the main
	 * message.
	 */
	main_chain_element = (Mpi25IeeeSgeChain64_t *)
		((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));

	/*
	 * For the PRP entries, use the specially allocated buffer of
	 * contiguous memory. Normal chain buffers can't be used
	 * because each chain buffer would need to be the size of an OS
	 * page (4k).
	 */
	curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
	msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);

	main_chain_element->Address = cpu_to_le64(msg_dma);
	main_chain_element->NextChainOffset = 0;
	main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
			MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
			MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;

	/* Build first PRP; the SGE need not be page aligned. */
	ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
	sg_scmd = scsi_sglist(scmd);
	sge_addr = sg_dma_address(sg_scmd);
	sge_len = sg_dma_len(sg_scmd);

	offset = sge_addr & page_mask;
	first_prp_len = nvme_pg_size - offset;

	ptr_first_sgl->Address = cpu_to_le64(sge_addr);
	ptr_first_sgl->Length = cpu_to_le32(first_prp_len);

	data_len -= first_prp_len;

	if (sge_len > first_prp_len) {
		sge_addr += first_prp_len;
		sge_len -= first_prp_len;
	} else if (data_len && (sge_len == first_prp_len)) {
		sg_scmd = sg_next(sg_scmd);
		sge_addr = sg_dma_address(sg_scmd);
		sge_len = sg_dma_len(sg_scmd);
	}

	for (;;) {
		offset = sge_addr & page_mask;

		/* Put PRP pointer due to page boundary */
		page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
		if (unlikely(!page_mask_result)) {
			scmd_printk(KERN_NOTICE,
				scmd, "page boundary curr_buff: 0x%p\n",
				curr_buff);
			msg_dma += 8;
			*curr_buff = cpu_to_le64(msg_dma);
			curr_buff++;
			num_prp_in_chain++;
		}

		*curr_buff = cpu_to_le64(sge_addr);
		curr_buff++;
		msg_dma += 8;
		num_prp_in_chain++;

		sge_addr += nvme_pg_size;
		sge_len -= nvme_pg_size;
		data_len -= nvme_pg_size;

		if (data_len <= 0)
			break;

		if (sge_len > 0)
			continue;

		sg_scmd = sg_next(sg_scmd);
		sge_addr = sg_dma_address(sg_scmd);
		sge_len = sg_dma_len(sg_scmd);
	}

	main_chain_element->Length =
		cpu_to_le32(num_prp_in_chain * sizeof(u64));
	return;
}
static bool
base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
	struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
{
	u32 data_length = 0;
	struct scatterlist *sg_scmd;
	bool build_prp = true;

	data_length = scsi_bufflen(scmd);
	sg_scmd = scsi_sglist(scmd);

	/* If the data length is <= 16K and the number of SGE entries is <= 2,
	 * build an IEEE SGL instead of a PRP.
	 */
	if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
		build_prp = false;

	return build_prp;
}
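
/*
 * Illustrative note (assuming NVME_PRP_PAGE_SIZE is 4KB): the threshold
 * above works out to 16KB, so small transfers that also fit in at most two
 * scatter gather entries stay on the cheaper IEEE SGL path instead of
 * building a PRP.
 */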
/**
 * _base_check_pcie_native_sgl - This function is called for PCIe end devices to
 * determine if the driver needs to build a native SGL. If so, that native
 * SGL is built in the special contiguous buffers allocated especially for
 * PCIe SGL creation. If the driver will not build a native SGL, return
 * TRUE and a normal IEEE SGL will be built. Currently this routine
 * supports NVMe.
 * @ioc: per adapter object
 * @mpi_request: mf request pointer
 * @smid: system request message index
 * @scmd: scsi command
 * @pcie_device: points to the PCIe device's info
 *
 * Returns 0 if native SGL was built, 1 if no SGL was built
 */
static int
_base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
	Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
	struct _pcie_device *pcie_device)
{
	struct scatterlist *sg_scmd;
	int sges_left;

	/* Get the SG list pointer and info. */
	sg_scmd = scsi_sglist(scmd);
	sges_left = scsi_dma_map(scmd);
	if (sges_left < 0) {
		sdev_printk(KERN_ERR, scmd->device,
			"scsi_dma_map failed: request for %d bytes!\n",
			scsi_bufflen(scmd));
		return 1;
	}

	/* Check if we need to build a native SG list. */
	if (base_is_prp_possible(ioc, pcie_device,
	    scmd, sges_left) == 0) {
		/* A PRP is not possible; fall back to an IEEE SGL. */
		goto out;
	}

	/*
	 * Build native NVMe PRP.
	 */
	base_make_prp_nvme(ioc, scmd, mpi_request,
			smid, sges_left);

	return 0;
out:
	scsi_dma_unmap(scmd);
	return 1;
}
/**
 * _base_add_sg_single_ieee - add sg element for IEEE format
 * @paddr: virtual address for SGE
 * @flags: SGE flags
 * @chain_offset: number of 128 byte elements from start of segment
 * @length: data transfer length
 * @dma_addr: Physical address
 *
 * Return nothing.
 */
static void
_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
	dma_addr_t dma_addr)
{
	Mpi25IeeeSgeChain64_t *sgel = paddr;

	sgel->Flags = flags;
	sgel->NextChainOffset = chain_offset;
	sgel->Length = cpu_to_le32(length);
	sgel->Address = cpu_to_le64(dma_addr);
}
/**
 * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
 * @ioc: per adapter object
 * @paddr: virtual address for SGE
 *
 * Create a zero length scatter gather entry to ensure the IOCs hardware has
 * something to use if the target device goes brain dead and tries
 * to send data even when none is asked for.
 *
 * Return nothing.
 */
static void
_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
{
	u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
	    MPI25_IEEE_SGE_FLAGS_END_OF_LIST);

	_base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
}
/**
 * _base_build_sg_scmd - main sg creation routine
 * @ioc: per adapter object
 * @scmd: scsi command
 * @smid: system request message index
 * @unused: unused pcie_device pointer
 * Context: none.
 *
 * The main routine that builds scatter gather table from a given
 * scsi request sent via the .queuecommand main handler.
 *
 * Returns 0 success, anything else error
 */
static int
_base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
	struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused)
{
	Mpi2SCSIIORequest_t *mpi_request;
	dma_addr_t chain_dma;
	struct scatterlist *sg_scmd;
	void *sg_local, *chain;
	u32 chain_offset;
	u32 chain_length;
	u32 chain_flags;
	int sges_left;
	u32 sges_in_segment;
	u32 sgl_flags;
	u32 sgl_flags_last_element;
	u32 sgl_flags_end_buffer;
	struct chain_tracker *chain_req;

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

	/* init scatter gather flags */
	sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
	if (scmd->sc_data_direction == DMA_TO_DEVICE)
		sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
	sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
	    << MPI2_SGE_FLAGS_SHIFT;
	sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
	    << MPI2_SGE_FLAGS_SHIFT;
	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;

	sg_scmd = scsi_sglist(scmd);
	sges_left = scsi_dma_map(scmd);
	if (sges_left < 0) {
		sdev_printk(KERN_ERR, scmd->device,
			"pci_map_sg failed: request for %d bytes!\n",
			scsi_bufflen(scmd));
		return -ENOMEM;
	}

	sg_local = &mpi_request->SGL;
	sges_in_segment = ioc->max_sges_in_main_message;
	if (sges_left <= sges_in_segment)
		goto fill_in_last_segment;

	mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
	    (sges_in_segment * ioc->sge_size))/4;

	/* fill in main message segment when there is a chain following */
	while (sges_in_segment) {
		if (sges_in_segment == 1)
			ioc->base_add_sg_single(sg_local,
			    sgl_flags_last_element | sg_dma_len(sg_scmd),
			    sg_dma_address(sg_scmd));
		else
			ioc->base_add_sg_single(sg_local, sgl_flags |
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += ioc->sge_size;
		sges_left--;
		sges_in_segment--;
	}

	/* initializing the chain flags and pointers */
	chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
	chain_req = _base_get_chain_buffer_tracker(ioc, smid);
	if (!chain_req)
		return -1;
	chain = chain_req->chain_buffer;
	chain_dma = chain_req->chain_buffer_dma;
	do {
		sges_in_segment = (sges_left <=
		    ioc->max_sges_in_chain_message) ? sges_left :
		    ioc->max_sges_in_chain_message;
		chain_offset = (sges_left == sges_in_segment) ?
		    0 : (sges_in_segment * ioc->sge_size)/4;
		chain_length = sges_in_segment * ioc->sge_size;
		if (chain_offset) {
			chain_offset = chain_offset <<
			    MPI2_SGE_CHAIN_OFFSET_SHIFT;
			chain_length += ioc->sge_size;
		}
		ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
		    chain_length, chain_dma);
		sg_local = chain;
		if (!chain_offset)
			goto fill_in_last_segment;

		/* fill in chain segments */
		while (sges_in_segment) {
			if (sges_in_segment == 1)
				ioc->base_add_sg_single(sg_local,
				    sgl_flags_last_element |
				    sg_dma_len(sg_scmd),
				    sg_dma_address(sg_scmd));
			else
				ioc->base_add_sg_single(sg_local, sgl_flags |
				    sg_dma_len(sg_scmd),
				    sg_dma_address(sg_scmd));
			sg_scmd = sg_next(sg_scmd);
			sg_local += ioc->sge_size;
			sges_left--;
			sges_in_segment--;
		}

		chain_req = _base_get_chain_buffer_tracker(ioc, smid);
		if (!chain_req)
			return -1;
		chain = chain_req->chain_buffer;
		chain_dma = chain_req->chain_buffer_dma;
	} while (1);

 fill_in_last_segment:

	/* fill the last segment */
	while (sges_left) {
		if (sges_left == 1)
			ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		else
			ioc->base_add_sg_single(sg_local, sgl_flags |
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += ioc->sge_size;
		sges_left--;
	}

	return 0;
}
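
/*
 * Illustrative arithmetic (offsets assumed, not taken from the MPI
 * headers): ChainOffset in this MPI2-format routine is expressed in
 * 32-bit words. If the SGL started 96 bytes into the request and ten
 * 12-byte SGEs preceded the chain element, the value written would be
 *
 *	(96 + 10 * 12) / 4 == 54
 */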
/**
 * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
 * @ioc: per adapter object
 * @scmd: scsi command
 * @smid: system request message index
 * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be
 *		constructed on need.
 * Context: none.
 *
 * The main routine that builds scatter gather table from a given
 * scsi request sent via the .queuecommand main handler.
 *
 * Returns 0 success, anything else error
 */
static int
_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
	struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device)
{
	Mpi25SCSIIORequest_t *mpi_request;
	dma_addr_t chain_dma;
	struct scatterlist *sg_scmd;
	void *sg_local, *chain;
	u32 chain_offset;
	u32 chain_length;
	int sges_left;
	u32 sges_in_segment;
	u8 simple_sgl_flags;
	u8 simple_sgl_flags_last;
	u8 chain_sgl_flags;
	struct chain_tracker *chain_req;

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

	/* init scatter gather flags */
	simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
	simple_sgl_flags_last = simple_sgl_flags |
	    MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
	chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;

	/* Check if we need to build a native SG list. */
	if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request,
			smid, scmd, pcie_device) == 0)) {
		/* We built a native SG list, just return. */
		return 0;
	}

	sg_scmd = scsi_sglist(scmd);
	sges_left = scsi_dma_map(scmd);
	if (sges_left < 0) {
		sdev_printk(KERN_ERR, scmd->device,
			"pci_map_sg failed: request for %d bytes!\n",
			scsi_bufflen(scmd));
		return -ENOMEM;
	}

	sg_local = &mpi_request->SGL;
	sges_in_segment = (ioc->request_sz -
	    offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
	if (sges_left <= sges_in_segment)
		goto fill_in_last_segment;

	mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
	    (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee);

	/* fill in main message segment when there is a chain following */
	while (sges_in_segment > 1) {
		_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
		    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += ioc->sge_size_ieee;
		sges_left--;
		sges_in_segment--;
	}

	/* initializing the pointers */
	chain_req = _base_get_chain_buffer_tracker(ioc, smid);
	if (!chain_req)
		return -1;
	chain = chain_req->chain_buffer;
	chain_dma = chain_req->chain_buffer_dma;
	do {
		sges_in_segment = (sges_left <=
		    ioc->max_sges_in_chain_message) ? sges_left :
		    ioc->max_sges_in_chain_message;
		chain_offset = (sges_left == sges_in_segment) ?
		    0 : sges_in_segment;
		chain_length = sges_in_segment * ioc->sge_size_ieee;
		if (chain_offset)
			chain_length += ioc->sge_size_ieee;
		_base_add_sg_single_ieee(sg_local, chain_sgl_flags,
		    chain_offset, chain_length, chain_dma);

		sg_local = chain;
		if (!chain_offset)
			goto fill_in_last_segment;

		/* fill in chain segments */
		while (sges_in_segment) {
			_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
			sg_scmd = sg_next(sg_scmd);
			sg_local += ioc->sge_size_ieee;
			sges_left--;
			sges_in_segment--;
		}

		chain_req = _base_get_chain_buffer_tracker(ioc, smid);
		if (!chain_req)
			return -1;
		chain = chain_req->chain_buffer;
		chain_dma = chain_req->chain_buffer_dma;
	} while (1);

 fill_in_last_segment:

	/* fill the last segment */
	while (sges_left > 0) {
		if (sges_left == 1)
			_base_add_sg_single_ieee(sg_local,
			    simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
			    sg_dma_address(sg_scmd));
		else
			_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += ioc->sge_size_ieee;
		sges_left--;
	}

	return 0;
}
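
/*
 * Note the unit change from _base_build_sg_scmd() above: here
 * ChainOffset/chain_offset count whole IEEE SGEs (ioc->sge_size_ieee
 * bytes each) rather than 4-byte words. For example (sizes assumed):
 * with 16-byte IEEE SGEs and the SGL starting 96 bytes into the request,
 * chaining after 7 simple elements gives
 *
 *	ChainOffset = (7 - 1) + 96 / 16 == 12
 */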
/**
 * _base_build_sg_ieee - build generic sg for IEEE format
 * @ioc: per adapter object
 * @psge: virtual address for SGE
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 *
 * Return nothing.
 */
static void
_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
	size_t data_in_sz)
{
	u8 sgl_flags;

	if (!data_out_sz && !data_in_sz) {
		_base_build_zero_len_sge_ieee(ioc, psge);
		return;
	}

	if (data_out_sz && data_in_sz) {
		/* WRITE sgel first */
		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
		    data_out_dma);

		/* incr sgel */
		psge += ioc->sge_size_ieee;

		/* READ sgel last */
		sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
		    data_in_dma);
	} else if (data_out_sz) /* WRITE */ {
		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
		    data_out_dma);
	} else if (data_in_sz) /* READ */ {
		sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
		_base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
		    data_in_dma);
	}
}
#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
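
/*
 * Illustrative arithmetic: convert_to_kb() turns a page count into
 * kilobytes. With 4KB pages (PAGE_SHIFT == 12) the shift is 2, so
 * convert_to_kb(1024) == 4096 kB.
 */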
/**
 * _base_config_dma_addressing - set dma addressing
 * @ioc: per adapter object
 * @pdev: PCI device struct
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
	struct sysinfo s;
	u64 consistent_dma_mask;

	if (ioc->dma_mask)
		consistent_dma_mask = DMA_BIT_MASK(64);
	else
		consistent_dma_mask = DMA_BIT_MASK(32);

	if (sizeof(dma_addr_t) > 4) {
		const uint64_t required_mask =
		    dma_get_required_mask(&pdev->dev);
		if ((required_mask > DMA_BIT_MASK(32)) &&
		    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
		    !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
			ioc->base_add_sg_single = &_base_add_sg_single_64;
			ioc->sge_size = sizeof(Mpi2SGESimple64_t);
			ioc->dma_mask = 64;
			goto out;
		}
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
	    && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
		ioc->base_add_sg_single = &_base_add_sg_single_32;
		ioc->sge_size = sizeof(Mpi2SGESimple32_t);
		ioc->dma_mask = 32;
	} else
		return -ENODEV;

 out:
	si_meminfo(&s);
	pr_info(MPT3SAS_FMT
		"%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
		ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));

	return 0;
}
static int
_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
	struct pci_dev *pdev)
{
	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
			return -ENODEV;
	}
	return 0;
}
/**
 * _base_check_enable_msix - checks MSIX capability.
 * @ioc: per adapter object
 *
 * Check to see if card is capable of MSIX, and set number
 * of available msix vectors
 */
static int
_base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
{
	int base;
	u16 message_control;

	/* Check whether the controller is a SAS2008 B0;
	 * if so, use IO-APIC instead of MSIX.
	 */
	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
	    ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
		return -EINVAL;
	}

	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
	if (!base) {
		dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n",
			ioc->name));
		return -EINVAL;
	}

	/* get msix vector count */
	/* NUMA_IO not supported for older controllers */
	if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
	    ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
		ioc->msix_vector_count = 1;
	else {
		/* the MSI-X Table Size field encodes (vectors - 1) */
		pci_read_config_word(ioc->pdev, base + 2, &message_control);
		ioc->msix_vector_count = (message_control & 0x3FF) + 1;
	}
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"msix is supported, vector_count(%d)\n",
		ioc->name, ioc->msix_vector_count));
	return 0;
}
/**
 * _base_free_irq - free irq
 * @ioc: per adapter object
 *
 * Freeing respective reply_queue from the list.
 */
static void
_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
{
	struct adapter_reply_queue *reply_q, *next;

	if (list_empty(&ioc->reply_queue_list))
		return;

	list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
		list_del(&reply_q->list);
		free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
			 reply_q);
		kfree(reply_q);
	}
}
/**
 * _base_request_irq - request irq
 * @ioc: per adapter object
 * @index: msix index into vector table
 *
 * Inserting respective reply_queue into the list.
 */
static int
_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
{
	struct pci_dev *pdev = ioc->pdev;
	struct adapter_reply_queue *reply_q;
	int r;

	reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
	if (!reply_q) {
		pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
		    ioc->name, (int)sizeof(struct adapter_reply_queue));
		return -ENOMEM;
	}
	reply_q->ioc = ioc;
	reply_q->msix_index = index;

	atomic_set(&reply_q->busy, 0);
	if (ioc->msix_enable)
		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
		    ioc->driver_name, ioc->id, index);
	else
		snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
		    ioc->driver_name, ioc->id);
	r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
			IRQF_SHARED, reply_q->name, reply_q);
	if (r) {
		pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
		       reply_q->name, pci_irq_vector(pdev, index));
		kfree(reply_q);
		return -EBUSY;
	}

	INIT_LIST_HEAD(&reply_q->list);
	list_add_tail(&reply_q->list, &ioc->reply_queue_list);
	return 0;
}
/**
 * _base_assign_reply_queues - assigning msix index for each cpu
 * @ioc: per adapter object
 *
 * The enduser would need to set the affinity via /proc/irq/#/smp_affinity
 *
 * It would be nice if we could call irq_set_affinity, however it is not
 * an exported symbol
 */
static void
_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned int cpu, nr_cpus, nr_msix, index = 0;
	struct adapter_reply_queue *reply_q;

	if (!_base_is_controller_msix_enabled(ioc))
		return;

	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);

	nr_cpus = num_online_cpus();
	nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
					       ioc->facts.MaxMSIxVectors);
	if (!nr_msix)
		return;

	if (smp_affinity_enable) {
		list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
			const cpumask_t *mask = pci_irq_get_affinity(ioc->pdev,
							reply_q->msix_index);
			if (!mask) {
				pr_warn(MPT3SAS_FMT "no affinity for msi %x\n",
					ioc->name, reply_q->msix_index);
				continue;
			}

			for_each_cpu(cpu, mask)
				ioc->cpu_msix_table[cpu] = reply_q->msix_index;
		}
		return;
	}
	cpu = cpumask_first(cpu_online_mask);

	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		unsigned int i, group = nr_cpus / nr_msix;

		if (cpu >= nr_cpus)
			break;

		if (index < nr_cpus % nr_msix)
			group++;

		for (i = 0 ; i < group ; i++) {
			ioc->cpu_msix_table[cpu] = reply_q->msix_index;
			cpu = cpumask_next(cpu, cpu_online_mask);
		}
		index++;
	}
}
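
/*
 * Worked example of the fallback distribution above (counts assumed):
 * with nr_cpus == 6 and nr_msix == 4, nr_cpus / nr_msix == 1 and
 * nr_cpus % nr_msix == 2, so the first two reply queues each take two
 * CPUs and the last two take one:
 *
 *	msix 0 -> cpu 0, 1
 *	msix 1 -> cpu 2, 3
 *	msix 2 -> cpu 4
 *	msix 3 -> cpu 5
 */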
/**
 * _base_disable_msix - disables msix
 * @ioc: per adapter object
 *
 */
static void
_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
{
	if (!ioc->msix_enable)
		return;
	pci_disable_msix(ioc->pdev);
	ioc->msix_enable = 0;
}
/**
 * _base_enable_msix - enables msix, fall back to io_apic
 * @ioc: per adapter object
 *
 */
static int
_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
{
	int r;
	int i, local_max_msix_vectors;
	u8 try_msix = 0;
	unsigned int irq_flags = PCI_IRQ_MSIX;

	if (msix_disable == -1 || msix_disable == 0)
		try_msix = 1;

	if (!try_msix)
		goto try_ioapic;

	if (_base_check_enable_msix(ioc) != 0)
		goto try_ioapic;

	ioc->reply_queue_count = min_t(int, ioc->cpu_count,
		ioc->msix_vector_count);

	printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores"
	  ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
	  ioc->cpu_count, max_msix_vectors);

	if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
		local_max_msix_vectors = (reset_devices) ? 1 : 8;
	else
		local_max_msix_vectors = max_msix_vectors;

	if (local_max_msix_vectors > 0)
		ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
			ioc->reply_queue_count);
	else if (local_max_msix_vectors == 0)
		goto try_ioapic;

	if (ioc->msix_vector_count < ioc->cpu_count)
		smp_affinity_enable = 0;

	if (smp_affinity_enable)
		irq_flags |= PCI_IRQ_AFFINITY;

	r = pci_alloc_irq_vectors(ioc->pdev, 1, ioc->reply_queue_count,
				  irq_flags);
	if (r < 0) {
		dfailprintk(ioc, pr_info(MPT3SAS_FMT
			"pci_alloc_irq_vectors failed (r=%d) !!!\n",
			ioc->name, r));
		goto try_ioapic;
	}

	ioc->msix_enable = 1;
	ioc->reply_queue_count = r;
	for (i = 0; i < ioc->reply_queue_count; i++) {
		r = _base_request_irq(ioc, i);
		if (r) {
			_base_free_irq(ioc);
			_base_disable_msix(ioc);
			goto try_ioapic;
		}
	}

	return 0;

/* fall back to io_apic interrupt routing */
 try_ioapic:

	ioc->reply_queue_count = 1;
	r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
	if (r < 0) {
		dfailprintk(ioc, pr_info(MPT3SAS_FMT
			"pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
			ioc->name, r));
	} else
		r = _base_request_irq(ioc, 0);

	return r;
}
/**
 * mpt3sas_base_unmap_resources - free controller resources
 * @ioc: per adapter object
 */
static void
mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
{
	struct pci_dev *pdev = ioc->pdev;

	dexitprintk(ioc, printk(MPT3SAS_FMT "%s\n",
		ioc->name, __func__));

	_base_free_irq(ioc);
	_base_disable_msix(ioc);

	if (ioc->combined_reply_queue) {
		kfree(ioc->replyPostRegisterIndex);
		ioc->replyPostRegisterIndex = NULL;
	}

	if (ioc->chip_phys) {
		iounmap(ioc->chip);
		ioc->chip_phys = 0;
	}

	if (pci_is_enabled(pdev)) {
		pci_release_selected_regions(ioc->pdev, ioc->bars);
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
	}
}
  2271. /**
  2272. * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
  2273. * @ioc: per adapter object
  2274. *
  2275. * Returns 0 for success, non-zero for failure.
  2276. */
  2277. int
  2278. mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
  2279. {
  2280. struct pci_dev *pdev = ioc->pdev;
  2281. u32 memap_sz;
  2282. u32 pio_sz;
  2283. int i, r = 0;
  2284. u64 pio_chip = 0;
  2285. u64 chip_phys = 0;
  2286. struct adapter_reply_queue *reply_q;
  2287. dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
  2288. ioc->name, __func__));
  2289. ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
  2290. if (pci_enable_device_mem(pdev)) {
  2291. pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
  2292. ioc->name);
  2293. ioc->bars = 0;
  2294. return -ENODEV;
  2295. }
  2296. if (pci_request_selected_regions(pdev, ioc->bars,
  2297. ioc->driver_name)) {
  2298. pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
  2299. ioc->name);
  2300. ioc->bars = 0;
  2301. r = -ENODEV;
  2302. goto out_fail;
  2303. }
  2304. /* AER (Advanced Error Reporting) hooks */
  2305. pci_enable_pcie_error_reporting(pdev);
  2306. pci_set_master(pdev);
  2307. if (_base_config_dma_addressing(ioc, pdev) != 0) {
  2308. pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
  2309. ioc->name, pci_name(pdev));
  2310. r = -ENODEV;
  2311. goto out_fail;
  2312. }
  2313. for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
  2314. (!memap_sz || !pio_sz); i++) {
  2315. if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
  2316. if (pio_sz)
  2317. continue;
  2318. pio_chip = (u64)pci_resource_start(pdev, i);
  2319. pio_sz = pci_resource_len(pdev, i);
  2320. } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
  2321. if (memap_sz)
  2322. continue;
  2323. ioc->chip_phys = pci_resource_start(pdev, i);
  2324. chip_phys = (u64)ioc->chip_phys;
  2325. memap_sz = pci_resource_len(pdev, i);
  2326. ioc->chip = ioremap(ioc->chip_phys, memap_sz);
  2327. }
  2328. }
  2329. if (ioc->chip == NULL) {
  2330. pr_err(MPT3SAS_FMT "unable to map adapter memory! "
  2331. " or resource not found\n", ioc->name);
  2332. r = -EINVAL;
  2333. goto out_fail;
  2334. }
  2335. _base_mask_interrupts(ioc);
  2336. r = _base_get_ioc_facts(ioc);
  2337. if (r)
  2338. goto out_fail;
  2339. if (!ioc->rdpq_array_enable_assigned) {
  2340. ioc->rdpq_array_enable = ioc->rdpq_array_capable;
  2341. ioc->rdpq_array_enable_assigned = 1;
  2342. }
  2343. r = _base_enable_msix(ioc);
  2344. if (r)
  2345. goto out_fail;
  2346. /* Use the Combined reply queue feature only for SAS3 C0 & higher
  2347. * revision HBAs and also only when reply queue count is greater than 8
  2348. */
  2349. if (ioc->combined_reply_queue && ioc->reply_queue_count > 8) {
  2350. /* Determine the Supplemental Reply Post Host Index Registers
  2351. * Addresse. Supplemental Reply Post Host Index Registers
  2352. * starts at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and
  2353. * each register is at offset bytes of
  2354. * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET from previous one.
  2355. */
  2356. ioc->replyPostRegisterIndex = kcalloc(
  2357. ioc->combined_reply_index_count,
  2358. sizeof(resource_size_t *), GFP_KERNEL);
  2359. if (!ioc->replyPostRegisterIndex) {
  2360. dfailprintk(ioc, printk(MPT3SAS_FMT
  2361. "allocation for reply Post Register Index failed!!!\n",
  2362. ioc->name));
  2363. r = -ENOMEM;
  2364. goto out_fail;
  2365. }
  2366. for (i = 0; i < ioc->combined_reply_index_count; i++) {
  2367. ioc->replyPostRegisterIndex[i] = (resource_size_t *)
  2368. ((u8 *)&ioc->chip->Doorbell +
  2369. MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
  2370. (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
  2371. }
  2372. } else
  2373. ioc->combined_reply_queue = 0;
  2374. if (ioc->is_warpdrive) {
  2375. ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
  2376. &ioc->chip->ReplyPostHostIndex;
  2377. for (i = 1; i < ioc->cpu_msix_table_sz; i++)
  2378. ioc->reply_post_host_index[i] =
  2379. (resource_size_t __iomem *)
  2380. ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
  2381. * 4)));
  2382. }
  2383. list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
  2384. pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
  2385. reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
  2386. "IO-APIC enabled"),
  2387. pci_irq_vector(ioc->pdev, reply_q->msix_index));
  2388. pr_info(MPT3SAS_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
  2389. ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
  2390. pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
  2391. ioc->name, (unsigned long long)pio_chip, pio_sz);
  2392. /* Save PCI configuration state for recovery from PCI AER/EEH errors */
  2393. pci_save_state(pdev);
  2394. return 0;
  2395. out_fail:
  2396. mpt3sas_base_unmap_resources(ioc);
  2397. return r;
  2398. }
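/*
 * Layout sketch for the supplemental reply post host index registers set
 * up above (the offsets are the MPI defined constants; a count of 16 is
 * the usual combined_reply_index_count and is an assumption here):
 * register i resolves to
 *
 *     (u8 *)&ioc->chip->Doorbell + MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET
 *         + i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET
 *
 * so each group of reply queues updates its own host index register
 * instead of all queues sharing the single ReplyPostHostIndex register.
 */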
  2399. /**
  2400. * mpt3sas_base_get_msg_frame - obtain request mf pointer
  2401. * @ioc: per adapter object
  2402. * @smid: system request message index(smid zero is invalid)
  2403. *
  2404. * Returns virt pointer to message frame.
  2405. */
  2406. void *
  2407. mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  2408. {
  2409. return (void *)(ioc->request + (smid * ioc->request_sz));
  2410. }
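/*
 * Illustrative sketch (not driver code): with request_sz = 128 the frame
 * for smid 3 is ioc->request + 3 * 128. Since smid zero is invalid, frame
 * 0 of the pool is never handed out; it is the extra "smid = 0" frame
 * allocated in _base_allocate_memory_pools().
 *
 *     void *mf = mpt3sas_base_get_msg_frame(ioc, 3);
 *     memset(mf, 0, ioc->request_sz);    (callers typically zero the frame)
 */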
  2411. /**
  2412. * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
  2413. * @ioc: per adapter object
  2414. * @smid: system request message index
  2415. *
  2416. * Returns virt pointer to sense buffer.
  2417. */
  2418. void *
  2419. mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  2420. {
  2421. return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
  2422. }
  2423. /**
  2424. * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
  2425. * @ioc: per adapter object
  2426. * @smid: system request message index
  2427. *
  2428. * Returns phys pointer to the low 32bit address of the sense buffer.
  2429. */
  2430. __le32
  2431. mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  2432. {
  2433. return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
  2434. SCSI_SENSE_BUFFERSIZE));
  2435. }
  2436. /**
  2437. * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
  2438. * @ioc: per adapter object
  2439. * @smid: system request message index
  2440. *
  2441. * Returns virt pointer to a PCIe SGL.
  2442. */
  2443. void *
  2444. mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  2445. {
  2446. return (void *)(ioc->scsi_lookup[smid - 1].pcie_sg_list.pcie_sgl);
  2447. }
  2448. /**
  2449. * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
  2450. * @ioc: per adapter object
  2451. * @smid: system request message index
  2452. *
  2453. * Returns phys pointer to the address of the PCIe buffer.
  2454. */
  2455. dma_addr_t
  2456. mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  2457. {
  2458. return ioc->scsi_lookup[smid - 1].pcie_sg_list.pcie_sgl_dma;
  2459. }
  2460. /**
  2461. * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
  2462. * @ioc: per adapter object
  2463. * @phys_addr: lower 32 physical addr of the reply
  2464. *
  2465. * Converts 32bit lower physical addr into a virt address.
  2466. */
  2467. void *
  2468. mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
  2469. {
  2470. if (!phys_addr)
  2471. return NULL;
  2472. return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
  2473. }
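/*
 * Illustrative sketch (not driver code; the descriptor variable is an
 * assumption): reply frames live in one DMA coherent pool, so the low 32
 * address bits reported by the controller convert back to a virtual
 * address with simple offset arithmetic:
 *
 *     u32 reply_phys = le32_to_cpu(desc.AddressReply.ReplyFrameAddress);
 *     Mpi2DefaultReply_t *reply =
 *         mpt3sas_base_get_reply_virt_addr(ioc, reply_phys);
 */
/**
 * _base_get_msix_index - obtain the msix index for the executing CPU
 * @ioc: per adapter object
 *
 * Returns the msix index that the current processor maps to.
 */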
  2474. static inline u8
  2475. _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
  2476. {
  2477. return ioc->cpu_msix_table[raw_smp_processor_id()];
  2478. }
  2479. /**
  2480. * mpt3sas_base_get_smid - obtain a free smid from internal queue
  2481. * @ioc: per adapter object
  2482. * @cb_idx: callback index
  2483. *
  2484. * Returns smid (zero is invalid)
  2485. */
  2486. u16
  2487. mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
  2488. {
  2489. unsigned long flags;
  2490. struct request_tracker *request;
  2491. u16 smid;
  2492. spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
  2493. if (list_empty(&ioc->internal_free_list)) {
  2494. spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  2495. pr_err(MPT3SAS_FMT "%s: smid not available\n",
  2496. ioc->name, __func__);
  2497. return 0;
  2498. }
  2499. request = list_entry(ioc->internal_free_list.next,
  2500. struct request_tracker, tracker_list);
  2501. request->cb_idx = cb_idx;
  2502. smid = request->smid;
  2503. list_del(&request->tracker_list);
  2504. spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  2505. return smid;
  2506. }
  2507. /**
  2508. * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
  2509. * @ioc: per adapter object
  2510. * @cb_idx: callback index
  2511. * @scmd: pointer to scsi command object
  2512. *
  2513. * Returns smid (zero is invalid)
  2514. */
  2515. u16
  2516. mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
  2517. struct scsi_cmnd *scmd)
  2518. {
  2519. unsigned long flags;
  2520. struct scsiio_tracker *request;
  2521. u16 smid;
  2522. spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
  2523. if (list_empty(&ioc->free_list)) {
  2524. spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  2525. pr_err(MPT3SAS_FMT "%s: smid not available\n",
  2526. ioc->name, __func__);
  2527. return 0;
  2528. }
  2529. request = list_entry(ioc->free_list.next,
  2530. struct scsiio_tracker, tracker_list);
  2531. request->scmd = scmd;
  2532. request->cb_idx = cb_idx;
  2533. smid = request->smid;
  2534. request->msix_io = _base_get_msix_index(ioc);
  2535. list_del(&request->tracker_list);
  2536. spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  2537. return smid;
  2538. }
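/*
 * Usage sketch (assumed caller policy, not driver code): allocation can
 * legitimately fail once the controller's credits are exhausted, so a
 * queuecommand style caller backs off instead of blocking:
 *
 *     u16 smid = mpt3sas_base_get_smid_scsiio(ioc, cb_idx, scmd);
 *     if (!smid)
 *         return SCSI_MLQUEUE_HOST_BUSY;
 *     mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
 */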
  2539. /**
  2540. * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
  2541. * @ioc: per adapter object
  2542. * @cb_idx: callback index
  2543. *
  2544. * Returns smid (zero is invalid)
  2545. */
  2546. u16
  2547. mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
  2548. {
  2549. unsigned long flags;
  2550. struct request_tracker *request;
  2551. u16 smid;
  2552. spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
  2553. if (list_empty(&ioc->hpr_free_list)) {
  2554. spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  2555. return 0;
  2556. }
  2557. request = list_entry(ioc->hpr_free_list.next,
  2558. struct request_tracker, tracker_list);
  2559. request->cb_idx = cb_idx;
  2560. smid = request->smid;
  2561. list_del(&request->tracker_list);
  2562. spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  2563. return smid;
  2564. }
  2565. /**
  2566. * mpt3sas_base_free_smid - put smid back on free_list
  2567. * @ioc: per adapter object
  2568. * @smid: system request message index
  2569. *
  2570. * Return nothing.
  2571. */
  2572. void
  2573. mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  2574. {
  2575. unsigned long flags;
  2576. int i;
  2577. struct chain_tracker *chain_req, *next;
  2578. spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
  2579. if (smid < ioc->hi_priority_smid) {
  2580. /* scsiio queue */
  2581. i = smid - 1;
  2582. if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
  2583. list_for_each_entry_safe(chain_req, next,
  2584. &ioc->scsi_lookup[i].chain_list, tracker_list) {
  2585. list_del_init(&chain_req->tracker_list);
  2586. list_add(&chain_req->tracker_list,
  2587. &ioc->free_chain_list);
  2588. }
  2589. }
  2590. ioc->scsi_lookup[i].cb_idx = 0xFF;
  2591. ioc->scsi_lookup[i].scmd = NULL;
  2592. ioc->scsi_lookup[i].direct_io = 0;
  2593. list_add(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list);
  2594. spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
/*
* See _wait_for_commands_to_complete() call with regard
* to this code.
*/
  2599. if (ioc->shost_recovery && ioc->pending_io_count) {
  2600. if (ioc->pending_io_count == 1)
  2601. wake_up(&ioc->reset_wq);
  2602. ioc->pending_io_count--;
  2603. }
  2604. return;
  2605. } else if (smid < ioc->internal_smid) {
  2606. /* hi-priority */
  2607. i = smid - ioc->hi_priority_smid;
  2608. ioc->hpr_lookup[i].cb_idx = 0xFF;
  2609. list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
  2610. } else if (smid <= ioc->hba_queue_depth) {
  2611. /* internal queue */
  2612. i = smid - ioc->internal_smid;
  2613. ioc->internal_lookup[i].cb_idx = 0xFF;
  2614. list_add(&ioc->internal_lookup[i].tracker_list,
  2615. &ioc->internal_free_list);
  2616. }
  2617. spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  2618. }
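/*
 * The smid space is carved into three contiguous ranges, which is what
 * the comparisons above rely on. With assumed example depths of
 * scsiio_depth = 100, hi_priority_depth = 5 and internal_depth = 10:
 *
 *     scsiio:   smid 1 .. 100    (hi_priority_smid = 101)
 *     hiprio:   smid 101 .. 105  (internal_smid = 106)
 *     internal: smid 106 .. 115  (hba_queue_depth = 115)
 *
 * so "smid < ioc->hi_priority_smid" selects scsiio trackers and the
 * subtractions recover a zero based index into each lookup array.
 */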
  2619. /**
  2620. * _base_writeq - 64 bit write to MMIO
  2621. * @ioc: per adapter object
  2622. * @b: data payload
  2623. * @addr: address in MMIO space
  2624. * @writeq_lock: spin lock
  2625. *
* Glue for handling an atomic 64 bit word to MMIO. This special handling takes
* care of 32 bit environments where it is not guaranteed that the entire word
* is sent in one transfer.
  2629. */
  2630. #if defined(writeq) && defined(CONFIG_64BIT)
  2631. static inline void
  2632. _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
  2633. {
  2634. writeq(cpu_to_le64(b), addr);
  2635. }
  2636. #else
  2637. static inline void
  2638. _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
  2639. {
  2640. unsigned long flags;
  2641. __u64 data_out = cpu_to_le64(b);
  2642. spin_lock_irqsave(writeq_lock, flags);
  2643. writel((u32)(data_out), addr);
  2644. writel((u32)(data_out >> 32), (addr + 4));
  2645. spin_unlock_irqrestore(writeq_lock, flags);
  2646. }
  2647. #endif
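/*
 * Why the lock matters on 32 bit: without it, two CPUs posting request
 * descriptors concurrently could interleave their writel() halves,
 *
 *     CPU0: writel(low0) ..................... writel(high0)
 *     CPU1: ............ writel(low1) writel(high1) ........
 *
 * and the controller would latch a descriptor stitched together from two
 * different requests. Serializing both halves under writeq_lock ensures
 * every 64 bit descriptor arrives whole.
 */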
  2648. /**
  2649. * _base_put_smid_scsi_io - send SCSI_IO request to firmware
  2650. * @ioc: per adapter object
  2651. * @smid: system request message index
  2652. * @handle: device handle
  2653. *
  2654. * Return nothing.
  2655. */
  2656. static void
  2657. _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
  2658. {
  2659. Mpi2RequestDescriptorUnion_t descriptor;
  2660. u64 *request = (u64 *)&descriptor;
  2661. descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
  2662. descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
  2663. descriptor.SCSIIO.SMID = cpu_to_le16(smid);
  2664. descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
  2665. descriptor.SCSIIO.LMID = 0;
  2666. _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
  2667. &ioc->scsi_lookup_lock);
  2668. }
  2669. /**
  2670. * _base_put_smid_fast_path - send fast path request to firmware
  2671. * @ioc: per adapter object
  2672. * @smid: system request message index
  2673. * @handle: device handle
  2674. *
  2675. * Return nothing.
  2676. */
  2677. static void
  2678. _base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
  2679. u16 handle)
  2680. {
  2681. Mpi2RequestDescriptorUnion_t descriptor;
  2682. u64 *request = (u64 *)&descriptor;
  2683. descriptor.SCSIIO.RequestFlags =
  2684. MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
  2685. descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
  2686. descriptor.SCSIIO.SMID = cpu_to_le16(smid);
  2687. descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
  2688. descriptor.SCSIIO.LMID = 0;
  2689. _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
  2690. &ioc->scsi_lookup_lock);
  2691. }
  2692. /**
  2693. * _base_put_smid_hi_priority - send Task Management request to firmware
  2694. * @ioc: per adapter object
  2695. * @smid: system request message index
* @msix_task: msix_task will be same as the msix of the IO in case of task abort, else 0.
  2697. * Return nothing.
  2698. */
  2699. static void
  2700. _base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
  2701. u16 msix_task)
  2702. {
  2703. Mpi2RequestDescriptorUnion_t descriptor;
  2704. u64 *request = (u64 *)&descriptor;
  2705. descriptor.HighPriority.RequestFlags =
  2706. MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
  2707. descriptor.HighPriority.MSIxIndex = msix_task;
  2708. descriptor.HighPriority.SMID = cpu_to_le16(smid);
  2709. descriptor.HighPriority.LMID = 0;
  2710. descriptor.HighPriority.Reserved1 = 0;
  2711. _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
  2712. &ioc->scsi_lookup_lock);
  2713. }
  2714. /**
  2715. * _base_put_smid_nvme_encap - send NVMe encapsulated request to
  2716. * firmware
  2717. * @ioc: per adapter object
  2718. * @smid: system request message index
  2719. *
  2720. * Return nothing.
  2721. */
  2722. static void
  2723. _base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  2724. {
  2725. Mpi2RequestDescriptorUnion_t descriptor;
  2726. u64 *request = (u64 *)&descriptor;
  2727. descriptor.Default.RequestFlags =
  2728. MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
  2729. descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
  2730. descriptor.Default.SMID = cpu_to_le16(smid);
  2731. descriptor.Default.LMID = 0;
  2732. descriptor.Default.DescriptorTypeDependent = 0;
  2733. _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
  2734. &ioc->scsi_lookup_lock);
  2735. }
  2736. /**
  2737. * _base_put_smid_default - Default, primarily used for config pages
  2738. * @ioc: per adapter object
  2739. * @smid: system request message index
  2740. *
  2741. * Return nothing.
  2742. */
  2743. static void
  2744. _base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  2745. {
  2746. Mpi2RequestDescriptorUnion_t descriptor;
  2747. u64 *request = (u64 *)&descriptor;
  2748. descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
  2749. descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
  2750. descriptor.Default.SMID = cpu_to_le16(smid);
  2751. descriptor.Default.LMID = 0;
  2752. descriptor.Default.DescriptorTypeDependent = 0;
  2753. _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
  2754. &ioc->scsi_lookup_lock);
  2755. }
  2756. /**
  2757. * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
  2758. * Atomic Request Descriptor
  2759. * @ioc: per adapter object
  2760. * @smid: system request message index
  2761. * @handle: device handle, unused in this function, for function type match
  2762. *
  2763. * Return nothing.
  2764. */
  2765. static void
  2766. _base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
  2767. u16 handle)
  2768. {
  2769. Mpi26AtomicRequestDescriptor_t descriptor;
  2770. u32 *request = (u32 *)&descriptor;
  2771. descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
  2772. descriptor.MSIxIndex = _base_get_msix_index(ioc);
  2773. descriptor.SMID = cpu_to_le16(smid);
  2774. writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
  2775. }
  2776. /**
  2777. * _base_put_smid_fast_path_atomic - send fast path request to firmware
  2778. * using Atomic Request Descriptor
  2779. * @ioc: per adapter object
  2780. * @smid: system request message index
  2781. * @handle: device handle, unused in this function, for function type match
  2782. * Return nothing
  2783. */
  2784. static void
  2785. _base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
  2786. u16 handle)
  2787. {
  2788. Mpi26AtomicRequestDescriptor_t descriptor;
  2789. u32 *request = (u32 *)&descriptor;
  2790. descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
  2791. descriptor.MSIxIndex = _base_get_msix_index(ioc);
  2792. descriptor.SMID = cpu_to_le16(smid);
  2793. writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
  2794. }
  2795. /**
  2796. * _base_put_smid_hi_priority_atomic - send Task Management request to
  2797. * firmware using Atomic Request Descriptor
  2798. * @ioc: per adapter object
  2799. * @smid: system request message index
* @msix_task: msix_task will be same as the msix of the IO in case of task abort, else 0
  2801. *
  2802. * Return nothing.
  2803. */
  2804. static void
  2805. _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
  2806. u16 msix_task)
  2807. {
  2808. Mpi26AtomicRequestDescriptor_t descriptor;
  2809. u32 *request = (u32 *)&descriptor;
  2810. descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
  2811. descriptor.MSIxIndex = msix_task;
  2812. descriptor.SMID = cpu_to_le16(smid);
  2813. writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
  2814. }
  2815. /**
  2816. * _base_put_smid_nvme_encap_atomic - send NVMe encapsulated request to
  2817. * firmware using Atomic Request Descriptor
  2818. * @ioc: per adapter object
  2819. * @smid: system request message index
  2820. *
  2821. * Return nothing.
  2822. */
  2823. static void
  2824. _base_put_smid_nvme_encap_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  2825. {
  2826. Mpi26AtomicRequestDescriptor_t descriptor;
  2827. u32 *request = (u32 *)&descriptor;
  2828. descriptor.RequestFlags = MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
  2829. descriptor.MSIxIndex = _base_get_msix_index(ioc);
  2830. descriptor.SMID = cpu_to_le16(smid);
  2831. writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
  2832. }
  2833. /**
* _base_put_smid_default_atomic - Default, primarily used for config pages,
* using Atomic Request Descriptor
  2836. * @ioc: per adapter object
  2837. * @smid: system request message index
  2838. *
  2839. * Return nothing.
  2840. */
  2841. static void
  2842. _base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  2843. {
  2844. Mpi26AtomicRequestDescriptor_t descriptor;
  2845. u32 *request = (u32 *)&descriptor;
  2846. descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
  2847. descriptor.MSIxIndex = _base_get_msix_index(ioc);
  2848. descriptor.SMID = cpu_to_le16(smid);
  2849. writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
  2850. }
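/*
 * The legacy and atomic families above deliberately share signatures: the
 * driver binds one or the other to per-IOC function pointers depending on
 * whether the controller reports atomic descriptor support. A minimal
 * sketch of that selection (member names as used elsewhere in this
 * driver; treat them as an assumption here):
 *
 *     if (ioc->atomic_desc_capable) {
 *         ioc->put_smid_default = _base_put_smid_default_atomic;
 *         ioc->put_smid_scsi_io = _base_put_smid_scsi_io_atomic;
 *     } else {
 *         ioc->put_smid_default = _base_put_smid_default;
 *         ioc->put_smid_scsi_io = _base_put_smid_scsi_io;
 *     }
 */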
  2851. /**
  2852. * _base_display_OEMs_branding - Display branding string
  2853. * @ioc: per adapter object
  2854. *
  2855. * Return nothing.
  2856. */
  2857. static void
  2858. _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
  2859. {
  2862. switch (ioc->pdev->subsystem_vendor) {
  2863. case PCI_VENDOR_ID_INTEL:
  2864. switch (ioc->pdev->device) {
  2865. case MPI2_MFGPAGE_DEVID_SAS2008:
  2866. switch (ioc->pdev->subsystem_device) {
  2867. case MPT2SAS_INTEL_RMS2LL080_SSDID:
  2868. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  2869. MPT2SAS_INTEL_RMS2LL080_BRANDING);
  2870. break;
  2871. case MPT2SAS_INTEL_RMS2LL040_SSDID:
  2872. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  2873. MPT2SAS_INTEL_RMS2LL040_BRANDING);
  2874. break;
  2875. case MPT2SAS_INTEL_SSD910_SSDID:
  2876. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  2877. MPT2SAS_INTEL_SSD910_BRANDING);
  2878. break;
  2879. default:
  2880. pr_info(MPT3SAS_FMT
  2881. "Intel(R) Controller: Subsystem ID: 0x%X\n",
  2882. ioc->name, ioc->pdev->subsystem_device);
  2883. break;
}
break;
case MPI2_MFGPAGE_DEVID_SAS2308_2:
  2886. switch (ioc->pdev->subsystem_device) {
  2887. case MPT2SAS_INTEL_RS25GB008_SSDID:
  2888. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  2889. MPT2SAS_INTEL_RS25GB008_BRANDING);
  2890. break;
  2891. case MPT2SAS_INTEL_RMS25JB080_SSDID:
  2892. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  2893. MPT2SAS_INTEL_RMS25JB080_BRANDING);
  2894. break;
  2895. case MPT2SAS_INTEL_RMS25JB040_SSDID:
  2896. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  2897. MPT2SAS_INTEL_RMS25JB040_BRANDING);
  2898. break;
  2899. case MPT2SAS_INTEL_RMS25KB080_SSDID:
  2900. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  2901. MPT2SAS_INTEL_RMS25KB080_BRANDING);
  2902. break;
  2903. case MPT2SAS_INTEL_RMS25KB040_SSDID:
  2904. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  2905. MPT2SAS_INTEL_RMS25KB040_BRANDING);
  2906. break;
  2907. case MPT2SAS_INTEL_RMS25LB040_SSDID:
  2908. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  2909. MPT2SAS_INTEL_RMS25LB040_BRANDING);
  2910. break;
  2911. case MPT2SAS_INTEL_RMS25LB080_SSDID:
  2912. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  2913. MPT2SAS_INTEL_RMS25LB080_BRANDING);
  2914. break;
  2915. default:
  2916. pr_info(MPT3SAS_FMT
  2917. "Intel(R) Controller: Subsystem ID: 0x%X\n",
  2918. ioc->name, ioc->pdev->subsystem_device);
  2919. break;
}
break;
case MPI25_MFGPAGE_DEVID_SAS3008:
  2922. switch (ioc->pdev->subsystem_device) {
  2923. case MPT3SAS_INTEL_RMS3JC080_SSDID:
  2924. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  2925. MPT3SAS_INTEL_RMS3JC080_BRANDING);
  2926. break;
  2927. case MPT3SAS_INTEL_RS3GC008_SSDID:
  2928. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  2929. MPT3SAS_INTEL_RS3GC008_BRANDING);
  2930. break;
  2931. case MPT3SAS_INTEL_RS3FC044_SSDID:
  2932. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  2933. MPT3SAS_INTEL_RS3FC044_BRANDING);
  2934. break;
  2935. case MPT3SAS_INTEL_RS3UC080_SSDID:
  2936. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  2937. MPT3SAS_INTEL_RS3UC080_BRANDING);
  2938. break;
  2939. default:
  2940. pr_info(MPT3SAS_FMT
  2941. "Intel(R) Controller: Subsystem ID: 0x%X\n",
  2942. ioc->name, ioc->pdev->subsystem_device);
  2943. break;
  2944. }
  2945. break;
  2946. default:
  2947. pr_info(MPT3SAS_FMT
  2948. "Intel(R) Controller: Subsystem ID: 0x%X\n",
  2949. ioc->name, ioc->pdev->subsystem_device);
  2950. break;
  2951. }
  2952. break;
  2953. case PCI_VENDOR_ID_DELL:
  2954. switch (ioc->pdev->device) {
  2955. case MPI2_MFGPAGE_DEVID_SAS2008:
  2956. switch (ioc->pdev->subsystem_device) {
  2957. case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
  2958. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  2959. MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
  2960. break;
  2961. case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
  2962. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  2963. MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
  2964. break;
  2965. case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
  2966. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  2967. MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
  2968. break;
  2969. case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
  2970. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  2971. MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
  2972. break;
  2973. case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
  2974. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  2975. MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
  2976. break;
  2977. case MPT2SAS_DELL_PERC_H200_SSDID:
  2978. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  2979. MPT2SAS_DELL_PERC_H200_BRANDING);
  2980. break;
  2981. case MPT2SAS_DELL_6GBPS_SAS_SSDID:
  2982. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  2983. MPT2SAS_DELL_6GBPS_SAS_BRANDING);
  2984. break;
  2985. default:
  2986. pr_info(MPT3SAS_FMT
  2987. "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
  2988. ioc->name, ioc->pdev->subsystem_device);
  2989. break;
  2990. }
  2991. break;
  2992. case MPI25_MFGPAGE_DEVID_SAS3008:
  2993. switch (ioc->pdev->subsystem_device) {
  2994. case MPT3SAS_DELL_12G_HBA_SSDID:
  2995. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  2996. MPT3SAS_DELL_12G_HBA_BRANDING);
  2997. break;
  2998. default:
  2999. pr_info(MPT3SAS_FMT
  3000. "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
  3001. ioc->name, ioc->pdev->subsystem_device);
  3002. break;
  3003. }
  3004. break;
  3005. default:
  3006. pr_info(MPT3SAS_FMT
  3007. "Dell HBA: Subsystem ID: 0x%X\n", ioc->name,
  3008. ioc->pdev->subsystem_device);
  3009. break;
  3010. }
  3011. break;
  3012. case PCI_VENDOR_ID_CISCO:
  3013. switch (ioc->pdev->device) {
  3014. case MPI25_MFGPAGE_DEVID_SAS3008:
  3015. switch (ioc->pdev->subsystem_device) {
  3016. case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
  3017. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3018. MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
  3019. break;
  3020. case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
  3021. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3022. MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
  3023. break;
  3024. case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
  3025. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3026. MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
  3027. break;
  3028. default:
  3029. pr_info(MPT3SAS_FMT
  3030. "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
  3031. ioc->name, ioc->pdev->subsystem_device);
  3032. break;
  3033. }
  3034. break;
  3035. case MPI25_MFGPAGE_DEVID_SAS3108_1:
  3036. switch (ioc->pdev->subsystem_device) {
  3037. case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
  3038. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3039. MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
  3040. break;
  3041. case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
  3042. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3043. MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING
  3044. );
  3045. break;
  3046. default:
  3047. pr_info(MPT3SAS_FMT
  3048. "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
  3049. ioc->name, ioc->pdev->subsystem_device);
  3050. break;
  3051. }
  3052. break;
  3053. default:
  3054. pr_info(MPT3SAS_FMT
  3055. "Cisco SAS HBA: Subsystem ID: 0x%X\n",
  3056. ioc->name, ioc->pdev->subsystem_device);
  3057. break;
  3058. }
  3059. break;
  3060. case MPT2SAS_HP_3PAR_SSVID:
  3061. switch (ioc->pdev->device) {
  3062. case MPI2_MFGPAGE_DEVID_SAS2004:
  3063. switch (ioc->pdev->subsystem_device) {
  3064. case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
  3065. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3066. MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
  3067. break;
  3068. default:
  3069. pr_info(MPT3SAS_FMT
  3070. "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
  3071. ioc->name, ioc->pdev->subsystem_device);
  3072. break;
}
break;
case MPI2_MFGPAGE_DEVID_SAS2308_2:
  3075. switch (ioc->pdev->subsystem_device) {
  3076. case MPT2SAS_HP_2_4_INTERNAL_SSDID:
  3077. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3078. MPT2SAS_HP_2_4_INTERNAL_BRANDING);
  3079. break;
  3080. case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
  3081. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3082. MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
  3083. break;
  3084. case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
  3085. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3086. MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
  3087. break;
  3088. case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
  3089. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3090. MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
  3091. break;
  3092. default:
  3093. pr_info(MPT3SAS_FMT
  3094. "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
  3095. ioc->name, ioc->pdev->subsystem_device);
  3096. break;
}
break;
default:
  3099. pr_info(MPT3SAS_FMT
  3100. "HP SAS HBA: Subsystem ID: 0x%X\n",
  3101. ioc->name, ioc->pdev->subsystem_device);
  3102. break;
}
break;
default:
  3105. break;
  3106. }
  3107. }
  3108. /**
* _base_display_ioc_capabilities - Display IOC's capabilities.
  3110. * @ioc: per adapter object
  3111. *
  3112. * Return nothing.
  3113. */
  3114. static void
  3115. _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
  3116. {
  3117. int i = 0;
  3118. char desc[16];
  3119. u32 iounit_pg1_flags;
  3120. u32 bios_version;
  3121. bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
  3122. strncpy(desc, ioc->manu_pg0.ChipName, 16);
  3123. pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\
  3124. "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
  3125. ioc->name, desc,
  3126. (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
  3127. (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
  3128. (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
  3129. ioc->facts.FWVersion.Word & 0x000000FF,
  3130. ioc->pdev->revision,
  3131. (bios_version & 0xFF000000) >> 24,
  3132. (bios_version & 0x00FF0000) >> 16,
  3133. (bios_version & 0x0000FF00) >> 8,
  3134. bios_version & 0x000000FF);
  3135. _base_display_OEMs_branding(ioc);
pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
pr_info("Initiator");
i++;
}
if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
pr_info("%sTarget", i ? "," : "");
i++;
}
if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
pr_info("%sNVMe", i ? "," : "");
i++;
}
  3149. i = 0;
  3150. pr_info("), ");
  3151. pr_info("Capabilities=(");
  3152. if (!ioc->hide_ir_msg) {
  3153. if (ioc->facts.IOCCapabilities &
  3154. MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
  3155. pr_info("Raid");
  3156. i++;
  3157. }
  3158. }
  3159. if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
  3160. pr_info("%sTLR", i ? "," : "");
  3161. i++;
  3162. }
  3163. if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
  3164. pr_info("%sMulticast", i ? "," : "");
  3165. i++;
  3166. }
  3167. if (ioc->facts.IOCCapabilities &
  3168. MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
  3169. pr_info("%sBIDI Target", i ? "," : "");
  3170. i++;
  3171. }
  3172. if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
  3173. pr_info("%sEEDP", i ? "," : "");
  3174. i++;
  3175. }
  3176. if (ioc->facts.IOCCapabilities &
  3177. MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
  3178. pr_info("%sSnapshot Buffer", i ? "," : "");
  3179. i++;
  3180. }
  3181. if (ioc->facts.IOCCapabilities &
  3182. MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
  3183. pr_info("%sDiag Trace Buffer", i ? "," : "");
  3184. i++;
  3185. }
  3186. if (ioc->facts.IOCCapabilities &
  3187. MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
  3188. pr_info("%sDiag Extended Buffer", i ? "," : "");
  3189. i++;
  3190. }
  3191. if (ioc->facts.IOCCapabilities &
  3192. MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
  3193. pr_info("%sTask Set Full", i ? "," : "");
  3194. i++;
  3195. }
  3196. iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
  3197. if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
  3198. pr_info("%sNCQ", i ? "," : "");
  3199. i++;
  3200. }
  3201. pr_info(")\n");
  3202. }
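/*
 * Intended output, with illustrative values (each list is built from
 * successive pr_info() pieces; the i ? "," : "" idiom simply suppresses
 * the comma before the first item of each list):
 *
 *     mpt3sas_cm0: Protocol=(Initiator,NVMe), Capabilities=(Raid,TLR,
 *     EEDP,Task Set Full,NCQ)
 */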
  3203. /**
  3204. * mpt3sas_base_update_missing_delay - change the missing delay timers
  3205. * @ioc: per adapter object
  3206. * @device_missing_delay: amount of time till device is reported missing
  3207. * @io_missing_delay: interval IO is returned when there is a missing device
  3208. *
  3209. * Return nothing.
  3210. *
  3211. * Passed on the command line, this function will modify the device missing
  3212. * delay, as well as the io missing delay. This should be called at driver
  3213. * load time.
  3214. */
  3215. void
  3216. mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
  3217. u16 device_missing_delay, u8 io_missing_delay)
  3218. {
u16 dmd, dmd_new, dmd_original;
  3220. u8 io_missing_delay_original;
  3221. u16 sz;
  3222. Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
  3223. Mpi2ConfigReply_t mpi_reply;
  3224. u8 num_phys = 0;
  3225. u16 ioc_status;
  3226. mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
  3227. if (!num_phys)
  3228. return;
  3229. sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
  3230. sizeof(Mpi2SasIOUnit1PhyData_t));
  3231. sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
  3232. if (!sas_iounit_pg1) {
  3233. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  3234. ioc->name, __FILE__, __LINE__, __func__);
  3235. goto out;
  3236. }
  3237. if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
  3238. sas_iounit_pg1, sz))) {
  3239. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  3240. ioc->name, __FILE__, __LINE__, __func__);
  3241. goto out;
  3242. }
  3243. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  3244. MPI2_IOCSTATUS_MASK;
  3245. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  3246. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  3247. ioc->name, __FILE__, __LINE__, __func__);
  3248. goto out;
  3249. }
  3250. /* device missing delay */
  3251. dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
  3252. if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
  3253. dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
  3254. else
  3255. dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
dmd_original = dmd;
  3257. if (device_missing_delay > 0x7F) {
  3258. dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
  3259. device_missing_delay;
  3260. dmd = dmd / 16;
  3261. dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
  3262. } else
  3263. dmd = device_missing_delay;
  3264. sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
  3265. /* io missing delay */
  3266. io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
  3267. sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
  3268. if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
  3269. sz)) {
  3270. if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
  3271. dmd_new = (dmd &
  3272. MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
  3273. else
  3274. dmd_new =
  3275. dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
  3276. pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
ioc->name, dmd_original, dmd_new);
  3278. pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n",
  3279. ioc->name, io_missing_delay_original,
  3280. io_missing_delay);
  3281. ioc->device_missing_delay = dmd_new;
  3282. ioc->io_missing_delay = io_missing_delay;
  3283. }
  3284. out:
  3285. kfree(sas_iounit_pg1);
  3286. }
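/*
 * Worked example of the encoding above (values assumed): the delay field
 * stores either whole seconds (7 bits) or, with
 * MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 set, units of 16 seconds.
 * Requesting device_missing_delay = 300 takes the "> 0x7F" path:
 *
 *     dmd = 300 / 16 = 18 (0x12), OR'd with the UNIT_16 flag
 *
 * which decodes back as 18 * 16 = 288 seconds, the value reported as
 * "new" in the log message.
 */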
  3287. /**
  3288. * _base_static_config_pages - static start of day config pages
  3289. * @ioc: per adapter object
  3290. *
  3291. * Return nothing.
  3292. */
  3293. static void
  3294. _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
  3295. {
  3296. Mpi2ConfigReply_t mpi_reply;
  3297. u32 iounit_pg1_flags;
  3298. mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
  3299. if (ioc->ir_firmware)
  3300. mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
  3301. &ioc->manu_pg10);
  3302. /*
  3303. * Ensure correct T10 PI operation if vendor left EEDPTagMode
  3304. * flag unset in NVDATA.
  3305. */
  3306. mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
  3307. if (ioc->manu_pg11.EEDPTagMode == 0) {
  3308. pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
  3309. ioc->name);
  3310. ioc->manu_pg11.EEDPTagMode &= ~0x3;
  3311. ioc->manu_pg11.EEDPTagMode |= 0x1;
  3312. mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
  3313. &ioc->manu_pg11);
  3314. }
  3315. mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
  3316. mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
  3317. mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
  3318. mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
  3319. mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
  3320. mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
  3321. _base_display_ioc_capabilities(ioc);
  3322. /*
  3323. * Enable task_set_full handling in iounit_pg1 when the
  3324. * facts capabilities indicate that its supported.
  3325. */
  3326. iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
  3327. if ((ioc->facts.IOCCapabilities &
  3328. MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
  3329. iounit_pg1_flags &=
  3330. ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
  3331. else
  3332. iounit_pg1_flags |=
  3333. MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
  3334. ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
  3335. mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
  3336. if (ioc->iounit_pg8.NumSensors)
  3337. ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
  3338. }
  3339. /**
  3340. * _base_release_memory_pools - release memory
  3341. * @ioc: per adapter object
  3342. *
  3343. * Free memory allocated from _base_allocate_memory_pools.
  3344. *
  3345. * Return nothing.
  3346. */
  3347. static void
  3348. _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
  3349. {
  3350. int i = 0;
  3351. struct reply_post_struct *rps;
  3352. dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3353. __func__));
  3354. if (ioc->request) {
  3355. pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
  3356. ioc->request, ioc->request_dma);
  3357. dexitprintk(ioc, pr_info(MPT3SAS_FMT
  3358. "request_pool(0x%p): free\n",
  3359. ioc->name, ioc->request));
  3360. ioc->request = NULL;
  3361. }
  3362. if (ioc->sense) {
  3363. dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
  3364. dma_pool_destroy(ioc->sense_dma_pool);
  3365. dexitprintk(ioc, pr_info(MPT3SAS_FMT
  3366. "sense_pool(0x%p): free\n",
  3367. ioc->name, ioc->sense));
  3368. ioc->sense = NULL;
  3369. }
  3370. if (ioc->reply) {
  3371. dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
  3372. dma_pool_destroy(ioc->reply_dma_pool);
  3373. dexitprintk(ioc, pr_info(MPT3SAS_FMT
  3374. "reply_pool(0x%p): free\n",
  3375. ioc->name, ioc->reply));
  3376. ioc->reply = NULL;
  3377. }
  3378. if (ioc->reply_free) {
  3379. dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
  3380. ioc->reply_free_dma);
  3381. dma_pool_destroy(ioc->reply_free_dma_pool);
  3382. dexitprintk(ioc, pr_info(MPT3SAS_FMT
  3383. "reply_free_pool(0x%p): free\n",
  3384. ioc->name, ioc->reply_free));
  3385. ioc->reply_free = NULL;
  3386. }
  3387. if (ioc->reply_post) {
  3388. do {
  3389. rps = &ioc->reply_post[i];
  3390. if (rps->reply_post_free) {
  3391. dma_pool_free(
  3392. ioc->reply_post_free_dma_pool,
  3393. rps->reply_post_free,
  3394. rps->reply_post_free_dma);
  3395. dexitprintk(ioc, pr_info(MPT3SAS_FMT
  3396. "reply_post_free_pool(0x%p): free\n",
  3397. ioc->name, rps->reply_post_free));
  3398. rps->reply_post_free = NULL;
  3399. }
  3400. } while (ioc->rdpq_array_enable &&
  3401. (++i < ioc->reply_queue_count));
  3402. dma_pool_destroy(ioc->reply_post_free_dma_pool);
  3403. kfree(ioc->reply_post);
  3404. }
  3405. if (ioc->pcie_sgl_dma_pool) {
  3406. for (i = 0; i < ioc->scsiio_depth; i++) {
  3407. if (ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl)
  3408. pci_pool_free(ioc->pcie_sgl_dma_pool,
  3409. ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl,
  3410. ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl_dma);
  3411. }
pci_pool_destroy(ioc->pcie_sgl_dma_pool);
  3414. }
  3415. if (ioc->config_page) {
  3416. dexitprintk(ioc, pr_info(MPT3SAS_FMT
  3417. "config_page(0x%p): free\n", ioc->name,
  3418. ioc->config_page));
  3419. pci_free_consistent(ioc->pdev, ioc->config_page_sz,
  3420. ioc->config_page, ioc->config_page_dma);
  3421. }
  3422. if (ioc->scsi_lookup) {
  3423. free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
  3424. ioc->scsi_lookup = NULL;
  3425. }
  3426. kfree(ioc->hpr_lookup);
  3427. kfree(ioc->internal_lookup);
  3428. if (ioc->chain_lookup) {
  3429. for (i = 0; i < ioc->chain_depth; i++) {
  3430. if (ioc->chain_lookup[i].chain_buffer)
  3431. dma_pool_free(ioc->chain_dma_pool,
  3432. ioc->chain_lookup[i].chain_buffer,
  3433. ioc->chain_lookup[i].chain_buffer_dma);
  3434. }
  3435. dma_pool_destroy(ioc->chain_dma_pool);
  3436. free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
  3437. ioc->chain_lookup = NULL;
  3438. }
  3439. }
  3440. /**
  3441. * _base_allocate_memory_pools - allocate start of day memory pools
  3442. * @ioc: per adapter object
  3443. *
  3444. * Returns 0 success, anything else error
  3445. */
  3446. static int
  3447. _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
  3448. {
  3449. struct mpt3sas_facts *facts;
  3450. u16 max_sge_elements;
  3451. u16 chains_needed_per_io;
  3452. u32 sz, total_sz, reply_post_free_sz;
  3453. u32 retry_sz;
  3454. u16 max_request_credit, nvme_blocks_needed;
  3455. unsigned short sg_tablesize;
  3456. u16 sge_size;
  3457. int i;
  3458. dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3459. __func__));
  3460. retry_sz = 0;
  3461. facts = &ioc->facts;
  3462. /* command line tunables for max sgl entries */
  3463. if (max_sgl_entries != -1)
  3464. sg_tablesize = max_sgl_entries;
  3465. else {
  3466. if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
  3467. sg_tablesize = MPT2SAS_SG_DEPTH;
  3468. else
  3469. sg_tablesize = MPT3SAS_SG_DEPTH;
  3470. }
  3471. /* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */
  3472. if (reset_devices)
  3473. sg_tablesize = min_t(unsigned short, sg_tablesize,
  3474. MPT_KDUMP_MIN_PHYS_SEGMENTS);
  3475. if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
  3476. sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
  3477. else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
  3478. sg_tablesize = min_t(unsigned short, sg_tablesize,
  3479. SG_MAX_SEGMENTS);
  3480. pr_warn(MPT3SAS_FMT
  3481. "sg_tablesize(%u) is bigger than kernel"
  3482. " defined SG_CHUNK_SIZE(%u)\n", ioc->name,
  3483. sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
  3484. }
  3485. ioc->shost->sg_tablesize = sg_tablesize;
  3486. ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
  3487. (facts->RequestCredit / 4));
  3488. if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
  3489. if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
  3490. INTERNAL_SCSIIO_CMDS_COUNT)) {
pr_err(MPT3SAS_FMT
"IOC doesn't have enough Request Credits, it has just %d credits\n",
ioc->name, facts->RequestCredit);
  3494. return -ENOMEM;
  3495. }
  3496. ioc->internal_depth = 10;
  3497. }
  3498. ioc->hi_priority_depth = ioc->internal_depth - (5);
  3499. /* command line tunables for max controller queue depth */
  3500. if (max_queue_depth != -1 && max_queue_depth != 0) {
  3501. max_request_credit = min_t(u16, max_queue_depth +
  3502. ioc->internal_depth, facts->RequestCredit);
  3503. if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
  3504. max_request_credit = MAX_HBA_QUEUE_DEPTH;
  3505. } else if (reset_devices)
  3506. max_request_credit = min_t(u16, facts->RequestCredit,
  3507. (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
  3508. else
  3509. max_request_credit = min_t(u16, facts->RequestCredit,
  3510. MAX_HBA_QUEUE_DEPTH);
  3511. /* Firmware maintains additional facts->HighPriorityCredit number of
* credits for HiPriority Request messages, so hba queue depth will be
  3513. * sum of max_request_credit and high priority queue depth.
  3514. */
  3515. ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
  3516. /* request frame size */
  3517. ioc->request_sz = facts->IOCRequestFrameSize * 4;
  3518. /* reply frame size */
  3519. ioc->reply_sz = facts->ReplyFrameSize * 4;
  3520. /* chain segment size */
  3521. if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
  3522. if (facts->IOCMaxChainSegmentSize)
  3523. ioc->chain_segment_sz =
  3524. facts->IOCMaxChainSegmentSize *
  3525. MAX_CHAIN_ELEMT_SZ;
  3526. else
/* default to 128 byte chain segments if IOCMaxChainSegmentSize is zero */
  3528. ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
  3529. MAX_CHAIN_ELEMT_SZ;
  3530. } else
  3531. ioc->chain_segment_sz = ioc->request_sz;
  3532. /* calculate the max scatter element size */
  3533. sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
  3534. retry_allocation:
  3535. total_sz = 0;
  3536. /* calculate number of sg elements left over in the 1st frame */
  3537. max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
  3538. sizeof(Mpi2SGEIOUnion_t)) + sge_size);
  3539. ioc->max_sges_in_main_message = max_sge_elements/sge_size;
  3540. /* now do the same for a chain buffer */
  3541. max_sge_elements = ioc->chain_segment_sz - sge_size;
  3542. ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
  3543. /*
  3544. * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
  3545. */
  3546. chains_needed_per_io = ((ioc->shost->sg_tablesize -
  3547. ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
  3548. + 1;
  3549. if (chains_needed_per_io > facts->MaxChainDepth) {
  3550. chains_needed_per_io = facts->MaxChainDepth;
  3551. ioc->shost->sg_tablesize = min_t(u16,
  3552. ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
  3553. * chains_needed_per_io), ioc->shost->sg_tablesize);
  3554. }
  3555. ioc->chains_needed_per_io = chains_needed_per_io;
  3556. /* reply free queue sizing - taking into account for 64 FW events */
  3557. ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
  3558. /* calculate reply descriptor post queue depth */
  3559. ioc->reply_post_queue_depth = ioc->hba_queue_depth +
  3560. ioc->reply_free_queue_depth + 1 ;
  3561. /* align the reply post queue on the next 16 count boundary */
  3562. if (ioc->reply_post_queue_depth % 16)
  3563. ioc->reply_post_queue_depth += 16 -
  3564. (ioc->reply_post_queue_depth % 16);
  3565. if (ioc->reply_post_queue_depth >
  3566. facts->MaxReplyDescriptorPostQueueDepth) {
  3567. ioc->reply_post_queue_depth =
  3568. facts->MaxReplyDescriptorPostQueueDepth -
  3569. (facts->MaxReplyDescriptorPostQueueDepth % 16);
  3570. ioc->hba_queue_depth =
  3571. ((ioc->reply_post_queue_depth - 64) / 2) - 1;
  3572. ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
  3573. }
  3574. dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \
  3575. "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
  3576. "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
  3577. ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
  3578. ioc->chains_needed_per_io));
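/*
 * Worked example of the chain math (sizes assumed purely to show the
 * arithmetic): with request_sz = 128, sge_size = 16 and a fixed SCSI_IO
 * header of 48 bytes, the main message holds (128 - (48 + 16)) / 16 = 4
 * SGEs, where the extra sge_size reserves room for the chain element. A
 * 128 byte chain segment holds (128 - 16) / 16 = 7 SGEs, so a 128 entry
 * sg_tablesize needs ((128 - 4) / 7) + 1 = 18 chains per I/O, clamped
 * against facts->MaxChainDepth if the firmware supports fewer.
 */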
  3579. /* reply post queue, 16 byte align */
  3580. reply_post_free_sz = ioc->reply_post_queue_depth *
  3581. sizeof(Mpi2DefaultReplyDescriptor_t);
  3582. sz = reply_post_free_sz;
  3583. if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
  3584. sz *= ioc->reply_queue_count;
  3585. ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
  3586. (ioc->reply_queue_count):1,
  3587. sizeof(struct reply_post_struct), GFP_KERNEL);
  3588. if (!ioc->reply_post) {
  3589. pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n",
  3590. ioc->name);
  3591. goto out;
  3592. }
  3593. ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool",
  3594. &ioc->pdev->dev, sz, 16, 0);
  3595. if (!ioc->reply_post_free_dma_pool) {
  3596. pr_err(MPT3SAS_FMT
  3597. "reply_post_free pool: dma_pool_create failed\n",
  3598. ioc->name);
  3599. goto out;
  3600. }
  3601. i = 0;
  3602. do {
  3603. ioc->reply_post[i].reply_post_free =
  3604. dma_pool_alloc(ioc->reply_post_free_dma_pool,
  3605. GFP_KERNEL,
  3606. &ioc->reply_post[i].reply_post_free_dma);
  3607. if (!ioc->reply_post[i].reply_post_free) {
  3608. pr_err(MPT3SAS_FMT
  3609. "reply_post_free pool: dma_pool_alloc failed\n",
  3610. ioc->name);
  3611. goto out;
  3612. }
  3613. memset(ioc->reply_post[i].reply_post_free, 0, sz);
  3614. dinitprintk(ioc, pr_info(MPT3SAS_FMT
  3615. "reply post free pool (0x%p): depth(%d),"
  3616. "element_size(%d), pool_size(%d kB)\n", ioc->name,
  3617. ioc->reply_post[i].reply_post_free,
  3618. ioc->reply_post_queue_depth, 8, sz/1024));
  3619. dinitprintk(ioc, pr_info(MPT3SAS_FMT
  3620. "reply_post_free_dma = (0x%llx)\n", ioc->name,
  3621. (unsigned long long)
  3622. ioc->reply_post[i].reply_post_free_dma));
  3623. total_sz += sz;
  3624. } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
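/*
 * Layout note: with rdpq_array_enable set, the loop above runs once per
 * reply queue and each queue gets its own pool allocation; without it,
 * sz was multiplied by reply_queue_count earlier, one contiguous
 * allocation backs every queue, and the loop body executes exactly once.
 */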
  3625. if (ioc->dma_mask == 64) {
  3626. if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
  3627. pr_warn(MPT3SAS_FMT
  3628. "no suitable consistent DMA mask for %s\n",
  3629. ioc->name, pci_name(ioc->pdev));
  3630. goto out;
  3631. }
  3632. }
  3633. ioc->scsiio_depth = ioc->hba_queue_depth -
  3634. ioc->hi_priority_depth - ioc->internal_depth;
  3635. /* set the scsi host can_queue depth
  3636. * with some internal commands that could be outstanding
  3637. */
  3638. ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
  3639. dinitprintk(ioc, pr_info(MPT3SAS_FMT
  3640. "scsi host: can_queue depth (%d)\n",
  3641. ioc->name, ioc->shost->can_queue));
/* contiguous pool for request and chains, 16 byte align, one extra
* frame for smid=0
*/
  3645. ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
  3646. sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
  3647. /* hi-priority queue */
  3648. sz += (ioc->hi_priority_depth * ioc->request_sz);
  3649. /* internal queue */
  3650. sz += (ioc->internal_depth * ioc->request_sz);
  3651. ioc->request_dma_sz = sz;
  3652. ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
  3653. if (!ioc->request) {
  3654. pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
  3655. "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
  3656. "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
  3657. ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
  3658. if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
  3659. goto out;
  3660. retry_sz = 64;
  3661. ioc->hba_queue_depth -= retry_sz;
  3662. _base_release_memory_pools(ioc);
  3663. goto retry_allocation;
  3664. }
  3665. if (retry_sz)
  3666. pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
  3667. "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
  3668. "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
  3669. ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
  3670. /* hi-priority queue */
  3671. ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
  3672. ioc->request_sz);
  3673. ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
  3674. ioc->request_sz);
  3675. /* internal queue */
  3676. ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
  3677. ioc->request_sz);
  3678. ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
  3679. ioc->request_sz);
  3680. dinitprintk(ioc, pr_info(MPT3SAS_FMT
  3681. "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
  3682. ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz,
  3683. (ioc->hba_queue_depth * ioc->request_sz)/1024));
  3684. dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n",
  3685. ioc->name, (unsigned long long) ioc->request_dma));
  3686. total_sz += sz;
  3687. sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
  3688. ioc->scsi_lookup_pages = get_order(sz);
  3689. ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
  3690. GFP_KERNEL, ioc->scsi_lookup_pages);
  3691. if (!ioc->scsi_lookup) {
  3692. pr_err(MPT3SAS_FMT "scsi_lookup: get_free_pages failed, sz(%d)\n",
  3693. ioc->name, (int)sz);
  3694. goto out;
  3695. }
  3696. dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
  3697. ioc->name, ioc->request, ioc->scsiio_depth));
  3698. ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
  3699. sz = ioc->chain_depth * sizeof(struct chain_tracker);
  3700. ioc->chain_pages = get_order(sz);
  3701. ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
  3702. GFP_KERNEL, ioc->chain_pages);
  3703. if (!ioc->chain_lookup) {
  3704. pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages failed\n",
  3705. ioc->name);
  3706. goto out;
  3707. }
  3708. ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
  3709. ioc->chain_segment_sz, 16, 0);
  3710. if (!ioc->chain_dma_pool) {
  3711. pr_err(MPT3SAS_FMT "chain_dma_pool: dma_pool_create failed\n",
  3712. ioc->name);
  3713. goto out;
  3714. }
  3715. for (i = 0; i < ioc->chain_depth; i++) {
  3716. ioc->chain_lookup[i].chain_buffer = dma_pool_alloc(
  3717. ioc->chain_dma_pool , GFP_KERNEL,
  3718. &ioc->chain_lookup[i].chain_buffer_dma);
  3719. if (!ioc->chain_lookup[i].chain_buffer) {
  3720. ioc->chain_depth = i;
  3721. goto chain_done;
  3722. }
  3723. total_sz += ioc->chain_segment_sz;
  3724. }
  3725. chain_done:
  3726. dinitprintk(ioc, pr_info(MPT3SAS_FMT
  3727. "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
  3728. ioc->name, ioc->chain_depth, ioc->chain_segment_sz,
  3729. ((ioc->chain_depth * ioc->chain_segment_sz))/1024));
  3730. /* initialize hi-priority queue smid's */
  3731. ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
  3732. sizeof(struct request_tracker), GFP_KERNEL);
  3733. if (!ioc->hpr_lookup) {
  3734. pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n",
  3735. ioc->name);
  3736. goto out;
  3737. }
  3738. ioc->hi_priority_smid = ioc->scsiio_depth + 1;
  3739. dinitprintk(ioc, pr_info(MPT3SAS_FMT
  3740. "hi_priority(0x%p): depth(%d), start smid(%d)\n",
  3741. ioc->name, ioc->hi_priority,
  3742. ioc->hi_priority_depth, ioc->hi_priority_smid));
  3743. /* initialize internal queue smid's */
  3744. ioc->internal_lookup = kcalloc(ioc->internal_depth,
  3745. sizeof(struct request_tracker), GFP_KERNEL);
  3746. if (!ioc->internal_lookup) {
  3747. pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n",
  3748. ioc->name);
  3749. goto out;
  3750. }
  3751. ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
  3752. dinitprintk(ioc, pr_info(MPT3SAS_FMT
  3753. "internal(0x%p): depth(%d), start smid(%d)\n",
  3754. ioc->name, ioc->internal,
  3755. ioc->internal_depth, ioc->internal_smid));
  3756. /*
  3757. * The number of NVMe page sized blocks needed is:
  3758. * (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
  3759. * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry
  3760. * that is placed in the main message frame. 8 is the size of each PRP
  3761. * entry or PRP list pointer entry. 8 is subtracted from page_size
  3762. * because of the PRP list pointer entry at the end of a page, so this
  3763. * is not counted as a PRP entry. The 1 added page is a round up.
  3764. *
  3765. * To avoid allocation failures due to the amount of memory that could
  3766. * be required for NVMe PRP's, only each set of NVMe blocks will be
  3767. * contiguous, so a new set is allocated for each possible I/O.
  3768. */
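/*
 * Worked example (4 KiB controller page size assumed): with
 * sg_tablesize = 512, the PRP data past the first entry occupies
 * (512 * 8) - 1 = 4095 bytes, each page carries 4096 - 8 = 4088 bytes of
 * entries once the trailing PRP list pointer is reserved, so
 * 4095 / 4088 + 1 = 2 pages are allocated per possible I/O.
 */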
  3769. if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
  3770. nvme_blocks_needed =
  3771. (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
  3772. nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
  3773. nvme_blocks_needed++;
  3774. sz = nvme_blocks_needed * ioc->page_size;
  3775. ioc->pcie_sgl_dma_pool =
  3776. pci_pool_create("PCIe SGL pool", ioc->pdev, sz, 16, 0);
  3777. if (!ioc->pcie_sgl_dma_pool) {
  3778. pr_info(MPT3SAS_FMT
  3779. "PCIe SGL pool: pci_pool_create failed\n",
  3780. ioc->name);
  3781. goto out;
  3782. }
  3783. for (i = 0; i < ioc->scsiio_depth; i++) {
  3784. ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl =
  3785. pci_pool_alloc(ioc->pcie_sgl_dma_pool,
  3786. GFP_KERNEL,
  3787. &ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl_dma);
  3788. if (!ioc->scsi_lookup[i].pcie_sg_list.pcie_sgl) {
  3789. pr_info(MPT3SAS_FMT
  3790. "PCIe SGL pool: pci_pool_alloc failed\n",
  3791. ioc->name);
  3792. goto out;
  3793. }
  3794. }
  3795. dinitprintk(ioc, pr_info(MPT3SAS_FMT "PCIe sgl pool depth(%d), "
  3796. "element_size(%d), pool_size(%d kB)\n", ioc->name,
  3797. ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
  3798. total_sz += sz * ioc->scsiio_depth;
  3799. }
	/* sense buffers, 4 byte align */
	sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
	ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
		4, 0);
	if (!ioc->sense_dma_pool) {
		pr_err(MPT3SAS_FMT "sense pool: dma_pool_create failed\n",
			ioc->name);
		goto out;
	}
	ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
		&ioc->sense_dma);
	if (!ioc->sense) {
		pr_err(MPT3SAS_FMT "sense pool: dma_pool_alloc failed\n",
			ioc->name);
		goto out;
	}
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"sense pool(0x%p): depth(%d), element_size(%d), pool_size"
		"(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
		SCSI_SENSE_BUFFERSIZE, sz/1024));
	dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n",
		ioc->name, (unsigned long long)ioc->sense_dma));
	total_sz += sz;

	/* reply pool, 4 byte align */
	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
	ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
		4, 0);
	if (!ioc->reply_dma_pool) {
		pr_err(MPT3SAS_FMT "reply pool: dma_pool_create failed\n",
			ioc->name);
		goto out;
	}
	ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
		&ioc->reply_dma);
	if (!ioc->reply) {
		pr_err(MPT3SAS_FMT "reply pool: dma_pool_alloc failed\n",
			ioc->name);
		goto out;
	}
	ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
	ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
		ioc->name, ioc->reply,
		ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
	dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n",
		ioc->name, (unsigned long long)ioc->reply_dma));
	total_sz += sz;
	/* reply free queue, 16 byte align */
	sz = ioc->reply_free_queue_depth * 4;
	ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
		&ioc->pdev->dev, sz, 16, 0);
	if (!ioc->reply_free_dma_pool) {
		pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_create failed\n",
			ioc->name);
		goto out;
	}
	ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool, GFP_KERNEL,
		&ioc->reply_free_dma);
	if (!ioc->reply_free) {
		pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_alloc failed\n",
			ioc->name);
		goto out;
	}
	memset(ioc->reply_free, 0, sz);
	dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): "
		"depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
		ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"reply_free_dma (0x%llx)\n",
		ioc->name, (unsigned long long)ioc->reply_free_dma));
	total_sz += sz;
	ioc->config_page_sz = 512;
	ioc->config_page = pci_alloc_consistent(ioc->pdev,
		ioc->config_page_sz, &ioc->config_page_dma);
	if (!ioc->config_page) {
		pr_err(MPT3SAS_FMT
			"config page: pci_alloc_consistent failed\n",
			ioc->name);
		goto out;
	}
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
		"config page(0x%p): size(%d)\n",
		ioc->name, ioc->config_page, ioc->config_page_sz));
	dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n",
		ioc->name, (unsigned long long)ioc->config_page_dma));
	total_sz += ioc->config_page_sz;

	pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n",
		ioc->name, total_sz/1024);
	pr_info(MPT3SAS_FMT
		"Current Controller Queue Depth(%d), Max Controller Queue Depth(%d)\n",
		ioc->name, ioc->shost->can_queue, facts->RequestCredit);
	pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n",
		ioc->name, ioc->shost->sg_tablesize);
	return 0;

 out:
	return -ENOMEM;
}
/**
 * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
 * @ioc: Pointer to MPT_ADAPTER structure
 * @cooked: Request raw or cooked IOC state
 *
 * Returns all IOC Doorbell register bits if cooked==0, else just the
 * Doorbell bits in MPI2_IOC_STATE_MASK.
 */
u32
mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
{
	u32 s, sc;

	s = readl(&ioc->chip->Doorbell);
	sc = s & MPI2_IOC_STATE_MASK;
	return cooked ? sc : s;
}
/**
 * _base_wait_on_iocstate - waiting on a particular ioc state
 * @ioc: per adapter object
 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
 * @timeout: timeout in seconds
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
{
	u32 count, cntdn;
	u32 current_state;

	count = 0;
	cntdn = 1000 * timeout;
	do {
		current_state = mpt3sas_base_get_iocstate(ioc, 1);
		if (current_state == ioc_state)
			return 0;
		if (count && current_state == MPI2_IOC_STATE_FAULT)
			break;
		usleep_range(1000, 1500);
		count++;
	} while (--cntdn);
	return current_state;
}
static int
_base_diag_reset(struct MPT3SAS_ADAPTER *ioc);

/**
 * _base_wait_for_doorbell_int - waiting for controller interrupt (generated
 * by a write to the doorbell)
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Returns 0 for success, non-zero for failure.
 *
 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
 */
static int
_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
	u32 cntdn, count;
	u32 int_status;

	count = 0;
	cntdn = 1000 * timeout;
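	/* ~1 ms sleep per iteration below, so cntdn iterations approximate
	 * the requested timeout in seconds
	 */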
	do {
		int_status = readl(&ioc->chip->HostInterruptStatus);
		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
			dhsprintk(ioc, pr_info(MPT3SAS_FMT
				"%s: successful count(%d), timeout(%d)\n",
				ioc->name, __func__, count, timeout));
			return 0;
		}
		usleep_range(1000, 1500);
		count++;
	} while (--cntdn);
	pr_err(MPT3SAS_FMT
		"%s: failed due to timeout count(%d), int_status(%x)!\n",
		ioc->name, __func__, count, int_status);
	return -EFAULT;
}

static int
_base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
	u32 cntdn, count;
	u32 int_status;

	count = 0;
	cntdn = 2000 * timeout;
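	/* busy-wait variant: 500 us udelay per iteration, hence 2000
	 * iterations per second of timeout
	 */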
	do {
		int_status = readl(&ioc->chip->HostInterruptStatus);
		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
			dhsprintk(ioc, pr_info(MPT3SAS_FMT
				"%s: successful count(%d), timeout(%d)\n",
				ioc->name, __func__, count, timeout));
			return 0;
		}
		udelay(500);
		count++;
	} while (--cntdn);
	pr_err(MPT3SAS_FMT
		"%s: failed due to timeout count(%d), int_status(%x)!\n",
		ioc->name, __func__, count, int_status);
	return -EFAULT;
}
/**
 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Returns 0 for success, non-zero for failure.
 *
 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
 * doorbell.
 */
static int
_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
	u32 cntdn, count;
	u32 int_status;
	u32 doorbell;

	count = 0;
	cntdn = 1000 * timeout;
	do {
		int_status = readl(&ioc->chip->HostInterruptStatus);
		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
			dhsprintk(ioc, pr_info(MPT3SAS_FMT
				"%s: successful count(%d), timeout(%d)\n",
				ioc->name, __func__, count, timeout));
			return 0;
		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
			doorbell = readl(&ioc->chip->Doorbell);
			if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_FAULT) {
				mpt3sas_base_fault_info(ioc, doorbell);
				return -EFAULT;
			}
		} else if (int_status == 0xFFFFFFFF)
			goto out;
		usleep_range(1000, 1500);
		count++;
	} while (--cntdn);

 out:
	pr_err(MPT3SAS_FMT
		"%s: failed due to timeout count(%d), int_status(%x)!\n",
		ioc->name, __func__, count, int_status);
	return -EFAULT;
}
/**
 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
	u32 cntdn, count;
	u32 doorbell_reg;

	count = 0;
	cntdn = 1000 * timeout;
	do {
		doorbell_reg = readl(&ioc->chip->Doorbell);
		if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
			dhsprintk(ioc, pr_info(MPT3SAS_FMT
				"%s: successful count(%d), timeout(%d)\n",
				ioc->name, __func__, count, timeout));
			return 0;
		}
		usleep_range(1000, 1500);
		count++;
	} while (--cntdn);
	pr_err(MPT3SAS_FMT
		"%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
		ioc->name, __func__, count, doorbell_reg);
	return -EFAULT;
}
/**
 * _base_send_ioc_reset - send doorbell reset
 * @ioc: per adapter object
 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
 * @timeout: timeout in seconds
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
{
	u32 ioc_state;
	int r = 0;

	if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
		pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
			ioc->name, __func__);
		return -EFAULT;
	}

	if (!(ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
		return -EFAULT;

	pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);

	writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
		&ioc->chip->Doorbell);
	if ((_base_wait_for_doorbell_ack(ioc, 15))) {
		r = -EFAULT;
		goto out;
	}
	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
	if (ioc_state) {
		pr_err(MPT3SAS_FMT
			"%s: failed going to ready state (ioc_state=0x%x)\n",
			ioc->name, __func__, ioc_state);
		r = -EFAULT;
		goto out;
	}

 out:
	pr_info(MPT3SAS_FMT "message unit reset: %s\n",
		ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
	return r;
}
/**
 * _base_handshake_req_reply_wait - send request thru doorbell interface
 * @ioc: per adapter object
 * @request_bytes: request length
 * @request: pointer having request payload
 * @reply_bytes: reply length
 * @reply: pointer to reply payload
 * @timeout: timeout in seconds
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
	u32 *request, int reply_bytes, u16 *reply, int timeout)
{
	MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
	int i;
	u8 failed;
	__le32 *mfp;

	/* make sure doorbell is not in use */
	if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
		pr_err(MPT3SAS_FMT
			"doorbell is in use (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}

	/* clear pending doorbell interrupts from previous state changes */
	if (readl(&ioc->chip->HostInterruptStatus) &
	    MPI2_HIS_IOC2SYS_DB_STATUS)
		writel(0, &ioc->chip->HostInterruptStatus);

	/* send message to ioc */
	writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
	    ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
	    &ioc->chip->Doorbell);

	if ((_base_spin_on_doorbell_int(ioc, 5))) {
		pr_err(MPT3SAS_FMT
			"doorbell handshake int failed (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}
	writel(0, &ioc->chip->HostInterruptStatus);

	if ((_base_wait_for_doorbell_ack(ioc, 5))) {
		pr_err(MPT3SAS_FMT
			"doorbell handshake ack failed (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}

	/* send message 32-bits at a time */
	for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
		writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
		if ((_base_wait_for_doorbell_ack(ioc, 5)))
			failed = 1;
	}

	if (failed) {
		pr_err(MPT3SAS_FMT
			"doorbell handshake sending request failed (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}

	/* now wait for the reply */
	if ((_base_wait_for_doorbell_int(ioc, timeout))) {
		pr_err(MPT3SAS_FMT
			"doorbell handshake int failed (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}

	/* read the first two 16-bit words; they give the total length of
	 * the reply */
	reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
		& MPI2_DOORBELL_DATA_MASK);
	writel(0, &ioc->chip->HostInterruptStatus);
	if ((_base_wait_for_doorbell_int(ioc, 5))) {
		pr_err(MPT3SAS_FMT
			"doorbell handshake int failed (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}
	reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
		& MPI2_DOORBELL_DATA_MASK);
	writel(0, &ioc->chip->HostInterruptStatus);

	for (i = 2; i < default_reply->MsgLength * 2; i++) {
		if ((_base_wait_for_doorbell_int(ioc, 5))) {
			pr_err(MPT3SAS_FMT
				"doorbell handshake int failed (line=%d)\n",
				ioc->name, __LINE__);
			return -EFAULT;
		}
		if (i >= reply_bytes/2) /* overflow case */
			readl(&ioc->chip->Doorbell);
		else
			reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
				& MPI2_DOORBELL_DATA_MASK);
		writel(0, &ioc->chip->HostInterruptStatus);
	}

	_base_wait_for_doorbell_int(ioc, 5);
	if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
		dhsprintk(ioc, pr_info(MPT3SAS_FMT
			"doorbell is in use (line=%d)\n", ioc->name, __LINE__));
	}
	writel(0, &ioc->chip->HostInterruptStatus);

	if (ioc->logging_level & MPT_DEBUG_INIT) {
		mfp = (__le32 *)reply;
		pr_info("\toffset:data\n");
		for (i = 0; i < reply_bytes/4; i++)
			pr_info("\t[0x%02x]:%08x\n", i*4,
				le32_to_cpu(mfp[i]));
	}
	return 0;
}
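/*
 * Typical use of the handshake path (illustrative sketch only; it mirrors
 * the pattern used by _base_get_port_facts() and _base_get_ioc_facts()
 * further below):
 *
 *	Mpi2PortFactsRequest_t req;
 *	Mpi2PortFactsReply_t rep;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.Function = MPI2_FUNCTION_PORT_FACTS;
 *	r = _base_handshake_req_reply_wait(ioc, sizeof(req), (u32 *)&req,
 *	    sizeof(rep), (u16 *)&rep, 5);
 */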
/**
 * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
 * @ioc: per adapter object
 * @mpi_reply: the reply payload from FW
 * @mpi_request: the request payload sent to FW
 *
 * The SAS IO Unit Control Request message allows the host to perform
 * low-level operations, such as resets on the PHYs of the IO Unit.  It also
 * allows the host to obtain the IOC-assigned device handle for a device if
 * it has other identifying information about the device, and to remove IOC
 * resources associated with the device.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
	Mpi2SasIoUnitControlReply_t *mpi_reply,
	Mpi2SasIoUnitControlRequest_t *mpi_request)
{
	u16 smid;
	u32 ioc_state;
	bool issue_reset = false;
	int rc;
	void *request;
	u16 wait_state_count;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
		__func__));

	mutex_lock(&ioc->base_cmds.mutex);

	if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
		pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
			ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	wait_state_count = 0;
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		if (wait_state_count++ == 10) {
			pr_err(MPT3SAS_FMT
				"%s: failed due to ioc not operational\n",
				ioc->name, __func__);
			rc = -EFAULT;
			goto out;
		}
		ssleep(1);
		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
		pr_info(MPT3SAS_FMT
			"%s: waiting for operational state(count=%d)\n",
			ioc->name, __func__, wait_state_count);
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
			ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	ioc->base_cmds.status = MPT3_CMD_PENDING;
	request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->base_cmds.smid = smid;
	memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
	if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
		ioc->ioc_link_reset_in_progress = 1;
	init_completion(&ioc->base_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->base_cmds.done,
		msecs_to_jiffies(10000));
	if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
	    ioc->ioc_link_reset_in_progress)
		ioc->ioc_link_reset_in_progress = 0;
	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
		pr_err(MPT3SAS_FMT "%s: timeout\n",
			ioc->name, __func__);
		_debug_dump_mf(mpi_request,
			sizeof(Mpi2SasIoUnitControlRequest_t)/4);
		if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
			issue_reset = true;
		goto issue_host_reset;
	}
	if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
		memcpy(mpi_reply, ioc->base_cmds.reply,
			sizeof(Mpi2SasIoUnitControlReply_t));
	else
		memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
	goto out;

 issue_host_reset:
	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
	rc = -EFAULT;
 out:
	mutex_unlock(&ioc->base_cmds.mutex);
	return rc;
}
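/*
 * Illustrative caller sketch (hypothetical values, not part of the driver):
 * issuing a PHY hard reset on phy 0 through the interface above.
 *
 *	Mpi2SasIoUnitControlRequest_t req;
 *	Mpi2SasIoUnitControlReply_t rep;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
 *	req.Operation = MPI2_SAS_OP_PHY_HARD_RESET;
 *	req.PhyNum = 0;
 *	if (mpt3sas_base_sas_iounit_control(ioc, &rep, &req))
 *		... handle timeout/failure (adapter may have been reset) ...
 */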
/**
 * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
 * @ioc: per adapter object
 * @mpi_reply: the reply payload from FW
 * @mpi_request: the request payload sent to FW
 *
 * The SCSI Enclosure Processor request message causes the IOC to
 * communicate with SES devices to control LED status signals.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
	Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
{
	u16 smid;
	u32 ioc_state;
	bool issue_reset = false;
	int rc;
	void *request;
	u16 wait_state_count;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
		__func__));

	mutex_lock(&ioc->base_cmds.mutex);

	if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
		pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
			ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	wait_state_count = 0;
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		if (wait_state_count++ == 10) {
			pr_err(MPT3SAS_FMT
				"%s: failed due to ioc not operational\n",
				ioc->name, __func__);
			rc = -EFAULT;
			goto out;
		}
		ssleep(1);
		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
		pr_info(MPT3SAS_FMT
			"%s: waiting for operational state(count=%d)\n",
			ioc->name, __func__, wait_state_count);
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
			ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	ioc->base_cmds.status = MPT3_CMD_PENDING;
	request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->base_cmds.smid = smid;
	memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t));
	init_completion(&ioc->base_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->base_cmds.done,
		msecs_to_jiffies(10000));
	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
		pr_err(MPT3SAS_FMT "%s: timeout\n",
			ioc->name, __func__);
		_debug_dump_mf(mpi_request,
			sizeof(Mpi2SepRequest_t)/4);
		if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
			issue_reset = true;
		goto issue_host_reset;
	}
	if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
		memcpy(mpi_reply, ioc->base_cmds.reply,
			sizeof(Mpi2SepReply_t));
	else
		memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
	goto out;

 issue_host_reset:
	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
	rc = -EFAULT;
 out:
	mutex_unlock(&ioc->base_cmds.mutex);
	return rc;
}
/**
 * _base_get_port_facts - obtain port facts reply and save in ioc
 * @ioc: per adapter object
 * @port: port number to query
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
{
	Mpi2PortFactsRequest_t mpi_request;
	Mpi2PortFactsReply_t mpi_reply;
	struct mpt3sas_port_facts *pfacts;
	int mpi_reply_sz, mpi_request_sz, r;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
		__func__));

	mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
	mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
	memset(&mpi_request, 0, mpi_request_sz);
	mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
	mpi_request.PortNumber = port;
	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
		(u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);

	if (r != 0) {
		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
			ioc->name, __func__, r);
		return r;
	}

	pfacts = &ioc->pfacts[port];
	memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
	pfacts->PortNumber = mpi_reply.PortNumber;
	pfacts->VP_ID = mpi_reply.VP_ID;
	pfacts->VF_ID = mpi_reply.VF_ID;
	pfacts->MaxPostedCmdBuffers =
		le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
	return 0;
}
/**
 * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
 * @ioc: per adapter object
 * @timeout: timeout in seconds
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
	u32 ioc_state;
	int rc;

	dinitprintk(ioc, printk(MPT3SAS_FMT "%s\n", ioc->name,
		__func__));

	if (ioc->pci_error_recovery) {
		dfailprintk(ioc, printk(MPT3SAS_FMT
			"%s: host in pci error recovery\n", ioc->name, __func__));
		return -EFAULT;
	}

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	dhsprintk(ioc, printk(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
		ioc->name, __func__, ioc_state));

	if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
	    (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
		return 0;

	if (ioc_state & MPI2_DOORBELL_USED) {
		dhsprintk(ioc, printk(MPT3SAS_FMT
			"unexpected doorbell active!\n", ioc->name));
		goto issue_diag_reset;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_base_fault_info(ioc, ioc_state &
			MPI2_DOORBELL_DATA_MASK);
		goto issue_diag_reset;
	}

	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
	if (ioc_state) {
		dfailprintk(ioc, printk(MPT3SAS_FMT
			"%s: failed going to ready state (ioc_state=0x%x)\n",
			ioc->name, __func__, ioc_state));
		return -EFAULT;
	}
	/* the wait succeeded; don't fall through into the diag reset */
	return 0;

 issue_diag_reset:
	rc = _base_diag_reset(ioc);
	return rc;
}
/**
 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2IOCFactsRequest_t mpi_request;
	Mpi2IOCFactsReply_t mpi_reply;
	struct mpt3sas_facts *facts;
	int mpi_reply_sz, mpi_request_sz, r;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
		__func__));

	r = _base_wait_for_iocstate(ioc, 10);
	if (r) {
		dfailprintk(ioc, printk(MPT3SAS_FMT
			"%s: failed getting to correct state\n",
			ioc->name, __func__));
		return r;
	}
	mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
	mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
	memset(&mpi_request, 0, mpi_request_sz);
	mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
		(u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);

	if (r != 0) {
		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
			ioc->name, __func__, r);
		return r;
	}

	facts = &ioc->facts;
	memset(facts, 0, sizeof(struct mpt3sas_facts));
	facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
	facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
	facts->VP_ID = mpi_reply.VP_ID;
	facts->VF_ID = mpi_reply.VF_ID;
	facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
	facts->MaxChainDepth = mpi_reply.MaxChainDepth;
	facts->WhoInit = mpi_reply.WhoInit;
	facts->NumberOfPorts = mpi_reply.NumberOfPorts;
	facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
	facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
	facts->MaxReplyDescriptorPostQueueDepth =
		le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
	facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
	facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
	if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
		ioc->ir_firmware = 1;
	if ((facts->IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
		ioc->rdpq_array_capable = 1;
	if (facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
		ioc->atomic_desc_capable = 1;
	facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
	facts->IOCRequestFrameSize =
		le16_to_cpu(mpi_reply.IOCRequestFrameSize);
	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		facts->IOCMaxChainSegmentSize =
			le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
	}
	facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
	facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
	ioc->shost->max_id = -1;
	facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
	facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
	facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
	facts->HighPriorityCredit =
		le16_to_cpu(mpi_reply.HighPriorityCredit);
	facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
	facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
	facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;

	/*
	 * Get the Page Size from IOC Facts. If it's 0, default to 4k.
	 */
	ioc->page_size = 1 << facts->CurrentHostPageSize;
	if (ioc->page_size == 1) {
		pr_info(MPT3SAS_FMT "CurrentHostPageSize is 0: Setting "
			"default host page size to 4k\n", ioc->name);
		ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
	}
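	/* e.g. a reported CurrentHostPageSize of 12 yields
	 * 1 << 12 = 4096 bytes, i.e. the usual 4k host page
	 */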
  4571. dinitprintk(ioc, pr_info(MPT3SAS_FMT "CurrentHostPageSize(%d)\n",
  4572. ioc->name, facts->CurrentHostPageSize));
  4573. dinitprintk(ioc, pr_info(MPT3SAS_FMT
  4574. "hba queue depth(%d), max chains per io(%d)\n",
  4575. ioc->name, facts->RequestCredit,
  4576. facts->MaxChainDepth));
  4577. dinitprintk(ioc, pr_info(MPT3SAS_FMT
  4578. "request frame size(%d), reply frame size(%d)\n", ioc->name,
  4579. facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
  4580. return 0;
  4581. }
/**
 * _base_send_ioc_init - send ioc_init to firmware
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2IOCInitRequest_t mpi_request;
	Mpi2IOCInitReply_t mpi_reply;
	int i, r = 0;
	ktime_t current_time;
	u16 ioc_status;
	u32 reply_post_free_array_sz = 0;
	Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL;
	dma_addr_t reply_post_free_array_dma;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
		__func__));

	memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
	mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
	mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	mpi_request.VF_ID = 0; /* TODO */
	mpi_request.VP_ID = 0;
	mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
	mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
	mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K;

	if (_base_is_controller_msix_enabled(ioc))
		mpi_request.HostMSIxVectors = ioc->reply_queue_count;
	mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
	mpi_request.ReplyDescriptorPostQueueDepth =
		cpu_to_le16(ioc->reply_post_queue_depth);
	mpi_request.ReplyFreeQueueDepth =
		cpu_to_le16(ioc->reply_free_queue_depth);

	mpi_request.SenseBufferAddressHigh =
		cpu_to_le32((u64)ioc->sense_dma >> 32);
	mpi_request.SystemReplyAddressHigh =
		cpu_to_le32((u64)ioc->reply_dma >> 32);
	mpi_request.SystemRequestFrameBaseAddress =
		cpu_to_le64((u64)ioc->request_dma);
	mpi_request.ReplyFreeQueueAddress =
		cpu_to_le64((u64)ioc->reply_free_dma);

	if (ioc->rdpq_array_enable) {
		reply_post_free_array_sz = ioc->reply_queue_count *
			sizeof(Mpi2IOCInitRDPQArrayEntry);
		reply_post_free_array = pci_alloc_consistent(ioc->pdev,
			reply_post_free_array_sz, &reply_post_free_array_dma);
		if (!reply_post_free_array) {
			pr_err(MPT3SAS_FMT
				"reply_post_free_array: pci_alloc_consistent failed\n",
				ioc->name);
			r = -ENOMEM;
			goto out;
		}
		memset(reply_post_free_array, 0, reply_post_free_array_sz);
		for (i = 0; i < ioc->reply_queue_count; i++)
			reply_post_free_array[i].RDPQBaseAddress =
				cpu_to_le64(
				(u64)ioc->reply_post[i].reply_post_free_dma);
		mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
		mpi_request.ReplyDescriptorPostQueueAddress =
			cpu_to_le64((u64)reply_post_free_array_dma);
	} else {
		mpi_request.ReplyDescriptorPostQueueAddress =
			cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
	}

	/* This time stamp specifies the number of milliseconds
	 * since epoch ~ midnight January 1, 1970.
	 */
	current_time = ktime_get_real();
	mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));

	if (ioc->logging_level & MPT_DEBUG_INIT) {
		__le32 *mfp;
		int i;

		mfp = (__le32 *)&mpi_request;
		pr_info("\toffset:data\n");
		for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
			pr_info("\t[0x%02x]:%08x\n", i*4,
				le32_to_cpu(mfp[i]));
	}

	r = _base_handshake_req_reply_wait(ioc,
		sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
		sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);

	if (r != 0) {
		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
			ioc->name, __func__, r);
		goto out;
	}

	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
	    mpi_reply.IOCLogInfo) {
		pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
		r = -EIO;
	}

 out:
	if (reply_post_free_array)
		pci_free_consistent(ioc->pdev, reply_post_free_array_sz,
			reply_post_free_array,
			reply_post_free_array_dma);
	return r;
}
/**
 * mpt3sas_port_enable_done - command completion routine for port enable
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32-bit addr)
 *
 * Return 1 meaning mf should be freed from _base_interrupt,
 * 0 means the mf is freed from this function.
 */
u8
mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply)
{
	MPI2DefaultReply_t *mpi_reply;
	u16 ioc_status;

	if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
		return 1;
	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
	if (!mpi_reply)
		return 1;
	if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
		return 1;

	ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
	ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
	ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
	memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
		ioc->port_enable_failed = 1;

	if (ioc->is_driver_loading) {
		if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
			mpt3sas_port_enable_complete(ioc);
			return 1;
		} else {
			ioc->start_scan_failed = ioc_status;
			ioc->start_scan = 0;
			return 1;
		}
	}
	complete(&ioc->port_enable_cmds.done);
	return 1;
}
/**
 * _base_send_port_enable - send port_enable (discovery stuff) to firmware
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2PortEnableRequest_t *mpi_request;
	Mpi2PortEnableReply_t *mpi_reply;
	int r = 0;
	u16 smid;
	u16 ioc_status;

	pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);

	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
			ioc->name, __func__);
		return -EAGAIN;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
	if (!smid) {
		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
			ioc->name, __func__);
		return -EAGAIN;
	}

	ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->port_enable_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;

	init_completion(&ioc->port_enable_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
	if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
		pr_err(MPT3SAS_FMT "%s: timeout\n",
			ioc->name, __func__);
		_debug_dump_mf(mpi_request,
			sizeof(Mpi2PortEnableRequest_t)/4);
		if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
			r = -EFAULT;
		else
			r = -ETIME;
		goto out;
	}

	mpi_reply = ioc->port_enable_cmds.reply;
	ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
		pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
			ioc->name, __func__, ioc_status);
		r = -EFAULT;
		goto out;
	}

 out:
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
	pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
		"SUCCESS" : "FAILED"));
	return r;
}
/**
 * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2PortEnableRequest_t *mpi_request;
	u16 smid;

	pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);

	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
			ioc->name, __func__);
		return -EAGAIN;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
	if (!smid) {
		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
			ioc->name, __func__);
		return -EAGAIN;
	}

	ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->port_enable_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;

	ioc->put_smid_default(ioc, smid);
	return 0;
}
/**
 * _base_determine_wait_on_discovery - disposition
 * @ioc: per adapter object
 *
 * Decide whether to wait on discovery to complete. Used to either
 * locate boot device, or report volumes ahead of physical devices.
 *
 * Returns 1 for wait, 0 for don't wait
 */
static int
_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
{
	/* We wait for discovery to complete if IR firmware is loaded.
	 * The sas topology events arrive before PD events, so we need time
	 * to turn on the bit in ioc->pd_handles to indicate a PD.
	 * Also, it may be required to report volumes ahead of physical
	 * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
	 */
	if (ioc->ir_firmware)
		return 1;

	/* if no Bios, then we don't need to wait */
	if (!ioc->bios_pg3.BiosVersion)
		return 0;

	/* Bios is present, so we drop down here.
	 *
	 * If there are any entries in the Bios Page 2, then we wait
	 * for discovery to complete.
	 */

	/* Current Boot Device */
	if ((ioc->bios_pg2.CurrentBootDeviceForm &
	    MPI2_BIOSPAGE2_FORM_MASK) ==
	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
	/* Request Boot Device */
	   (ioc->bios_pg2.ReqBootDeviceForm &
	    MPI2_BIOSPAGE2_FORM_MASK) ==
	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
	/* Alternate Request Boot Device */
	   (ioc->bios_pg2.ReqAltBootDeviceForm &
	    MPI2_BIOSPAGE2_FORM_MASK) ==
	    MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
		return 0;

	return 1;
}
/**
 * _base_unmask_events - turn on notification for this event
 * @ioc: per adapter object
 * @event: firmware event
 *
 * The mask is stored in ioc->event_masks.
 */
static void
_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
{
	u32 desired_event;

	if (event >= 128)
		return;

	desired_event = (1 << (event % 32));

	if (event < 32)
		ioc->event_masks[0] &= ~desired_event;
	else if (event < 64)
		ioc->event_masks[1] &= ~desired_event;
	else if (event < 96)
		ioc->event_masks[2] &= ~desired_event;
	else if (event < 128)
		ioc->event_masks[3] &= ~desired_event;
}
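/*
 * Worked example (hypothetical event number): for event 0x46 (70 decimal),
 * 70 falls in the 64..95 range, so the bit cleared is
 * event_masks[2] &= ~(1 << (70 % 32)), i.e. bit 6 of the third mask word.
 */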
/**
 * _base_event_notification - send event notification
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_event_notification(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2EventNotificationRequest_t *mpi_request;
	u16 smid;
	int r = 0;
	int i;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
		__func__));

	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
			ioc->name, __func__);
		return -EAGAIN;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
			ioc->name, __func__);
		return -EAGAIN;
	}
	ioc->base_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->base_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
	mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;
	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		mpi_request->EventMasks[i] =
			cpu_to_le32(ioc->event_masks[i]);
	init_completion(&ioc->base_cmds.done);
	ioc->put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
		pr_err(MPT3SAS_FMT "%s: timeout\n",
			ioc->name, __func__);
		_debug_dump_mf(mpi_request,
			sizeof(Mpi2EventNotificationRequest_t)/4);
		if (ioc->base_cmds.status & MPT3_CMD_RESET)
			r = -EFAULT;
		else
			r = -ETIME;
	} else
		dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
			ioc->name, __func__));
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
	return r;
}
/**
 * mpt3sas_base_validate_event_type - validating event types
 * @ioc: per adapter object
 * @event_type: firmware event type bitmap
 *
 * This will turn on firmware event notification when an application
 * asks for that event. We don't mask events that are already enabled.
 */
void
mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
{
	int i, j;
	u32 event_mask, desired_event;
	u8 send_update_to_fw;

	for (i = 0, send_update_to_fw = 0; i <
	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
		event_mask = ~event_type[i];
		desired_event = 1;
		for (j = 0; j < 32; j++) {
			if (!(event_mask & desired_event) &&
			    (ioc->event_masks[i] & desired_event)) {
				ioc->event_masks[i] &= ~desired_event;
				send_update_to_fw = 1;
			}
			desired_event = (desired_event << 1);
		}
	}

	if (!send_update_to_fw)
		return;

	mutex_lock(&ioc->base_cmds.mutex);
	_base_event_notification(ioc);
	mutex_unlock(&ioc->base_cmds.mutex);
}
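/*
 * Sketch of a caller (illustrative only, not part of the driver): an
 * application-supplied bitmap of MPI2_EVENT_NOTIFY_EVENTMASK_WORDS words,
 * where each set bit requests delivery of the corresponding firmware event:
 *
 *	u32 wanted[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS] = { 0 };
 *
 *	wanted[0] |= 1 << (MPI2_EVENT_SAS_DISCOVERY % 32);
 *	mpt3sas_base_validate_event_type(ioc, wanted);
 */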
/**
 * _base_diag_reset - the "big hammer" start of day reset
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
{
	u32 host_diagnostic;
	u32 ioc_state;
	u32 count;
	u32 hcb_size;

	pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);

	drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
		ioc->name));

	count = 0;
	do {
		/* Write magic sequence to WriteSequence register
		 * Loop until in diagnostic mode
		 */
		drsprintk(ioc, pr_info(MPT3SAS_FMT
			"write magic sequence\n", ioc->name));
		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);

		/* wait 100 msec */
		msleep(100);

		if (count++ > 20)
			goto out;

		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
		drsprintk(ioc, pr_info(MPT3SAS_FMT
			"wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
			ioc->name, count, host_diagnostic));

	} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);

	hcb_size = readl(&ioc->chip->HCBSize);

	drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
		ioc->name));
	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
		&ioc->chip->HostDiagnostic);

	/* This delay allows the chip PCIe hardware time to finish reset tasks */
	msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);

	/* Approximately 300 second max wait */
	for (count = 0; count < (300000000 /
	    MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {

		host_diagnostic = readl(&ioc->chip->HostDiagnostic);

		if (host_diagnostic == 0xFFFFFFFF)
			goto out;
		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
			break;

		msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
	}

	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {

		drsprintk(ioc, pr_info(MPT3SAS_FMT
			"restart the adapter assuming the HCB Address points to good F/W\n",
			ioc->name));
		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
		writel(host_diagnostic, &ioc->chip->HostDiagnostic);

		drsprintk(ioc, pr_info(MPT3SAS_FMT
			"re-enable the HCDW\n", ioc->name));
		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
			&ioc->chip->HCBSize);
	}

	drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
		ioc->name));
	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
		&ioc->chip->HostDiagnostic);

	drsprintk(ioc, pr_info(MPT3SAS_FMT
		"disable writes to the diagnostic register\n", ioc->name));
	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);

	drsprintk(ioc, pr_info(MPT3SAS_FMT
		"Wait for FW to go to the READY state\n", ioc->name));
	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
	if (ioc_state) {
		pr_err(MPT3SAS_FMT
			"%s: failed going to ready state (ioc_state=0x%x)\n",
			ioc->name, __func__, ioc_state);
		goto out;
	}

	pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
	return 0;

 out:
	pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
	return -EFAULT;
}
/**
 * _base_make_ioc_ready - put controller in READY state
 * @ioc: per adapter object
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
{
	u32 ioc_state;
	int rc;
	int count;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
		__func__));

	if (ioc->pci_error_recovery)
		return 0;

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
		ioc->name, __func__, ioc_state));

	/* if in RESET state, it should move to READY state shortly */
	count = 0;
	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
		while ((ioc_state & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_READY) {
			if (count++ == 10) {
				pr_err(MPT3SAS_FMT
					"%s: failed going to ready state (ioc_state=0x%x)\n",
					ioc->name, __func__, ioc_state);
				return -EFAULT;
			}
			ssleep(1);
			ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		}
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
		return 0;

	if (ioc_state & MPI2_DOORBELL_USED) {
		dhsprintk(ioc, pr_info(MPT3SAS_FMT
			"unexpected doorbell active!\n",
			ioc->name));
		goto issue_diag_reset;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_base_fault_info(ioc, ioc_state &
			MPI2_DOORBELL_DATA_MASK);
		goto issue_diag_reset;
	}

	if (type == FORCE_BIG_HAMMER)
		goto issue_diag_reset;

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
		if (!(_base_send_ioc_reset(ioc,
		    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15)))
			return 0;

 issue_diag_reset:
	rc = _base_diag_reset(ioc);
	return rc;
}
/**
 * _base_make_ioc_operational - put controller in OPERATIONAL state
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
{
	int r, i, index;
	unsigned long flags;
	u32 reply_address;
	u16 smid;
	struct _tr_list *delayed_tr, *delayed_tr_next;
	struct _sc_list *delayed_sc, *delayed_sc_next;
	struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
	u8 hide_flag;
	struct adapter_reply_queue *reply_q;
	Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
		__func__));

	/* clean the delayed target reset list */
	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
	    &ioc->delayed_tr_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}

	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
	    &ioc->delayed_tr_volume_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}

	list_for_each_entry_safe(delayed_sc, delayed_sc_next,
	    &ioc->delayed_sc_list, list) {
		list_del(&delayed_sc->list);
		kfree(delayed_sc);
	}

	list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
	    &ioc->delayed_event_ack_list, list) {
		list_del(&delayed_event_ack->list);
		kfree(delayed_event_ack);
	}

	/* initialize the scsi lookup free list */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	INIT_LIST_HEAD(&ioc->free_list);
	smid = 1;
	for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
		INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
		ioc->scsi_lookup[i].cb_idx = 0xFF;
		ioc->scsi_lookup[i].smid = smid;
		ioc->scsi_lookup[i].scmd = NULL;
		ioc->scsi_lookup[i].direct_io = 0;
		list_add_tail(&ioc->scsi_lookup[i].tracker_list,
			&ioc->free_list);
	}

	/* hi-priority queue */
	INIT_LIST_HEAD(&ioc->hpr_free_list);
	smid = ioc->hi_priority_smid;
	for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
		ioc->hpr_lookup[i].cb_idx = 0xFF;
		ioc->hpr_lookup[i].smid = smid;
		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
			&ioc->hpr_free_list);
	}

	/* internal queue */
	INIT_LIST_HEAD(&ioc->internal_free_list);
	smid = ioc->internal_smid;
	for (i = 0; i < ioc->internal_depth; i++, smid++) {
		ioc->internal_lookup[i].cb_idx = 0xFF;
		ioc->internal_lookup[i].smid = smid;
		list_add_tail(&ioc->internal_lookup[i].tracker_list,
			&ioc->internal_free_list);
	}

	/* chain pool */
	INIT_LIST_HEAD(&ioc->free_chain_list);
	for (i = 0; i < ioc->chain_depth; i++)
		list_add_tail(&ioc->chain_lookup[i].tracker_list,
			&ioc->free_chain_list);

	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	/* initialize Reply Free Queue */
	for (i = 0, reply_address = (u32)ioc->reply_dma ;
	    i < ioc->reply_free_queue_depth ; i++, reply_address +=
	    ioc->reply_sz)
		ioc->reply_free[i] = cpu_to_le32(reply_address);

	/* initialize reply queues */
	if (ioc->is_driver_loading)
		_base_assign_reply_queues(ioc);

	/* initialize Reply Post Free Queue */
	index = 0;
	reply_post_free_contig = ioc->reply_post[0].reply_post_free;
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		/*
		 * If RDPQ is enabled, switch to the next allocation.
		 * Otherwise advance within the contiguous region.
		 */
		if (ioc->rdpq_array_enable) {
			reply_q->reply_post_free =
				ioc->reply_post[index++].reply_post_free;
		} else {
			reply_q->reply_post_free = reply_post_free_contig;
			reply_post_free_contig += ioc->reply_post_queue_depth;
		}

		reply_q->reply_post_host_index = 0;
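		/* a descriptor with all bits set (ULLONG_MAX) is treated as
		 * unused, so pre-fill the whole queue with that pattern
		 */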
		for (i = 0; i < ioc->reply_post_queue_depth; i++)
			reply_q->reply_post_free[i].Words =
				cpu_to_le64(ULLONG_MAX);
		if (!_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_free_queue;
	}
 skip_init_reply_post_free_queue:

	r = _base_send_ioc_init(ioc);
	if (r)
		return r;

	/* initialize reply free host index */
	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);

	/* initialize reply post host index */
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (ioc->combined_reply_queue)
			writel((reply_q->msix_index & 7)<<
				MPI2_RPHI_MSIX_INDEX_SHIFT,
				ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
		else
			writel(reply_q->msix_index <<
				MPI2_RPHI_MSIX_INDEX_SHIFT,
				&ioc->chip->ReplyPostHostIndex);

		if (!_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_host_index;
	}
 skip_init_reply_post_host_index:

	_base_unmask_interrupts(ioc);
	r = _base_event_notification(ioc);
	if (r)
		return r;

	_base_static_config_pages(ioc);

	if (ioc->is_driver_loading) {
		if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
		    == 0x80) {
			hide_flag = (u8) (
				le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
				MFG_PAGE10_HIDE_SSDS_MASK);
			if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
				ioc->mfg_pg10_hide_flag = hide_flag;
		}
		ioc->wait_for_discovery_to_complete =
			_base_determine_wait_on_discovery(ioc);

		return r; /* scan_start and scan_finished support */
	}

	r = _base_send_port_enable(ioc);
	if (r)
		return r;

	return r;
}
/**
 * mpt3sas_base_free_resources - free controller resources
 * @ioc: per adapter object
 *
 * Return nothing.
 */
void
mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
{
	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* synchronizing freeing resource with pci_access_mutex lock */
	mutex_lock(&ioc->pci_access_mutex);
	if (ioc->chip_phys && ioc->chip) {
		_base_mask_interrupts(ioc);
		ioc->shost_recovery = 1;
		_base_make_ioc_ready(ioc, SOFT_RESET);
		ioc->shost_recovery = 0;
	}

	mpt3sas_base_unmap_resources(ioc);
	mutex_unlock(&ioc->pci_access_mutex);
}
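
/*
 * Illustrative sketch, not driver code: pci_access_mutex is the same lock
 * the ioctl/sysfs paths are expected to take before touching the BARs, so
 * teardown here cannot race with register access. A reader on another path
 * would follow roughly this (assumed) pattern:
 *
 *	mutex_lock(&ioc->pci_access_mutex);
 *	if (ioc->pci_error_recovery || ioc->remove_host) {
 *		mutex_unlock(&ioc->pci_access_mutex);
 *		return -EFAULT;
 *	}
 *	(... read or write ioc->chip registers ...)
 *	mutex_unlock(&ioc->pci_access_mutex);
 */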
/**
 * mpt3sas_base_attach - attach controller instance
 * @ioc: per adapter object
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
{
	int r, i;
	int cpu_id, last_cpu_id = 0;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* setup cpu_msix_table */
	ioc->cpu_count = num_online_cpus();
	for_each_online_cpu(cpu_id)
		last_cpu_id = cpu_id;
	ioc->cpu_msix_table_sz = last_cpu_id + 1;
	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
	ioc->reply_queue_count = 1;
	if (!ioc->cpu_msix_table) {
		dfailprintk(ioc, pr_info(MPT3SAS_FMT
		    "allocation for cpu_msix_table failed!!!\n",
		    ioc->name));
		r = -ENOMEM;
		goto out_free_resources;
	}

	if (ioc->is_warpdrive) {
		ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
		    sizeof(resource_size_t *), GFP_KERNEL);
		if (!ioc->reply_post_host_index) {
			dfailprintk(ioc, pr_info(MPT3SAS_FMT
			    "allocation for reply_post_host_index failed!!!\n",
			    ioc->name));
			r = -ENOMEM;
			goto out_free_resources;
		}
	}

	ioc->rdpq_array_enable_assigned = 0;
	ioc->dma_mask = 0;
	r = mpt3sas_base_map_resources(ioc);
	if (r)
		goto out_free_resources;

	pci_set_drvdata(ioc->pdev, ioc->shost);
	r = _base_get_ioc_facts(ioc);
	if (r)
		goto out_free_resources;

	switch (ioc->hba_mpi_version_belonged) {
	case MPI2_VERSION:
		ioc->build_sg_scmd = &_base_build_sg_scmd;
		ioc->build_sg = &_base_build_sg;
		ioc->build_zero_len_sge = &_base_build_zero_len_sge;
		break;
	case MPI25_VERSION:
	case MPI26_VERSION:
		/*
		 * In SAS3.0, SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU,
		 * Target Assist, and Target Status all require the IEEE
		 * formatted scatter gather elements.
		 */
		ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
		ioc->build_sg = &_base_build_sg_ieee;
		ioc->build_nvme_prp = &_base_build_nvme_prp;
		ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
		ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
		break;
	}

	if (ioc->atomic_desc_capable) {
		ioc->put_smid_default = &_base_put_smid_default_atomic;
		ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
		ioc->put_smid_fast_path = &_base_put_smid_fast_path_atomic;
		ioc->put_smid_hi_priority = &_base_put_smid_hi_priority_atomic;
		ioc->put_smid_nvme_encap = &_base_put_smid_nvme_encap_atomic;
	} else {
		ioc->put_smid_default = &_base_put_smid_default;
		ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
		ioc->put_smid_fast_path = &_base_put_smid_fast_path;
		ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
		ioc->put_smid_nvme_encap = &_base_put_smid_nvme_encap;
	}
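
	/*
	 * Illustrative sketch, not driver code: the atomic variants above
	 * post a request descriptor with a single 32-bit MMIO write, while
	 * the default variants post the full 64-bit descriptor. Register
	 * names come from the MPI headers; the lock argument is an
	 * assumption. Roughly:
	 *
	 *	writel(cpu_to_le32(descriptor.u.low),
	 *	    &ioc->chip->AtomicRequestDescriptorPost);
	 *
	 * versus a 64-bit post that must be serialized on 32-bit platforms:
	 *
	 *	_base_writeq(descriptor.Words,
	 *	    &ioc->chip->RequestDescriptorPostLow, &writeq_lock);
	 */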
	/*
	 * These function pointers are for other requests that don't
	 * require IEEE scatter gather elements.
	 *
	 * For example Configuration Pages and SAS IOUNIT Control don't.
	 */
	ioc->build_sg_mpi = &_base_build_sg;
	ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;

	r = _base_make_ioc_ready(ioc, SOFT_RESET);
	if (r)
		goto out_free_resources;

	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
	    sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
	if (!ioc->pfacts) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
		r = _base_get_port_facts(ioc, i);
		if (r)
			goto out_free_resources;
	}

	r = _base_allocate_memory_pools(ioc);
	if (r)
		goto out_free_resources;

	init_waitqueue_head(&ioc->reset_wq);

	/* allocate memory pd handle bitmask list */
	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
	if (ioc->facts.MaxDevHandle % 8)
		ioc->pd_handles_sz++;
	ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
	    GFP_KERNEL);
	if (!ioc->pd_handles) {
		r = -ENOMEM;
		goto out_free_resources;
	}
	ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
	    GFP_KERNEL);
	if (!ioc->blocking_handles) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	/* allocate memory for pending OS device add list */
	ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
	if (ioc->facts.MaxDevHandle % 8)
		ioc->pend_os_device_add_sz++;
	ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
	    GFP_KERNEL);
	if (!ioc->pend_os_device_add) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
	ioc->device_remove_in_progress =
	    kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
	if (!ioc->device_remove_in_progress) {
		r = -ENOMEM;
		goto out_free_resources;
	}
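
	/*
	 * Illustrative sketch, not driver code: the regions allocated above
	 * are plain bitmaps sized at one bit per device handle, rounded up
	 * to whole bytes, so a handle is marked and tested elsewhere with
	 * the standard bitmap helpers, e.g.:
	 *
	 *	set_bit(handle, ioc->pd_handles);
	 *	if (test_bit(handle, ioc->pd_handles))
	 *		(... handle belongs to a RAID physical disk ...)
	 */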
	ioc->fwfault_debug = mpt3sas_fwfault_debug;

	/* base internal command bits */
	mutex_init(&ioc->base_cmds.mutex);
	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;

	/* port_enable command bits */
	ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;

	/* transport internal command bits */
	ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->transport_cmds.mutex);

	/* scsih internal command bits */
	ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->scsih_cmds.mutex);

	/* task management internal command bits */
	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->tm_cmds.mutex);

	/* config page internal command bits */
	ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->config_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->config_cmds.mutex);

	/* ctl module internal command bits */
	ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->ctl_cmds.mutex);

	if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
	    !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
	    !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
	    !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
		r = -ENOMEM;
		goto out_free_resources;
	}
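
	/*
	 * Illustrative sketch, not driver code: each of the *_cmds slots
	 * above is a single-outstanding-request context, used elsewhere in
	 * this file with the same pattern (the timeout shown is an
	 * assumption):
	 *
	 *	mutex_lock(&ioc->base_cmds.mutex);
	 *	ioc->base_cmds.status = MPT3_CMD_PENDING;
	 *	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	 *	ioc->base_cmds.smid = smid;
	 *	(... build the request frame for this smid ...)
	 *	init_completion(&ioc->base_cmds.done);
	 *	ioc->put_smid_default(ioc, smid);
	 *	wait_for_completion_timeout(&ioc->base_cmds.done, 10 * HZ);
	 *	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
	 *	mutex_unlock(&ioc->base_cmds.mutex);
	 */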
	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		ioc->event_masks[i] = -1;

	/* here we enable the events we care about */
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	_base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	_base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
	_base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
	_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
	_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
	_base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
	_base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
	if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
		if (ioc->is_gen35_ioc) {
			_base_unmask_events(ioc,
			    MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
			_base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
			_base_unmask_events(ioc,
			    MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
		}
	}
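
	/*
	 * Illustrative sketch, not driver code: setting every word of
	 * event_masks to -1 masks all events; unmasking event E then means
	 * clearing one bit, roughly:
	 *
	 *	ioc->event_masks[E / 32] &= ~(1U << (E % 32));
	 *
	 * The resulting mask words are handed to the IOC in the
	 * EventNotification request sent by _base_event_notification().
	 */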
	r = _base_make_ioc_operational(ioc);
	if (r)
		goto out_free_resources;

	ioc->non_operational_loop = 0;
	ioc->got_task_abort_from_ioctl = 0;
	return 0;

 out_free_resources:

	ioc->remove_host = 1;

	mpt3sas_base_free_resources(ioc);
	_base_release_memory_pools(ioc);
	pci_set_drvdata(ioc->pdev, NULL);
	kfree(ioc->cpu_msix_table);
	if (ioc->is_warpdrive)
		kfree(ioc->reply_post_host_index);
	kfree(ioc->pd_handles);
	kfree(ioc->blocking_handles);
	kfree(ioc->device_remove_in_progress);
	kfree(ioc->pend_os_device_add);
	kfree(ioc->tm_cmds.reply);
	kfree(ioc->transport_cmds.reply);
	kfree(ioc->scsih_cmds.reply);
	kfree(ioc->config_cmds.reply);
	kfree(ioc->base_cmds.reply);
	kfree(ioc->port_enable_cmds.reply);
	kfree(ioc->ctl_cmds.reply);
	kfree(ioc->ctl_cmds.sense);
	kfree(ioc->pfacts);
	ioc->ctl_cmds.reply = NULL;
	ioc->base_cmds.reply = NULL;
	ioc->tm_cmds.reply = NULL;
	ioc->scsih_cmds.reply = NULL;
	ioc->transport_cmds.reply = NULL;
	ioc->config_cmds.reply = NULL;
	ioc->pfacts = NULL;
	return r;
}
/**
 * mpt3sas_base_detach - remove controller instance
 * @ioc: per adapter object
 *
 * Return nothing.
 */
void
mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
{
	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	mpt3sas_base_stop_watchdog(ioc);
	mpt3sas_base_free_resources(ioc);
	_base_release_memory_pools(ioc);
	pci_set_drvdata(ioc->pdev, NULL);
	kfree(ioc->cpu_msix_table);
	if (ioc->is_warpdrive)
		kfree(ioc->reply_post_host_index);
	kfree(ioc->pd_handles);
	kfree(ioc->blocking_handles);
	kfree(ioc->device_remove_in_progress);
	kfree(ioc->pend_os_device_add);
	kfree(ioc->pfacts);
	kfree(ioc->ctl_cmds.reply);
	kfree(ioc->ctl_cmds.sense);
	kfree(ioc->base_cmds.reply);
	kfree(ioc->port_enable_cmds.reply);
	kfree(ioc->tm_cmds.reply);
	kfree(ioc->transport_cmds.reply);
	kfree(ioc->scsih_cmds.reply);
	kfree(ioc->config_cmds.reply);
}
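
/*
 * Illustrative usage sketch, not driver code: attach and detach are
 * expected to be paired from the PCI probe/remove path, e.g. (error
 * handling elided, label name assumed):
 *
 *	if (mpt3sas_base_attach(ioc))	(probe: bring the IOC up)
 *		goto out_attach_fail;
 *	...
 *	mpt3sas_base_detach(ioc);	(remove: tear everything down)
 */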
/**
 * _base_reset_handler - reset callback handler (for base)
 * @ioc: per adapter object
 * @reset_phase: phase
 *
 * The handler for doing any required cleanup or initialization.
 *
 * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
 * or MPT3_IOC_DONE_RESET.
 *
 * Return nothing.
 */
static void
_base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
{
	mpt3sas_scsih_reset_handler(ioc, reset_phase);
	mpt3sas_ctl_reset_handler(ioc, reset_phase);
	switch (reset_phase) {
	case MPT3_IOC_PRE_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
		break;
	case MPT3_IOC_AFTER_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
		if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
			ioc->transport_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
			complete(&ioc->transport_cmds.done);
		}
		if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
			ioc->base_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
			complete(&ioc->base_cmds.done);
		}
		if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
			ioc->port_enable_failed = 1;
			ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
			if (ioc->is_driver_loading) {
				ioc->start_scan_failed =
				    MPI2_IOCSTATUS_INTERNAL_ERROR;
				ioc->start_scan = 0;
				ioc->port_enable_cmds.status =
				    MPT3_CMD_NOT_USED;
			} else
				complete(&ioc->port_enable_cmds.done);
		}
		if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
			ioc->config_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
			ioc->config_cmds.smid = USHRT_MAX;
			complete(&ioc->config_cmds.done);
		}
		break;
	case MPT3_IOC_DONE_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		    "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
		break;
	}
}
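
/*
 * Illustrative sketch, not driver code: each module-level reset callback
 * (scsih, ctl, and this one) follows the same three-phase contract:
 *
 *	switch (reset_phase) {
 *	case MPT3_IOC_PRE_RESET:
 *		(quiesce: stop issuing new requests)
 *		break;
 *	case MPT3_IOC_AFTER_RESET:
 *		(flush: mark pending internal commands MPT3_CMD_RESET,
 *		 free their smids, and complete their waiters)
 *		break;
 *	case MPT3_IOC_DONE_RESET:
 *		(resume normal operation)
 *		break;
 *	}
 */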
/**
 * _wait_for_commands_to_complete - wait for pending commands to complete
 * @ioc: Pointer to MPT_ADAPTER structure
 *
 * This function waits (up to 10 seconds) for all pending commands to
 * complete prior to putting the controller in reset.
 */
static void
_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
{
	u32 ioc_state;
	unsigned long flags;
	u16 i;

	ioc->pending_io_count = 0;

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
		return;

	/* pending command count */
	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
	for (i = 0; i < ioc->scsiio_depth; i++)
		if (ioc->scsi_lookup[i].cb_idx != 0xFF)
			ioc->pending_io_count++;
	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	if (!ioc->pending_io_count)
		return;

	/* wait for pending commands to complete */
	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
}
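
/*
 * Illustrative sketch, not driver code: the wait above completes early
 * because the smid-free path decrements the counter and wakes the wait
 * queue once it hits zero, roughly:
 *
 *	if (ioc->pending_io_count) {
 *		ioc->pending_io_count--;
 *		if (ioc->pending_io_count == 0)
 *			wake_up(&ioc->reset_wq);
 *	}
 */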
/**
 * mpt3sas_base_hard_reset_handler - reset controller
 * @ioc: Pointer to MPT_ADAPTER structure
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
	enum reset_type type)
{
	int r;
	unsigned long flags;
	u32 ioc_state;
	u8 is_fault = 0, is_trigger = 0;

	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
	    __func__));

	if (ioc->pci_error_recovery) {
		pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
		    ioc->name, __func__);
		r = 0;
		goto out_unlocked;
	}

	if (mpt3sas_fwfault_debug)
		mpt3sas_halt_firmware(ioc);

	/* wait for an active reset in progress to complete */
	if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
		do {
			ssleep(1);
		} while (ioc->shost_recovery == 1);
		dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
		    __func__));
		return ioc->ioc_reset_in_progress_status;
	}

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->shost_recovery = 1;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
	    (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_RELEASED))) {
		is_trigger = 1;
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
			is_fault = 1;
	}
	_base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
	_wait_for_commands_to_complete(ioc);
	_base_mask_interrupts(ioc);
	r = _base_make_ioc_ready(ioc, type);
	if (r)
		goto out;
	_base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);

	/* If this hard reset is called while port enable is active, then
	 * there is no reason to call make_ioc_operational
	 */
	if (ioc->is_driver_loading && ioc->port_enable_failed) {
		ioc->remove_host = 1;
		r = -EFAULT;
		goto out;
	}
	r = _base_get_ioc_facts(ioc);
	if (r)
		goto out;

	if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
		panic("%s: Issue occurred with flashing controller firmware. "
		    "Please reboot the system and ensure that the correct "
		    "firmware version is running\n", ioc->name);

	r = _base_make_ioc_operational(ioc);
	if (!r)
		_base_reset_handler(ioc, MPT3_IOC_DONE_RESET);

 out:
	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
	    ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->ioc_reset_in_progress_status = r;
	ioc->shost_recovery = 0;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->ioc_reset_count++;
	mutex_unlock(&ioc->reset_in_progress_mutex);

 out_unlocked:
	if ((r == 0) && is_trigger) {
		if (is_fault)
			mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
		else
			mpt3sas_trigger_master(ioc,
			    MASTER_TRIGGER_ADAPTER_RESET);
	}
	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
	    __func__));
	return r;
}
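
/*
 * Illustrative usage sketch, not driver code: callers that need a full
 * adapter reset (e.g. the SCSI EH host-reset path) invoke this as:
 *
 *	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
 *
 * Concurrent callers are serialized by reset_in_progress_mutex and
 * simply inherit the first reset's status.
 */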