mpt3sas_base.c 199 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
72778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327
73278327932803281328232833284328532863287328832893290329132923293329432953296329732983299330033013302330333043305330633073308330933103311331233133314331533163317331833193320332133223323332433253326332733283329333033313332333333343335333633373338333933403341334233433344334533463347334833493350335133523353335433553356335733583359336033613362336333643365336633673368336933703371337233733374337533763377337833793380338133823383338433853386338733883389339033913392339333943395339633973398339934003401340234033404340534063407340834093410341134123413341434153416341734183419342034213422342334243425342634273428342934303431343234333434343534363437343834393440344134423443344434453446344734483449345034513452345334543455345634573458345934603461346234633464346534663467346834693470347134723473347434753476347734783479348034813482348334843485348634873488348934903491349234933494349534963497349834993500350135023503350435053506350735083509351035113512351335143515351635173518351935203521352235233524352535263527352835293530353135323533353435353536353735383539354035413542354335443545354635473548354935503551355235533554355535563557355835593560356135623563356435653566356735683569357035713572357335743575357635773578357935803581358235833584358535863587358835893590359135923593359435953596359735983599360036013602360336043605360636073608360936103611361236133614361536163617361836193620362136223623362436253626362736283629363036313632363336343635363636373638363936403641364236433644364536463647364836493650365136523653365436553656365736583659366036613662366336643665366636673668366936703671367236733674367536763677367836793680368136823683368436853686368736883689369036913692369336943695369636973698369937003701370237033704370537063707370837093710371137123713371437153716371737183719372037213722372337243725372637273728372937303731373237333734373537363737373837393740374137423743374437453746374737483749375037513752375337543755375637573758375937603761376237633764376537663767376837693770377137723773377437753776377
73778377937803781378237833784378537863787378837893790379137923793379437953796379737983799380038013802380338043805380638073808380938103811381238133814381538163817381838193820382138223823382438253826382738283829383038313832383338343835383638373838383938403841384238433844384538463847384838493850385138523853385438553856385738583859386038613862386338643865386638673868386938703871387238733874387538763877387838793880388138823883388438853886388738883889389038913892389338943895389638973898389939003901390239033904390539063907390839093910391139123913391439153916391739183919392039213922392339243925392639273928392939303931393239333934393539363937393839393940394139423943394439453946394739483949395039513952395339543955395639573958395939603961396239633964396539663967396839693970397139723973397439753976397739783979398039813982398339843985398639873988398939903991399239933994399539963997399839994000400140024003400440054006400740084009401040114012401340144015401640174018401940204021402240234024402540264027402840294030403140324033403440354036403740384039404040414042404340444045404640474048404940504051405240534054405540564057405840594060406140624063406440654066406740684069407040714072407340744075407640774078407940804081408240834084408540864087408840894090409140924093409440954096409740984099410041014102410341044105410641074108410941104111411241134114411541164117411841194120412141224123412441254126412741284129413041314132413341344135413641374138413941404141414241434144414541464147414841494150415141524153415441554156415741584159416041614162416341644165416641674168416941704171417241734174417541764177417841794180418141824183418441854186418741884189419041914192419341944195419641974198419942004201420242034204420542064207420842094210421142124213421442154216421742184219422042214222422342244225422642274228422942304231423242334234423542364237423842394240424142424243424442454246424742484249425042514252425342544255425642574258425942604261426242634264426542664267426842694270427142724273427442754276427
74278427942804281428242834284428542864287428842894290429142924293429442954296429742984299430043014302430343044305430643074308430943104311431243134314431543164317431843194320432143224323432443254326432743284329433043314332433343344335433643374338433943404341434243434344434543464347434843494350435143524353435443554356435743584359436043614362436343644365436643674368436943704371437243734374437543764377437843794380438143824383438443854386438743884389439043914392439343944395439643974398439944004401440244034404440544064407440844094410441144124413441444154416441744184419442044214422442344244425442644274428442944304431443244334434443544364437443844394440444144424443444444454446444744484449445044514452445344544455445644574458445944604461446244634464446544664467446844694470447144724473447444754476447744784479448044814482448344844485448644874488448944904491449244934494449544964497449844994500450145024503450445054506450745084509451045114512451345144515451645174518451945204521452245234524452545264527452845294530453145324533453445354536453745384539454045414542454345444545454645474548454945504551455245534554455545564557455845594560456145624563456445654566456745684569457045714572457345744575457645774578457945804581458245834584458545864587458845894590459145924593459445954596459745984599460046014602460346044605460646074608460946104611461246134614461546164617461846194620462146224623462446254626462746284629463046314632463346344635463646374638463946404641464246434644464546464647464846494650465146524653465446554656465746584659466046614662466346644665466646674668466946704671467246734674467546764677467846794680468146824683468446854686468746884689469046914692469346944695469646974698469947004701470247034704470547064707470847094710471147124713471447154716471747184719472047214722472347244725472647274728472947304731473247334734473547364737473847394740474147424743474447454746474747484749475047514752475347544755475647574758475947604761476247634764476547664767476847694770477147724773477447754776477
74778477947804781478247834784478547864787478847894790479147924793479447954796479747984799480048014802480348044805480648074808480948104811481248134814481548164817481848194820482148224823482448254826482748284829483048314832483348344835483648374838483948404841484248434844484548464847484848494850485148524853485448554856485748584859486048614862486348644865486648674868486948704871487248734874487548764877487848794880488148824883488448854886488748884889489048914892489348944895489648974898489949004901490249034904490549064907490849094910491149124913491449154916491749184919492049214922492349244925492649274928492949304931493249334934493549364937493849394940494149424943494449454946494749484949495049514952495349544955495649574958495949604961496249634964496549664967496849694970497149724973497449754976497749784979498049814982498349844985498649874988498949904991499249934994499549964997499849995000500150025003500450055006500750085009501050115012501350145015501650175018501950205021502250235024502550265027502850295030503150325033503450355036503750385039504050415042504350445045504650475048504950505051505250535054505550565057505850595060506150625063506450655066506750685069507050715072507350745075507650775078507950805081508250835084508550865087508850895090509150925093509450955096509750985099510051015102510351045105510651075108510951105111511251135114511551165117511851195120512151225123512451255126512751285129513051315132513351345135513651375138513951405141514251435144514551465147514851495150515151525153515451555156515751585159516051615162516351645165516651675168516951705171517251735174517551765177517851795180518151825183518451855186518751885189519051915192519351945195519651975198519952005201520252035204520552065207520852095210521152125213521452155216521752185219522052215222522352245225522652275228522952305231523252335234523552365237523852395240524152425243524452455246524752485249525052515252525352545255525652575258525952605261526252635264526552665267526852695270527152725273527452755276527
75278527952805281528252835284528552865287528852895290529152925293529452955296529752985299530053015302530353045305530653075308530953105311531253135314531553165317531853195320532153225323532453255326532753285329533053315332533353345335533653375338533953405341534253435344534553465347534853495350535153525353535453555356535753585359536053615362536353645365536653675368536953705371537253735374537553765377537853795380538153825383538453855386538753885389539053915392539353945395539653975398539954005401540254035404540554065407540854095410541154125413541454155416541754185419542054215422542354245425542654275428542954305431543254335434543554365437543854395440544154425443544454455446544754485449545054515452545354545455545654575458545954605461546254635464546554665467546854695470547154725473547454755476547754785479548054815482548354845485548654875488548954905491549254935494549554965497549854995500550155025503550455055506550755085509551055115512551355145515551655175518551955205521552255235524552555265527552855295530553155325533553455355536553755385539554055415542554355445545554655475548554955505551555255535554555555565557555855595560556155625563556455655566556755685569557055715572557355745575557655775578557955805581558255835584558555865587558855895590559155925593559455955596559755985599560056015602560356045605560656075608560956105611561256135614561556165617561856195620562156225623562456255626562756285629563056315632563356345635563656375638563956405641564256435644564556465647564856495650565156525653565456555656565756585659566056615662566356645665566656675668566956705671567256735674567556765677567856795680568156825683568456855686568756885689569056915692569356945695569656975698569957005701570257035704570557065707570857095710571157125713571457155716571757185719572057215722572357245725572657275728572957305731573257335734573557365737573857395740574157425743574457455746574757485749575057515752575357545755575657575758575957605761576257635764576557665767576857695770577157725773577457755776577
75778577957805781578257835784578557865787578857895790579157925793579457955796579757985799580058015802580358045805580658075808580958105811581258135814581558165817581858195820582158225823582458255826582758285829583058315832583358345835583658375838583958405841584258435844584558465847584858495850585158525853585458555856585758585859586058615862586358645865586658675868586958705871587258735874587558765877587858795880588158825883588458855886588758885889589058915892589358945895589658975898589959005901590259035904590559065907590859095910591159125913591459155916591759185919592059215922592359245925592659275928592959305931593259335934593559365937593859395940594159425943594459455946594759485949595059515952595359545955595659575958595959605961596259635964596559665967596859695970597159725973597459755976597759785979598059815982598359845985598659875988598959905991599259935994599559965997599859996000600160026003600460056006600760086009601060116012601360146015601660176018601960206021602260236024602560266027602860296030603160326033603460356036603760386039604060416042604360446045604660476048604960506051605260536054605560566057605860596060606160626063606460656066606760686069607060716072607360746075607660776078607960806081608260836084608560866087608860896090609160926093609460956096609760986099610061016102610361046105610661076108610961106111611261136114611561166117611861196120612161226123612461256126612761286129613061316132613361346135613661376138613961406141614261436144614561466147614861496150615161526153615461556156615761586159616061616162616361646165616661676168616961706171617261736174617561766177617861796180618161826183618461856186618761886189619061916192619361946195619661976198619962006201620262036204620562066207620862096210621162126213621462156216621762186219622062216222622362246225622662276228622962306231623262336234623562366237623862396240624162426243624462456246624762486249625062516252625362546255625662576258625962606261626262636264626562666267626862696270627162726273627462756276627
76278627962806281628262836284628562866287628862896290629162926293629462956296629762986299630063016302630363046305630663076308630963106311631263136314631563166317631863196320632163226323632463256326632763286329633063316332633363346335633663376338633963406341634263436344634563466347634863496350635163526353635463556356635763586359636063616362636363646365636663676368636963706371637263736374637563766377637863796380638163826383638463856386638763886389639063916392639363946395639663976398639964006401640264036404640564066407640864096410641164126413641464156416641764186419642064216422642364246425642664276428642964306431643264336434643564366437643864396440644164426443644464456446644764486449645064516452645364546455645664576458645964606461646264636464646564666467646864696470647164726473647464756476647764786479648064816482648364846485648664876488648964906491649264936494649564966497649864996500650165026503650465056506650765086509651065116512651365146515651665176518651965206521652265236524652565266527652865296530653165326533653465356536653765386539654065416542654365446545654665476548654965506551655265536554655565566557655865596560656165626563656465656566656765686569657065716572657365746575657665776578657965806581658265836584658565866587658865896590659165926593659465956596659765986599660066016602660366046605660666076608660966106611661266136614661566166617661866196620662166226623662466256626662766286629663066316632663366346635663666376638663966406641664266436644664566466647664866496650665166526653665466556656665766586659666066616662666366646665666666676668666966706671667266736674667566766677667866796680668166826683668466856686668766886689669066916692669366946695669666976698669967006701670267036704670567066707670867096710671167126713671467156716671767186719672067216722672367246725672667276728672967306731673267336734673567366737673867396740674167426743674467456746674767486749675067516752675367546755675667576758675967606761676267636764676567666767676867696770677167726773677467756776677
76778677967806781678267836784678567866787678867896790679167926793679467956796679767986799680068016802680368046805680668076808680968106811681268136814681568166817681868196820682168226823682468256826682768286829683068316832683368346835683668376838683968406841684268436844684568466847684868496850685168526853685468556856685768586859686068616862686368646865686668676868686968706871687268736874687568766877687868796880688168826883688468856886688768886889689068916892689368946895689668976898689969006901690269036904690569066907690869096910691169126913691469156916691769186919692069216922692369246925692669276928692969306931693269336934693569366937693869396940694169426943694469456946694769486949695069516952695369546955695669576958695969606961696269636964696569666967696869696970697169726973697469756976697769786979698069816982698369846985698669876988698969906991699269936994699569966997699869997000700170027003700470057006700770087009701070117012701370147015701670177018701970207021702270237024
  1. /*
  2. * This is the Fusion MPT base driver providing common API layer interface
  3. * for access to MPT (Message Passing Technology) firmware.
  4. *
  5. * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
  6. * Copyright (C) 2012-2014 LSI Corporation
  7. * Copyright (C) 2013-2014 Avago Technologies
  8. * (mailto: MPT-FusionLinux.pdl@avagotech.com)
  9. *
  10. * This program is free software; you can redistribute it and/or
  11. * modify it under the terms of the GNU General Public License
  12. * as published by the Free Software Foundation; either version 2
  13. * of the License, or (at your option) any later version.
  14. *
  15. * This program is distributed in the hope that it will be useful,
  16. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  18. * GNU General Public License for more details.
  19. *
  20. * NO WARRANTY
  21. * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
  22. * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
  23. * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
  24. * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
  25. * solely responsible for determining the appropriateness of using and
  26. * distributing the Program and assumes all risks associated with its
  27. * exercise of rights under this Agreement, including but not limited to
  28. * the risks and costs of program errors, damage to or loss of data,
  29. * programs or equipment, and unavailability or interruption of operations.
  30. * DISCLAIMER OF LIABILITY
  31. * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
  32. * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  33. * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
  34. * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
  35. * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  36. * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
  37. * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
  38. * You should have received a copy of the GNU General Public License
  39. * along with this program; if not, write to the Free Software
  40. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
  41. * USA.
  42. */
  43. #include <linux/kernel.h>
  44. #include <linux/module.h>
  45. #include <linux/errno.h>
  46. #include <linux/init.h>
  47. #include <linux/slab.h>
  48. #include <linux/types.h>
  49. #include <linux/pci.h>
  50. #include <linux/kdev_t.h>
  51. #include <linux/blkdev.h>
  52. #include <linux/delay.h>
  53. #include <linux/interrupt.h>
  54. #include <linux/dma-mapping.h>
  55. #include <linux/io.h>
  56. #include <linux/time.h>
  57. #include <linux/ktime.h>
  58. #include <linux/kthread.h>
  59. #include <asm/page.h> /* To get host page size per arch */
  60. #include <linux/aer.h>
  61. #include "mpt3sas_base.h"
  62. static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
  63. #define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
  64. /* maximum controller queue depth */
  65. #define MAX_HBA_QUEUE_DEPTH 30000
  66. #define MAX_CHAIN_DEPTH 100000
  67. static int max_queue_depth = -1;
  68. module_param(max_queue_depth, int, 0);
  69. MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
  70. static int max_sgl_entries = -1;
  71. module_param(max_sgl_entries, int, 0);
  72. MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
  73. static int msix_disable = -1;
  74. module_param(msix_disable, int, 0);
  75. MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
  76. static int smp_affinity_enable = 1;
  77. module_param(smp_affinity_enable, int, S_IRUGO);
  78. MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
  79. static int max_msix_vectors = -1;
  80. module_param(max_msix_vectors, int, 0);
  81. MODULE_PARM_DESC(max_msix_vectors,
  82. " max msix vectors");
  83. static int mpt3sas_fwfault_debug;
  84. MODULE_PARM_DESC(mpt3sas_fwfault_debug,
  85. " enable detection of firmware fault and halt firmware - (default=0)");
  86. static int
  87. _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
  88. /**
  89. * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
  90. *
  91. */
  92. static int
  93. _scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
  94. {
  95. int ret = param_set_int(val, kp);
  96. struct MPT3SAS_ADAPTER *ioc;
  97. if (ret)
  98. return ret;
  99. /* global ioc spinlock to protect controller list on list operations */
  100. pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
  101. spin_lock(&gioc_lock);
  102. list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
  103. ioc->fwfault_debug = mpt3sas_fwfault_debug;
  104. spin_unlock(&gioc_lock);
  105. return 0;
  106. }
  107. module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
  108. param_get_int, &mpt3sas_fwfault_debug, 0644);
  109. /**
  110. * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
  111. * in BAR0 space.
  112. *
  113. * @ioc: per adapter object
  114. * @reply: reply message frame(lower 32bit addr)
  115. * @index: System request message index.
  116. *
  117. * @Returns - Nothing
  118. */
  119. static void
  120. _base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
  121. u32 index)
  122. {
  123. /*
  124. * 256 is offset within sys register.
  125. * 256 offset MPI frame starts. Max MPI frame supported is 32.
  126. * 32 * 128 = 4K. From here, Clone of reply free for mcpu starts
  127. */
  128. u16 cmd_credit = ioc->facts.RequestCredit + 1;
  129. void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
  130. MPI_FRAME_START_OFFSET +
  131. (cmd_credit * ioc->request_sz) + (index * sizeof(u32));
  132. writel(reply, reply_free_iomem);
  133. }
  134. /**
  135. * _base_clone_mpi_to_sys_mem - Writes/copies MPI frames
  136. * to system/BAR0 region.
  137. *
  138. * @dst_iomem: Pointer to the destinaltion location in BAR0 space.
  139. * @src: Pointer to the Source data.
  140. * @size: Size of data to be copied.
  141. */
  142. static void
  143. _base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
  144. {
  145. int i;
  146. u32 *src_virt_mem = (u32 *)src;
  147. for (i = 0; i < size/4; i++)
  148. writel((u32)src_virt_mem[i],
  149. (void __iomem *)dst_iomem + (i * 4));
  150. }
  151. /**
  152. * _base_clone_to_sys_mem - Writes/copies data to system/BAR0 region
  153. *
  154. * @dst_iomem: Pointer to the destination location in BAR0 space.
  155. * @src: Pointer to the Source data.
  156. * @size: Size of data to be copied.
  157. */
  158. static void
  159. _base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
  160. {
  161. int i;
  162. u32 *src_virt_mem = (u32 *)(src);
  163. for (i = 0; i < size/4; i++)
  164. writel((u32)src_virt_mem[i],
  165. (void __iomem *)dst_iomem + (i * 4));
  166. }
  167. /**
  168. * _base_get_chain - Calculates and Returns virtual chain address
  169. * for the provided smid in BAR0 space.
  170. *
  171. * @ioc: per adapter object
  172. * @smid: system request message index
  173. * @sge_chain_count: Scatter gather chain count.
  174. *
  175. * @Return: chain address.
  176. */
  177. static inline void __iomem*
  178. _base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
  179. u8 sge_chain_count)
  180. {
  181. void __iomem *base_chain, *chain_virt;
  182. u16 cmd_credit = ioc->facts.RequestCredit + 1;
  183. base_chain = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
  184. (cmd_credit * ioc->request_sz) +
  185. REPLY_FREE_POOL_SIZE;
  186. chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
  187. ioc->request_sz) + (sge_chain_count * ioc->request_sz);
  188. return chain_virt;
  189. }
  190. /**
  191. * _base_get_chain_phys - Calculates and Returns physical address
  192. * in BAR0 for scatter gather chains, for
  193. * the provided smid.
  194. *
  195. * @ioc: per adapter object
  196. * @smid: system request message index
  197. * @sge_chain_count: Scatter gather chain count.
  198. *
  199. * @Return - Physical chain address.
  200. */
  201. static inline phys_addr_t
  202. _base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
  203. u8 sge_chain_count)
  204. {
  205. phys_addr_t base_chain_phys, chain_phys;
  206. u16 cmd_credit = ioc->facts.RequestCredit + 1;
  207. base_chain_phys = ioc->chip_phys + MPI_FRAME_START_OFFSET +
  208. (cmd_credit * ioc->request_sz) +
  209. REPLY_FREE_POOL_SIZE;
  210. chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
  211. ioc->request_sz) + (sge_chain_count * ioc->request_sz);
  212. return chain_phys;
  213. }
  214. /**
  215. * _base_get_buffer_bar0 - Calculates and Returns BAR0 mapped Host
  216. * buffer address for the provided smid.
  217. * (Each smid can have 64K starts from 17024)
  218. *
  219. * @ioc: per adapter object
  220. * @smid: system request message index
  221. *
  222. * @Returns - Pointer to buffer location in BAR0.
  223. */
  224. static void __iomem *
  225. _base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  226. {
  227. u16 cmd_credit = ioc->facts.RequestCredit + 1;
  228. // Added extra 1 to reach end of chain.
  229. void __iomem *chain_end = _base_get_chain(ioc,
  230. cmd_credit + 1,
  231. ioc->facts.MaxChainDepth);
  232. return chain_end + (smid * 64 * 1024);
  233. }
  234. /**
  235. * _base_get_buffer_phys_bar0 - Calculates and Returns BAR0 mapped
  236. * Host buffer Physical address for the provided smid.
  237. * (Each smid can have 64K starts from 17024)
  238. *
  239. * @ioc: per adapter object
  240. * @smid: system request message index
  241. *
  242. * @Returns - Pointer to buffer location in BAR0.
  243. */
  244. static phys_addr_t
  245. _base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  246. {
  247. u16 cmd_credit = ioc->facts.RequestCredit + 1;
  248. phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
  249. cmd_credit + 1,
  250. ioc->facts.MaxChainDepth);
  251. return chain_end_phys + (smid * 64 * 1024);
  252. }
  253. /**
  254. * _base_get_chain_buffer_dma_to_chain_buffer - Iterates chain
  255. * lookup list and Provides chain_buffer
  256. * address for the matching dma address.
  257. * (Each smid can have 64K starts from 17024)
  258. *
  259. * @ioc: per adapter object
  260. * @chain_buffer_dma: Chain buffer dma address.
  261. *
  262. * @Returns - Pointer to chain buffer. Or Null on Failure.
  263. */
  264. static void *
  265. _base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
  266. dma_addr_t chain_buffer_dma)
  267. {
  268. u16 index, j;
  269. struct chain_tracker *ct;
  270. for (index = 0; index < ioc->scsiio_depth; index++) {
  271. for (j = 0; j < ioc->chains_needed_per_io; j++) {
  272. ct = &ioc->chain_lookup[index].chains_per_smid[j];
  273. if (ct && ct->chain_buffer_dma == chain_buffer_dma)
  274. return ct->chain_buffer;
  275. }
  276. }
  277. pr_info(MPT3SAS_FMT
  278. "Provided chain_buffer_dma address is not in the lookup list\n",
  279. ioc->name);
  280. return NULL;
  281. }
/**
 * _clone_sg_entries - MPI EP's scsiio and config requests
 *			are handled here. Base function for
 *			double buffering, before submitting
 *			the requests.
 *
 * Walks the 32-bit SGL of a SCSI IO or CONFIG request, copies each
 * data buffer and chain frame into the BAR0-backed shadow region,
 * and rewrites the SGE addresses to point at the shadow copies.
 *
 * @ioc: per adapter object.
 * @mpi_request: mf request pointer.
 * @smid: system request message index.
 *
 * @Returns: Nothing.
 */
static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
	void *mpi_request, u16 smid)
{
	Mpi2SGESimple32_t *sgel, *sgel_next;
	u32 sgl_flags, sge_chain_count = 0;
	bool is_write = 0;
	u16 i = 0;
	void __iomem *buffer_iomem;
	phys_addr_t buffer_iomem_phys;
	void __iomem *buff_ptr;
	phys_addr_t buff_ptr_phys;
	void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO];
	void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO];
	phys_addr_t dst_addr_phys;
	MPI2RequestHeader_t *request_hdr;
	struct scsi_cmnd *scmd;
	struct scatterlist *sg_scmd = NULL;
	int is_scsiio_req = 0;

	request_hdr = (MPI2RequestHeader_t *) mpi_request;

	/* only SCSI IO and CONFIG requests carry an SGL to clone */
	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
		Mpi25SCSIIORequest_t *scsiio_request =
			(Mpi25SCSIIORequest_t *)mpi_request;
		sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL;
		is_scsiio_req = 1;
	} else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
		Mpi2ConfigRequest_t *config_req =
			(Mpi2ConfigRequest_t *)mpi_request;
		sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE;
	} else
		return;

	/* From smid we can get scsi_cmd, once we have sg_scmd,
	 * we just need to get sg_virt and sg_next to get virual
	 * address associated with sgel->Address.
	 */
	if (is_scsiio_req) {
		/* Get scsi_cmd using smid */
		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
		if (scmd == NULL) {
			pr_err(MPT3SAS_FMT "scmd is NULL\n", ioc->name);
			return;
		}

		/* Get sg_scmd from scmd provided */
		sg_scmd = scsi_sglist(scmd);
	}

	/*
	 * 0 - 255	System register
	 * 256 - 4352	MPI Frame. (This is based on maxCredit 32)
	 * 4352 - 4864	Reply_free pool (512 byte is reserved
	 *		considering maxCredit 32. Reply need extra
	 *		room, for mCPU case kept four times of
	 *		maxCredit).
	 * 4864 - 17152	SGE chain element. (32cmd * 3 chain of
	 *		128 byte size = 12288)
	 * 17152 - x	Host buffer mapped with smid.
	 *		(Each smid can have 64K Max IO.)
	 * BAR0+Last 1K MSIX Addr and Data
	 * Total size in use 2113664 bytes of 4MB BAR0
	 */

	buffer_iomem = _base_get_buffer_bar0(ioc, smid);
	buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);

	buff_ptr = buffer_iomem;
	buff_ptr_phys = buffer_iomem_phys;
	/* shadow buffer must be 32-bit addressable for the 32-bit SGEs */
	WARN_ON(buff_ptr_phys > U32_MAX);

	if (le32_to_cpu(sgel->FlagsLength) &
			(MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
		is_write = 1;

	/* worst case: every SGE simple plus a full chain per element */
	for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {

		sgl_flags =
		    (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT);

		switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
		case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
			/*
			 * Helper function which on passing
			 * chain_buffer_dma returns chain_buffer. Get
			 * the virtual address for sgel->Address
			 */
			sgel_next =
				_base_get_chain_buffer_dma_to_chain_buffer(ioc,
						le32_to_cpu(sgel->Address));
			if (sgel_next == NULL)
				return;
			/*
			 * This is coping 128 byte chain
			 * frame (not a host buffer)
			 */
			dst_chain_addr[sge_chain_count] =
				_base_get_chain(ioc,
					smid, sge_chain_count);
			src_chain_addr[sge_chain_count] =
						(void *) sgel_next;
			dst_addr_phys = _base_get_chain_phys(ioc,
						smid, sge_chain_count);
			WARN_ON(dst_addr_phys > U32_MAX);
			/* redirect the chain SGE to the BAR0 copy */
			sgel->Address =
				cpu_to_le32(lower_32_bits(dst_addr_phys));
			sgel = sgel_next;
			sge_chain_count++;
			break;
		case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
			if (is_write) {
				if (is_scsiio_req) {
					/* copy the host data into BAR0 */
					_base_clone_to_sys_mem(buff_ptr,
					    sg_virt(sg_scmd),
					    (le32_to_cpu(sgel->FlagsLength) &
					    0x00ffffff));
					/*
					 * FIXME: this relies on a a zero
					 * PCI mem_offset.
					 */
					sgel->Address =
					    cpu_to_le32((u32)buff_ptr_phys);
				} else {
					_base_clone_to_sys_mem(buff_ptr,
					    ioc->config_vaddr,
					    (le32_to_cpu(sgel->FlagsLength) &
					    0x00ffffff));
					sgel->Address =
					    cpu_to_le32((u32)buff_ptr_phys);
				}
			}
			/* advance by the SGE length (low 24 bits of
			 * FlagsLength), whether or not data was copied
			 */
			buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
			    0x00ffffff);
			buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
			    0x00ffffff);
			if ((le32_to_cpu(sgel->FlagsLength) &
			    (MPI2_SGE_FLAGS_END_OF_BUFFER
					<< MPI2_SGE_FLAGS_SHIFT)))
				goto eob_clone_chain;
			else {
				/*
				 * Every single element in MPT will have
				 * associated sg_next. Better to sanity that
				 * sg_next is not NULL, but it will be a bug
				 * if it is null.
				 */
				if (is_scsiio_req) {
					sg_scmd = sg_next(sg_scmd);
					if (sg_scmd)
						sgel++;
					else
						goto eob_clone_chain;
				}
			}
			break;
		}
	}

eob_clone_chain:
	/* finally mirror every walked chain frame into BAR0 */
	for (i = 0; i < sge_chain_count; i++) {
		if (is_scsiio_req)
			_base_clone_to_sys_mem(dst_chain_addr[i],
				src_chain_addr[i], ioc->request_sz);
	}
}
  447. /**
  448. * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
  449. * @arg: input argument, used to derive ioc
  450. *
  451. * Return 0 if controller is removed from pci subsystem.
  452. * Return -1 for other case.
  453. */
  454. static int mpt3sas_remove_dead_ioc_func(void *arg)
  455. {
  456. struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
  457. struct pci_dev *pdev;
  458. if ((ioc == NULL))
  459. return -1;
  460. pdev = ioc->pdev;
  461. if ((pdev == NULL))
  462. return -1;
  463. pci_stop_and_remove_bus_device_locked(pdev);
  464. return 0;
  465. }
/**
 * _base_fault_reset_work - workq handling ioc fault conditions
 * @work: input argument, used to derive ioc
 * Context: sleep.
 *
 * Periodic poll (FAULT_POLLING_INTERVAL): reads the IOC doorbell
 * state, removes a dead controller, hard-resets a non-operational
 * one, and re-arms itself unless the controller is gone.
 *
 * Return nothing.
 */
static void
_base_fault_reset_work(struct work_struct *work)
{
	struct MPT3SAS_ADAPTER *ioc =
	    container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
	unsigned long	 flags;
	u32 doorbell;
	int rc;
	struct task_struct *p;


	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	/* skip the poll while a reset or PCI error recovery is in flight */
	if (ioc->shost_recovery || ioc->pci_error_recovery)
		goto rearm_timer;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	doorbell = mpt3sas_base_get_iocstate(ioc, 0);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
		pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
		    ioc->name);

		/* It may be possible that EEH recovery can resolve some of
		 * pci bus failure issues rather removing the dead ioc function
		 * by considering controller is in a non-operational state. So
		 * here priority is given to the EEH recovery. If it does not
		 * resolve this issue, mpt3sas driver will consider this
		 * controller to non-operational state and remove the dead ioc
		 * function.
		 */
		if (ioc->non_operational_loop++ < 5) {
			spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
							 flags);
			goto rearm_timer;
		}

		/*
		 * Call _scsih_flush_pending_cmds callback so that we flush all
		 * pending commands back to OS. This call is required to avoid
		 * deadlock at block layer. Dead IOC will fail to do diag reset,
		 * and this call is safe since dead ioc will never return any
		 * command back from HW.
		 */
		ioc->schedule_dead_ioc_flush_running_cmds(ioc);
		/*
		 * Set remove_host flag early since kernel thread will
		 * take some time to execute.
		 */
		ioc->remove_host = 1;
		/*Remove the Dead Host */
		p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
		    "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
		if (IS_ERR(p))
			pr_err(MPT3SAS_FMT
			"%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
			ioc->name, __func__);
		else
			pr_err(MPT3SAS_FMT
			"%s: Running mpt3sas_dead_ioc thread success !!!!\n",
			ioc->name, __func__);
		return; /* don't rearm timer */
	}

	ioc->non_operational_loop = 0;

	/* not FAULT/dead but also not OPERATIONAL: try a hard reset */
	if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
		rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
		pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
		    __func__, (rc == 0) ? "success" : "failed");
		doorbell = mpt3sas_base_get_iocstate(ioc, 0);
		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
			mpt3sas_base_fault_info(ioc, doorbell &
			    MPI2_DOORBELL_DATA_MASK);
		/* reset failed and still not operational: give up polling */
		if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_OPERATIONAL)
			return; /* don't rearm timer */
	}

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 rearm_timer:
	/* fault_reset_work_q is NULLed by stop_watchdog; re-arm only if alive */
	if (ioc->fault_reset_work_q)
		queue_delayed_work(ioc->fault_reset_work_q,
		    &ioc->fault_reset_work,
		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}
/**
 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
 * @ioc: per adapter object
 * Context: sleep.
 *
 * Creates the per-adapter single-threaded workqueue and arms the
 * periodic fault-polling work (_base_fault_reset_work). No-op if
 * the watchdog is already running.
 *
 * Return nothing.
 */
void
mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned long	 flags;

	/* already running */
	if (ioc->fault_reset_work_q)
		return;

	/* initialize fault polling */

	INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
	snprintf(ioc->fault_reset_work_q_name,
	    sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
	    ioc->driver_name, ioc->id);
	ioc->fault_reset_work_q =
		create_singlethread_workqueue(ioc->fault_reset_work_q_name);
	if (!ioc->fault_reset_work_q) {
		pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
		    ioc->name, __func__, __LINE__);
		return;
	}
	/* queue the first poll under the lock so stop_watchdog cannot race */
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	if (ioc->fault_reset_work_q)
		queue_delayed_work(ioc->fault_reset_work_q,
		    &ioc->fault_reset_work,
		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
}
  582. /**
  583. * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
  584. * @ioc: per adapter object
  585. * Context: sleep.
  586. *
  587. * Return nothing.
  588. */
  589. void
  590. mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
  591. {
  592. unsigned long flags;
  593. struct workqueue_struct *wq;
  594. spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
  595. wq = ioc->fault_reset_work_q;
  596. ioc->fault_reset_work_q = NULL;
  597. spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
  598. if (wq) {
  599. if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
  600. flush_workqueue(wq);
  601. destroy_workqueue(wq);
  602. }
  603. }
  604. /**
  605. * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
  606. * @ioc: per adapter object
  607. * @fault_code: fault code
  608. *
  609. * Return nothing.
  610. */
  611. void
  612. mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc , u16 fault_code)
  613. {
  614. pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n",
  615. ioc->name, fault_code);
  616. }
/**
 * mpt3sas_halt_firmware - halt's mpt controller firmware
 * @ioc: per adapter object
 *
 * For debugging timeout related issues.  Writing 0xC0FFEE00
 * to the doorbell register will halt controller firmware. With
 * the purpose to stop both driver and firmware, the enduser can
 * obtain a ring buffer from controller UART.
 */
void
mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
{
	u32 doorbell;

	/* only active when the fwfault_debug knob is set */
	if (!ioc->fwfault_debug)
		return;

	dump_stack();

	doorbell = readl(&ioc->chip->Doorbell);
	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
		mpt3sas_base_fault_info(ioc , doorbell);
	else {
		/* magic doorbell value that halts the firmware */
		writel(0xC0FFEE00, &ioc->chip->Doorbell);
		pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
			ioc->name);
	}

	/* fwfault_debug == 2: spin forever so state can be inspected
	 * via UART/JTAG; any other value panics the host.
	 */
	if (ioc->fwfault_debug == 2)
		for (;;)
			;
	else
		panic("panic in %s\n", __func__);
}
/**
 * _base_sas_ioc_info - verbose translation of the ioc status
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @request_hdr: request mf
 *
 * Decodes IOCStatus into a human-readable string and, when one is
 * found, logs it together with a hex dump of the request frame.
 *
 * Return nothing.
 */
static void
_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
	MPI2RequestHeader_t *request_hdr)
{
	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
	    MPI2_IOCSTATUS_MASK;
	char *desc = NULL;
	u16 frame_sz;
	char *func_str = NULL;

	/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
		return;

	/* expected while probing for non-existent config pages; not an error */
	if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
		return;

	switch (ioc_status) {

/****************************************************************************
*  Common IOCStatus values for all replies
****************************************************************************/

	case MPI2_IOCSTATUS_INVALID_FUNCTION:
		desc = "invalid function";
		break;
	case MPI2_IOCSTATUS_BUSY:
		desc = "busy";
		break;
	case MPI2_IOCSTATUS_INVALID_SGL:
		desc = "invalid sgl";
		break;
	case MPI2_IOCSTATUS_INTERNAL_ERROR:
		desc = "internal error";
		break;
	case MPI2_IOCSTATUS_INVALID_VPID:
		desc = "invalid vpid";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
		desc = "insufficient resources";
		break;
	case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
		desc = "insufficient power";
		break;
	case MPI2_IOCSTATUS_INVALID_FIELD:
		desc = "invalid field";
		break;
	case MPI2_IOCSTATUS_INVALID_STATE:
		desc = "invalid state";
		break;
	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
		desc = "op state not supported";
		break;

/****************************************************************************
*  Config IOCStatus values
****************************************************************************/

	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
		desc = "config invalid action";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
		desc = "config invalid type";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
		desc = "config invalid page";
		break;
	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
		desc = "config invalid data";
		break;
	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
		desc = "config no defaults";
		break;
	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
		desc = "config cant commit";
		break;

/****************************************************************************
*  SCSI IO Reply
****************************************************************************/

	/* SCSI IO statuses are decoded by the SCSI layer path; leave
	 * desc NULL so nothing is logged here.
	 */
	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
		break;

/****************************************************************************
*  For use by SCSI Initiator and SCSI Target end-to-end data protection
****************************************************************************/

	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
		desc = "eedp guard error";
		break;
	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
		desc = "eedp ref tag error";
		break;
	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
		desc = "eedp app tag error";
		break;

/****************************************************************************
*  SCSI Target values
****************************************************************************/

	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
		desc = "target invalid io index";
		break;
	case MPI2_IOCSTATUS_TARGET_ABORTED:
		desc = "target aborted";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
		desc = "target no conn retryable";
		break;
	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
		desc = "target no connection";
		break;
	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
		desc = "target xfer count mismatch";
		break;
	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
		desc = "target data offset error";
		break;
	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
		desc = "target too much write data";
		break;
	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
		desc = "target iu too short";
		break;
	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
		desc = "target ack nak timeout";
		break;
	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
		desc = "target nak received";
		break;

/****************************************************************************
*  Serial Attached SCSI values
****************************************************************************/

	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
		desc = "smp request failed";
		break;
	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
		desc = "smp data overrun";
		break;

/****************************************************************************
*  Diagnostic Buffer Post / Diagnostic Release values
****************************************************************************/

	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
		desc = "diagnostic released";
		break;
	default:
		break;
	}

	if (!desc)
		return;

	/* pick the frame size to dump based on the request function */
	switch (request_hdr->Function) {
	case MPI2_FUNCTION_CONFIG:
		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
		func_str = "config_page";
		break;
	case MPI2_FUNCTION_SCSI_TASK_MGMT:
		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
		func_str = "task_mgmt";
		break;
	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
		func_str = "sas_iounit_ctl";
		break;
	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
		frame_sz = sizeof(Mpi2SepRequest_t);
		func_str = "enclosure";
		break;
	case MPI2_FUNCTION_IOC_INIT:
		frame_sz = sizeof(Mpi2IOCInitRequest_t);
		func_str = "ioc_init";
		break;
	case MPI2_FUNCTION_PORT_ENABLE:
		frame_sz = sizeof(Mpi2PortEnableRequest_t);
		func_str = "port_enable";
		break;
	case MPI2_FUNCTION_SMP_PASSTHROUGH:
		frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
		func_str = "smp_passthru";
		break;
	case MPI2_FUNCTION_NVME_ENCAPSULATED:
		frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
		    ioc->sge_size;
		func_str = "nvme_encapsulated";
		break;
	default:
		frame_sz = 32;
		func_str = "unknown";
		break;
	}

	pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
		ioc->name, desc, ioc_status, request_hdr, func_str);

	_debug_dump_mf(request_hdr, frame_sz/4);
}
  850. /**
  851. * _base_display_event_data - verbose translation of firmware asyn events
  852. * @ioc: per adapter object
  853. * @mpi_reply: reply mf payload returned from firmware
  854. *
  855. * Return nothing.
  856. */
  857. static void
  858. _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
  859. Mpi2EventNotificationReply_t *mpi_reply)
  860. {
  861. char *desc = NULL;
  862. u16 event;
  863. if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
  864. return;
  865. event = le16_to_cpu(mpi_reply->Event);
  866. switch (event) {
  867. case MPI2_EVENT_LOG_DATA:
  868. desc = "Log Data";
  869. break;
  870. case MPI2_EVENT_STATE_CHANGE:
  871. desc = "Status Change";
  872. break;
  873. case MPI2_EVENT_HARD_RESET_RECEIVED:
  874. desc = "Hard Reset Received";
  875. break;
  876. case MPI2_EVENT_EVENT_CHANGE:
  877. desc = "Event Change";
  878. break;
  879. case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
  880. desc = "Device Status Change";
  881. break;
  882. case MPI2_EVENT_IR_OPERATION_STATUS:
  883. if (!ioc->hide_ir_msg)
  884. desc = "IR Operation Status";
  885. break;
  886. case MPI2_EVENT_SAS_DISCOVERY:
  887. {
  888. Mpi2EventDataSasDiscovery_t *event_data =
  889. (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
  890. pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
  891. (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
  892. "start" : "stop");
  893. if (event_data->DiscoveryStatus)
  894. pr_cont(" discovery_status(0x%08x)",
  895. le32_to_cpu(event_data->DiscoveryStatus));
  896. pr_cont("\n");
  897. return;
  898. }
  899. case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
  900. desc = "SAS Broadcast Primitive";
  901. break;
  902. case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
  903. desc = "SAS Init Device Status Change";
  904. break;
  905. case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
  906. desc = "SAS Init Table Overflow";
  907. break;
  908. case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
  909. desc = "SAS Topology Change List";
  910. break;
  911. case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
  912. desc = "SAS Enclosure Device Status Change";
  913. break;
  914. case MPI2_EVENT_IR_VOLUME:
  915. if (!ioc->hide_ir_msg)
  916. desc = "IR Volume";
  917. break;
  918. case MPI2_EVENT_IR_PHYSICAL_DISK:
  919. if (!ioc->hide_ir_msg)
  920. desc = "IR Physical Disk";
  921. break;
  922. case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
  923. if (!ioc->hide_ir_msg)
  924. desc = "IR Configuration Change List";
  925. break;
  926. case MPI2_EVENT_LOG_ENTRY_ADDED:
  927. if (!ioc->hide_ir_msg)
  928. desc = "Log Entry Added";
  929. break;
  930. case MPI2_EVENT_TEMP_THRESHOLD:
  931. desc = "Temperature Threshold";
  932. break;
  933. case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
  934. desc = "Cable Event";
  935. break;
  936. case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
  937. desc = "SAS Device Discovery Error";
  938. break;
  939. case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
  940. desc = "PCIE Device Status Change";
  941. break;
  942. case MPI2_EVENT_PCIE_ENUMERATION:
  943. {
  944. Mpi26EventDataPCIeEnumeration_t *event_data =
  945. (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
  946. pr_info(MPT3SAS_FMT "PCIE Enumeration: (%s)", ioc->name,
  947. (event_data->ReasonCode ==
  948. MPI26_EVENT_PCIE_ENUM_RC_STARTED) ?
  949. "start" : "stop");
  950. if (event_data->EnumerationStatus)
  951. pr_info("enumeration_status(0x%08x)",
  952. le32_to_cpu(event_data->EnumerationStatus));
  953. pr_info("\n");
  954. return;
  955. }
  956. case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
  957. desc = "PCIE Topology Change List";
  958. break;
  959. }
  960. if (!desc)
  961. return;
  962. pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
  963. }
  964. /**
  965. * _base_sas_log_info - verbose translation of firmware log info
  966. * @ioc: per adapter object
  967. * @log_info: log info
  968. *
  969. * Return nothing.
  970. */
  971. static void
  972. _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc , u32 log_info)
  973. {
  974. union loginfo_type {
  975. u32 loginfo;
  976. struct {
  977. u32 subcode:16;
  978. u32 code:8;
  979. u32 originator:4;
  980. u32 bus_type:4;
  981. } dw;
  982. };
  983. union loginfo_type sas_loginfo;
  984. char *originator_str = NULL;
  985. sas_loginfo.loginfo = log_info;
  986. if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
  987. return;
  988. /* each nexus loss loginfo */
  989. if (log_info == 0x31170000)
  990. return;
  991. /* eat the loginfos associated with task aborts */
  992. if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
  993. 0x31140000 || log_info == 0x31130000))
  994. return;
  995. switch (sas_loginfo.dw.originator) {
  996. case 0:
  997. originator_str = "IOP";
  998. break;
  999. case 1:
  1000. originator_str = "PL";
  1001. break;
  1002. case 2:
  1003. if (!ioc->hide_ir_msg)
  1004. originator_str = "IR";
  1005. else
  1006. originator_str = "WarpDrive";
  1007. break;
  1008. }
  1009. pr_warn(MPT3SAS_FMT
  1010. "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
  1011. ioc->name, log_info,
  1012. originator_str, sas_loginfo.dw.code,
  1013. sas_loginfo.dw.subcode);
  1014. }
  1015. /**
  1016. * _base_display_reply_info -
  1017. * @ioc: per adapter object
  1018. * @smid: system request message index
  1019. * @msix_index: MSIX table index supplied by the OS
  1020. * @reply: reply message frame(lower 32bit addr)
  1021. *
  1022. * Return nothing.
  1023. */
  1024. static void
  1025. _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
  1026. u32 reply)
  1027. {
  1028. MPI2DefaultReply_t *mpi_reply;
  1029. u16 ioc_status;
  1030. u32 loginfo = 0;
  1031. mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
  1032. if (unlikely(!mpi_reply)) {
  1033. pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
  1034. ioc->name, __FILE__, __LINE__, __func__);
  1035. return;
  1036. }
  1037. ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
  1038. if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
  1039. (ioc->logging_level & MPT_DEBUG_REPLY)) {
  1040. _base_sas_ioc_info(ioc , mpi_reply,
  1041. mpt3sas_base_get_msg_frame(ioc, smid));
  1042. }
  1043. if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
  1044. loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
  1045. _base_sas_log_info(ioc, loginfo);
  1046. }
  1047. if (ioc_status || loginfo) {
  1048. ioc_status &= MPI2_IOCSTATUS_MASK;
  1049. mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
  1050. }
  1051. }
  1052. /**
  1053. * mpt3sas_base_done - base internal command completion routine
  1054. * @ioc: per adapter object
  1055. * @smid: system request message index
  1056. * @msix_index: MSIX table index supplied by the OS
  1057. * @reply: reply message frame(lower 32bit addr)
  1058. *
  1059. * Return 1 meaning mf should be freed from _base_interrupt
  1060. * 0 means the mf is freed from this function.
  1061. */
  1062. u8
  1063. mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
  1064. u32 reply)
  1065. {
  1066. MPI2DefaultReply_t *mpi_reply;
  1067. mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
  1068. if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
  1069. return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
  1070. if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
  1071. return 1;
  1072. ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
  1073. if (mpi_reply) {
  1074. ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
  1075. memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
  1076. }
  1077. ioc->base_cmds.status &= ~MPT3_CMD_PENDING;
  1078. complete(&ioc->base_cmds.done);
  1079. return 1;
  1080. }
  1081. /**
  1082. * _base_async_event - main callback handler for firmware asyn events
  1083. * @ioc: per adapter object
  1084. * @msix_index: MSIX table index supplied by the OS
  1085. * @reply: reply message frame(lower 32bit addr)
  1086. *
  1087. * Return 1 meaning mf should be freed from _base_interrupt
  1088. * 0 means the mf is freed from this function.
  1089. */
  1090. static u8
  1091. _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
  1092. {
  1093. Mpi2EventNotificationReply_t *mpi_reply;
  1094. Mpi2EventAckRequest_t *ack_request;
  1095. u16 smid;
  1096. struct _event_ack_list *delayed_event_ack;
  1097. mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
  1098. if (!mpi_reply)
  1099. return 1;
  1100. if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
  1101. return 1;
  1102. _base_display_event_data(ioc, mpi_reply);
  1103. if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
  1104. goto out;
  1105. smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
  1106. if (!smid) {
  1107. delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
  1108. GFP_ATOMIC);
  1109. if (!delayed_event_ack)
  1110. goto out;
  1111. INIT_LIST_HEAD(&delayed_event_ack->list);
  1112. delayed_event_ack->Event = mpi_reply->Event;
  1113. delayed_event_ack->EventContext = mpi_reply->EventContext;
  1114. list_add_tail(&delayed_event_ack->list,
  1115. &ioc->delayed_event_ack_list);
  1116. dewtprintk(ioc, pr_info(MPT3SAS_FMT
  1117. "DELAYED: EVENT ACK: event (0x%04x)\n",
  1118. ioc->name, le16_to_cpu(mpi_reply->Event)));
  1119. goto out;
  1120. }
  1121. ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
  1122. memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
  1123. ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
  1124. ack_request->Event = mpi_reply->Event;
  1125. ack_request->EventContext = mpi_reply->EventContext;
  1126. ack_request->VF_ID = 0; /* TODO */
  1127. ack_request->VP_ID = 0;
  1128. mpt3sas_base_put_smid_default(ioc, smid);
  1129. out:
  1130. /* scsih callback handler */
  1131. mpt3sas_scsih_event_callback(ioc, msix_index, reply);
  1132. /* ctl callback handler */
  1133. mpt3sas_ctl_event_callback(ioc, msix_index, reply);
  1134. return 1;
  1135. }
  1136. static struct scsiio_tracker *
  1137. _get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  1138. {
  1139. struct scsi_cmnd *cmd;
  1140. if (WARN_ON(!smid) ||
  1141. WARN_ON(smid >= ioc->hi_priority_smid))
  1142. return NULL;
  1143. cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
  1144. if (cmd)
  1145. return scsi_cmd_priv(cmd);
  1146. return NULL;
  1147. }
  1148. /**
  1149. * _base_get_cb_idx - obtain the callback index
  1150. * @ioc: per adapter object
  1151. * @smid: system request message index
  1152. *
  1153. * Return callback index.
  1154. */
  1155. static u8
  1156. _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  1157. {
  1158. int i;
  1159. u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
  1160. u8 cb_idx = 0xFF;
  1161. if (smid < ioc->hi_priority_smid) {
  1162. struct scsiio_tracker *st;
  1163. if (smid < ctl_smid) {
  1164. st = _get_st_from_smid(ioc, smid);
  1165. if (st)
  1166. cb_idx = st->cb_idx;
  1167. } else if (smid == ctl_smid)
  1168. cb_idx = ioc->ctl_cb_idx;
  1169. } else if (smid < ioc->internal_smid) {
  1170. i = smid - ioc->hi_priority_smid;
  1171. cb_idx = ioc->hpr_lookup[i].cb_idx;
  1172. } else if (smid <= ioc->hba_queue_depth) {
  1173. i = smid - ioc->internal_smid;
  1174. cb_idx = ioc->internal_lookup[i].cb_idx;
  1175. }
  1176. return cb_idx;
  1177. }
  1178. /**
  1179. * _base_mask_interrupts - disable interrupts
  1180. * @ioc: per adapter object
  1181. *
  1182. * Disabling ResetIRQ, Reply and Doorbell Interrupts
  1183. *
  1184. * Return nothing.
  1185. */
  1186. static void
  1187. _base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
  1188. {
  1189. u32 him_register;
  1190. ioc->mask_interrupts = 1;
  1191. him_register = readl(&ioc->chip->HostInterruptMask);
  1192. him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
  1193. writel(him_register, &ioc->chip->HostInterruptMask);
  1194. readl(&ioc->chip->HostInterruptMask);
  1195. }
  1196. /**
  1197. * _base_unmask_interrupts - enable interrupts
  1198. * @ioc: per adapter object
  1199. *
  1200. * Enabling only Reply Interrupts
  1201. *
  1202. * Return nothing.
  1203. */
  1204. static void
  1205. _base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
  1206. {
  1207. u32 him_register;
  1208. him_register = readl(&ioc->chip->HostInterruptMask);
  1209. him_register &= ~MPI2_HIM_RIM;
  1210. writel(him_register, &ioc->chip->HostInterruptMask);
  1211. ioc->mask_interrupts = 0;
  1212. }
/*
 * 64-bit reply post descriptor, viewable either as one word or as
 * low/high 32-bit halves (the halves are used to spot the unused
 * 0xFFFFFFFF marker in _base_interrupt).
 */
union reply_descriptor {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
};
/**
 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
 * @irq: irq number (not used)
 * @bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
 *
 * Walks the reply post queue for this reply_q, dispatching each reply
 * descriptor to its registered callback, recycling address-reply frames
 * onto the reply free queue, and finally updating the reply post host
 * index register so the firmware can reuse the consumed entries.
 *
 * Return IRQ_HANDLED if processed, else IRQ_NONE.
 */
static irqreturn_t
_base_interrupt(int irq, void *bus_id)
{
	struct adapter_reply_queue *reply_q = bus_id;
	union reply_descriptor rd;
	u32 completed_cmds;
	u8 request_desript_type;
	u16 smid;
	u8 cb_idx;
	u32 reply;
	u8 msix_index = reply_q->msix_index;
	struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
	Mpi2ReplyDescriptorsUnion_t *rpf;
	u8 rc;

	if (ioc->mask_interrupts)
		return IRQ_NONE;

	/* allow only one concurrent pass over this reply queue */
	if (!atomic_add_unless(&reply_q->busy, 1, 1))
		return IRQ_NONE;

	rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
	request_desript_type = rpf->Default.ReplyFlags
	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
	if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
		atomic_dec(&reply_q->busy);
		return IRQ_NONE;
	}

	completed_cmds = 0;
	cb_idx = 0xFF;
	do {
		rd.word = le64_to_cpu(rpf->Words);
		/* all-ones halves mean firmware has not posted here yet */
		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
			goto out;
		reply = 0;
		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
		if (request_desript_type ==
		    MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
		    request_desript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
		    request_desript_type ==
		    MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
			/* success descriptors carry no reply frame */
			cb_idx = _base_get_cb_idx(ioc, smid);
			if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
			    (likely(mpt_callbacks[cb_idx] != NULL))) {
				rc = mpt_callbacks[cb_idx](ioc, smid,
				    msix_index, 0);
				if (rc)
					mpt3sas_base_free_smid(ioc, smid);
			}
		} else if (request_desript_type ==
		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
			reply = le32_to_cpu(
			    rpf->AddressReply.ReplyFrameAddress);
			/* sanity-check the frame address from firmware */
			if (reply > ioc->reply_dma_max_address ||
			    reply < ioc->reply_dma_min_address)
				reply = 0;
			if (smid) {
				cb_idx = _base_get_cb_idx(ioc, smid);
				if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
				    (likely(mpt_callbacks[cb_idx] != NULL))) {
					rc = mpt_callbacks[cb_idx](ioc, smid,
					    msix_index, reply);
					if (reply)
						_base_display_reply_info(ioc,
						    smid, msix_index, reply);
					if (rc)
						mpt3sas_base_free_smid(ioc,
						    smid);
				}
			} else {
				/* smid == 0 means an async firmware event */
				_base_async_event(ioc, msix_index, reply);
			}

			/* reply free queue handling: recycle the frame */
			if (reply) {
				ioc->reply_free_host_index =
				    (ioc->reply_free_host_index ==
				    (ioc->reply_free_queue_depth - 1)) ?
				    0 : ioc->reply_free_host_index + 1;
				ioc->reply_free[ioc->reply_free_host_index] =
				    cpu_to_le32(reply);
				if (ioc->is_mcpu_endpoint)
					_base_clone_reply_to_sys_mem(ioc,
					    reply,
					    ioc->reply_free_host_index);
				writel(ioc->reply_free_host_index,
				    &ioc->chip->ReplyFreeHostIndex);
			}
		}

		/* mark the descriptor consumed and advance (with wrap) */
		rpf->Words = cpu_to_le64(ULLONG_MAX);
		reply_q->reply_post_host_index =
		    (reply_q->reply_post_host_index ==
		    (ioc->reply_post_queue_depth - 1)) ? 0 :
		    reply_q->reply_post_host_index + 1;
		request_desript_type =
		    reply_q->reply_post_free[reply_q->reply_post_host_index].
		    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		completed_cmds++;
		/* Update the reply post host index after continuously
		 * processing the threshold number of Reply Descriptors.
		 * So that FW can find enough entries to post the Reply
		 * Descriptors in the reply descriptor post queue.
		 */
		if (completed_cmds > ioc->hba_queue_depth/3) {
			if (ioc->combined_reply_queue) {
				writel(reply_q->reply_post_host_index |
				    ((msix_index & 7) <<
				    MPI2_RPHI_MSIX_INDEX_SHIFT),
				    ioc->replyPostRegisterIndex[msix_index/8]);
			} else {
				writel(reply_q->reply_post_host_index |
				    (msix_index <<
				    MPI2_RPHI_MSIX_INDEX_SHIFT),
				    &ioc->chip->ReplyPostHostIndex);
			}
			completed_cmds = 1;
		}
		if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
			goto out;
		if (!reply_q->reply_post_host_index)
			rpf = reply_q->reply_post_free;
		else
			rpf++;
	} while (1);

 out:
	if (!completed_cmds) {
		atomic_dec(&reply_q->busy);
		return IRQ_NONE;
	}

	if (ioc->is_warpdrive) {
		writel(reply_q->reply_post_host_index,
		    ioc->reply_post_host_index[msix_index]);
		atomic_dec(&reply_q->busy);
		return IRQ_HANDLED;
	}

	/* Update Reply Post Host Index.
	 * For those HBA's which support combined reply queue feature
	 * 1. Get the correct Supplemental Reply Post Host Index Register.
	 *    i.e. (msix_index / 8)th entry from Supplemental Reply Post Host
	 *    Index Register address bank i.e replyPostRegisterIndex[],
	 * 2. Then update this register with new reply host index value
	 *    in ReplyPostIndex field and the MSIxIndex field with
	 *    msix_index value reduced to a value between 0 and 7,
	 *    using a modulo 8 operation. Since each Supplemental Reply Post
	 *    Host Index Register supports 8 MSI-X vectors.
	 *
	 * For other HBA's just update the Reply Post Host Index register with
	 * new reply host index value in ReplyPostIndex Field and msix_index
	 * value in MSIxIndex field.
	 */
	if (ioc->combined_reply_queue)
		writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
		    MPI2_RPHI_MSIX_INDEX_SHIFT),
		    ioc->replyPostRegisterIndex[msix_index/8]);
	else
		writel(reply_q->reply_post_host_index | (msix_index <<
		    MPI2_RPHI_MSIX_INDEX_SHIFT),
		    &ioc->chip->ReplyPostHostIndex);
	atomic_dec(&reply_q->busy);
	return IRQ_HANDLED;
}
  1386. /**
  1387. * _base_is_controller_msix_enabled - is controller support muli-reply queues
  1388. * @ioc: per adapter object
  1389. *
  1390. */
  1391. static inline int
  1392. _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
  1393. {
  1394. return (ioc->facts.IOCCapabilities &
  1395. MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
  1396. }
  1397. /**
  1398. * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
  1399. * @ioc: per adapter object
  1400. * Context: non ISR conext
  1401. *
  1402. * Called when a Task Management request has completed.
  1403. *
  1404. * Return nothing.
  1405. */
  1406. void
  1407. mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
  1408. {
  1409. struct adapter_reply_queue *reply_q;
  1410. /* If MSIX capability is turned off
  1411. * then multi-queues are not enabled
  1412. */
  1413. if (!_base_is_controller_msix_enabled(ioc))
  1414. return;
  1415. list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
  1416. if (ioc->shost_recovery || ioc->remove_host ||
  1417. ioc->pci_error_recovery)
  1418. return;
  1419. /* TMs are on msix_index == 0 */
  1420. if (reply_q->msix_index == 0)
  1421. continue;
  1422. synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
  1423. }
  1424. }
/**
 * mpt3sas_base_release_callback_handler - clear interrupt callback handler
 * @cb_idx: callback index
 *
 * Frees the slot in the global mpt_callbacks[] table so it can be
 * handed out again by mpt3sas_base_register_callback_handler().
 *
 * Return nothing.
 */
void
mpt3sas_base_release_callback_handler(u8 cb_idx)
{
	mpt_callbacks[cb_idx] = NULL;
}
  1436. /**
  1437. * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
  1438. * @cb_func: callback function
  1439. *
  1440. * Returns cb_func.
  1441. */
  1442. u8
  1443. mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
  1444. {
  1445. u8 cb_idx;
  1446. for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
  1447. if (mpt_callbacks[cb_idx] == NULL)
  1448. break;
  1449. mpt_callbacks[cb_idx] = cb_func;
  1450. return cb_idx;
  1451. }
  1452. /**
  1453. * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
  1454. *
  1455. * Return nothing.
  1456. */
  1457. void
  1458. mpt3sas_base_initialize_callback_handler(void)
  1459. {
  1460. u8 cb_idx;
  1461. for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
  1462. mpt3sas_base_release_callback_handler(cb_idx);
  1463. }
  1464. /**
  1465. * _base_build_zero_len_sge - build zero length sg entry
  1466. * @ioc: per adapter object
  1467. * @paddr: virtual address for SGE
  1468. *
  1469. * Create a zero length scatter gather entry to insure the IOCs hardware has
  1470. * something to use if the target device goes brain dead and tries
  1471. * to send data even when none is asked for.
  1472. *
  1473. * Return nothing.
  1474. */
  1475. static void
  1476. _base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
  1477. {
  1478. u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
  1479. MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
  1480. MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
  1481. MPI2_SGE_FLAGS_SHIFT);
  1482. ioc->base_add_sg_single(paddr, flags_length, -1);
  1483. }
  1484. /**
  1485. * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
  1486. * @paddr: virtual address for SGE
  1487. * @flags_length: SGE flags and data transfer length
  1488. * @dma_addr: Physical address
  1489. *
  1490. * Return nothing.
  1491. */
  1492. static void
  1493. _base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
  1494. {
  1495. Mpi2SGESimple32_t *sgel = paddr;
  1496. flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
  1497. MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
  1498. sgel->FlagsLength = cpu_to_le32(flags_length);
  1499. sgel->Address = cpu_to_le32(dma_addr);
  1500. }
  1501. /**
  1502. * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
  1503. * @paddr: virtual address for SGE
  1504. * @flags_length: SGE flags and data transfer length
  1505. * @dma_addr: Physical address
  1506. *
  1507. * Return nothing.
  1508. */
  1509. static void
  1510. _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
  1511. {
  1512. Mpi2SGESimple64_t *sgel = paddr;
  1513. flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
  1514. MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
  1515. sgel->FlagsLength = cpu_to_le32(flags_length);
  1516. sgel->Address = cpu_to_le64(dma_addr);
  1517. }
  1518. /**
  1519. * _base_get_chain_buffer_tracker - obtain chain tracker
  1520. * @ioc: per adapter object
  1521. * @scmd: SCSI commands of the IO request
  1522. *
  1523. * Returns chain tracker from chain_lookup table using key as
  1524. * smid and smid's chain_offset.
  1525. */
  1526. static struct chain_tracker *
  1527. _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
  1528. struct scsi_cmnd *scmd)
  1529. {
  1530. struct chain_tracker *chain_req;
  1531. struct scsiio_tracker *st = scsi_cmd_priv(scmd);
  1532. u16 smid = st->smid;
  1533. u8 chain_offset =
  1534. atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);
  1535. if (chain_offset == ioc->chains_needed_per_io)
  1536. return NULL;
  1537. chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset];
  1538. atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
  1539. return chain_req;
  1540. }
  1541. /**
  1542. * _base_build_sg - build generic sg
  1543. * @ioc: per adapter object
  1544. * @psge: virtual address for SGE
  1545. * @data_out_dma: physical address for WRITES
  1546. * @data_out_sz: data xfer size for WRITES
  1547. * @data_in_dma: physical address for READS
  1548. * @data_in_sz: data xfer size for READS
  1549. *
  1550. * Return nothing.
  1551. */
  1552. static void
  1553. _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
  1554. dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
  1555. size_t data_in_sz)
  1556. {
  1557. u32 sgl_flags;
  1558. if (!data_out_sz && !data_in_sz) {
  1559. _base_build_zero_len_sge(ioc, psge);
  1560. return;
  1561. }
  1562. if (data_out_sz && data_in_sz) {
  1563. /* WRITE sgel first */
  1564. sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
  1565. MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
  1566. sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
  1567. ioc->base_add_sg_single(psge, sgl_flags |
  1568. data_out_sz, data_out_dma);
  1569. /* incr sgel */
  1570. psge += ioc->sge_size;
  1571. /* READ sgel last */
  1572. sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
  1573. MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
  1574. MPI2_SGE_FLAGS_END_OF_LIST);
  1575. sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
  1576. ioc->base_add_sg_single(psge, sgl_flags |
  1577. data_in_sz, data_in_dma);
  1578. } else if (data_out_sz) /* WRITE */ {
  1579. sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
  1580. MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
  1581. MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
  1582. sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
  1583. ioc->base_add_sg_single(psge, sgl_flags |
  1584. data_out_sz, data_out_dma);
  1585. } else if (data_in_sz) /* READ */ {
  1586. sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
  1587. MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
  1588. MPI2_SGE_FLAGS_END_OF_LIST);
  1589. sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
  1590. ioc->base_add_sg_single(psge, sgl_flags |
  1591. data_in_sz, data_in_dma);
  1592. }
  1593. }
/* IEEE format sgls */
/**
 * _base_build_nvme_prp - This function is called for NVMe end devices to build
 * a native SGL (NVMe PRP). The native SGL is built starting in the first PRP
 * entry of the NVMe message (PRP1). If the data buffer is small enough to be
 * described entirely using PRP1, then PRP2 is not used. If needed, PRP2 is
 * used to describe a larger data buffer. If the data buffer is too large to
 * describe using the two PRP entries inside the NVMe message, then PRP1
 * describes the first data memory segment, and PRP2 contains a pointer to a PRP
 * list located elsewhere in memory to describe the remaining data memory
 * segments. The PRP list will be contiguous.
 * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP
 * consists of a list of PRP entries to describe a number of noncontiguous
 * physical memory segments as a single memory buffer, just as a SGL does. Note
 * however, that this function is only used by the IOCTL call, so the memory
 * given will be guaranteed to be contiguous. There is no need to translate
 * non-contiguous SGL into a PRP in this case. All PRPs will describe
 * contiguous space that is one page size each.
 *
 * Each NVMe message contains two PRP entries. The first (PRP1) either contains
 * a PRP list pointer or a PRP element, depending upon the command. PRP2
 * contains the second PRP element if the memory being described fits within 2
 * PRP entries, or a PRP list pointer if the PRP spans more than two entries.
 *
 * A PRP list pointer contains the address of a PRP list, structured as a linear
 * array of PRP entries. Each PRP entry in this list describes a segment of
 * physical memory.
 *
 * Each 64-bit PRP entry comprises an address and an offset field. The address
 * always points at the beginning of a 4KB physical memory page, and the offset
 * describes where within that 4KB page the memory segment begins. Only the
 * first element in a PRP list may contain a non-zero offset, implying that all
 * memory segments following the first begin at the start of a 4KB page.
 *
 * Each PRP element normally describes 4KB of physical memory, with exceptions
 * for the first and last elements in the list. If the memory being described
 * by the list begins at a non-zero offset within the first 4KB page, then the
 * first PRP element will contain a non-zero offset indicating where the region
 * begins within the 4KB page. The last memory segment may end before the end
 * of the 4KB segment, depending upon the overall size of the memory being
 * described by the PRP list.
 *
 * Since PRP entries lack any indication of size, the overall data buffer length
 * is used to determine where the end of the data memory buffer is located, and
 * how many PRP entries are required to describe it.
 *
 * @ioc: per adapter object
 * @smid: system request message index for getting associated SGL
 * @nvme_encap_request: the NVMe request msg frame pointer
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 *
 * Returns nothing.
 */
static void
_base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
	dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
	size_t data_in_sz)
{
	int prp_size = NVME_PRP_SIZE;
	__le64 *prp_entry, *prp1_entry, *prp2_entry;
	__le64 *prp_page;
	dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
	u32 offset, entry_len;
	u32 page_mask_result, page_mask;
	size_t length;

	/*
	 * Not all commands require a data transfer. If no data, just return
	 * without constructing any PRP.
	 */
	if (!data_in_sz && !data_out_sz)
		return;
	/*
	 * Set pointers to PRP1 and PRP2, which are in the NVMe command.
	 * PRP1 is located at a 24 byte offset from the start of the NVMe
	 * command. Then set the current PRP entry pointer to PRP1.
	 */
	prp1_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
	    NVME_CMD_PRP1_OFFSET);
	prp2_entry = (__le64 *)(nvme_encap_request->NVMe_Command +
	    NVME_CMD_PRP2_OFFSET);
	prp_entry = prp1_entry;
	/*
	 * For the PRP entries, use the specially allocated buffer of
	 * contiguous memory.
	 */
	prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
	prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
	/*
	 * Check if we are within 1 entry of a page boundary we don't
	 * want our first entry to be a PRP List entry.
	 */
	page_mask = ioc->page_size - 1;
	page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
	if (!page_mask_result) {
		/* Bump up to next page boundary. */
		prp_page = (__le64 *)((u8 *)prp_page + prp_size);
		prp_page_dma = prp_page_dma + prp_size;
	}
	/*
	 * Set PRP physical pointer, which initially points to the current PRP
	 * DMA memory page.
	 */
	prp_entry_dma = prp_page_dma;
	/* Get physical address and length of the data buffer. */
	if (data_in_sz) {
		dma_addr = data_in_dma;
		length = data_in_sz;
	} else {
		dma_addr = data_out_dma;
		length = data_out_sz;
	}
	/* Loop while the length is not zero. */
	while (length) {
		/*
		 * Check if we need to put a list pointer here if we are at
		 * page boundary - prp_size (8 bytes).
		 */
		page_mask_result = (prp_entry_dma + prp_size) & page_mask;
		if (!page_mask_result) {
			/*
			 * This is the last entry in a PRP List, so we need to
			 * put a PRP list pointer here. What this does is:
			 *   - bump the current memory pointer to the next
			 *     address, which will be the next full page.
			 *   - set the PRP Entry to point to that page. This
			 *     is now the PRP List pointer.
			 *   - bump the PRP Entry pointer the start of the
			 *     next page. Since all of this PRP memory is
			 *     contiguous, no need to get a new page - it's
			 *     just the next address.
			 *
			 * NOTE(review): prp_entry_dma advances by 1 (one
			 * byte), not by prp_size, here and in the list-fill
			 * branch below, while the boundary test above adds
			 * prp_size — verify this stride against the intended
			 * PRP layout.
			 */
			prp_entry_dma++;
			*prp_entry = cpu_to_le64(prp_entry_dma);
			prp_entry++;
		}
		/* Need to handle if entry will be part of a page. */
		offset = dma_addr & page_mask;
		entry_len = ioc->page_size - offset;
		if (prp_entry == prp1_entry) {
			/*
			 * Must fill in the first PRP pointer (PRP1) before
			 * moving on.
			 */
			*prp1_entry = cpu_to_le64(dma_addr);
			/*
			 * Now point to the second PRP entry within the
			 * command (PRP2).
			 */
			prp_entry = prp2_entry;
		} else if (prp_entry == prp2_entry) {
			/*
			 * Should the PRP2 entry be a PRP List pointer or just
			 * a regular PRP pointer? If there is more than one
			 * more page of data, must use a PRP List pointer.
			 */
			if (length > ioc->page_size) {
				/*
				 * PRP2 will contain a PRP List pointer because
				 * more PRP's are needed with this command. The
				 * list will start at the beginning of the
				 * contiguous buffer.
				 */
				*prp2_entry = cpu_to_le64(prp_entry_dma);
				/*
				 * The next PRP Entry will be the start of the
				 * first PRP List.
				 */
				prp_entry = prp_page;
			} else {
				/*
				 * After this, the PRP Entries are complete.
				 * This command uses 2 PRP's and no PRP list.
				 */
				*prp2_entry = cpu_to_le64(dma_addr);
			}
		} else {
			/*
			 * Put entry in list and bump the addresses.
			 *
			 * After PRP1 and PRP2 are filled in, this will fill in
			 * all remaining PRP entries in a PRP List, one per
			 * each time through the loop.
			 */
			*prp_entry = cpu_to_le64(dma_addr);
			prp_entry++;
			prp_entry_dma++;
		}
		/*
		 * Bump the phys address of the command's data buffer by the
		 * entry_len.
		 */
		dma_addr += entry_len;
		/* Decrement length accounting for last partial page. */
		if (entry_len > length)
			length = 0;
		else
			length -= entry_len;
	}
}
  1797. /**
  1798. * base_make_prp_nvme -
  1799. * Prepare PRPs(Physical Region Page)- SGLs specific to NVMe drives only
  1800. *
  1801. * @ioc: per adapter object
  1802. * @scmd: SCSI command from the mid-layer
  1803. * @mpi_request: mpi request
  1804. * @smid: msg Index
  1805. * @sge_count: scatter gather element count.
  1806. *
  1807. * Returns: true: PRPs are built
  1808. * false: IEEE SGLs needs to be built
  1809. */
  1810. static void
  1811. base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
  1812. struct scsi_cmnd *scmd,
  1813. Mpi25SCSIIORequest_t *mpi_request,
  1814. u16 smid, int sge_count)
  1815. {
  1816. int sge_len, num_prp_in_chain = 0;
  1817. Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
  1818. __le64 *curr_buff;
  1819. dma_addr_t msg_dma, sge_addr, offset;
  1820. u32 page_mask, page_mask_result;
  1821. struct scatterlist *sg_scmd;
  1822. u32 first_prp_len;
  1823. int data_len = scsi_bufflen(scmd);
  1824. u32 nvme_pg_size;
  1825. nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
  1826. /*
  1827. * Nvme has a very convoluted prp format. One prp is required
  1828. * for each page or partial page. Driver need to split up OS sg_list
  1829. * entries if it is longer than one page or cross a page
  1830. * boundary. Driver also have to insert a PRP list pointer entry as
  1831. * the last entry in each physical page of the PRP list.
  1832. *
  1833. * NOTE: The first PRP "entry" is actually placed in the first
  1834. * SGL entry in the main message as IEEE 64 format. The 2nd
  1835. * entry in the main message is the chain element, and the rest
  1836. * of the PRP entries are built in the contiguous pcie buffer.
  1837. */
  1838. page_mask = nvme_pg_size - 1;
  1839. /*
  1840. * Native SGL is needed.
  1841. * Put a chain element in main message frame that points to the first
  1842. * chain buffer.
  1843. *
  1844. * NOTE: The ChainOffset field must be 0 when using a chain pointer to
  1845. * a native SGL.
  1846. */
  1847. /* Set main message chain element pointer */
  1848. main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
  1849. /*
  1850. * For NVMe the chain element needs to be the 2nd SG entry in the main
  1851. * message.
  1852. */
  1853. main_chain_element = (Mpi25IeeeSgeChain64_t *)
  1854. ((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
  1855. /*
  1856. * For the PRP entries, use the specially allocated buffer of
  1857. * contiguous memory. Normal chain buffers can't be used
  1858. * because each chain buffer would need to be the size of an OS
  1859. * page (4k).
  1860. */
  1861. curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
  1862. msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
  1863. main_chain_element->Address = cpu_to_le64(msg_dma);
  1864. main_chain_element->NextChainOffset = 0;
  1865. main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
  1866. MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
  1867. MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
  1868. /* Build first prp, sge need not to be page aligned*/
  1869. ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
  1870. sg_scmd = scsi_sglist(scmd);
  1871. sge_addr = sg_dma_address(sg_scmd);
  1872. sge_len = sg_dma_len(sg_scmd);
  1873. offset = sge_addr & page_mask;
  1874. first_prp_len = nvme_pg_size - offset;
  1875. ptr_first_sgl->Address = cpu_to_le64(sge_addr);
  1876. ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
  1877. data_len -= first_prp_len;
  1878. if (sge_len > first_prp_len) {
  1879. sge_addr += first_prp_len;
  1880. sge_len -= first_prp_len;
  1881. } else if (data_len && (sge_len == first_prp_len)) {
  1882. sg_scmd = sg_next(sg_scmd);
  1883. sge_addr = sg_dma_address(sg_scmd);
  1884. sge_len = sg_dma_len(sg_scmd);
  1885. }
  1886. for (;;) {
  1887. offset = sge_addr & page_mask;
  1888. /* Put PRP pointer due to page boundary*/
  1889. page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
  1890. if (unlikely(!page_mask_result)) {
  1891. scmd_printk(KERN_NOTICE,
  1892. scmd, "page boundary curr_buff: 0x%p\n",
  1893. curr_buff);
  1894. msg_dma += 8;
  1895. *curr_buff = cpu_to_le64(msg_dma);
  1896. curr_buff++;
  1897. num_prp_in_chain++;
  1898. }
  1899. *curr_buff = cpu_to_le64(sge_addr);
  1900. curr_buff++;
  1901. msg_dma += 8;
  1902. num_prp_in_chain++;
  1903. sge_addr += nvme_pg_size;
  1904. sge_len -= nvme_pg_size;
  1905. data_len -= nvme_pg_size;
  1906. if (data_len <= 0)
  1907. break;
  1908. if (sge_len > 0)
  1909. continue;
  1910. sg_scmd = sg_next(sg_scmd);
  1911. sge_addr = sg_dma_address(sg_scmd);
  1912. sge_len = sg_dma_len(sg_scmd);
  1913. }
  1914. main_chain_element->Length =
  1915. cpu_to_le32(num_prp_in_chain * sizeof(u64));
  1916. return;
  1917. }
  1918. static bool
  1919. base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
  1920. struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
  1921. {
  1922. u32 data_length = 0;
  1923. struct scatterlist *sg_scmd;
  1924. bool build_prp = true;
  1925. data_length = scsi_bufflen(scmd);
  1926. sg_scmd = scsi_sglist(scmd);
  1927. /* If Datalenth is <= 16K and number of SGE’s entries are <= 2
  1928. * we built IEEE SGL
  1929. */
  1930. if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
  1931. build_prp = false;
  1932. return build_prp;
  1933. }
  1934. /**
  1935. * _base_check_pcie_native_sgl - This function is called for PCIe end devices to
  1936. * determine if the driver needs to build a native SGL. If so, that native
  1937. * SGL is built in the special contiguous buffers allocated especially for
  1938. * PCIe SGL creation. If the driver will not build a native SGL, return
  1939. * TRUE and a normal IEEE SGL will be built. Currently this routine
  1940. * supports NVMe.
  1941. * @ioc: per adapter object
  1942. * @mpi_request: mf request pointer
  1943. * @smid: system request message index
  1944. * @scmd: scsi command
  1945. * @pcie_device: points to the PCIe device's info
  1946. *
  1947. * Returns 0 if native SGL was built, 1 if no SGL was built
  1948. */
  1949. static int
  1950. _base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
  1951. Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
  1952. struct _pcie_device *pcie_device)
  1953. {
  1954. struct scatterlist *sg_scmd;
  1955. int sges_left;
  1956. /* Get the SG list pointer and info. */
  1957. sg_scmd = scsi_sglist(scmd);
  1958. sges_left = scsi_dma_map(scmd);
  1959. if (sges_left < 0) {
  1960. sdev_printk(KERN_ERR, scmd->device,
  1961. "scsi_dma_map failed: request for %d bytes!\n",
  1962. scsi_bufflen(scmd));
  1963. return 1;
  1964. }
  1965. /* Check if we need to build a native SG list. */
  1966. if (base_is_prp_possible(ioc, pcie_device,
  1967. scmd, sges_left) == 0) {
  1968. /* We built a native SG list, just return. */
  1969. goto out;
  1970. }
  1971. /*
  1972. * Build native NVMe PRP.
  1973. */
  1974. base_make_prp_nvme(ioc, scmd, mpi_request,
  1975. smid, sges_left);
  1976. return 0;
  1977. out:
  1978. scsi_dma_unmap(scmd);
  1979. return 1;
  1980. }
  1981. /**
  1982. * _base_add_sg_single_ieee - add sg element for IEEE format
  1983. * @paddr: virtual address for SGE
  1984. * @flags: SGE flags
  1985. * @chain_offset: number of 128 byte elements from start of segment
  1986. * @length: data transfer length
  1987. * @dma_addr: Physical address
  1988. *
  1989. * Return nothing.
  1990. */
  1991. static void
  1992. _base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
  1993. dma_addr_t dma_addr)
  1994. {
  1995. Mpi25IeeeSgeChain64_t *sgel = paddr;
  1996. sgel->Flags = flags;
  1997. sgel->NextChainOffset = chain_offset;
  1998. sgel->Length = cpu_to_le32(length);
  1999. sgel->Address = cpu_to_le64(dma_addr);
  2000. }
  2001. /**
  2002. * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
  2003. * @ioc: per adapter object
  2004. * @paddr: virtual address for SGE
  2005. *
  2006. * Create a zero length scatter gather entry to insure the IOCs hardware has
  2007. * something to use if the target device goes brain dead and tries
  2008. * to send data even when none is asked for.
  2009. *
  2010. * Return nothing.
  2011. */
  2012. static void
  2013. _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
  2014. {
  2015. u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
  2016. MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
  2017. MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
  2018. _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
  2019. }
/**
 * _base_build_sg_scmd - main sg creation routine
 * pcie_device is unused here!
 * @ioc: per adapter object
 * @scmd: scsi command
 * @smid: system request message index
 * @unused: unused pcie_device pointer
 * Context: none.
 *
 * The main routine that builds scatter gather table from a given
 * scsi request sent via the .queuecommand main handler.
 *
 * Fills the main message frame with MPI-format SGEs; when the list does
 * not fit, continues into chain buffers obtained from
 * _base_get_chain_buffer_tracker().
 *
 * Returns 0 success, anything else error (-ENOMEM if the DMA mapping
 * failed, -1 if no chain buffer tracker was available)
 */
static int
_base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
	struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused)
{
	Mpi2SCSIIORequest_t *mpi_request;
	dma_addr_t chain_dma;
	struct scatterlist *sg_scmd;
	void *sg_local, *chain;
	u32 chain_offset;
	u32 chain_length;
	u32 chain_flags;
	int sges_left;
	u32 sges_in_segment;
	u32 sgl_flags;
	u32 sgl_flags_last_element;
	u32 sgl_flags_end_buffer;
	struct chain_tracker *chain_req;

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

	/* init scatter gather flags */
	sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
	if (scmd->sc_data_direction == DMA_TO_DEVICE)
		sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
	/* flags for the last SGE of a segment (more segments follow) */
	sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
	    << MPI2_SGE_FLAGS_SHIFT;
	/* flags for the very last SGE of the whole transfer */
	sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
	    << MPI2_SGE_FLAGS_SHIFT;
	sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;

	sg_scmd = scsi_sglist(scmd);
	sges_left = scsi_dma_map(scmd);
	if (sges_left < 0) {
		sdev_printk(KERN_ERR, scmd->device,
		 "pci_map_sg failed: request for %d bytes!\n",
		 scsi_bufflen(scmd));
		return -ENOMEM;
	}

	sg_local = &mpi_request->SGL;
	sges_in_segment = ioc->max_sges_in_main_message;
	if (sges_left <= sges_in_segment)
		goto fill_in_last_segment;

	/* ChainOffset is in units of 4-byte words from the frame start. */
	mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
	    (sges_in_segment * ioc->sge_size))/4;

	/* fill in main message segment when there is a chain following */
	while (sges_in_segment) {
		if (sges_in_segment == 1)
			ioc->base_add_sg_single(sg_local,
			    sgl_flags_last_element | sg_dma_len(sg_scmd),
			    sg_dma_address(sg_scmd));
		else
			ioc->base_add_sg_single(sg_local, sgl_flags |
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += ioc->sge_size;
		sges_left--;
		sges_in_segment--;
	}

	/* initializing the chain flags and pointers */
	chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
	chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
	if (!chain_req)
		return -1;
	chain = chain_req->chain_buffer;
	chain_dma = chain_req->chain_buffer_dma;
	do {
		sges_in_segment = (sges_left <=
		    ioc->max_sges_in_chain_message) ? sges_left :
		    ioc->max_sges_in_chain_message;
		/* chain_offset == 0 means this is the final chain segment */
		chain_offset = (sges_left == sges_in_segment) ?
		    0 : (sges_in_segment * ioc->sge_size)/4;
		chain_length = sges_in_segment * ioc->sge_size;
		if (chain_offset) {
			chain_offset = chain_offset <<
			    MPI2_SGE_CHAIN_OFFSET_SHIFT;
			/* reserve room for the next chain element itself */
			chain_length += ioc->sge_size;
		}
		ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
		    chain_length, chain_dma);
		sg_local = chain;
		if (!chain_offset)
			goto fill_in_last_segment;

		/* fill in chain segments */
		while (sges_in_segment) {
			if (sges_in_segment == 1)
				ioc->base_add_sg_single(sg_local,
				    sgl_flags_last_element |
				    sg_dma_len(sg_scmd),
				    sg_dma_address(sg_scmd));
			else
				ioc->base_add_sg_single(sg_local, sgl_flags |
				    sg_dma_len(sg_scmd),
				    sg_dma_address(sg_scmd));
			sg_scmd = sg_next(sg_scmd);
			sg_local += ioc->sge_size;
			sges_left--;
			sges_in_segment--;
		}

		chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
		if (!chain_req)
			return -1;
		chain = chain_req->chain_buffer;
		chain_dma = chain_req->chain_buffer_dma;
	} while (1);

 fill_in_last_segment:

	/* fill the last segment */
	while (sges_left) {
		if (sges_left == 1)
			ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		else
			ioc->base_add_sg_single(sg_local, sgl_flags |
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += ioc->sge_size;
		sges_left--;
	}

	return 0;
}
/**
 * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
 * @ioc: per adapter object
 * @scmd: scsi command
 * @smid: system request message index
 * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be
 * constructed on need.
 * Context: none.
 *
 * The main routine that builds scatter gather table from a given
 * scsi request sent via the .queuecommand main handler.
 *
 * Same layout strategy as _base_build_sg_scmd() but with IEEE 64-bit
 * SGEs; for NVMe end devices a native PRP SGL may be built instead via
 * _base_check_pcie_native_sgl().
 *
 * Returns 0 success, anything else error (-ENOMEM if the DMA mapping
 * failed, -1 if no chain buffer tracker was available)
 */
static int
_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
	struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device)
{
	Mpi25SCSIIORequest_t *mpi_request;
	dma_addr_t chain_dma;
	struct scatterlist *sg_scmd;
	void *sg_local, *chain;
	u32 chain_offset;
	u32 chain_length;
	int sges_left;
	u32 sges_in_segment;
	u8 simple_sgl_flags;
	u8 simple_sgl_flags_last;
	u8 chain_sgl_flags;
	struct chain_tracker *chain_req;

	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

	/* init scatter gather flags */
	simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
	simple_sgl_flags_last = simple_sgl_flags |
	    MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
	chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
	    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;

	/* Check if we need to build a native SG list. */
	if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request,
	    smid, scmd, pcie_device) == 0)) {
		/* We built a native SG list, just return. */
		return 0;
	}

	sg_scmd = scsi_sglist(scmd);
	sges_left = scsi_dma_map(scmd);
	if (sges_left < 0) {
		sdev_printk(KERN_ERR, scmd->device,
		 "pci_map_sg failed: request for %d bytes!\n",
		 scsi_bufflen(scmd));
		return -ENOMEM;
	}

	sg_local = &mpi_request->SGL;
	/* how many IEEE SGEs fit in the main frame after the header */
	sges_in_segment = (ioc->request_sz -
	    offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
	if (sges_left <= sges_in_segment)
		goto fill_in_last_segment;

	/* ChainOffset is in units of IEEE SGE slots; the last main-frame
	 * slot is reserved for the chain element itself.
	 */
	mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
	    (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee);

	/* fill in main message segment when there is a chain following */
	while (sges_in_segment > 1) {
		_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
		    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += ioc->sge_size_ieee;
		sges_left--;
		sges_in_segment--;
	}

	/* initializing the pointers */
	chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
	if (!chain_req)
		return -1;
	chain = chain_req->chain_buffer;
	chain_dma = chain_req->chain_buffer_dma;
	do {
		sges_in_segment = (sges_left <=
		    ioc->max_sges_in_chain_message) ? sges_left :
		    ioc->max_sges_in_chain_message;
		/* chain_offset == 0 means this is the final chain segment */
		chain_offset = (sges_left == sges_in_segment) ?
		    0 : sges_in_segment;
		chain_length = sges_in_segment * ioc->sge_size_ieee;
		if (chain_offset)
			/* reserve room for the next chain element itself */
			chain_length += ioc->sge_size_ieee;
		_base_add_sg_single_ieee(sg_local, chain_sgl_flags,
		    chain_offset, chain_length, chain_dma);
		sg_local = chain;
		if (!chain_offset)
			goto fill_in_last_segment;

		/* fill in chain segments */
		while (sges_in_segment) {
			_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
			sg_scmd = sg_next(sg_scmd);
			sg_local += ioc->sge_size_ieee;
			sges_left--;
			sges_in_segment--;
		}

		chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
		if (!chain_req)
			return -1;
		chain = chain_req->chain_buffer;
		chain_dma = chain_req->chain_buffer_dma;
	} while (1);

 fill_in_last_segment:

	/* fill the last segment */
	while (sges_left > 0) {
		if (sges_left == 1)
			_base_add_sg_single_ieee(sg_local,
			    simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
			    sg_dma_address(sg_scmd));
		else
			_base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
			    sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
		sg_scmd = sg_next(sg_scmd);
		sg_local += ioc->sge_size_ieee;
		sges_left--;
	}

	return 0;
}
  2270. /**
  2271. * _base_build_sg_ieee - build generic sg for IEEE format
  2272. * @ioc: per adapter object
  2273. * @psge: virtual address for SGE
  2274. * @data_out_dma: physical address for WRITES
  2275. * @data_out_sz: data xfer size for WRITES
  2276. * @data_in_dma: physical address for READS
  2277. * @data_in_sz: data xfer size for READS
  2278. *
  2279. * Return nothing.
  2280. */
  2281. static void
  2282. _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
  2283. dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
  2284. size_t data_in_sz)
  2285. {
  2286. u8 sgl_flags;
  2287. if (!data_out_sz && !data_in_sz) {
  2288. _base_build_zero_len_sge_ieee(ioc, psge);
  2289. return;
  2290. }
  2291. if (data_out_sz && data_in_sz) {
  2292. /* WRITE sgel first */
  2293. sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
  2294. MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
  2295. _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
  2296. data_out_dma);
  2297. /* incr sgel */
  2298. psge += ioc->sge_size_ieee;
  2299. /* READ sgel last */
  2300. sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
  2301. _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
  2302. data_in_dma);
  2303. } else if (data_out_sz) /* WRITE */ {
  2304. sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
  2305. MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
  2306. MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
  2307. _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
  2308. data_out_dma);
  2309. } else if (data_in_sz) /* READ */ {
  2310. sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
  2311. MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
  2312. MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
  2313. _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
  2314. data_in_dma);
  2315. }
  2316. }
/* Convert a page count reported by si_meminfo() to kilobytes. */
#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))

/**
 * _base_config_dma_addressing - set dma addressing
 * @ioc: per adapter object
 * @pdev: PCI device struct
 *
 * Tries 64-bit DMA masks first (unless this is an MCPU endpoint, which is
 * forced to 32-bit), falling back to 32-bit, and selects the matching
 * SGE-add helper and SGE size.
 *
 * NOTE(review): ioc->dma_mask is read before it is first assigned here;
 * presumably it is zero-initialized on first call and this branch only
 * matters on reconfiguration — confirm against the adapter init path.
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
{
	struct sysinfo s;
	u64 consistent_dma_mask;

	/* MCPU endpoints only support 32-bit addressing. */
	if (ioc->is_mcpu_endpoint)
		goto try_32bit;

	if (ioc->dma_mask)
		consistent_dma_mask = DMA_BIT_MASK(64);
	else
		consistent_dma_mask = DMA_BIT_MASK(32);

	if (sizeof(dma_addr_t) > 4) {
		const uint64_t required_mask =
		    dma_get_required_mask(&pdev->dev);
		/* only go 64-bit when the platform actually needs it and
		 * both streaming and coherent masks can be set
		 */
		if ((required_mask > DMA_BIT_MASK(32)) &&
		    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
		    !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
			ioc->base_add_sg_single = &_base_add_sg_single_64;
			ioc->sge_size = sizeof(Mpi2SGESimple64_t);
			ioc->dma_mask = 64;
			goto out;
		}
	}

 try_32bit:
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
	    && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
		ioc->base_add_sg_single = &_base_add_sg_single_32;
		ioc->sge_size = sizeof(Mpi2SGESimple32_t);
		ioc->dma_mask = 32;
	} else
		return -ENODEV;

 out:
	si_meminfo(&s);
	pr_info(MPT3SAS_FMT
		"%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
		ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));

	return 0;
}
  2363. static int
  2364. _base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
  2365. struct pci_dev *pdev)
  2366. {
  2367. if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
  2368. if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
  2369. return -ENODEV;
  2370. }
  2371. return 0;
  2372. }
  2373. /**
  2374. * _base_check_enable_msix - checks MSIX capabable.
  2375. * @ioc: per adapter object
  2376. *
  2377. * Check to see if card is capable of MSIX, and set number
  2378. * of available msix vectors
  2379. */
  2380. static int
  2381. _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
  2382. {
  2383. int base;
  2384. u16 message_control;
  2385. /* Check whether controller SAS2008 B0 controller,
  2386. * if it is SAS2008 B0 controller use IO-APIC instead of MSIX
  2387. */
  2388. if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
  2389. ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
  2390. return -EINVAL;
  2391. }
  2392. base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
  2393. if (!base) {
  2394. dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n",
  2395. ioc->name));
  2396. return -EINVAL;
  2397. }
  2398. /* get msix vector count */
  2399. /* NUMA_IO not supported for older controllers */
  2400. if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
  2401. ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
  2402. ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
  2403. ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
  2404. ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
  2405. ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
  2406. ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
  2407. ioc->msix_vector_count = 1;
  2408. else {
  2409. pci_read_config_word(ioc->pdev, base + 2, &message_control);
  2410. ioc->msix_vector_count = (message_control & 0x3FF) + 1;
  2411. }
  2412. dinitprintk(ioc, pr_info(MPT3SAS_FMT
  2413. "msix is supported, vector_count(%d)\n",
  2414. ioc->name, ioc->msix_vector_count));
  2415. return 0;
  2416. }
  2417. /**
  2418. * _base_free_irq - free irq
  2419. * @ioc: per adapter object
  2420. *
  2421. * Freeing respective reply_queue from the list.
  2422. */
  2423. static void
  2424. _base_free_irq(struct MPT3SAS_ADAPTER *ioc)
  2425. {
  2426. struct adapter_reply_queue *reply_q, *next;
  2427. if (list_empty(&ioc->reply_queue_list))
  2428. return;
  2429. list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
  2430. list_del(&reply_q->list);
  2431. free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
  2432. reply_q);
  2433. kfree(reply_q);
  2434. }
  2435. }
  2436. /**
  2437. * _base_request_irq - request irq
  2438. * @ioc: per adapter object
  2439. * @index: msix index into vector table
  2440. *
  2441. * Inserting respective reply_queue into the list.
  2442. */
  2443. static int
  2444. _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
  2445. {
  2446. struct pci_dev *pdev = ioc->pdev;
  2447. struct adapter_reply_queue *reply_q;
  2448. int r;
  2449. reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
  2450. if (!reply_q) {
  2451. pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
  2452. ioc->name, (int)sizeof(struct adapter_reply_queue));
  2453. return -ENOMEM;
  2454. }
  2455. reply_q->ioc = ioc;
  2456. reply_q->msix_index = index;
  2457. atomic_set(&reply_q->busy, 0);
  2458. if (ioc->msix_enable)
  2459. snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
  2460. ioc->driver_name, ioc->id, index);
  2461. else
  2462. snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
  2463. ioc->driver_name, ioc->id);
  2464. r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
  2465. IRQF_SHARED, reply_q->name, reply_q);
  2466. if (r) {
  2467. pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
  2468. reply_q->name, pci_irq_vector(pdev, index));
  2469. kfree(reply_q);
  2470. return -EBUSY;
  2471. }
  2472. INIT_LIST_HEAD(&reply_q->list);
  2473. list_add_tail(&reply_q->list, &ioc->reply_queue_list);
  2474. return 0;
  2475. }
/**
 * _base_assign_reply_queues - assigning msix index for each cpu
 * @ioc: per adapter object
 *
 * The enduser would need to set the affinity via /proc/irq/#/smp_affinity
 *
 * It would nice if we could call irq_set_affinity, however it is not
 * an exported symbol
 *
 * Fills ioc->cpu_msix_table so each online CPU maps to a reply queue's
 * MSI-X index: either from the PCI-managed affinity masks (when
 * smp_affinity_enable) or by dividing CPUs evenly across the queues.
 */
static void
_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
{
	unsigned int cpu, nr_cpus, nr_msix, index = 0;
	struct adapter_reply_queue *reply_q;

	if (!_base_is_controller_msix_enabled(ioc))
		return;

	memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);

	nr_cpus = num_online_cpus();
	/* cap the usable queue count at what the IOC firmware reports */
	nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
					       ioc->facts.MaxMSIxVectors);
	if (!nr_msix)
		return;

	if (smp_affinity_enable) {
		/* use the affinity masks the PCI layer already assigned */
		list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
			const cpumask_t *mask = pci_irq_get_affinity(ioc->pdev,
							reply_q->msix_index);
			if (!mask) {
				pr_warn(MPT3SAS_FMT "no affinity for msi %x\n",
					ioc->name, reply_q->msix_index);
				continue;
			}

			for_each_cpu_and(cpu, mask, cpu_online_mask) {
				if (cpu >= ioc->cpu_msix_table_sz)
					break;
				ioc->cpu_msix_table[cpu] = reply_q->msix_index;
			}
		}
		return;
	}

	/* manual round-robin: give each queue nr_cpus/nr_msix CPUs, with
	 * the first (nr_cpus % nr_msix) queues taking one extra CPU
	 */
	cpu = cpumask_first(cpu_online_mask);
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {

		unsigned int i, group = nr_cpus / nr_msix;

		if (cpu >= nr_cpus)
			break;

		if (index < nr_cpus % nr_msix)
			group++;

		for (i = 0 ; i < group ; i++) {
			ioc->cpu_msix_table[cpu] = reply_q->msix_index;
			cpu = cpumask_next(cpu, cpu_online_mask);
		}
		index++;
	}
}
  2529. /**
  2530. * _base_disable_msix - disables msix
  2531. * @ioc: per adapter object
  2532. *
  2533. */
  2534. static void
  2535. _base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
  2536. {
  2537. if (!ioc->msix_enable)
  2538. return;
  2539. pci_disable_msix(ioc->pdev);
  2540. ioc->msix_enable = 0;
  2541. }
/**
 * _base_enable_msix - enables msix, failback to io_apic
 * @ioc: per adapter object
 *
 * Computes the reply queue count from CPU count, MSI-X capability and the
 * module parameters (msix_disable, max_msix_vectors), allocates the
 * vectors and requests one IRQ per reply queue. On any failure falls back
 * to a single legacy (IO-APIC) interrupt.
 *
 * Returns 0 on success, negative errno when even the legacy fallback
 * could not be set up.
 */
static int
_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
{
	int r;
	int i, local_max_msix_vectors;
	u8 try_msix = 0;
	unsigned int irq_flags = PCI_IRQ_MSIX;

	/* msix_disable: -1 means default (use MSI-X), 0 means enabled */
	if (msix_disable == -1 || msix_disable == 0)
		try_msix = 1;

	if (!try_msix)
		goto try_ioapic;

	if (_base_check_enable_msix(ioc) != 0)
		goto try_ioapic;

	/* never more reply queues than CPUs or supported vectors */
	ioc->reply_queue_count = min_t(int, ioc->cpu_count,
		ioc->msix_vector_count);

	printk(MPT3SAS_FMT "MSI-X vectors supported: %d, no of cores"
	  ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
	  ioc->cpu_count, max_msix_vectors);

	/* without RDPQ and with the default parameter, limit vectors
	 * (a single vector in the kdump/reset_devices case)
	 */
	if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
		local_max_msix_vectors = (reset_devices) ? 1 : 8;
	else
		local_max_msix_vectors = max_msix_vectors;

	if (local_max_msix_vectors > 0)
		ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
			ioc->reply_queue_count);
	else if (local_max_msix_vectors == 0)
		goto try_ioapic;

	/* affinity spreading only helps when every CPU can get a vector */
	if (ioc->msix_vector_count < ioc->cpu_count)
		smp_affinity_enable = 0;

	if (smp_affinity_enable)
		irq_flags |= PCI_IRQ_AFFINITY;

	r = pci_alloc_irq_vectors(ioc->pdev, 1, ioc->reply_queue_count,
				  irq_flags);
	if (r < 0) {
		dfailprintk(ioc, pr_info(MPT3SAS_FMT
			"pci_alloc_irq_vectors failed (r=%d) !!!\n",
			ioc->name, r));
		goto try_ioapic;
	}

	ioc->msix_enable = 1;
	/* the PCI layer may grant fewer vectors than requested */
	ioc->reply_queue_count = r;
	for (i = 0; i < ioc->reply_queue_count; i++) {
		r = _base_request_irq(ioc, i);
		if (r) {
			_base_free_irq(ioc);
			_base_disable_msix(ioc);
			goto try_ioapic;
		}
	}

	return 0;

/* failback to io_apic interrupt routing */
 try_ioapic:

	ioc->reply_queue_count = 1;
	r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
	if (r < 0) {
		dfailprintk(ioc, pr_info(MPT3SAS_FMT
			"pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
			ioc->name, r));
	} else
		r = _base_request_irq(ioc, 0);

	return r;
}
/**
 * mpt3sas_base_unmap_resources - free controller resources
 * @ioc: per adapter object
 *
 * Tears down in the reverse order of mpt3sas_base_map_resources():
 * IRQs and MSI-X first, then the combined-reply-queue index array, the
 * register mapping, and finally the PCI regions/device. Safe to call
 * when the device is already disabled (pci_is_enabled() check).
 */
static void
mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
{
	struct pci_dev *pdev = ioc->pdev;

	dexitprintk(ioc, printk(MPT3SAS_FMT "%s\n",
		ioc->name, __func__));

	_base_free_irq(ioc);
	_base_disable_msix(ioc);

	if (ioc->combined_reply_queue) {
		kfree(ioc->replyPostRegisterIndex);
		ioc->replyPostRegisterIndex = NULL;
	}

	if (ioc->chip_phys) {
		iounmap(ioc->chip);
		/* chip_phys == 0 marks the mapping as gone */
		ioc->chip_phys = 0;
	}

	if (pci_is_enabled(pdev)) {
		pci_release_selected_regions(ioc->pdev, ioc->bars);
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
	}
}
  2635. /**
  2636. * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
  2637. * @ioc: per adapter object
  2638. *
  2639. * Returns 0 for success, non-zero for failure.
  2640. */
int
mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
{
	struct pci_dev *pdev = ioc->pdev;
	u32 memap_sz;
	u32 pio_sz;
	int i, r = 0;
	u64 pio_chip = 0;
	phys_addr_t chip_phys = 0;
	struct adapter_reply_queue *reply_q;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
	    ioc->name, __func__));

	/* Enable the device for memory-space accesses only. */
	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
	if (pci_enable_device_mem(pdev)) {
		pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
		    ioc->name);
		ioc->bars = 0;
		return -ENODEV;
	}

	if (pci_request_selected_regions(pdev, ioc->bars,
	    ioc->driver_name)) {
		pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
		    ioc->name);
		ioc->bars = 0;
		r = -ENODEV;
		goto out_fail;
	}

	/* AER (Advanced Error Reporting) hooks */
	pci_enable_pcie_error_reporting(pdev);

	pci_set_master(pdev);

	if (_base_config_dma_addressing(ioc, pdev) != 0) {
		pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
		    ioc->name, pci_name(pdev));
		r = -ENODEV;
		goto out_fail;
	}

	/*
	 * Walk the BARs and latch the first I/O-port resource and the first
	 * memory resource; the memory BAR is ioremap'ed as the chip register
	 * window.  The loop terminates as soon as both have been found.
	 */
	for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
	    (!memap_sz || !pio_sz); i++) {
		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
			if (pio_sz)
				continue;
			pio_chip = (u64)pci_resource_start(pdev, i);
			pio_sz = pci_resource_len(pdev, i);
		} else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			if (memap_sz)
				continue;
			ioc->chip_phys = pci_resource_start(pdev, i);
			chip_phys = ioc->chip_phys;
			memap_sz = pci_resource_len(pdev, i);
			ioc->chip = ioremap(ioc->chip_phys, memap_sz);
		}
	}

	if (ioc->chip == NULL) {
		pr_err(MPT3SAS_FMT "unable to map adapter memory! "
		    " or resource not found\n", ioc->name);
		r = -EINVAL;
		goto out_fail;
	}

	/* Quiesce adapter interrupts before talking to the IOC. */
	_base_mask_interrupts(ioc);

	r = _base_get_ioc_facts(ioc);
	if (r)
		goto out_fail;

	/* Latch RDPQ capability once; later remaps reuse the same choice. */
	if (!ioc->rdpq_array_enable_assigned) {
		ioc->rdpq_array_enable = ioc->rdpq_array_capable;
		ioc->rdpq_array_enable_assigned = 1;
	}

	r = _base_enable_msix(ioc);
	if (r)
		goto out_fail;

	/* Use the Combined reply queue feature only for SAS3 C0 & higher
	 * revision HBAs and also only when reply queue count is greater than 8
	 */
	if (ioc->combined_reply_queue && ioc->reply_queue_count > 8) {
		/* Determine the Supplemental Reply Post Host Index Registers
		 * Addresse. Supplemental Reply Post Host Index Registers
		 * starts at offset MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET and
		 * each register is at offset bytes of
		 * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET from previous one.
		 */
		ioc->replyPostRegisterIndex = kcalloc(
		    ioc->combined_reply_index_count,
		    sizeof(resource_size_t *), GFP_KERNEL);
		if (!ioc->replyPostRegisterIndex) {
			dfailprintk(ioc, printk(MPT3SAS_FMT
			    "allocation for reply Post Register Index failed!!!\n",
			    ioc->name));
			r = -ENOMEM;
			goto out_fail;
		}

		for (i = 0; i < ioc->combined_reply_index_count; i++) {
			ioc->replyPostRegisterIndex[i] = (resource_size_t *)
			    ((u8 __force *)&ioc->chip->Doorbell +
			    MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
			    (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
		}
	} else
		ioc->combined_reply_queue = 0;

	if (ioc->is_warpdrive) {
		/* WarpDrive: per-queue reply post host index registers are at
		 * fixed offsets (0x4000 + 4*(i-1)) from the Doorbell register.
		 */
		ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
		    &ioc->chip->ReplyPostHostIndex;

		for (i = 1; i < ioc->cpu_msix_table_sz; i++)
			ioc->reply_post_host_index[i] =
			    (resource_size_t __iomem *)
			    ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
			    * 4)));
	}

	list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
		pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
		    reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
		    "IO-APIC enabled"),
		    pci_irq_vector(ioc->pdev, reply_q->msix_index));

	pr_info(MPT3SAS_FMT "iomem(%pap), mapped(0x%p), size(%d)\n",
	    ioc->name, &chip_phys, ioc->chip, memap_sz);
	pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
	    ioc->name, (unsigned long long)pio_chip, pio_sz);

	/* Save PCI configuration state for recovery from PCI AER/EEH errors */
	pci_save_state(pdev);
	return 0;

 out_fail:
	mpt3sas_base_unmap_resources(ioc);
	return r;
}
  2763. /**
  2764. * mpt3sas_base_get_msg_frame - obtain request mf pointer
  2765. * @ioc: per adapter object
  2766. * @smid: system request message index(smid zero is invalid)
  2767. *
  2768. * Returns virt pointer to message frame.
  2769. */
  2770. void *
  2771. mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  2772. {
  2773. return (void *)(ioc->request + (smid * ioc->request_sz));
  2774. }
  2775. /**
  2776. * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
  2777. * @ioc: per adapter object
  2778. * @smid: system request message index
  2779. *
  2780. * Returns virt pointer to sense buffer.
  2781. */
  2782. void *
  2783. mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  2784. {
  2785. return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
  2786. }
  2787. /**
  2788. * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
  2789. * @ioc: per adapter object
  2790. * @smid: system request message index
  2791. *
  2792. * Returns phys pointer to the low 32bit address of the sense buffer.
  2793. */
  2794. __le32
  2795. mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  2796. {
  2797. return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
  2798. SCSI_SENSE_BUFFERSIZE));
  2799. }
  2800. /**
  2801. * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
  2802. * @ioc: per adapter object
  2803. * @smid: system request message index
  2804. *
  2805. * Returns virt pointer to a PCIe SGL.
  2806. */
  2807. void *
  2808. mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  2809. {
  2810. return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
  2811. }
  2812. /**
  2813. * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
  2814. * @ioc: per adapter object
  2815. * @smid: system request message index
  2816. *
  2817. * Returns phys pointer to the address of the PCIe buffer.
  2818. */
  2819. dma_addr_t
  2820. mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  2821. {
  2822. return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
  2823. }
  2824. /**
  2825. * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
  2826. * @ioc: per adapter object
  2827. * @phys_addr: lower 32 physical addr of the reply
  2828. *
  2829. * Converts 32bit lower physical addr into a virt address.
  2830. */
  2831. void *
  2832. mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
  2833. {
  2834. if (!phys_addr)
  2835. return NULL;
  2836. return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
  2837. }
  2838. static inline u8
  2839. _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
  2840. {
  2841. return ioc->cpu_msix_table[raw_smp_processor_id()];
  2842. }
  2843. /**
  2844. * mpt3sas_base_get_smid - obtain a free smid from internal queue
  2845. * @ioc: per adapter object
  2846. * @cb_idx: callback index
  2847. *
  2848. * Returns smid (zero is invalid)
  2849. */
  2850. u16
  2851. mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
  2852. {
  2853. unsigned long flags;
  2854. struct request_tracker *request;
  2855. u16 smid;
  2856. spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
  2857. if (list_empty(&ioc->internal_free_list)) {
  2858. spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  2859. pr_err(MPT3SAS_FMT "%s: smid not available\n",
  2860. ioc->name, __func__);
  2861. return 0;
  2862. }
  2863. request = list_entry(ioc->internal_free_list.next,
  2864. struct request_tracker, tracker_list);
  2865. request->cb_idx = cb_idx;
  2866. smid = request->smid;
  2867. list_del(&request->tracker_list);
  2868. spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  2869. return smid;
  2870. }
  2871. /**
  2872. * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
  2873. * @ioc: per adapter object
  2874. * @cb_idx: callback index
  2875. * @scmd: pointer to scsi command object
  2876. *
  2877. * Returns smid (zero is invalid)
  2878. */
  2879. u16
  2880. mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
  2881. struct scsi_cmnd *scmd)
  2882. {
  2883. struct scsiio_tracker *request = scsi_cmd_priv(scmd);
  2884. unsigned int tag = scmd->request->tag;
  2885. u16 smid;
  2886. smid = tag + 1;
  2887. request->cb_idx = cb_idx;
  2888. request->msix_io = _base_get_msix_index(ioc);
  2889. request->smid = smid;
  2890. INIT_LIST_HEAD(&request->chain_list);
  2891. return smid;
  2892. }
  2893. /**
  2894. * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
  2895. * @ioc: per adapter object
  2896. * @cb_idx: callback index
  2897. *
  2898. * Returns smid (zero is invalid)
  2899. */
  2900. u16
  2901. mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
  2902. {
  2903. unsigned long flags;
  2904. struct request_tracker *request;
  2905. u16 smid;
  2906. spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
  2907. if (list_empty(&ioc->hpr_free_list)) {
  2908. spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  2909. return 0;
  2910. }
  2911. request = list_entry(ioc->hpr_free_list.next,
  2912. struct request_tracker, tracker_list);
  2913. request->cb_idx = cb_idx;
  2914. smid = request->smid;
  2915. list_del(&request->tracker_list);
  2916. spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  2917. return smid;
  2918. }
  2919. static void
  2920. _base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
  2921. {
  2922. /*
  2923. * See _wait_for_commands_to_complete() call with regards to this code.
  2924. */
  2925. if (ioc->shost_recovery && ioc->pending_io_count) {
  2926. ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
  2927. if (ioc->pending_io_count == 0)
  2928. wake_up(&ioc->reset_wq);
  2929. }
  2930. }
/* Reset a SCSI IO tracker back to its unused state. */
void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
	struct scsiio_tracker *st)
{
	/* smid 0 is invalid; hitting it here means the tracker was never
	 * associated with a request (or was already cleared).
	 */
	if (WARN_ON(st->smid == 0))
		return;
	st->cb_idx = 0xFF;	/* 0xFF marks the callback slot as unused */
	st->direct_io = 0;
	/* Release chain-frame accounting for this smid. */
	atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
}
  2940. /**
  2941. * mpt3sas_base_free_smid - put smid back on free_list
  2942. * @ioc: per adapter object
  2943. * @smid: system request message index
  2944. *
  2945. * Return nothing.
  2946. */
  2947. void
  2948. mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  2949. {
  2950. unsigned long flags;
  2951. int i;
  2952. if (smid < ioc->hi_priority_smid) {
  2953. struct scsiio_tracker *st;
  2954. st = _get_st_from_smid(ioc, smid);
  2955. if (!st) {
  2956. _base_recovery_check(ioc);
  2957. return;
  2958. }
  2959. mpt3sas_base_clear_st(ioc, st);
  2960. _base_recovery_check(ioc);
  2961. return;
  2962. }
  2963. spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
  2964. if (smid < ioc->internal_smid) {
  2965. /* hi-priority */
  2966. i = smid - ioc->hi_priority_smid;
  2967. ioc->hpr_lookup[i].cb_idx = 0xFF;
  2968. list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
  2969. } else if (smid <= ioc->hba_queue_depth) {
  2970. /* internal queue */
  2971. i = smid - ioc->internal_smid;
  2972. ioc->internal_lookup[i].cb_idx = 0xFF;
  2973. list_add(&ioc->internal_lookup[i].tracker_list,
  2974. &ioc->internal_free_list);
  2975. }
  2976. spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
  2977. }
  2978. /**
  2979. * _base_mpi_ep_writeq - 32 bit write to MMIO
  2980. * @b: data payload
  2981. * @addr: address in MMIO space
  2982. * @writeq_lock: spin lock
  2983. *
  2984. * This special handling for MPI EP to take care of 32 bit
  2985. * environment where its not quarenteed to send the entire word
  2986. * in one transfer.
  2987. */
  2988. static inline void
  2989. _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
  2990. spinlock_t *writeq_lock)
  2991. {
  2992. unsigned long flags;
  2993. __u64 data_out = b;
  2994. spin_lock_irqsave(writeq_lock, flags);
  2995. writel((u32)(data_out), addr);
  2996. writel((u32)(data_out >> 32), (addr + 4));
  2997. spin_unlock_irqrestore(writeq_lock, flags);
  2998. }
  2999. /**
  3000. * _base_writeq - 64 bit write to MMIO
  3001. * @ioc: per adapter object
  3002. * @b: data payload
  3003. * @addr: address in MMIO space
  3004. * @writeq_lock: spin lock
  3005. *
  3006. * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
  3007. * care of 32 bit environment where its not quarenteed to send the entire word
  3008. * in one transfer.
  3009. */
#if defined(writeq) && defined(CONFIG_64BIT)
static inline void
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
{
	/* 64-bit platform with a native writeq(): the store is a single
	 * atomic MMIO access, so the spinlock is unnecessary.
	 */
	writeq(b, addr);
}
#else
static inline void
_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
{
	/* No atomic 64-bit MMIO write available: fall back to two locked
	 * 32-bit writes (shared with the MPI endpoint path).
	 */
	_base_mpi_ep_writeq(b, addr, writeq_lock);
}
#endif
  3023. /**
  3024. * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
  3025. * @ioc: per adapter object
  3026. * @smid: system request message index
  3027. * @handle: device handle
  3028. *
  3029. * Return nothing.
  3030. */
static void
_base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
{
	Mpi2RequestDescriptorUnion_t descriptor;
	u64 *request = (u64 *)&descriptor;
	void *mpi_req_iomem;
	__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);

	/* Fix up the SG entries in the host-memory frame before copying. */
	_clone_sg_entries(ioc, (void *) mfp, smid);

	/* MPI endpoint HBAs consume request frames from device memory:
	 * copy the host-built frame into the chip's frame window for this
	 * smid before posting the descriptor.
	 */
	mpi_req_iomem = (void __force *)ioc->chip +
			MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
	_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
					ioc->request_sz);

	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
	descriptor.SCSIIO.LMID = 0;
	/* Locked 32-bit pair write: MPI endpoint can't rely on writeq(). */
	_base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
	    &ioc->scsi_lookup_lock);
}
  3051. /**
  3052. * _base_put_smid_scsi_io - send SCSI_IO request to firmware
  3053. * @ioc: per adapter object
  3054. * @smid: system request message index
  3055. * @handle: device handle
  3056. *
  3057. * Return nothing.
  3058. */
  3059. static void
  3060. _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
  3061. {
  3062. Mpi2RequestDescriptorUnion_t descriptor;
  3063. u64 *request = (u64 *)&descriptor;
  3064. descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
  3065. descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
  3066. descriptor.SCSIIO.SMID = cpu_to_le16(smid);
  3067. descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
  3068. descriptor.SCSIIO.LMID = 0;
  3069. _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
  3070. &ioc->scsi_lookup_lock);
  3071. }
  3072. /**
  3073. * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
  3074. * @ioc: per adapter object
  3075. * @smid: system request message index
  3076. * @handle: device handle
  3077. *
  3078. * Return nothing.
  3079. */
  3080. void
  3081. mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
  3082. u16 handle)
  3083. {
  3084. Mpi2RequestDescriptorUnion_t descriptor;
  3085. u64 *request = (u64 *)&descriptor;
  3086. descriptor.SCSIIO.RequestFlags =
  3087. MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
  3088. descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
  3089. descriptor.SCSIIO.SMID = cpu_to_le16(smid);
  3090. descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
  3091. descriptor.SCSIIO.LMID = 0;
  3092. _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
  3093. &ioc->scsi_lookup_lock);
  3094. }
  3095. /**
  3096. * mpt3sas_base_put_smid_hi_priority - send Task Management request to firmware
  3097. * @ioc: per adapter object
  3098. * @smid: system request message index
  3099. * @msix_task: msix_task will be same as msix of IO incase of task abort else 0.
  3100. * Return nothing.
  3101. */
void
mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
	u16 msix_task)
{
	Mpi2RequestDescriptorUnion_t descriptor;
	void *mpi_req_iomem;
	u64 *request;

	if (ioc->is_mcpu_endpoint) {
		MPI2RequestHeader_t *request_hdr;

		__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);

		request_hdr = (MPI2RequestHeader_t *)mfp;
		/* TBD 256 is offset within sys register. */
		/* mCPU endpoints read request frames from device memory, so
		 * copy the host-built frame into the chip's frame window.
		 */
		mpi_req_iomem = (void __force *)ioc->chip
				+ MPI_FRAME_START_OFFSET
				+ (smid * ioc->request_sz);
		_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
							ioc->request_sz);
	}
	request = (u64 *)&descriptor;

	descriptor.HighPriority.RequestFlags =
	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
	/* msix_task: same MSI-X vector as the I/O being aborted, else 0. */
	descriptor.HighPriority.MSIxIndex =  msix_task;
	descriptor.HighPriority.SMID = cpu_to_le16(smid);
	descriptor.HighPriority.LMID = 0;
	descriptor.HighPriority.Reserved1 = 0;
	if (ioc->is_mcpu_endpoint)
		/* mCPU path cannot use an atomic 64-bit MMIO write. */
		_base_mpi_ep_writeq(*request,
				&ioc->chip->RequestDescriptorPostLow,
				&ioc->scsi_lookup_lock);
	else
		_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
		    &ioc->scsi_lookup_lock);
}
  3135. /**
  3136. * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to
  3137. * firmware
  3138. * @ioc: per adapter object
  3139. * @smid: system request message index
  3140. *
  3141. * Return nothing.
  3142. */
  3143. void
  3144. mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
  3145. {
  3146. Mpi2RequestDescriptorUnion_t descriptor;
  3147. u64 *request = (u64 *)&descriptor;
  3148. descriptor.Default.RequestFlags =
  3149. MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
  3150. descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
  3151. descriptor.Default.SMID = cpu_to_le16(smid);
  3152. descriptor.Default.LMID = 0;
  3153. descriptor.Default.DescriptorTypeDependent = 0;
  3154. _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
  3155. &ioc->scsi_lookup_lock);
  3156. }
  3157. /**
  3158. * mpt3sas_base_put_smid_default - Default, primarily used for config pages
  3159. * @ioc: per adapter object
  3160. * @smid: system request message index
  3161. *
  3162. * Return nothing.
  3163. */
void
mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	Mpi2RequestDescriptorUnion_t descriptor;
	void *mpi_req_iomem;
	u64 *request;
	MPI2RequestHeader_t *request_hdr;

	if (ioc->is_mcpu_endpoint) {
		__le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);

		request_hdr = (MPI2RequestHeader_t *)mfp;

		/* Fix up SG entries, then copy the host-built frame into the
		 * chip's frame window: mCPU endpoints consume request frames
		 * from device memory.
		 */
		_clone_sg_entries(ioc, (void *) mfp, smid);
		/* TBD 256 is offset within sys register */
		mpi_req_iomem = (void __force *)ioc->chip +
			MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
		_base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
							ioc->request_sz);
	}
	request = (u64 *)&descriptor;

	descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
	descriptor.Default.SMID = cpu_to_le16(smid);
	descriptor.Default.LMID = 0;
	descriptor.Default.DescriptorTypeDependent = 0;
	if (ioc->is_mcpu_endpoint)
		/* mCPU path cannot use an atomic 64-bit MMIO write. */
		_base_mpi_ep_writeq(*request,
				&ioc->chip->RequestDescriptorPostLow,
				&ioc->scsi_lookup_lock);
	else
		_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
				&ioc->scsi_lookup_lock);
}
  3195. /**
  3196. * _base_display_OEMs_branding - Display branding string
  3197. * @ioc: per adapter object
  3198. *
  3199. * Return nothing.
  3200. */
  3201. static void
  3202. _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
  3203. {
  3204. if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
  3205. return;
  3206. switch (ioc->pdev->subsystem_vendor) {
  3207. case PCI_VENDOR_ID_INTEL:
  3208. switch (ioc->pdev->device) {
  3209. case MPI2_MFGPAGE_DEVID_SAS2008:
  3210. switch (ioc->pdev->subsystem_device) {
  3211. case MPT2SAS_INTEL_RMS2LL080_SSDID:
  3212. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3213. MPT2SAS_INTEL_RMS2LL080_BRANDING);
  3214. break;
  3215. case MPT2SAS_INTEL_RMS2LL040_SSDID:
  3216. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3217. MPT2SAS_INTEL_RMS2LL040_BRANDING);
  3218. break;
  3219. case MPT2SAS_INTEL_SSD910_SSDID:
  3220. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3221. MPT2SAS_INTEL_SSD910_BRANDING);
  3222. break;
  3223. default:
  3224. pr_info(MPT3SAS_FMT
  3225. "Intel(R) Controller: Subsystem ID: 0x%X\n",
  3226. ioc->name, ioc->pdev->subsystem_device);
  3227. break;
  3228. }
  3229. case MPI2_MFGPAGE_DEVID_SAS2308_2:
  3230. switch (ioc->pdev->subsystem_device) {
  3231. case MPT2SAS_INTEL_RS25GB008_SSDID:
  3232. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3233. MPT2SAS_INTEL_RS25GB008_BRANDING);
  3234. break;
  3235. case MPT2SAS_INTEL_RMS25JB080_SSDID:
  3236. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3237. MPT2SAS_INTEL_RMS25JB080_BRANDING);
  3238. break;
  3239. case MPT2SAS_INTEL_RMS25JB040_SSDID:
  3240. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3241. MPT2SAS_INTEL_RMS25JB040_BRANDING);
  3242. break;
  3243. case MPT2SAS_INTEL_RMS25KB080_SSDID:
  3244. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3245. MPT2SAS_INTEL_RMS25KB080_BRANDING);
  3246. break;
  3247. case MPT2SAS_INTEL_RMS25KB040_SSDID:
  3248. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3249. MPT2SAS_INTEL_RMS25KB040_BRANDING);
  3250. break;
  3251. case MPT2SAS_INTEL_RMS25LB040_SSDID:
  3252. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3253. MPT2SAS_INTEL_RMS25LB040_BRANDING);
  3254. break;
  3255. case MPT2SAS_INTEL_RMS25LB080_SSDID:
  3256. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3257. MPT2SAS_INTEL_RMS25LB080_BRANDING);
  3258. break;
  3259. default:
  3260. pr_info(MPT3SAS_FMT
  3261. "Intel(R) Controller: Subsystem ID: 0x%X\n",
  3262. ioc->name, ioc->pdev->subsystem_device);
  3263. break;
  3264. }
  3265. case MPI25_MFGPAGE_DEVID_SAS3008:
  3266. switch (ioc->pdev->subsystem_device) {
  3267. case MPT3SAS_INTEL_RMS3JC080_SSDID:
  3268. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3269. MPT3SAS_INTEL_RMS3JC080_BRANDING);
  3270. break;
  3271. case MPT3SAS_INTEL_RS3GC008_SSDID:
  3272. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3273. MPT3SAS_INTEL_RS3GC008_BRANDING);
  3274. break;
  3275. case MPT3SAS_INTEL_RS3FC044_SSDID:
  3276. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3277. MPT3SAS_INTEL_RS3FC044_BRANDING);
  3278. break;
  3279. case MPT3SAS_INTEL_RS3UC080_SSDID:
  3280. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3281. MPT3SAS_INTEL_RS3UC080_BRANDING);
  3282. break;
  3283. default:
  3284. pr_info(MPT3SAS_FMT
  3285. "Intel(R) Controller: Subsystem ID: 0x%X\n",
  3286. ioc->name, ioc->pdev->subsystem_device);
  3287. break;
  3288. }
  3289. break;
  3290. default:
  3291. pr_info(MPT3SAS_FMT
  3292. "Intel(R) Controller: Subsystem ID: 0x%X\n",
  3293. ioc->name, ioc->pdev->subsystem_device);
  3294. break;
  3295. }
  3296. break;
  3297. case PCI_VENDOR_ID_DELL:
  3298. switch (ioc->pdev->device) {
  3299. case MPI2_MFGPAGE_DEVID_SAS2008:
  3300. switch (ioc->pdev->subsystem_device) {
  3301. case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
  3302. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3303. MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
  3304. break;
  3305. case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
  3306. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3307. MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
  3308. break;
  3309. case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
  3310. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3311. MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
  3312. break;
  3313. case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
  3314. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3315. MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
  3316. break;
  3317. case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
  3318. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3319. MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
  3320. break;
  3321. case MPT2SAS_DELL_PERC_H200_SSDID:
  3322. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3323. MPT2SAS_DELL_PERC_H200_BRANDING);
  3324. break;
  3325. case MPT2SAS_DELL_6GBPS_SAS_SSDID:
  3326. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3327. MPT2SAS_DELL_6GBPS_SAS_BRANDING);
  3328. break;
  3329. default:
  3330. pr_info(MPT3SAS_FMT
  3331. "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
  3332. ioc->name, ioc->pdev->subsystem_device);
  3333. break;
  3334. }
  3335. break;
  3336. case MPI25_MFGPAGE_DEVID_SAS3008:
  3337. switch (ioc->pdev->subsystem_device) {
  3338. case MPT3SAS_DELL_12G_HBA_SSDID:
  3339. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3340. MPT3SAS_DELL_12G_HBA_BRANDING);
  3341. break;
  3342. default:
  3343. pr_info(MPT3SAS_FMT
  3344. "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
  3345. ioc->name, ioc->pdev->subsystem_device);
  3346. break;
  3347. }
  3348. break;
  3349. default:
  3350. pr_info(MPT3SAS_FMT
  3351. "Dell HBA: Subsystem ID: 0x%X\n", ioc->name,
  3352. ioc->pdev->subsystem_device);
  3353. break;
  3354. }
  3355. break;
  3356. case PCI_VENDOR_ID_CISCO:
  3357. switch (ioc->pdev->device) {
  3358. case MPI25_MFGPAGE_DEVID_SAS3008:
  3359. switch (ioc->pdev->subsystem_device) {
  3360. case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
  3361. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3362. MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
  3363. break;
  3364. case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
  3365. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3366. MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
  3367. break;
  3368. case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
  3369. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3370. MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
  3371. break;
  3372. default:
  3373. pr_info(MPT3SAS_FMT
  3374. "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
  3375. ioc->name, ioc->pdev->subsystem_device);
  3376. break;
  3377. }
  3378. break;
  3379. case MPI25_MFGPAGE_DEVID_SAS3108_1:
  3380. switch (ioc->pdev->subsystem_device) {
  3381. case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
  3382. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3383. MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
  3384. break;
  3385. case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
  3386. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3387. MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING
  3388. );
  3389. break;
  3390. default:
  3391. pr_info(MPT3SAS_FMT
  3392. "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
  3393. ioc->name, ioc->pdev->subsystem_device);
  3394. break;
  3395. }
  3396. break;
  3397. default:
  3398. pr_info(MPT3SAS_FMT
  3399. "Cisco SAS HBA: Subsystem ID: 0x%X\n",
  3400. ioc->name, ioc->pdev->subsystem_device);
  3401. break;
  3402. }
  3403. break;
  3404. case MPT2SAS_HP_3PAR_SSVID:
  3405. switch (ioc->pdev->device) {
  3406. case MPI2_MFGPAGE_DEVID_SAS2004:
  3407. switch (ioc->pdev->subsystem_device) {
  3408. case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
  3409. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3410. MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
  3411. break;
  3412. default:
  3413. pr_info(MPT3SAS_FMT
  3414. "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
  3415. ioc->name, ioc->pdev->subsystem_device);
  3416. break;
  3417. }
  3418. case MPI2_MFGPAGE_DEVID_SAS2308_2:
  3419. switch (ioc->pdev->subsystem_device) {
  3420. case MPT2SAS_HP_2_4_INTERNAL_SSDID:
  3421. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3422. MPT2SAS_HP_2_4_INTERNAL_BRANDING);
  3423. break;
  3424. case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
  3425. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3426. MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
  3427. break;
  3428. case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
  3429. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3430. MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
  3431. break;
  3432. case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
  3433. pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3434. MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
  3435. break;
  3436. default:
  3437. pr_info(MPT3SAS_FMT
  3438. "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
  3439. ioc->name, ioc->pdev->subsystem_device);
  3440. break;
  3441. }
  3442. default:
  3443. pr_info(MPT3SAS_FMT
  3444. "HP SAS HBA: Subsystem ID: 0x%X\n",
  3445. ioc->name, ioc->pdev->subsystem_device);
  3446. break;
  3447. }
  3448. default:
  3449. break;
  3450. }
  3451. }
  3452. /**
  3453. * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg
  3454. * version from FW Image Header.
  3455. * @ioc: per adapter object
  3456. *
  3457. * Returns 0 for success, non-zero for failure.
  3458. */
static int
_base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2FWImageHeader_t *FWImgHdr;
	Mpi25FWUploadRequest_t *mpi_request;
	Mpi2FWUploadReply_t mpi_reply;
	int r = 0;
	void *fwpkg_data = NULL;
	dma_addr_t fwpkg_data_dma;
	u16 smid, ioc_status;
	size_t data_length;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* Only one internal base command may be outstanding at a time. */
	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
		    ioc->name, __func__);
		return -EAGAIN;
	}

	/* DMA buffer large enough for just the FW image header, which is
	 * all this upload requests.
	 */
	data_length = sizeof(Mpi2FWImageHeader_t);
	fwpkg_data = pci_alloc_consistent(ioc->pdev, data_length,
	    &fwpkg_data_dma);
	if (!fwpkg_data) {
		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
		    ioc->name, __FILE__, __LINE__, __func__);
		return -ENOMEM;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		r = -EAGAIN;
		goto out;
	}

	/* Build and post the FW_UPLOAD request; completion is signalled via
	 * ioc->base_cmds.done by the base command callback.
	 */
	ioc->base_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->base_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t));
	mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD;
	mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH;
	mpi_request->ImageSize = cpu_to_le32(data_length);
	ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
	    data_length);
	init_completion(&ioc->base_cmds.done);
	mpt3sas_base_put_smid_default(ioc, smid);
	/* Wait for 15 seconds */
	wait_for_completion_timeout(&ioc->base_cmds.done,
	    FW_IMG_HDR_READ_TIMEOUT*HZ);
	pr_info(MPT3SAS_FMT "%s: complete\n",
	    ioc->name, __func__);
	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
		/* Timed out waiting for the reply. */
		pr_err(MPT3SAS_FMT "%s: timeout\n",
		    ioc->name, __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi25FWUploadRequest_t)/4);
		r = -ETIME;
	} else {
		memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
		if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
			memcpy(&mpi_reply, ioc->base_cmds.reply,
			    sizeof(Mpi2FWUploadReply_t));
			ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
			    MPI2_IOCSTATUS_MASK;
			if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
				/* A zero PackageVersion means the image does
				 * not carry one; print only when present.
				 */
				FWImgHdr = (Mpi2FWImageHeader_t *)fwpkg_data;
				if (FWImgHdr->PackageVersion.Word) {
					pr_info(MPT3SAS_FMT "FW Package Version"
					    "(%02d.%02d.%02d.%02d)\n",
					    ioc->name,
					    FWImgHdr->PackageVersion.Struct.Major,
					    FWImgHdr->PackageVersion.Struct.Minor,
					    FWImgHdr->PackageVersion.Struct.Unit,
					    FWImgHdr->PackageVersion.Struct.Dev);
				}
			} else {
				_debug_dump_mf(&mpi_reply,
				    sizeof(Mpi2FWUploadReply_t)/4);
			}
		}
	}
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
out:
	if (fwpkg_data)
		pci_free_consistent(ioc->pdev, data_length, fwpkg_data,
		    fwpkg_data_dma);
	return r;
}
  3545. /**
  3546. * _base_display_ioc_capabilities - Disply IOC's capabilities.
  3547. * @ioc: per adapter object
  3548. *
  3549. * Return nothing.
  3550. */
  3551. static void
  3552. _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
  3553. {
  3554. int i = 0;
  3555. char desc[16];
  3556. u32 iounit_pg1_flags;
  3557. u32 bios_version;
  3558. bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
  3559. strncpy(desc, ioc->manu_pg0.ChipName, 16);
  3560. pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\
  3561. "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
  3562. ioc->name, desc,
  3563. (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
  3564. (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
  3565. (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
  3566. ioc->facts.FWVersion.Word & 0x000000FF,
  3567. ioc->pdev->revision,
  3568. (bios_version & 0xFF000000) >> 24,
  3569. (bios_version & 0x00FF0000) >> 16,
  3570. (bios_version & 0x0000FF00) >> 8,
  3571. bios_version & 0x000000FF);
  3572. _base_display_OEMs_branding(ioc);
  3573. if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
  3574. pr_info("%sNVMe", i ? "," : "");
  3575. i++;
  3576. }
  3577. pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
  3578. if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
  3579. pr_info("Initiator");
  3580. i++;
  3581. }
  3582. if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
  3583. pr_info("%sTarget", i ? "," : "");
  3584. i++;
  3585. }
  3586. i = 0;
  3587. pr_info("), ");
  3588. pr_info("Capabilities=(");
  3589. if (!ioc->hide_ir_msg) {
  3590. if (ioc->facts.IOCCapabilities &
  3591. MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
  3592. pr_info("Raid");
  3593. i++;
  3594. }
  3595. }
  3596. if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
  3597. pr_info("%sTLR", i ? "," : "");
  3598. i++;
  3599. }
  3600. if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
  3601. pr_info("%sMulticast", i ? "," : "");
  3602. i++;
  3603. }
  3604. if (ioc->facts.IOCCapabilities &
  3605. MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
  3606. pr_info("%sBIDI Target", i ? "," : "");
  3607. i++;
  3608. }
  3609. if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
  3610. pr_info("%sEEDP", i ? "," : "");
  3611. i++;
  3612. }
  3613. if (ioc->facts.IOCCapabilities &
  3614. MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
  3615. pr_info("%sSnapshot Buffer", i ? "," : "");
  3616. i++;
  3617. }
  3618. if (ioc->facts.IOCCapabilities &
  3619. MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
  3620. pr_info("%sDiag Trace Buffer", i ? "," : "");
  3621. i++;
  3622. }
  3623. if (ioc->facts.IOCCapabilities &
  3624. MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
  3625. pr_info("%sDiag Extended Buffer", i ? "," : "");
  3626. i++;
  3627. }
  3628. if (ioc->facts.IOCCapabilities &
  3629. MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
  3630. pr_info("%sTask Set Full", i ? "," : "");
  3631. i++;
  3632. }
  3633. iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
  3634. if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
  3635. pr_info("%sNCQ", i ? "," : "");
  3636. i++;
  3637. }
  3638. pr_info(")\n");
  3639. }
  3640. /**
  3641. * mpt3sas_base_update_missing_delay - change the missing delay timers
  3642. * @ioc: per adapter object
  3643. * @device_missing_delay: amount of time till device is reported missing
  3644. * @io_missing_delay: interval IO is returned when there is a missing device
  3645. *
  3646. * Return nothing.
  3647. *
  3648. * Passed on the command line, this function will modify the device missing
  3649. * delay, as well as the io missing delay. This should be called at driver
  3650. * load time.
  3651. */
  3652. void
  3653. mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
  3654. u16 device_missing_delay, u8 io_missing_delay)
  3655. {
  3656. u16 dmd, dmd_new, dmd_orignal;
  3657. u8 io_missing_delay_original;
  3658. u16 sz;
  3659. Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
  3660. Mpi2ConfigReply_t mpi_reply;
  3661. u8 num_phys = 0;
  3662. u16 ioc_status;
  3663. mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
  3664. if (!num_phys)
  3665. return;
  3666. sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
  3667. sizeof(Mpi2SasIOUnit1PhyData_t));
  3668. sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
  3669. if (!sas_iounit_pg1) {
  3670. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  3671. ioc->name, __FILE__, __LINE__, __func__);
  3672. goto out;
  3673. }
  3674. if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
  3675. sas_iounit_pg1, sz))) {
  3676. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  3677. ioc->name, __FILE__, __LINE__, __func__);
  3678. goto out;
  3679. }
  3680. ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
  3681. MPI2_IOCSTATUS_MASK;
  3682. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  3683. pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
  3684. ioc->name, __FILE__, __LINE__, __func__);
  3685. goto out;
  3686. }
  3687. /* device missing delay */
  3688. dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
  3689. if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
  3690. dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
  3691. else
  3692. dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
  3693. dmd_orignal = dmd;
  3694. if (device_missing_delay > 0x7F) {
  3695. dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
  3696. device_missing_delay;
  3697. dmd = dmd / 16;
  3698. dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
  3699. } else
  3700. dmd = device_missing_delay;
  3701. sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
  3702. /* io missing delay */
  3703. io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
  3704. sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
  3705. if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
  3706. sz)) {
  3707. if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
  3708. dmd_new = (dmd &
  3709. MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
  3710. else
  3711. dmd_new =
  3712. dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
  3713. pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
  3714. ioc->name, dmd_orignal, dmd_new);
  3715. pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n",
  3716. ioc->name, io_missing_delay_original,
  3717. io_missing_delay);
  3718. ioc->device_missing_delay = dmd_new;
  3719. ioc->io_missing_delay = io_missing_delay;
  3720. }
  3721. out:
  3722. kfree(sas_iounit_pg1);
  3723. }
  3724. /**
  3725. * _base_static_config_pages - static start of day config pages
  3726. * @ioc: per adapter object
  3727. *
  3728. * Return nothing.
  3729. */
static void
_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2ConfigReply_t mpi_reply;
	u32 iounit_pg1_flags;

	/* Driver default; may be replaced by manu_pg11.NVMeAbortTO below. */
	ioc->nvme_abort_timeout = 30;
	mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
	/* Manufacturing page 10 is only meaningful for IR (RAID) firmware. */
	if (ioc->ir_firmware)
		mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
		    &ioc->manu_pg10);
	/*
	 * Ensure correct T10 PI operation if vendor left EEDPTagMode
	 * flag unset in NVDATA.
	 */
	mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
	if (ioc->manu_pg11.EEDPTagMode == 0) {
		pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
		    ioc->name);
		/* Force tag mode 1 (clear the 2-bit field, then set bit 0). */
		ioc->manu_pg11.EEDPTagMode &= ~0x3;
		ioc->manu_pg11.EEDPTagMode |= 0x1;
		mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
		    &ioc->manu_pg11);
	}
	/*
	 * NVMe task management: either the firmware requests custom
	 * handling, or the driver clamps the firmware-provided abort
	 * timeout into [NVME_TASK_ABORT_MIN_TIMEOUT,
	 * NVME_TASK_ABORT_MAX_TIMEOUT].
	 */
	if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
		ioc->tm_custom_handling = 1;
	else {
		ioc->tm_custom_handling = 0;
		if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
			ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
		else if (ioc->manu_pg11.NVMeAbortTO >
		    NVME_TASK_ABORT_MAX_TIMEOUT)
			ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
		else
			ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
	}

	/* Cache the remaining static config pages on the adapter object. */
	mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
	mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
	mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
	mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
	mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
	mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
	/* Depends on bios_pg3/iounit_pg1 read above for its log output. */
	_base_display_ioc_capabilities(ioc);

	/*
	 * Enable task_set_full handling in iounit_pg1 when the
	 * facts capabilities indicate that its supported.
	 */
	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
	if ((ioc->facts.IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
		iounit_pg1_flags &=
		    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
	else
		iounit_pg1_flags |=
		    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
	mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);

	if (ioc->iounit_pg8.NumSensors)
		ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
}
  3789. /**
  3790. * mpt3sas_free_enclosure_list - release memory
  3791. * @ioc: per adapter object
  3792. *
 * Free memory allocated during enclosure add.
  3794. *
  3795. * Return nothing.
  3796. */
  3797. void
  3798. mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
  3799. {
  3800. struct _enclosure_node *enclosure_dev, *enclosure_dev_next;
  3801. /* Free enclosure list */
  3802. list_for_each_entry_safe(enclosure_dev,
  3803. enclosure_dev_next, &ioc->enclosure_list, list) {
  3804. list_del(&enclosure_dev->list);
  3805. kfree(enclosure_dev);
  3806. }
  3807. }
  3808. /**
  3809. * _base_release_memory_pools - release memory
  3810. * @ioc: per adapter object
  3811. *
  3812. * Free memory allocated from _base_allocate_memory_pools.
  3813. *
  3814. * Return nothing.
  3815. */
  3816. static void
  3817. _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
  3818. {
  3819. int i = 0;
  3820. int j = 0;
  3821. struct chain_tracker *ct;
  3822. struct reply_post_struct *rps;
  3823. dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3824. __func__));
  3825. if (ioc->request) {
  3826. pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
  3827. ioc->request, ioc->request_dma);
  3828. dexitprintk(ioc, pr_info(MPT3SAS_FMT
  3829. "request_pool(0x%p): free\n",
  3830. ioc->name, ioc->request));
  3831. ioc->request = NULL;
  3832. }
  3833. if (ioc->sense) {
  3834. dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
  3835. dma_pool_destroy(ioc->sense_dma_pool);
  3836. dexitprintk(ioc, pr_info(MPT3SAS_FMT
  3837. "sense_pool(0x%p): free\n",
  3838. ioc->name, ioc->sense));
  3839. ioc->sense = NULL;
  3840. }
  3841. if (ioc->reply) {
  3842. dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
  3843. dma_pool_destroy(ioc->reply_dma_pool);
  3844. dexitprintk(ioc, pr_info(MPT3SAS_FMT
  3845. "reply_pool(0x%p): free\n",
  3846. ioc->name, ioc->reply));
  3847. ioc->reply = NULL;
  3848. }
  3849. if (ioc->reply_free) {
  3850. dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
  3851. ioc->reply_free_dma);
  3852. dma_pool_destroy(ioc->reply_free_dma_pool);
  3853. dexitprintk(ioc, pr_info(MPT3SAS_FMT
  3854. "reply_free_pool(0x%p): free\n",
  3855. ioc->name, ioc->reply_free));
  3856. ioc->reply_free = NULL;
  3857. }
  3858. if (ioc->reply_post) {
  3859. do {
  3860. rps = &ioc->reply_post[i];
  3861. if (rps->reply_post_free) {
  3862. dma_pool_free(
  3863. ioc->reply_post_free_dma_pool,
  3864. rps->reply_post_free,
  3865. rps->reply_post_free_dma);
  3866. dexitprintk(ioc, pr_info(MPT3SAS_FMT
  3867. "reply_post_free_pool(0x%p): free\n",
  3868. ioc->name, rps->reply_post_free));
  3869. rps->reply_post_free = NULL;
  3870. }
  3871. } while (ioc->rdpq_array_enable &&
  3872. (++i < ioc->reply_queue_count));
  3873. if (ioc->reply_post_free_array &&
  3874. ioc->rdpq_array_enable) {
  3875. dma_pool_free(ioc->reply_post_free_array_dma_pool,
  3876. ioc->reply_post_free_array,
  3877. ioc->reply_post_free_array_dma);
  3878. ioc->reply_post_free_array = NULL;
  3879. }
  3880. dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
  3881. dma_pool_destroy(ioc->reply_post_free_dma_pool);
  3882. kfree(ioc->reply_post);
  3883. }
  3884. if (ioc->pcie_sgl_dma_pool) {
  3885. for (i = 0; i < ioc->scsiio_depth; i++) {
  3886. dma_pool_free(ioc->pcie_sgl_dma_pool,
  3887. ioc->pcie_sg_lookup[i].pcie_sgl,
  3888. ioc->pcie_sg_lookup[i].pcie_sgl_dma);
  3889. }
  3890. if (ioc->pcie_sgl_dma_pool)
  3891. dma_pool_destroy(ioc->pcie_sgl_dma_pool);
  3892. }
  3893. if (ioc->config_page) {
  3894. dexitprintk(ioc, pr_info(MPT3SAS_FMT
  3895. "config_page(0x%p): free\n", ioc->name,
  3896. ioc->config_page));
  3897. pci_free_consistent(ioc->pdev, ioc->config_page_sz,
  3898. ioc->config_page, ioc->config_page_dma);
  3899. }
  3900. kfree(ioc->hpr_lookup);
  3901. kfree(ioc->internal_lookup);
  3902. if (ioc->chain_lookup) {
  3903. for (i = 0; i < ioc->scsiio_depth; i++) {
  3904. for (j = ioc->chains_per_prp_buffer;
  3905. j < ioc->chains_needed_per_io; j++) {
  3906. ct = &ioc->chain_lookup[i].chains_per_smid[j];
  3907. if (ct && ct->chain_buffer)
  3908. dma_pool_free(ioc->chain_dma_pool,
  3909. ct->chain_buffer,
  3910. ct->chain_buffer_dma);
  3911. }
  3912. kfree(ioc->chain_lookup[i].chains_per_smid);
  3913. }
  3914. dma_pool_destroy(ioc->chain_dma_pool);
  3915. kfree(ioc->chain_lookup);
  3916. ioc->chain_lookup = NULL;
  3917. }
  3918. }
  3919. /**
  3920. * is_MSB_are_same - checks whether all reply queues in a set are
  3921. * having same upper 32bits in their base memory address.
  3922. * @reply_pool_start_address: Base address of a reply queue set
  3923. * @pool_sz: Size of single Reply Descriptor Post Queues pool size
  3924. *
  3925. * Returns 1 if reply queues in a set have a same upper 32bits
  3926. * in their base memory address,
  3927. * else 0
  3928. */
  3929. static int
  3930. is_MSB_are_same(long reply_pool_start_address, u32 pool_sz)
  3931. {
  3932. long reply_pool_end_address;
  3933. reply_pool_end_address = reply_pool_start_address + pool_sz;
  3934. if (upper_32_bits(reply_pool_start_address) ==
  3935. upper_32_bits(reply_pool_end_address))
  3936. return 1;
  3937. else
  3938. return 0;
  3939. }
  3940. /**
  3941. * _base_allocate_memory_pools - allocate start of day memory pools
  3942. * @ioc: per adapter object
  3943. *
  3944. * Returns 0 success, anything else error
  3945. */
  3946. static int
  3947. _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
  3948. {
  3949. struct mpt3sas_facts *facts;
  3950. u16 max_sge_elements;
  3951. u16 chains_needed_per_io;
  3952. u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
  3953. u32 retry_sz;
  3954. u16 max_request_credit, nvme_blocks_needed;
  3955. unsigned short sg_tablesize;
  3956. u16 sge_size;
  3957. int i, j;
  3958. struct chain_tracker *ct;
  3959. dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  3960. __func__));
  3961. retry_sz = 0;
  3962. facts = &ioc->facts;
  3963. /* command line tunables for max sgl entries */
  3964. if (max_sgl_entries != -1)
  3965. sg_tablesize = max_sgl_entries;
  3966. else {
  3967. if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
  3968. sg_tablesize = MPT2SAS_SG_DEPTH;
  3969. else
  3970. sg_tablesize = MPT3SAS_SG_DEPTH;
  3971. }
  3972. /* max sgl entries <= MPT_KDUMP_MIN_PHYS_SEGMENTS in KDUMP mode */
  3973. if (reset_devices)
  3974. sg_tablesize = min_t(unsigned short, sg_tablesize,
  3975. MPT_KDUMP_MIN_PHYS_SEGMENTS);
  3976. if (ioc->is_mcpu_endpoint)
  3977. ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
  3978. else {
  3979. if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
  3980. sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
  3981. else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
  3982. sg_tablesize = min_t(unsigned short, sg_tablesize,
  3983. SG_MAX_SEGMENTS);
  3984. pr_warn(MPT3SAS_FMT
  3985. "sg_tablesize(%u) is bigger than kernel "
  3986. "defined SG_CHUNK_SIZE(%u)\n", ioc->name,
  3987. sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
  3988. }
  3989. ioc->shost->sg_tablesize = sg_tablesize;
  3990. }
  3991. ioc->internal_depth = min_t(int, (facts->HighPriorityCredit + (5)),
  3992. (facts->RequestCredit / 4));
  3993. if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
  3994. if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
  3995. INTERNAL_SCSIIO_CMDS_COUNT)) {
  3996. pr_err(MPT3SAS_FMT "IOC doesn't have enough Request \
  3997. Credits, it has just %d number of credits\n",
  3998. ioc->name, facts->RequestCredit);
  3999. return -ENOMEM;
  4000. }
  4001. ioc->internal_depth = 10;
  4002. }
  4003. ioc->hi_priority_depth = ioc->internal_depth - (5);
  4004. /* command line tunables for max controller queue depth */
  4005. if (max_queue_depth != -1 && max_queue_depth != 0) {
  4006. max_request_credit = min_t(u16, max_queue_depth +
  4007. ioc->internal_depth, facts->RequestCredit);
  4008. if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
  4009. max_request_credit = MAX_HBA_QUEUE_DEPTH;
  4010. } else if (reset_devices)
  4011. max_request_credit = min_t(u16, facts->RequestCredit,
  4012. (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
  4013. else
  4014. max_request_credit = min_t(u16, facts->RequestCredit,
  4015. MAX_HBA_QUEUE_DEPTH);
  4016. /* Firmware maintains additional facts->HighPriorityCredit number of
  4017. * credits for HiPriprity Request messages, so hba queue depth will be
  4018. * sum of max_request_credit and high priority queue depth.
  4019. */
  4020. ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
  4021. /* request frame size */
  4022. ioc->request_sz = facts->IOCRequestFrameSize * 4;
  4023. /* reply frame size */
  4024. ioc->reply_sz = facts->ReplyFrameSize * 4;
  4025. /* chain segment size */
  4026. if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
  4027. if (facts->IOCMaxChainSegmentSize)
  4028. ioc->chain_segment_sz =
  4029. facts->IOCMaxChainSegmentSize *
  4030. MAX_CHAIN_ELEMT_SZ;
  4031. else
  4032. /* set to 128 bytes size if IOCMaxChainSegmentSize is zero */
  4033. ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
  4034. MAX_CHAIN_ELEMT_SZ;
  4035. } else
  4036. ioc->chain_segment_sz = ioc->request_sz;
  4037. /* calculate the max scatter element size */
  4038. sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
  4039. retry_allocation:
  4040. total_sz = 0;
  4041. /* calculate number of sg elements left over in the 1st frame */
  4042. max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
  4043. sizeof(Mpi2SGEIOUnion_t)) + sge_size);
  4044. ioc->max_sges_in_main_message = max_sge_elements/sge_size;
  4045. /* now do the same for a chain buffer */
  4046. max_sge_elements = ioc->chain_segment_sz - sge_size;
  4047. ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
  4048. /*
  4049. * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
  4050. */
  4051. chains_needed_per_io = ((ioc->shost->sg_tablesize -
  4052. ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
  4053. + 1;
  4054. if (chains_needed_per_io > facts->MaxChainDepth) {
  4055. chains_needed_per_io = facts->MaxChainDepth;
  4056. ioc->shost->sg_tablesize = min_t(u16,
  4057. ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
  4058. * chains_needed_per_io), ioc->shost->sg_tablesize);
  4059. }
  4060. ioc->chains_needed_per_io = chains_needed_per_io;
  4061. /* reply free queue sizing - taking into account for 64 FW events */
  4062. ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
  4063. /* mCPU manage single counters for simplicity */
  4064. if (ioc->is_mcpu_endpoint)
  4065. ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
  4066. else {
  4067. /* calculate reply descriptor post queue depth */
  4068. ioc->reply_post_queue_depth = ioc->hba_queue_depth +
  4069. ioc->reply_free_queue_depth + 1;
  4070. /* align the reply post queue on the next 16 count boundary */
  4071. if (ioc->reply_post_queue_depth % 16)
  4072. ioc->reply_post_queue_depth += 16 -
  4073. (ioc->reply_post_queue_depth % 16);
  4074. }
  4075. if (ioc->reply_post_queue_depth >
  4076. facts->MaxReplyDescriptorPostQueueDepth) {
  4077. ioc->reply_post_queue_depth =
  4078. facts->MaxReplyDescriptorPostQueueDepth -
  4079. (facts->MaxReplyDescriptorPostQueueDepth % 16);
  4080. ioc->hba_queue_depth =
  4081. ((ioc->reply_post_queue_depth - 64) / 2) - 1;
  4082. ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
  4083. }
  4084. dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \
  4085. "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
  4086. "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
  4087. ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
  4088. ioc->chains_needed_per_io));
  4089. /* reply post queue, 16 byte align */
  4090. reply_post_free_sz = ioc->reply_post_queue_depth *
  4091. sizeof(Mpi2DefaultReplyDescriptor_t);
  4092. sz = reply_post_free_sz;
  4093. if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
  4094. sz *= ioc->reply_queue_count;
  4095. ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
  4096. (ioc->reply_queue_count):1,
  4097. sizeof(struct reply_post_struct), GFP_KERNEL);
  4098. if (!ioc->reply_post) {
  4099. pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n",
  4100. ioc->name);
  4101. goto out;
  4102. }
  4103. ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool",
  4104. &ioc->pdev->dev, sz, 16, 0);
  4105. if (!ioc->reply_post_free_dma_pool) {
  4106. pr_err(MPT3SAS_FMT
  4107. "reply_post_free pool: dma_pool_create failed\n",
  4108. ioc->name);
  4109. goto out;
  4110. }
  4111. i = 0;
  4112. do {
  4113. ioc->reply_post[i].reply_post_free =
  4114. dma_pool_alloc(ioc->reply_post_free_dma_pool,
  4115. GFP_KERNEL,
  4116. &ioc->reply_post[i].reply_post_free_dma);
  4117. if (!ioc->reply_post[i].reply_post_free) {
  4118. pr_err(MPT3SAS_FMT
  4119. "reply_post_free pool: dma_pool_alloc failed\n",
  4120. ioc->name);
  4121. goto out;
  4122. }
  4123. memset(ioc->reply_post[i].reply_post_free, 0, sz);
  4124. dinitprintk(ioc, pr_info(MPT3SAS_FMT
  4125. "reply post free pool (0x%p): depth(%d),"
  4126. "element_size(%d), pool_size(%d kB)\n", ioc->name,
  4127. ioc->reply_post[i].reply_post_free,
  4128. ioc->reply_post_queue_depth, 8, sz/1024));
  4129. dinitprintk(ioc, pr_info(MPT3SAS_FMT
  4130. "reply_post_free_dma = (0x%llx)\n", ioc->name,
  4131. (unsigned long long)
  4132. ioc->reply_post[i].reply_post_free_dma));
  4133. total_sz += sz;
  4134. } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
  4135. if (ioc->dma_mask == 64) {
  4136. if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
  4137. pr_warn(MPT3SAS_FMT
  4138. "no suitable consistent DMA mask for %s\n",
  4139. ioc->name, pci_name(ioc->pdev));
  4140. goto out;
  4141. }
  4142. }
  4143. ioc->scsiio_depth = ioc->hba_queue_depth -
  4144. ioc->hi_priority_depth - ioc->internal_depth;
  4145. /* set the scsi host can_queue depth
  4146. * with some internal commands that could be outstanding
  4147. */
  4148. ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
  4149. dinitprintk(ioc, pr_info(MPT3SAS_FMT
  4150. "scsi host: can_queue depth (%d)\n",
  4151. ioc->name, ioc->shost->can_queue));
  4152. /* contiguous pool for request and chains, 16 byte align, one extra "
  4153. * "frame for smid=0
  4154. */
  4155. ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
  4156. sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
  4157. /* hi-priority queue */
  4158. sz += (ioc->hi_priority_depth * ioc->request_sz);
  4159. /* internal queue */
  4160. sz += (ioc->internal_depth * ioc->request_sz);
  4161. ioc->request_dma_sz = sz;
  4162. ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
  4163. if (!ioc->request) {
  4164. pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
  4165. "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
  4166. "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
  4167. ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
  4168. if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
  4169. goto out;
  4170. retry_sz = 64;
  4171. ioc->hba_queue_depth -= retry_sz;
  4172. _base_release_memory_pools(ioc);
  4173. goto retry_allocation;
  4174. }
  4175. if (retry_sz)
  4176. pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
  4177. "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
  4178. "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
  4179. ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
  4180. /* hi-priority queue */
  4181. ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
  4182. ioc->request_sz);
  4183. ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
  4184. ioc->request_sz);
  4185. /* internal queue */
  4186. ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
  4187. ioc->request_sz);
  4188. ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
  4189. ioc->request_sz);
  4190. dinitprintk(ioc, pr_info(MPT3SAS_FMT
  4191. "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
  4192. ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz,
  4193. (ioc->hba_queue_depth * ioc->request_sz)/1024));
  4194. dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n",
  4195. ioc->name, (unsigned long long) ioc->request_dma));
  4196. total_sz += sz;
  4197. dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
  4198. ioc->name, ioc->request, ioc->scsiio_depth));
  4199. ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
  4200. sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
  4201. ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
  4202. if (!ioc->chain_lookup) {
  4203. pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages "
  4204. "failed\n", ioc->name);
  4205. goto out;
  4206. }
  4207. sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
  4208. for (i = 0; i < ioc->scsiio_depth; i++) {
  4209. ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
  4210. if (!ioc->chain_lookup[i].chains_per_smid) {
  4211. pr_err(MPT3SAS_FMT "chain_lookup: "
  4212. " kzalloc failed\n", ioc->name);
  4213. goto out;
  4214. }
  4215. }
  4216. /* initialize hi-priority queue smid's */
  4217. ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
  4218. sizeof(struct request_tracker), GFP_KERNEL);
  4219. if (!ioc->hpr_lookup) {
  4220. pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n",
  4221. ioc->name);
  4222. goto out;
  4223. }
  4224. ioc->hi_priority_smid = ioc->scsiio_depth + 1;
  4225. dinitprintk(ioc, pr_info(MPT3SAS_FMT
  4226. "hi_priority(0x%p): depth(%d), start smid(%d)\n",
  4227. ioc->name, ioc->hi_priority,
  4228. ioc->hi_priority_depth, ioc->hi_priority_smid));
  4229. /* initialize internal queue smid's */
  4230. ioc->internal_lookup = kcalloc(ioc->internal_depth,
  4231. sizeof(struct request_tracker), GFP_KERNEL);
  4232. if (!ioc->internal_lookup) {
  4233. pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n",
  4234. ioc->name);
  4235. goto out;
  4236. }
  4237. ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
  4238. dinitprintk(ioc, pr_info(MPT3SAS_FMT
  4239. "internal(0x%p): depth(%d), start smid(%d)\n",
  4240. ioc->name, ioc->internal,
  4241. ioc->internal_depth, ioc->internal_smid));
  4242. /*
  4243. * The number of NVMe page sized blocks needed is:
  4244. * (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
  4245. * ((sg_tablesize * 8) - 1) is the max PRP's minus the first PRP entry
  4246. * that is placed in the main message frame. 8 is the size of each PRP
  4247. * entry or PRP list pointer entry. 8 is subtracted from page_size
  4248. * because of the PRP list pointer entry at the end of a page, so this
  4249. * is not counted as a PRP entry. The 1 added page is a round up.
  4250. *
  4251. * To avoid allocation failures due to the amount of memory that could
  4252. * be required for NVMe PRP's, only each set of NVMe blocks will be
  4253. * contiguous, so a new set is allocated for each possible I/O.
  4254. */
  4255. ioc->chains_per_prp_buffer = 0;
  4256. if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
  4257. nvme_blocks_needed =
  4258. (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
  4259. nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
  4260. nvme_blocks_needed++;
  4261. sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
  4262. ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
  4263. if (!ioc->pcie_sg_lookup) {
  4264. pr_info(MPT3SAS_FMT
  4265. "PCIe SGL lookup: kzalloc failed\n", ioc->name);
  4266. goto out;
  4267. }
  4268. sz = nvme_blocks_needed * ioc->page_size;
  4269. ioc->pcie_sgl_dma_pool =
  4270. dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
  4271. if (!ioc->pcie_sgl_dma_pool) {
  4272. pr_info(MPT3SAS_FMT
  4273. "PCIe SGL pool: dma_pool_create failed\n",
  4274. ioc->name);
  4275. goto out;
  4276. }
  4277. ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
  4278. ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer,
  4279. ioc->chains_needed_per_io);
  4280. for (i = 0; i < ioc->scsiio_depth; i++) {
  4281. ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
  4282. ioc->pcie_sgl_dma_pool, GFP_KERNEL,
  4283. &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
  4284. if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
  4285. pr_info(MPT3SAS_FMT
  4286. "PCIe SGL pool: dma_pool_alloc failed\n",
  4287. ioc->name);
  4288. goto out;
  4289. }
  4290. for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
  4291. ct = &ioc->chain_lookup[i].chains_per_smid[j];
  4292. ct->chain_buffer =
  4293. ioc->pcie_sg_lookup[i].pcie_sgl +
  4294. (j * ioc->chain_segment_sz);
  4295. ct->chain_buffer_dma =
  4296. ioc->pcie_sg_lookup[i].pcie_sgl_dma +
  4297. (j * ioc->chain_segment_sz);
  4298. }
  4299. }
  4300. dinitprintk(ioc, pr_info(MPT3SAS_FMT "PCIe sgl pool depth(%d), "
  4301. "element_size(%d), pool_size(%d kB)\n", ioc->name,
  4302. ioc->scsiio_depth, sz, (sz * ioc->scsiio_depth)/1024));
  4303. dinitprintk(ioc, pr_info(MPT3SAS_FMT "Number of chains can "
  4304. "fit in a PRP page(%d)\n", ioc->name,
  4305. ioc->chains_per_prp_buffer));
  4306. total_sz += sz * ioc->scsiio_depth;
  4307. }
  4308. ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
  4309. ioc->chain_segment_sz, 16, 0);
  4310. if (!ioc->chain_dma_pool) {
  4311. pr_err(MPT3SAS_FMT "chain_dma_pool: dma_pool_create failed\n",
  4312. ioc->name);
  4313. goto out;
  4314. }
  4315. for (i = 0; i < ioc->scsiio_depth; i++) {
  4316. for (j = ioc->chains_per_prp_buffer;
  4317. j < ioc->chains_needed_per_io; j++) {
  4318. ct = &ioc->chain_lookup[i].chains_per_smid[j];
  4319. ct->chain_buffer = dma_pool_alloc(
  4320. ioc->chain_dma_pool, GFP_KERNEL,
  4321. &ct->chain_buffer_dma);
  4322. if (!ct->chain_buffer) {
  4323. pr_err(MPT3SAS_FMT "chain_lookup: "
  4324. " pci_pool_alloc failed\n", ioc->name);
  4325. _base_release_memory_pools(ioc);
  4326. goto out;
  4327. }
  4328. }
  4329. total_sz += ioc->chain_segment_sz;
  4330. }
  4331. dinitprintk(ioc, pr_info(MPT3SAS_FMT
  4332. "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
  4333. ioc->name, ioc->chain_depth, ioc->chain_segment_sz,
  4334. ((ioc->chain_depth * ioc->chain_segment_sz))/1024));
  4335. /* sense buffers, 4 byte align */
  4336. sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
  4337. ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
  4338. 4, 0);
  4339. if (!ioc->sense_dma_pool) {
  4340. pr_err(MPT3SAS_FMT "sense pool: dma_pool_create failed\n",
  4341. ioc->name);
  4342. goto out;
  4343. }
  4344. ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
  4345. &ioc->sense_dma);
  4346. if (!ioc->sense) {
  4347. pr_err(MPT3SAS_FMT "sense pool: dma_pool_alloc failed\n",
  4348. ioc->name);
  4349. goto out;
  4350. }
  4351. /* sense buffer requires to be in same 4 gb region.
  4352. * Below function will check the same.
  4353. * In case of failure, new pci pool will be created with updated
  4354. * alignment. Older allocation and pool will be destroyed.
  4355. * Alignment will be used such a way that next allocation if
  4356. * success, will always meet same 4gb region requirement.
  4357. * Actual requirement is not alignment, but we need start and end of
  4358. * DMA address must have same upper 32 bit address.
  4359. */
  4360. if (!is_MSB_are_same((long)ioc->sense, sz)) {
  4361. //Release Sense pool & Reallocate
  4362. dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
  4363. dma_pool_destroy(ioc->sense_dma_pool);
  4364. ioc->sense = NULL;
  4365. ioc->sense_dma_pool =
  4366. dma_pool_create("sense pool", &ioc->pdev->dev, sz,
  4367. roundup_pow_of_two(sz), 0);
  4368. if (!ioc->sense_dma_pool) {
  4369. pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
  4370. ioc->name);
  4371. goto out;
  4372. }
  4373. ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
  4374. &ioc->sense_dma);
  4375. if (!ioc->sense) {
  4376. pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
  4377. ioc->name);
  4378. goto out;
  4379. }
  4380. }
  4381. dinitprintk(ioc, pr_info(MPT3SAS_FMT
  4382. "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
  4383. "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
  4384. SCSI_SENSE_BUFFERSIZE, sz/1024));
  4385. dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n",
  4386. ioc->name, (unsigned long long)ioc->sense_dma));
  4387. total_sz += sz;
  4388. /* reply pool, 4 byte align */
  4389. sz = ioc->reply_free_queue_depth * ioc->reply_sz;
  4390. ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
  4391. 4, 0);
  4392. if (!ioc->reply_dma_pool) {
  4393. pr_err(MPT3SAS_FMT "reply pool: dma_pool_create failed\n",
  4394. ioc->name);
  4395. goto out;
  4396. }
  4397. ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
  4398. &ioc->reply_dma);
  4399. if (!ioc->reply) {
  4400. pr_err(MPT3SAS_FMT "reply pool: dma_pool_alloc failed\n",
  4401. ioc->name);
  4402. goto out;
  4403. }
  4404. ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
  4405. ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
  4406. dinitprintk(ioc, pr_info(MPT3SAS_FMT
  4407. "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
  4408. ioc->name, ioc->reply,
  4409. ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
  4410. dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n",
  4411. ioc->name, (unsigned long long)ioc->reply_dma));
  4412. total_sz += sz;
  4413. /* reply free queue, 16 byte align */
  4414. sz = ioc->reply_free_queue_depth * 4;
  4415. ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
  4416. &ioc->pdev->dev, sz, 16, 0);
  4417. if (!ioc->reply_free_dma_pool) {
  4418. pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_create failed\n",
  4419. ioc->name);
  4420. goto out;
  4421. }
  4422. ioc->reply_free = dma_pool_alloc(ioc->reply_free_dma_pool, GFP_KERNEL,
  4423. &ioc->reply_free_dma);
  4424. if (!ioc->reply_free) {
  4425. pr_err(MPT3SAS_FMT "reply_free pool: dma_pool_alloc failed\n",
  4426. ioc->name);
  4427. goto out;
  4428. }
  4429. memset(ioc->reply_free, 0, sz);
  4430. dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \
  4431. "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
  4432. ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
  4433. dinitprintk(ioc, pr_info(MPT3SAS_FMT
  4434. "reply_free_dma (0x%llx)\n",
  4435. ioc->name, (unsigned long long)ioc->reply_free_dma));
  4436. total_sz += sz;
  4437. if (ioc->rdpq_array_enable) {
  4438. reply_post_free_array_sz = ioc->reply_queue_count *
  4439. sizeof(Mpi2IOCInitRDPQArrayEntry);
  4440. ioc->reply_post_free_array_dma_pool =
  4441. dma_pool_create("reply_post_free_array pool",
  4442. &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
  4443. if (!ioc->reply_post_free_array_dma_pool) {
  4444. dinitprintk(ioc,
  4445. pr_info(MPT3SAS_FMT "reply_post_free_array pool: "
  4446. "dma_pool_create failed\n", ioc->name));
  4447. goto out;
  4448. }
  4449. ioc->reply_post_free_array =
  4450. dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
  4451. GFP_KERNEL, &ioc->reply_post_free_array_dma);
  4452. if (!ioc->reply_post_free_array) {
  4453. dinitprintk(ioc,
  4454. pr_info(MPT3SAS_FMT "reply_post_free_array pool: "
  4455. "dma_pool_alloc failed\n", ioc->name));
  4456. goto out;
  4457. }
  4458. }
  4459. ioc->config_page_sz = 512;
  4460. ioc->config_page = pci_alloc_consistent(ioc->pdev,
  4461. ioc->config_page_sz, &ioc->config_page_dma);
  4462. if (!ioc->config_page) {
  4463. pr_err(MPT3SAS_FMT
  4464. "config page: dma_pool_alloc failed\n",
  4465. ioc->name);
  4466. goto out;
  4467. }
  4468. dinitprintk(ioc, pr_info(MPT3SAS_FMT
  4469. "config page(0x%p): size(%d)\n",
  4470. ioc->name, ioc->config_page, ioc->config_page_sz));
  4471. dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n",
  4472. ioc->name, (unsigned long long)ioc->config_page_dma));
  4473. total_sz += ioc->config_page_sz;
  4474. pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n",
  4475. ioc->name, total_sz/1024);
  4476. pr_info(MPT3SAS_FMT
  4477. "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
  4478. ioc->name, ioc->shost->can_queue, facts->RequestCredit);
  4479. pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n",
  4480. ioc->name, ioc->shost->sg_tablesize);
  4481. return 0;
  4482. out:
  4483. return -ENOMEM;
  4484. }
  4485. /**
  4486. * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
  4487. * @ioc: Pointer to MPT_ADAPTER structure
  4488. * @cooked: Request raw or cooked IOC state
  4489. *
  4490. * Returns all IOC Doorbell register bits if cooked==0, else just the
  4491. * Doorbell bits in MPI_IOC_STATE_MASK.
  4492. */
  4493. u32
  4494. mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
  4495. {
  4496. u32 s, sc;
  4497. s = readl(&ioc->chip->Doorbell);
  4498. sc = s & MPI2_IOC_STATE_MASK;
  4499. return cooked ? sc : s;
  4500. }
  4501. /**
  4502. * _base_wait_on_iocstate - waiting on a particular ioc state
  4503. * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
  4504. * @timeout: timeout in second
  4505. *
  4506. * Returns 0 for success, non-zero for failure.
  4507. */
  4508. static int
  4509. _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
  4510. {
  4511. u32 count, cntdn;
  4512. u32 current_state;
  4513. count = 0;
  4514. cntdn = 1000 * timeout;
  4515. do {
  4516. current_state = mpt3sas_base_get_iocstate(ioc, 1);
  4517. if (current_state == ioc_state)
  4518. return 0;
  4519. if (count && current_state == MPI2_IOC_STATE_FAULT)
  4520. break;
  4521. usleep_range(1000, 1500);
  4522. count++;
  4523. } while (--cntdn);
  4524. return current_state;
  4525. }
  4526. /**
  4527. * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by
  4528. * a write to the doorbell)
  4529. * @ioc: per adapter object
  4530. * @timeout: timeout in second
  4531. *
  4532. * Returns 0 for success, non-zero for failure.
  4533. *
  4534. * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
  4535. */
  4536. static int
  4537. _base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
  4538. static int
  4539. _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
  4540. {
  4541. u32 cntdn, count;
  4542. u32 int_status;
  4543. count = 0;
  4544. cntdn = 1000 * timeout;
  4545. do {
  4546. int_status = readl(&ioc->chip->HostInterruptStatus);
  4547. if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
  4548. dhsprintk(ioc, pr_info(MPT3SAS_FMT
  4549. "%s: successful count(%d), timeout(%d)\n",
  4550. ioc->name, __func__, count, timeout));
  4551. return 0;
  4552. }
  4553. usleep_range(1000, 1500);
  4554. count++;
  4555. } while (--cntdn);
  4556. pr_err(MPT3SAS_FMT
  4557. "%s: failed due to timeout count(%d), int_status(%x)!\n",
  4558. ioc->name, __func__, count, int_status);
  4559. return -EFAULT;
  4560. }
  4561. static int
  4562. _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
  4563. {
  4564. u32 cntdn, count;
  4565. u32 int_status;
  4566. count = 0;
  4567. cntdn = 2000 * timeout;
  4568. do {
  4569. int_status = readl(&ioc->chip->HostInterruptStatus);
  4570. if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
  4571. dhsprintk(ioc, pr_info(MPT3SAS_FMT
  4572. "%s: successful count(%d), timeout(%d)\n",
  4573. ioc->name, __func__, count, timeout));
  4574. return 0;
  4575. }
  4576. udelay(500);
  4577. count++;
  4578. } while (--cntdn);
  4579. pr_err(MPT3SAS_FMT
  4580. "%s: failed due to timeout count(%d), int_status(%x)!\n",
  4581. ioc->name, __func__, count, int_status);
  4582. return -EFAULT;
  4583. }
  4584. /**
  4585. * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
  4586. * @ioc: per adapter object
  4587. * @timeout: timeout in second
  4588. *
  4589. * Returns 0 for success, non-zero for failure.
  4590. *
  4591. * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
  4592. * doorbell.
  4593. */
  4594. static int
  4595. _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
  4596. {
  4597. u32 cntdn, count;
  4598. u32 int_status;
  4599. u32 doorbell;
  4600. count = 0;
  4601. cntdn = 1000 * timeout;
  4602. do {
  4603. int_status = readl(&ioc->chip->HostInterruptStatus);
  4604. if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
  4605. dhsprintk(ioc, pr_info(MPT3SAS_FMT
  4606. "%s: successful count(%d), timeout(%d)\n",
  4607. ioc->name, __func__, count, timeout));
  4608. return 0;
  4609. } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
  4610. doorbell = readl(&ioc->chip->Doorbell);
  4611. if ((doorbell & MPI2_IOC_STATE_MASK) ==
  4612. MPI2_IOC_STATE_FAULT) {
  4613. mpt3sas_base_fault_info(ioc , doorbell);
  4614. return -EFAULT;
  4615. }
  4616. } else if (int_status == 0xFFFFFFFF)
  4617. goto out;
  4618. usleep_range(1000, 1500);
  4619. count++;
  4620. } while (--cntdn);
  4621. out:
  4622. pr_err(MPT3SAS_FMT
  4623. "%s: failed due to timeout count(%d), int_status(%x)!\n",
  4624. ioc->name, __func__, count, int_status);
  4625. return -EFAULT;
  4626. }
  4627. /**
  4628. * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
  4629. * @ioc: per adapter object
  4630. * @timeout: timeout in second
  4631. *
  4632. * Returns 0 for success, non-zero for failure.
  4633. *
  4634. */
  4635. static int
  4636. _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
  4637. {
  4638. u32 cntdn, count;
  4639. u32 doorbell_reg;
  4640. count = 0;
  4641. cntdn = 1000 * timeout;
  4642. do {
  4643. doorbell_reg = readl(&ioc->chip->Doorbell);
  4644. if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
  4645. dhsprintk(ioc, pr_info(MPT3SAS_FMT
  4646. "%s: successful count(%d), timeout(%d)\n",
  4647. ioc->name, __func__, count, timeout));
  4648. return 0;
  4649. }
  4650. usleep_range(1000, 1500);
  4651. count++;
  4652. } while (--cntdn);
  4653. pr_err(MPT3SAS_FMT
  4654. "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
  4655. ioc->name, __func__, count, doorbell_reg);
  4656. return -EFAULT;
  4657. }
  4658. /**
  4659. * _base_send_ioc_reset - send doorbell reset
  4660. * @ioc: per adapter object
  4661. * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
  4662. * @timeout: timeout in second
  4663. *
  4664. * Returns 0 for success, non-zero for failure.
  4665. */
  4666. static int
  4667. _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
  4668. {
  4669. u32 ioc_state;
  4670. int r = 0;
  4671. if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
  4672. pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
  4673. ioc->name, __func__);
  4674. return -EFAULT;
  4675. }
  4676. if (!(ioc->facts.IOCCapabilities &
  4677. MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
  4678. return -EFAULT;
  4679. pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);
  4680. writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
  4681. &ioc->chip->Doorbell);
  4682. if ((_base_wait_for_doorbell_ack(ioc, 15))) {
  4683. r = -EFAULT;
  4684. goto out;
  4685. }
  4686. ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
  4687. if (ioc_state) {
  4688. pr_err(MPT3SAS_FMT
  4689. "%s: failed going to ready state (ioc_state=0x%x)\n",
  4690. ioc->name, __func__, ioc_state);
  4691. r = -EFAULT;
  4692. goto out;
  4693. }
  4694. out:
  4695. pr_info(MPT3SAS_FMT "message unit reset: %s\n",
  4696. ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
  4697. return r;
  4698. }
/**
 * _base_handshake_req_reply_wait - send request thru doorbell interface
 * @ioc: per adapter object
 * @request_bytes: request length
 * @request: pointer having request payload
 * @reply_bytes: reply length
 * @reply: pointer to reply payload
 * @timeout: timeout in second
 *
 * Implements the MPI doorbell handshake: the request is pushed to the IOC
 * one 32-bit word at a time through the Doorbell register, and the reply is
 * pulled back one 16-bit word at a time, acknowledging each transfer via
 * HostInterruptStatus.  The exact register read/write ordering below is
 * protocol-mandated; do not reorder.
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
	u32 *request, int reply_bytes, u16 *reply, int timeout)
{
	MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
	int i;
	u8 failed;
	__le32 *mfp;

	/* make sure doorbell is not in use */
	if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
		pr_err(MPT3SAS_FMT
			"doorbell is in use (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}

	/* clear pending doorbell interrupts from previous state changes */
	if (readl(&ioc->chip->HostInterruptStatus) &
	    MPI2_HIS_IOC2SYS_DB_STATUS)
		writel(0, &ioc->chip->HostInterruptStatus);

	/* send message to ioc */
	writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
	    ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
	    &ioc->chip->Doorbell);

	/* busy-poll (not sleep) for the IOC to signal it saw the handshake */
	if ((_base_spin_on_doorbell_int(ioc, 5))) {
		pr_err(MPT3SAS_FMT
			"doorbell handshake int failed (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}
	writel(0, &ioc->chip->HostInterruptStatus);

	if ((_base_wait_for_doorbell_ack(ioc, 5))) {
		pr_err(MPT3SAS_FMT
			"doorbell handshake ack failed (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}

	/* send message 32-bits at a time */
	for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
		writel((u32)(request[i]), &ioc->chip->Doorbell);
		if ((_base_wait_for_doorbell_ack(ioc, 5)))
			failed = 1;
	}

	if (failed) {
		pr_err(MPT3SAS_FMT
			"doorbell handshake sending request failed (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}

	/* now wait for the reply */
	if ((_base_wait_for_doorbell_int(ioc, timeout))) {
		pr_err(MPT3SAS_FMT
			"doorbell handshake int failed (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}

	/* read the first two 16-bits, it gives the total length of the reply */
	reply[0] = (u16)(readl(&ioc->chip->Doorbell)
	    & MPI2_DOORBELL_DATA_MASK);
	writel(0, &ioc->chip->HostInterruptStatus);
	if ((_base_wait_for_doorbell_int(ioc, 5))) {
		pr_err(MPT3SAS_FMT
			"doorbell handshake int failed (line=%d)\n",
			ioc->name, __LINE__);
		return -EFAULT;
	}
	reply[1] = (u16)(readl(&ioc->chip->Doorbell)
	    & MPI2_DOORBELL_DATA_MASK);
	writel(0, &ioc->chip->HostInterruptStatus);

	/*
	 * Drain the rest of the reply.  MsgLength is in 32-bit units, so the
	 * word count is MsgLength * 2; words past the caller's buffer are
	 * read from the doorbell and discarded to keep the handshake moving.
	 */
	for (i = 2; i < default_reply->MsgLength * 2; i++) {
		if ((_base_wait_for_doorbell_int(ioc, 5))) {
			pr_err(MPT3SAS_FMT
				"doorbell handshake int failed (line=%d)\n",
				ioc->name, __LINE__);
			return -EFAULT;
		}
		if (i >= reply_bytes/2) /* overflow case */
			readl(&ioc->chip->Doorbell);
		else
			reply[i] = (u16)(readl(&ioc->chip->Doorbell)
			    & MPI2_DOORBELL_DATA_MASK);
		writel(0, &ioc->chip->HostInterruptStatus);
	}

	/* final interrupt + release of the doorbell; failure is only logged */
	_base_wait_for_doorbell_int(ioc, 5);
	if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
		dhsprintk(ioc, pr_info(MPT3SAS_FMT
			"doorbell is in use (line=%d)\n", ioc->name, __LINE__));
	}
	writel(0, &ioc->chip->HostInterruptStatus);

	if (ioc->logging_level & MPT_DEBUG_INIT) {
		mfp = (__le32 *)reply;
		pr_info("\toffset:data\n");
		for (i = 0; i < reply_bytes/4; i++)
			pr_info("\t[0x%02x]:%08x\n", i*4,
			    le32_to_cpu(mfp[i]));
	}
	return 0;
}
/**
 * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
 * @ioc: per adapter object
 * @mpi_reply: the reply payload from FW
 * @mpi_request: the request payload sent to FW
 *
 * The SAS IO Unit Control Request message allows the host to perform low-level
 * operations, such as resets on the PHYs of the IO Unit, also allows the host
 * to obtain the IOC assigned device handles for a device if it has other
 * identifying information about the device, in addition allows the host to
 * remove IOC resources associated with the device.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
	Mpi2SasIoUnitControlReply_t *mpi_reply,
	Mpi2SasIoUnitControlRequest_t *mpi_request)
{
	u16 smid;
	u32 ioc_state;
	bool issue_reset = false;
	int rc;
	void *request;
	u16 wait_state_count;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* base_cmds serializes all doorbell-style commands on this adapter */
	mutex_lock(&ioc->base_cmds.mutex);

	if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
		pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
			ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	/* poll at 1s intervals (up to 10 tries) for OPERATIONAL state */
	wait_state_count = 0;
	ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
		if (wait_state_count++ == 10) {
			pr_err(MPT3SAS_FMT
			    "%s: failed due to ioc not operational\n",
			    ioc->name, __func__);
			rc = -EFAULT;
			goto out;
		}
		ssleep(1);
		ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
		pr_info(MPT3SAS_FMT
			"%s: waiting for operational state(count=%d)\n",
			ioc->name, __func__, wait_state_count);
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	ioc->base_cmds.status = MPT3_CMD_PENDING;
	request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->base_cmds.smid = smid;
	memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
	/* phy resets complete asynchronously; flag one as in flight */
	if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
		ioc->ioc_link_reset_in_progress = 1;
	init_completion(&ioc->base_cmds.done);
	mpt3sas_base_put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->base_cmds.done,
	    msecs_to_jiffies(10000));
	/* clear the in-flight flag whether or not the wait timed out */
	if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
	    ioc->ioc_link_reset_in_progress)
		ioc->ioc_link_reset_in_progress = 0;
	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
		pr_err(MPT3SAS_FMT "%s: timeout\n",
		    ioc->name, __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2SasIoUnitControlRequest_t)/4);
		/* timeout not caused by a reset: escalate to host reset */
		if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
			issue_reset = true;
		goto issue_host_reset;
	}
	if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
		memcpy(mpi_reply, ioc->base_cmds.reply,
		    sizeof(Mpi2SasIoUnitControlReply_t));
	else
		memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
	goto out;

 issue_host_reset:
	if (issue_reset)
		mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
	rc = -EFAULT;
 out:
	mutex_unlock(&ioc->base_cmds.mutex);
	return rc;
}
  4905. /**
  4906. * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
  4907. * @ioc: per adapter object
  4908. * @mpi_reply: the reply payload from FW
  4909. * @mpi_request: the request payload sent to FW
  4910. *
  4911. * The SCSI Enclosure Processor request message causes the IOC to
  4912. * communicate with SES devices to control LED status signals.
  4913. *
  4914. * Returns 0 for success, non-zero for failure.
  4915. */
  4916. int
  4917. mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
  4918. Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
  4919. {
  4920. u16 smid;
  4921. u32 ioc_state;
  4922. bool issue_reset = false;
  4923. int rc;
  4924. void *request;
  4925. u16 wait_state_count;
  4926. dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  4927. __func__));
  4928. mutex_lock(&ioc->base_cmds.mutex);
  4929. if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
  4930. pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
  4931. ioc->name, __func__);
  4932. rc = -EAGAIN;
  4933. goto out;
  4934. }
  4935. wait_state_count = 0;
  4936. ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
  4937. while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
  4938. if (wait_state_count++ == 10) {
  4939. pr_err(MPT3SAS_FMT
  4940. "%s: failed due to ioc not operational\n",
  4941. ioc->name, __func__);
  4942. rc = -EFAULT;
  4943. goto out;
  4944. }
  4945. ssleep(1);
  4946. ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
  4947. pr_info(MPT3SAS_FMT
  4948. "%s: waiting for operational state(count=%d)\n",
  4949. ioc->name,
  4950. __func__, wait_state_count);
  4951. }
  4952. smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
  4953. if (!smid) {
  4954. pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
  4955. ioc->name, __func__);
  4956. rc = -EAGAIN;
  4957. goto out;
  4958. }
  4959. rc = 0;
  4960. ioc->base_cmds.status = MPT3_CMD_PENDING;
  4961. request = mpt3sas_base_get_msg_frame(ioc, smid);
  4962. ioc->base_cmds.smid = smid;
  4963. memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
  4964. init_completion(&ioc->base_cmds.done);
  4965. mpt3sas_base_put_smid_default(ioc, smid);
  4966. wait_for_completion_timeout(&ioc->base_cmds.done,
  4967. msecs_to_jiffies(10000));
  4968. if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
  4969. pr_err(MPT3SAS_FMT "%s: timeout\n",
  4970. ioc->name, __func__);
  4971. _debug_dump_mf(mpi_request,
  4972. sizeof(Mpi2SepRequest_t)/4);
  4973. if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
  4974. issue_reset = false;
  4975. goto issue_host_reset;
  4976. }
  4977. if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
  4978. memcpy(mpi_reply, ioc->base_cmds.reply,
  4979. sizeof(Mpi2SepReply_t));
  4980. else
  4981. memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
  4982. ioc->base_cmds.status = MPT3_CMD_NOT_USED;
  4983. goto out;
  4984. issue_host_reset:
  4985. if (issue_reset)
  4986. mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
  4987. ioc->base_cmds.status = MPT3_CMD_NOT_USED;
  4988. rc = -EFAULT;
  4989. out:
  4990. mutex_unlock(&ioc->base_cmds.mutex);
  4991. return rc;
  4992. }
  4993. /**
  4994. * _base_get_port_facts - obtain port facts reply and save in ioc
  4995. * @ioc: per adapter object
  4996. *
  4997. * Returns 0 for success, non-zero for failure.
  4998. */
  4999. static int
  5000. _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
  5001. {
  5002. Mpi2PortFactsRequest_t mpi_request;
  5003. Mpi2PortFactsReply_t mpi_reply;
  5004. struct mpt3sas_port_facts *pfacts;
  5005. int mpi_reply_sz, mpi_request_sz, r;
  5006. dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  5007. __func__));
  5008. mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
  5009. mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
  5010. memset(&mpi_request, 0, mpi_request_sz);
  5011. mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
  5012. mpi_request.PortNumber = port;
  5013. r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
  5014. (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
  5015. if (r != 0) {
  5016. pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
  5017. ioc->name, __func__, r);
  5018. return r;
  5019. }
  5020. pfacts = &ioc->pfacts[port];
  5021. memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
  5022. pfacts->PortNumber = mpi_reply.PortNumber;
  5023. pfacts->VP_ID = mpi_reply.VP_ID;
  5024. pfacts->VF_ID = mpi_reply.VF_ID;
  5025. pfacts->MaxPostedCmdBuffers =
  5026. le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
  5027. return 0;
  5028. }
/**
 * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
 * @ioc: per adapter object
 * @timeout: timeout in seconds, passed to _base_wait_on_iocstate()
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
{
	u32 ioc_state;
	int rc;

	dinitprintk(ioc, printk(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* hardware must not be touched during PCI error recovery */
	if (ioc->pci_error_recovery) {
		dfailprintk(ioc, printk(MPT3SAS_FMT
		    "%s: host in pci error recovery\n", ioc->name, __func__));
		return -EFAULT;
	}

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	dhsprintk(ioc, printk(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
	    ioc->name, __func__, ioc_state));

	/* already usable: nothing to do */
	if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
	    (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
		return 0;

	/* a stuck doorbell handshake can only be cleared by a diag reset */
	if (ioc_state & MPI2_DOORBELL_USED) {
		dhsprintk(ioc, printk(MPT3SAS_FMT
		    "unexpected doorbell active!\n", ioc->name));
		goto issue_diag_reset;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_base_fault_info(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		goto issue_diag_reset;
	}

	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
	if (ioc_state) {
		dfailprintk(ioc, printk(MPT3SAS_FMT
		    "%s: failed going to ready state (ioc_state=0x%x)\n",
		    ioc->name, __func__, ioc_state));
		return -EFAULT;
	}
	/*
	 * NOTE(review): when the wait above succeeds (returns 0) control
	 * still falls through into the diag reset below -- confirm this
	 * fall-through is intentional.
	 */

 issue_diag_reset:
	rc = _base_diag_reset(ioc);
	return rc;
}
/**
 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
 * @ioc: per adapter object
 *
 * Issues an IOCFacts request over the doorbell handshake and caches the
 * endian-converted reply fields in ioc->facts, deriving a few driver
 * settings (ir_firmware, rdpq_array_capable, page_size) along the way.
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2IOCFactsRequest_t mpi_request;
	Mpi2IOCFactsReply_t mpi_reply;
	struct mpt3sas_facts *facts;
	int mpi_reply_sz, mpi_request_sz, r;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* the IOC must be READY/OPERATIONAL before IOCFacts can be sent */
	r = _base_wait_for_iocstate(ioc, 10);
	if (r) {
		dfailprintk(ioc, printk(MPT3SAS_FMT
		    "%s: failed getting to correct state\n",
		    ioc->name, __func__));
		return r;
	}
	mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
	mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
	memset(&mpi_request, 0, mpi_request_sz);
	mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
	if (r != 0) {
		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
		    ioc->name, __func__, r);
		return r;
	}

	/* cache the reply, converting little-endian fields to CPU order */
	facts = &ioc->facts;
	memset(facts, 0, sizeof(struct mpt3sas_facts));
	facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
	facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
	facts->VP_ID = mpi_reply.VP_ID;
	facts->VF_ID = mpi_reply.VF_ID;
	facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
	facts->MaxChainDepth = mpi_reply.MaxChainDepth;
	facts->WhoInit = mpi_reply.WhoInit;
	facts->NumberOfPorts = mpi_reply.NumberOfPorts;
	facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
	facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
	facts->MaxReplyDescriptorPostQueueDepth =
	    le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
	facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
	facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
	if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
		ioc->ir_firmware = 1;
	/* RDPQ arrays are skipped when booting via reset_devices (kdump) */
	if ((facts->IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
		ioc->rdpq_array_capable = 1;
	facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
	facts->IOCRequestFrameSize =
	    le16_to_cpu(mpi_reply.IOCRequestFrameSize);
	/* IOCMaxChainSegmentSize is only read for non-MPI2.0 HBAs */
	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		facts->IOCMaxChainSegmentSize =
			le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
	}
	facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
	facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
	/* NOTE(review): -1 on the unsigned max_id presumably means
	 * "no target id limit" -- confirm against scsi_host usage. */
	ioc->shost->max_id = -1;
	facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
	facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
	facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
	facts->HighPriorityCredit =
	    le16_to_cpu(mpi_reply.HighPriorityCredit);
	facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
	facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
	facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;

	/*
	 * Get the Page Size from IOC Facts. If it's 0, default to 4k.
	 */
	ioc->page_size = 1 << facts->CurrentHostPageSize;
	if (ioc->page_size == 1) {
		pr_info(MPT3SAS_FMT "CurrentHostPageSize is 0: Setting "
			"default host page size to 4k\n", ioc->name);
		ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
	}
	dinitprintk(ioc, pr_info(MPT3SAS_FMT "CurrentHostPageSize(%d)\n",
	    ioc->name, facts->CurrentHostPageSize));

	dinitprintk(ioc, pr_info(MPT3SAS_FMT
	    "hba queue depth(%d), max chains per io(%d)\n",
	    ioc->name, facts->RequestCredit,
	    facts->MaxChainDepth));
	dinitprintk(ioc, pr_info(MPT3SAS_FMT
	    "request frame size(%d), reply frame size(%d)\n", ioc->name,
	    facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
	return 0;
}
/**
 * _base_send_ioc_init - send ioc_init to firmware
 * @ioc: per adapter object
 *
 * Describes the host's request/reply queue layout (frame sizes, queue
 * depths and DMA base addresses) to the IOC via the doorbell handshake
 * so the firmware can start accepting requests and posting replies.
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2IOCInitRequest_t mpi_request;
	Mpi2IOCInitReply_t mpi_reply;
	int i, r = 0;
	ktime_t current_time;
	u16 ioc_status;
	u32 reply_post_free_array_sz = 0;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
	mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
	mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	mpi_request.VF_ID = 0; /* TODO */
	mpi_request.VP_ID = 0;
	mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
	mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
	mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K;

	if (_base_is_controller_msix_enabled(ioc))
		mpi_request.HostMSIxVectors = ioc->reply_queue_count;
	/* request frame size is expressed in 4-byte units */
	mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
	mpi_request.ReplyDescriptorPostQueueDepth =
	    cpu_to_le16(ioc->reply_post_queue_depth);
	mpi_request.ReplyFreeQueueDepth =
	    cpu_to_le16(ioc->reply_free_queue_depth);

	/* only the upper 32 bits are programmed for these two regions */
	mpi_request.SenseBufferAddressHigh =
	    cpu_to_le32((u64)ioc->sense_dma >> 32);
	mpi_request.SystemReplyAddressHigh =
	    cpu_to_le32((u64)ioc->reply_dma >> 32);
	mpi_request.SystemRequestFrameBaseAddress =
	    cpu_to_le64((u64)ioc->request_dma);
	mpi_request.ReplyFreeQueueAddress =
	    cpu_to_le64((u64)ioc->reply_free_dma);

	if (ioc->rdpq_array_enable) {
		/* RDPQ mode: hand the firmware an array of per-queue base
		 * addresses instead of one contiguous post queue.
		 */
		reply_post_free_array_sz = ioc->reply_queue_count *
		    sizeof(Mpi2IOCInitRDPQArrayEntry);
		memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz);
		for (i = 0; i < ioc->reply_queue_count; i++)
			ioc->reply_post_free_array[i].RDPQBaseAddress =
			    cpu_to_le64(
				(u64)ioc->reply_post[i].reply_post_free_dma);
		mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
		mpi_request.ReplyDescriptorPostQueueAddress =
		    cpu_to_le64((u64)ioc->reply_post_free_array_dma);
	} else {
		mpi_request.ReplyDescriptorPostQueueAddress =
		    cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
	}

	/* This time stamp specifies number of milliseconds
	 * since epoch ~ midnight January 1, 1970.
	 */
	current_time = ktime_get_real();
	mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));

	if (ioc->logging_level & MPT_DEBUG_INIT) {
		__le32 *mfp;
		int i;

		/* dump the raw request frame, one dword per line */
		mfp = (__le32 *)&mpi_request;
		pr_info("\toffset:data\n");
		for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
			pr_info("\t[0x%02x]:%08x\n", i*4,
			    le32_to_cpu(mfp[i]));
	}

	/* 10 second timeout on the doorbell handshake */
	r = _base_handshake_req_reply_wait(ioc,
	    sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);

	if (r != 0) {
		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
		    ioc->name, __func__, r);
		return r;
	}

	/* any log info in the reply is also treated as failure */
	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
	if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
	    mpi_reply.IOCLogInfo) {
		pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
		r = -EIO;
	}

	return r;
}
  5252. /**
  5253. * mpt3sas_port_enable_done - command completion routine for port enable
  5254. * @ioc: per adapter object
  5255. * @smid: system request message index
  5256. * @msix_index: MSIX table index supplied by the OS
  5257. * @reply: reply message frame(lower 32bit addr)
  5258. *
  5259. * Return 1 meaning mf should be freed from _base_interrupt
  5260. * 0 means the mf is freed from this function.
  5261. */
  5262. u8
  5263. mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
  5264. u32 reply)
  5265. {
  5266. MPI2DefaultReply_t *mpi_reply;
  5267. u16 ioc_status;
  5268. if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
  5269. return 1;
  5270. mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
  5271. if (!mpi_reply)
  5272. return 1;
  5273. if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
  5274. return 1;
  5275. ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
  5276. ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
  5277. ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
  5278. memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
  5279. ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
  5280. if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
  5281. ioc->port_enable_failed = 1;
  5282. if (ioc->is_driver_loading) {
  5283. if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
  5284. mpt3sas_port_enable_complete(ioc);
  5285. return 1;
  5286. } else {
  5287. ioc->start_scan_failed = ioc_status;
  5288. ioc->start_scan = 0;
  5289. return 1;
  5290. }
  5291. }
  5292. complete(&ioc->port_enable_cmds.done);
  5293. return 1;
  5294. }
  5295. /**
  5296. * _base_send_port_enable - send port_enable(discovery stuff) to firmware
  5297. * @ioc: per adapter object
  5298. *
  5299. * Returns 0 for success, non-zero for failure.
  5300. */
  5301. static int
  5302. _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
  5303. {
  5304. Mpi2PortEnableRequest_t *mpi_request;
  5305. Mpi2PortEnableReply_t *mpi_reply;
  5306. int r = 0;
  5307. u16 smid;
  5308. u16 ioc_status;
  5309. pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
  5310. if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
  5311. pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
  5312. ioc->name, __func__);
  5313. return -EAGAIN;
  5314. }
  5315. smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
  5316. if (!smid) {
  5317. pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
  5318. ioc->name, __func__);
  5319. return -EAGAIN;
  5320. }
  5321. ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
  5322. mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
  5323. ioc->port_enable_cmds.smid = smid;
  5324. memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
  5325. mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
  5326. init_completion(&ioc->port_enable_cmds.done);
  5327. mpt3sas_base_put_smid_default(ioc, smid);
  5328. wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
  5329. if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
  5330. pr_err(MPT3SAS_FMT "%s: timeout\n",
  5331. ioc->name, __func__);
  5332. _debug_dump_mf(mpi_request,
  5333. sizeof(Mpi2PortEnableRequest_t)/4);
  5334. if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
  5335. r = -EFAULT;
  5336. else
  5337. r = -ETIME;
  5338. goto out;
  5339. }
  5340. mpi_reply = ioc->port_enable_cmds.reply;
  5341. ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
  5342. if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
  5343. pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
  5344. ioc->name, __func__, ioc_status);
  5345. r = -EFAULT;
  5346. goto out;
  5347. }
  5348. out:
  5349. ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
  5350. pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
  5351. "SUCCESS" : "FAILED"));
  5352. return r;
  5353. }
  5354. /**
  5355. * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
  5356. * @ioc: per adapter object
  5357. *
  5358. * Returns 0 for success, non-zero for failure.
  5359. */
  5360. int
  5361. mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
  5362. {
  5363. Mpi2PortEnableRequest_t *mpi_request;
  5364. u16 smid;
  5365. pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
  5366. if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
  5367. pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
  5368. ioc->name, __func__);
  5369. return -EAGAIN;
  5370. }
  5371. smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
  5372. if (!smid) {
  5373. pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
  5374. ioc->name, __func__);
  5375. return -EAGAIN;
  5376. }
  5377. ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
  5378. mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
  5379. ioc->port_enable_cmds.smid = smid;
  5380. memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
  5381. mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
  5382. mpt3sas_base_put_smid_default(ioc, smid);
  5383. return 0;
  5384. }
  5385. /**
  5386. * _base_determine_wait_on_discovery - desposition
  5387. * @ioc: per adapter object
  5388. *
  5389. * Decide whether to wait on discovery to complete. Used to either
  5390. * locate boot device, or report volumes ahead of physical devices.
  5391. *
  5392. * Returns 1 for wait, 0 for don't wait
  5393. */
  5394. static int
  5395. _base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
  5396. {
  5397. /* We wait for discovery to complete if IR firmware is loaded.
  5398. * The sas topology events arrive before PD events, so we need time to
  5399. * turn on the bit in ioc->pd_handles to indicate PD
  5400. * Also, it maybe required to report Volumes ahead of physical
  5401. * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
  5402. */
  5403. if (ioc->ir_firmware)
  5404. return 1;
  5405. /* if no Bios, then we don't need to wait */
  5406. if (!ioc->bios_pg3.BiosVersion)
  5407. return 0;
  5408. /* Bios is present, then we drop down here.
  5409. *
  5410. * If there any entries in the Bios Page 2, then we wait
  5411. * for discovery to complete.
  5412. */
  5413. /* Current Boot Device */
  5414. if ((ioc->bios_pg2.CurrentBootDeviceForm &
  5415. MPI2_BIOSPAGE2_FORM_MASK) ==
  5416. MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
  5417. /* Request Boot Device */
  5418. (ioc->bios_pg2.ReqBootDeviceForm &
  5419. MPI2_BIOSPAGE2_FORM_MASK) ==
  5420. MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
  5421. /* Alternate Request Boot Device */
  5422. (ioc->bios_pg2.ReqAltBootDeviceForm &
  5423. MPI2_BIOSPAGE2_FORM_MASK) ==
  5424. MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
  5425. return 0;
  5426. return 1;
  5427. }
  5428. /**
  5429. * _base_unmask_events - turn on notification for this event
  5430. * @ioc: per adapter object
  5431. * @event: firmware event
  5432. *
  5433. * The mask is stored in ioc->event_masks.
  5434. */
  5435. static void
  5436. _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
  5437. {
  5438. u32 desired_event;
  5439. if (event >= 128)
  5440. return;
  5441. desired_event = (1 << (event % 32));
  5442. if (event < 32)
  5443. ioc->event_masks[0] &= ~desired_event;
  5444. else if (event < 64)
  5445. ioc->event_masks[1] &= ~desired_event;
  5446. else if (event < 96)
  5447. ioc->event_masks[2] &= ~desired_event;
  5448. else if (event < 128)
  5449. ioc->event_masks[3] &= ~desired_event;
  5450. }
/**
 * _base_event_notification - send event notification
 * @ioc: per adapter object
 *
 * Sends MPI2_FUNCTION_EVENT_NOTIFICATION carrying the current
 * ioc->event_masks so the firmware knows which events to deliver,
 * then waits (30 s) for completion via the base internal command slot.
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_event_notification(struct MPT3SAS_ADAPTER *ioc)
{
	Mpi2EventNotificationRequest_t *mpi_request;
	u16 smid;
	int r = 0;
	int i;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* base_cmds is a single-slot internal command; bail if busy */
	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
		pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
		    ioc->name, __func__);
		return -EAGAIN;
	}

	smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
	if (!smid) {
		pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
		    ioc->name, __func__);
		return -EAGAIN;
	}
	ioc->base_cmds.status = MPT3_CMD_PENDING;
	mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
	ioc->base_cmds.smid = smid;
	memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
	mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
	mpi_request->VF_ID = 0; /* TODO */
	mpi_request->VP_ID = 0;
	/* copy the host's mask words into the request, little-endian */
	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		mpi_request->EventMasks[i] =
		    cpu_to_le32(ioc->event_masks[i]);
	init_completion(&ioc->base_cmds.done);
	mpt3sas_base_put_smid_default(ioc, smid);
	wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
	if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
		pr_err(MPT3SAS_FMT "%s: timeout\n",
		    ioc->name, __func__);
		_debug_dump_mf(mpi_request,
		    sizeof(Mpi2EventNotificationRequest_t)/4);
		/* MPT3_CMD_RESET means a host reset ran while we waited */
		if (ioc->base_cmds.status & MPT3_CMD_RESET)
			r = -EFAULT;
		else
			r = -ETIME;
	} else
		dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
		    ioc->name, __func__));
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;
	return r;
}
  5505. /**
  5506. * mpt3sas_base_validate_event_type - validating event types
  5507. * @ioc: per adapter object
  5508. * @event: firmware event
  5509. *
  5510. * This will turn on firmware event notification when application
  5511. * ask for that event. We don't mask events that are already enabled.
  5512. */
  5513. void
  5514. mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
  5515. {
  5516. int i, j;
  5517. u32 event_mask, desired_event;
  5518. u8 send_update_to_fw;
  5519. for (i = 0, send_update_to_fw = 0; i <
  5520. MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
  5521. event_mask = ~event_type[i];
  5522. desired_event = 1;
  5523. for (j = 0; j < 32; j++) {
  5524. if (!(event_mask & desired_event) &&
  5525. (ioc->event_masks[i] & desired_event)) {
  5526. ioc->event_masks[i] &= ~desired_event;
  5527. send_update_to_fw = 1;
  5528. }
  5529. desired_event = (desired_event << 1);
  5530. }
  5531. }
  5532. if (!send_update_to_fw)
  5533. return;
  5534. mutex_lock(&ioc->base_cmds.mutex);
  5535. _base_event_notification(ioc);
  5536. mutex_unlock(&ioc->base_cmds.mutex);
  5537. }
/**
 * _base_diag_reset - the "big hammer" start of day reset
 * @ioc: per adapter object
 *
 * Unlocks diagnostic mode with the magic WriteSequence key sequence,
 * asserts RESET_ADAPTER in HostDiagnostic, then polls until the reset
 * self-clears and the firmware reaches the READY state.
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
{
	u32 host_diagnostic;
	u32 ioc_state;
	u32 count;
	u32 hcb_size;

	pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);

	drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
	    ioc->name));

	count = 0;
	do {
		/* Write magic sequence to WriteSequence register
		 * Loop until in diagnostic mode
		 */
		drsprintk(ioc, pr_info(MPT3SAS_FMT
		    "write magic sequence\n", ioc->name));
		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
		writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);

		/* wait 100 msec */
		msleep(100);

		/* give up after 20 attempts (~2 seconds) */
		if (count++ > 20)
			goto out;

		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
		drsprintk(ioc, pr_info(MPT3SAS_FMT
		    "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
		    ioc->name, count, host_diagnostic));

	} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);

	hcb_size = readl(&ioc->chip->HCBSize);

	drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
	    ioc->name));
	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
	    &ioc->chip->HostDiagnostic);

	/*This delay allows the chip PCIe hardware time to finish reset tasks*/
	msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);

	/* Approximately 300 second max wait */
	for (count = 0; count < (300000000 /
	    MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {

		host_diagnostic = readl(&ioc->chip->HostDiagnostic);

		/* all-ones read means the chip fell off the bus */
		if (host_diagnostic == 0xFFFFFFFF)
			goto out;
		/* RESET_ADAPTER self-clears once the reset completes */
		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
			break;

		msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
	}

	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {

		drsprintk(ioc, pr_info(MPT3SAS_FMT
		    "restart the adapter assuming the HCB Address points to good F/W\n",
		    ioc->name));
		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
		writel(host_diagnostic, &ioc->chip->HostDiagnostic);

		drsprintk(ioc, pr_info(MPT3SAS_FMT
		    "re-enable the HCDW\n", ioc->name));
		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
		    &ioc->chip->HCBSize);
	}

	drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
	    ioc->name));
	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
	    &ioc->chip->HostDiagnostic);

	/* re-lock: further diag writes need the key sequence again */
	drsprintk(ioc, pr_info(MPT3SAS_FMT
	    "disable writes to the diagnostic register\n", ioc->name));
	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);

	drsprintk(ioc, pr_info(MPT3SAS_FMT
	    "Wait for FW to go to the READY state\n", ioc->name));
	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
	if (ioc_state) {
		pr_err(MPT3SAS_FMT
		    "%s: failed going to ready state (ioc_state=0x%x)\n",
		    ioc->name, __func__, ioc_state);
		goto out;
	}

	pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
	return 0;

 out:
	pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
	return -EFAULT;
}
/**
 * _base_make_ioc_ready - put controller in READY state
 * @ioc: per adapter object
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Examines the current IOC doorbell state and takes the gentlest path
 * to READY: wait out a transient RESET state, attempt a message-unit
 * reset from OPERATIONAL, and fall back to a full diag reset otherwise.
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
{
	u32 ioc_state;
	int rc;
	int count;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* device is owned by PCI error recovery; don't touch it */
	if (ioc->pci_error_recovery)
		return 0;

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
	    ioc->name, __func__, ioc_state));

	/* if in RESET state, it should move to READY state shortly */
	count = 0;
	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
		while ((ioc_state & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_READY) {
			/* give up after 10 one-second polls */
			if (count++ == 10) {
				pr_err(MPT3SAS_FMT
				    "%s: failed going to ready state (ioc_state=0x%x)\n",
				    ioc->name, __func__, ioc_state);
				return -EFAULT;
			}
			ssleep(1);
			ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		}
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
		return 0;

	if (ioc_state & MPI2_DOORBELL_USED) {
		dhsprintk(ioc, pr_info(MPT3SAS_FMT
		    "unexpected doorbell active!\n",
		    ioc->name));
		goto issue_diag_reset;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		/* log the fault code before resorting to the big hammer */
		mpt3sas_base_fault_info(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		goto issue_diag_reset;
	}

	if (type == FORCE_BIG_HAMMER)
		goto issue_diag_reset;

	/* try the lighter message-unit reset first when OPERATIONAL */
	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
		if (!(_base_send_ioc_reset(ioc,
		    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))) {
			return 0;
	}

 issue_diag_reset:
	rc = _base_diag_reset(ioc);
	return rc;
}
/**
 * _base_make_ioc_operational - put controller in OPERATIONAL state
 * @ioc: per adapter object
 *
 * Flushes stale delayed-work lists, rebuilds the internal/hi-priority
 * request trackers, primes the reply free and reply post queues, sends
 * IOC_INIT, unmasks interrupts, re-arms event notification and (after
 * initial load) kicks off port enable.
 *
 * Returns 0 for success, non-zero for failure.
 */
static int
_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
{
	int r, i, index;
	unsigned long flags;
	u32 reply_address;
	u16 smid;
	struct _tr_list *delayed_tr, *delayed_tr_next;
	struct _sc_list *delayed_sc, *delayed_sc_next;
	struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
	u8 hide_flag;
	struct adapter_reply_queue *reply_q;
	Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;

	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* clean the delayed target reset list */
	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
	    &ioc->delayed_tr_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}

	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
	    &ioc->delayed_tr_volume_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}

	/* clean the delayed sas-iounit-control list */
	list_for_each_entry_safe(delayed_sc, delayed_sc_next,
	    &ioc->delayed_sc_list, list) {
		list_del(&delayed_sc->list);
		kfree(delayed_sc);
	}

	/* clean the delayed event-ack list */
	list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
	    &ioc->delayed_event_ack_list, list) {
		list_del(&delayed_event_ack->list);
		kfree(delayed_event_ack);
	}

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);

	/* hi-priority queue */
	INIT_LIST_HEAD(&ioc->hpr_free_list);
	smid = ioc->hi_priority_smid;
	for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
		ioc->hpr_lookup[i].cb_idx = 0xFF; /* 0xFF = slot is free */
		ioc->hpr_lookup[i].smid = smid;
		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
		    &ioc->hpr_free_list);
	}

	/* internal queue */
	INIT_LIST_HEAD(&ioc->internal_free_list);
	smid = ioc->internal_smid;
	for (i = 0; i < ioc->internal_depth; i++, smid++) {
		ioc->internal_lookup[i].cb_idx = 0xFF; /* 0xFF = slot is free */
		ioc->internal_lookup[i].smid = smid;
		list_add_tail(&ioc->internal_lookup[i].tracker_list,
		    &ioc->internal_free_list);
	}

	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	/* initialize Reply Free Queue: one entry per reply frame,
	 * each holding the frame's DMA address
	 */
	for (i = 0, reply_address = (u32)ioc->reply_dma ;
	    i < ioc->reply_free_queue_depth ; i++, reply_address +=
	    ioc->reply_sz) {
		ioc->reply_free[i] = cpu_to_le32(reply_address);
		if (ioc->is_mcpu_endpoint)
			_base_clone_reply_to_sys_mem(ioc,
			    reply_address, i);
	}

	/* initialize reply queues */
	if (ioc->is_driver_loading)
		_base_assign_reply_queues(ioc);

	/* initialize Reply Post Free Queue */
	index = 0;
	reply_post_free_contig = ioc->reply_post[0].reply_post_free;
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		/*
		 * If RDPQ is enabled, switch to the next allocation.
		 * Otherwise advance within the contiguous region.
		 */
		if (ioc->rdpq_array_enable) {
			reply_q->reply_post_free =
			    ioc->reply_post[index++].reply_post_free;
		} else {
			reply_q->reply_post_free = reply_post_free_contig;
			reply_post_free_contig += ioc->reply_post_queue_depth;
		}

		reply_q->reply_post_host_index = 0;
		/* all-ones descriptor == empty slot from FW's viewpoint */
		for (i = 0; i < ioc->reply_post_queue_depth; i++)
			reply_q->reply_post_free[i].Words =
			    cpu_to_le64(ULLONG_MAX);
		if (!_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_free_queue;
	}
 skip_init_reply_post_free_queue:

	r = _base_send_ioc_init(ioc);
	if (r)
		return r;

	/* initialize reply free host index */
	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);

	/* initialize reply post host index */
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (ioc->combined_reply_queue)
			writel((reply_q->msix_index & 7)<<
			   MPI2_RPHI_MSIX_INDEX_SHIFT,
			   ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
		else
			writel(reply_q->msix_index <<
			   MPI2_RPHI_MSIX_INDEX_SHIFT,
			   &ioc->chip->ReplyPostHostIndex);

		if (!_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_host_index;
	}
 skip_init_reply_post_host_index:

	_base_unmask_interrupts(ioc);

	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		r = _base_display_fwpkg_version(ioc);
		if (r)
			return r;
	}

	_base_static_config_pages(ioc);
	r = _base_event_notification(ioc);
	if (r)
		return r;

	if (ioc->is_driver_loading) {
		/* warpdrive with OEM id 0x80: honor the hide-SSDS flag
		 * from manufacturing page 10
		 */
		if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
		    == 0x80) {
			hide_flag = (u8) (
			    le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
			    MFG_PAGE10_HIDE_SSDS_MASK);
			if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
				ioc->mfg_pg10_hide_flag = hide_flag;
		}

		ioc->wait_for_discovery_to_complete =
		    _base_determine_wait_on_discovery(ioc);

		return r; /* scan_start and scan_finished support */
	}

	r = _base_send_port_enable(ioc);
	if (r)
		return r;

	return r;
}
  5832. /**
  5833. * mpt3sas_base_free_resources - free resources controller resources
  5834. * @ioc: per adapter object
  5835. *
  5836. * Return nothing.
  5837. */
  5838. void
  5839. mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
  5840. {
  5841. dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  5842. __func__));
  5843. /* synchronizing freeing resource with pci_access_mutex lock */
  5844. mutex_lock(&ioc->pci_access_mutex);
  5845. if (ioc->chip_phys && ioc->chip) {
  5846. _base_mask_interrupts(ioc);
  5847. ioc->shost_recovery = 1;
  5848. _base_make_ioc_ready(ioc, SOFT_RESET);
  5849. ioc->shost_recovery = 0;
  5850. }
  5851. mpt3sas_base_unmap_resources(ioc);
  5852. mutex_unlock(&ioc->pci_access_mutex);
  5853. return;
  5854. }
  5855. /**
  5856. * mpt3sas_base_attach - attach controller instance
  5857. * @ioc: per adapter object
  5858. *
  5859. * Returns 0 for success, non-zero for failure.
  5860. */
  5861. int
  5862. mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
  5863. {
  5864. int r, i;
  5865. int cpu_id, last_cpu_id = 0;
  5866. dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
  5867. __func__));
  5868. /* setup cpu_msix_table */
  5869. ioc->cpu_count = num_online_cpus();
  5870. for_each_online_cpu(cpu_id)
  5871. last_cpu_id = cpu_id;
  5872. ioc->cpu_msix_table_sz = last_cpu_id + 1;
  5873. ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
  5874. ioc->reply_queue_count = 1;
  5875. if (!ioc->cpu_msix_table) {
  5876. dfailprintk(ioc, pr_info(MPT3SAS_FMT
  5877. "allocation for cpu_msix_table failed!!!\n",
  5878. ioc->name));
  5879. r = -ENOMEM;
  5880. goto out_free_resources;
  5881. }
  5882. if (ioc->is_warpdrive) {
  5883. ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
  5884. sizeof(resource_size_t *), GFP_KERNEL);
  5885. if (!ioc->reply_post_host_index) {
  5886. dfailprintk(ioc, pr_info(MPT3SAS_FMT "allocation "
  5887. "for reply_post_host_index failed!!!\n",
  5888. ioc->name));
  5889. r = -ENOMEM;
  5890. goto out_free_resources;
  5891. }
  5892. }
  5893. ioc->rdpq_array_enable_assigned = 0;
  5894. ioc->dma_mask = 0;
  5895. r = mpt3sas_base_map_resources(ioc);
  5896. if (r)
  5897. goto out_free_resources;
  5898. pci_set_drvdata(ioc->pdev, ioc->shost);
  5899. r = _base_get_ioc_facts(ioc);
  5900. if (r)
  5901. goto out_free_resources;
  5902. switch (ioc->hba_mpi_version_belonged) {
  5903. case MPI2_VERSION:
  5904. ioc->build_sg_scmd = &_base_build_sg_scmd;
  5905. ioc->build_sg = &_base_build_sg;
  5906. ioc->build_zero_len_sge = &_base_build_zero_len_sge;
  5907. break;
  5908. case MPI25_VERSION:
  5909. case MPI26_VERSION:
  5910. /*
  5911. * In SAS3.0,
  5912. * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
  5913. * Target Status - all require the IEEE formated scatter gather
  5914. * elements.
  5915. */
  5916. ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
  5917. ioc->build_sg = &_base_build_sg_ieee;
  5918. ioc->build_nvme_prp = &_base_build_nvme_prp;
  5919. ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
  5920. ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
  5921. break;
  5922. }
  5923. if (ioc->is_mcpu_endpoint)
  5924. ioc->put_smid_scsi_io = &_base_put_smid_mpi_ep_scsi_io;
  5925. else
  5926. ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
  5927. /*
  5928. * These function pointers for other requests that don't
  5929. * the require IEEE scatter gather elements.
  5930. *
  5931. * For example Configuration Pages and SAS IOUNIT Control don't.
  5932. */
  5933. ioc->build_sg_mpi = &_base_build_sg;
  5934. ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
  5935. r = _base_make_ioc_ready(ioc, SOFT_RESET);
  5936. if (r)
  5937. goto out_free_resources;
  5938. ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
  5939. sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
  5940. if (!ioc->pfacts) {
  5941. r = -ENOMEM;
  5942. goto out_free_resources;
  5943. }
  5944. for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
  5945. r = _base_get_port_facts(ioc, i);
  5946. if (r)
  5947. goto out_free_resources;
  5948. }
  5949. r = _base_allocate_memory_pools(ioc);
  5950. if (r)
  5951. goto out_free_resources;
  5952. init_waitqueue_head(&ioc->reset_wq);
  5953. /* allocate memory pd handle bitmask list */
  5954. ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
  5955. if (ioc->facts.MaxDevHandle % 8)
  5956. ioc->pd_handles_sz++;
  5957. ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
  5958. GFP_KERNEL);
  5959. if (!ioc->pd_handles) {
  5960. r = -ENOMEM;
  5961. goto out_free_resources;
  5962. }
  5963. ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
  5964. GFP_KERNEL);
  5965. if (!ioc->blocking_handles) {
  5966. r = -ENOMEM;
  5967. goto out_free_resources;
  5968. }
  5969. /* allocate memory for pending OS device add list */
  5970. ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
  5971. if (ioc->facts.MaxDevHandle % 8)
  5972. ioc->pend_os_device_add_sz++;
  5973. ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
  5974. GFP_KERNEL);
  5975. if (!ioc->pend_os_device_add)
  5976. goto out_free_resources;
  5977. ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
  5978. ioc->device_remove_in_progress =
  5979. kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
  5980. if (!ioc->device_remove_in_progress)
  5981. goto out_free_resources;
  5982. ioc->fwfault_debug = mpt3sas_fwfault_debug;
  5983. /* base internal command bits */
  5984. mutex_init(&ioc->base_cmds.mutex);
  5985. ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
  5986. ioc->base_cmds.status = MPT3_CMD_NOT_USED;
  5987. /* port_enable command bits */
  5988. ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
  5989. ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
  5990. /* transport internal command bits */
  5991. ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
  5992. ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
  5993. mutex_init(&ioc->transport_cmds.mutex);
  5994. /* scsih internal command bits */
  5995. ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
  5996. ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
  5997. mutex_init(&ioc->scsih_cmds.mutex);
  5998. /* task management internal command bits */
  5999. ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
  6000. ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
  6001. mutex_init(&ioc->tm_cmds.mutex);
  6002. /* config page internal command bits */
  6003. ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
  6004. ioc->config_cmds.status = MPT3_CMD_NOT_USED;
  6005. mutex_init(&ioc->config_cmds.mutex);
  6006. /* ctl module internal command bits */
  6007. ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
  6008. ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
  6009. ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
  6010. mutex_init(&ioc->ctl_cmds.mutex);
  6011. if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
  6012. !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
  6013. !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
  6014. !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
  6015. r = -ENOMEM;
  6016. goto out_free_resources;
  6017. }
  6018. for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
  6019. ioc->event_masks[i] = -1;
  6020. /* here we enable the events we care about */
  6021. _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
  6022. _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
  6023. _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
  6024. _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
  6025. _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
  6026. _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
  6027. _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
  6028. _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
  6029. _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
  6030. _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
  6031. _base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
  6032. _base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
  6033. _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
  6034. if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
  6035. if (ioc->is_gen35_ioc) {
  6036. _base_unmask_events(ioc,
  6037. MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
  6038. _base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
  6039. _base_unmask_events(ioc,
  6040. MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
  6041. }
  6042. }
  6043. r = _base_make_ioc_operational(ioc);
  6044. if (r)
  6045. goto out_free_resources;
  6046. ioc->non_operational_loop = 0;
  6047. ioc->got_task_abort_from_ioctl = 0;
  6048. return 0;
  6049. out_free_resources:
  6050. ioc->remove_host = 1;
  6051. mpt3sas_base_free_resources(ioc);
  6052. _base_release_memory_pools(ioc);
  6053. pci_set_drvdata(ioc->pdev, NULL);
  6054. kfree(ioc->cpu_msix_table);
  6055. if (ioc->is_warpdrive)
  6056. kfree(ioc->reply_post_host_index);
  6057. kfree(ioc->pd_handles);
  6058. kfree(ioc->blocking_handles);
  6059. kfree(ioc->device_remove_in_progress);
  6060. kfree(ioc->pend_os_device_add);
  6061. kfree(ioc->tm_cmds.reply);
  6062. kfree(ioc->transport_cmds.reply);
  6063. kfree(ioc->scsih_cmds.reply);
  6064. kfree(ioc->config_cmds.reply);
  6065. kfree(ioc->base_cmds.reply);
  6066. kfree(ioc->port_enable_cmds.reply);
  6067. kfree(ioc->ctl_cmds.reply);
  6068. kfree(ioc->ctl_cmds.sense);
  6069. kfree(ioc->pfacts);
  6070. ioc->ctl_cmds.reply = NULL;
  6071. ioc->base_cmds.reply = NULL;
  6072. ioc->tm_cmds.reply = NULL;
  6073. ioc->scsih_cmds.reply = NULL;
  6074. ioc->transport_cmds.reply = NULL;
  6075. ioc->config_cmds.reply = NULL;
  6076. ioc->pfacts = NULL;
  6077. return r;
  6078. }
/**
 * mpt3sas_base_detach - remove controller instance
 * @ioc: per adapter object
 *
 * Tear-down counterpart of the attach path: stops the fault watchdog,
 * releases PCI/interrupt resources and DMA memory pools, detaches the
 * adapter from the pci device, then frees every driver-private
 * allocation made at attach time.
 *
 * Return nothing.
 */
void
mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
{
	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
	    __func__));

	/* stop periodic fault polling before tearing anything down */
	mpt3sas_base_stop_watchdog(ioc);
	mpt3sas_base_free_resources(ioc);
	_base_release_memory_pools(ioc);
	mpt3sas_free_enclosure_list(ioc);
	pci_set_drvdata(ioc->pdev, NULL);
	/* per-adapter lookup tables and handle bitmaps */
	kfree(ioc->cpu_msix_table);
	if (ioc->is_warpdrive)
		kfree(ioc->reply_post_host_index);
	kfree(ioc->pd_handles);
	kfree(ioc->blocking_handles);
	kfree(ioc->device_remove_in_progress);
	kfree(ioc->pend_os_device_add);
	kfree(ioc->pfacts);
	/* reply/sense buffers of the internal command slots */
	kfree(ioc->ctl_cmds.reply);
	kfree(ioc->ctl_cmds.sense);
	kfree(ioc->base_cmds.reply);
	kfree(ioc->port_enable_cmds.reply);
	kfree(ioc->tm_cmds.reply);
	kfree(ioc->transport_cmds.reply);
	kfree(ioc->scsih_cmds.reply);
	kfree(ioc->config_cmds.reply);
}
/**
 * _base_reset_handler - reset callback handler (for base)
 * @ioc: per adapter object
 * @reset_phase: phase
 *
 * The handler for doing any required cleanup or initialization.
 *
 * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
 * MPT3_IOC_DONE_RESET
 *
 * Return nothing.
 */
static void
_base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
{
	/* give the scsih and ctl layers their phase callbacks first */
	mpt3sas_scsih_reset_handler(ioc, reset_phase);
	mpt3sas_ctl_reset_handler(ioc, reset_phase);
	switch (reset_phase) {
	case MPT3_IOC_PRE_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		"%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
		break;
	case MPT3_IOC_AFTER_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		"%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
		/*
		 * For every internal command slot caught mid-flight by the
		 * reset: flag it with MPT3_CMD_RESET so the waiter knows the
		 * command was aborted, release its smid, and complete() the
		 * waiter so it does not block until timeout.
		 */
		if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
			ioc->transport_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
			complete(&ioc->transport_cmds.done);
		}
		if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
			ioc->base_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
			complete(&ioc->base_cmds.done);
		}
		if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
			ioc->port_enable_failed = 1;
			ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
			if (ioc->is_driver_loading) {
				/*
				 * During initial load nobody waits on done;
				 * record the failure for the async scan path
				 * and release the slot directly.
				 */
				ioc->start_scan_failed =
				    MPI2_IOCSTATUS_INTERNAL_ERROR;
				ioc->start_scan = 0;
				ioc->port_enable_cmds.status =
				    MPT3_CMD_NOT_USED;
			} else
				complete(&ioc->port_enable_cmds.done);
		}
		if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
			ioc->config_cmds.status |= MPT3_CMD_RESET;
			mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
			/* invalidate smid so a late reply is not matched */
			ioc->config_cmds.smid = USHRT_MAX;
			complete(&ioc->config_cmds.done);
		}
		break;
	case MPT3_IOC_DONE_RESET:
		dtmprintk(ioc, pr_info(MPT3SAS_FMT
		"%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
		break;
	}
}
  6173. /**
  6174. * mpt3sas_wait_for_commands_to_complete - reset controller
  6175. * @ioc: Pointer to MPT_ADAPTER structure
  6176. *
  6177. * This function is waiting 10s for all pending commands to complete
  6178. * prior to putting controller in reset.
  6179. */
  6180. void
  6181. mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
  6182. {
  6183. u32 ioc_state;
  6184. ioc->pending_io_count = 0;
  6185. ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
  6186. if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
  6187. return;
  6188. /* pending command count */
  6189. ioc->pending_io_count = atomic_read(&ioc->shost->host_busy);
  6190. if (!ioc->pending_io_count)
  6191. return;
  6192. /* wait for pending commands to complete */
  6193. wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
  6194. }
/**
 * mpt3sas_base_hard_reset_handler - reset controller
 * @ioc: Pointer to MPT_ADAPTER structure
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Serializes hard resets via reset_in_progress_mutex, drains pending
 * I/O, brings the IOC back to READY and then OPERATIONAL, and fires a
 * diag-buffer master trigger when one is armed.
 *
 * Returns 0 for success, non-zero for failure.
 */
int
mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
	enum reset_type type)
{
	int r;
	unsigned long flags;
	u32 ioc_state;
	u8 is_fault = 0, is_trigger = 0;

	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
	    __func__));

	/* the adapter is off-limits while PCI error recovery owns it */
	if (ioc->pci_error_recovery) {
		pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
		    ioc->name, __func__);
		r = 0;
		goto out_unlocked;
	}

	if (mpt3sas_fwfault_debug)
		mpt3sas_halt_firmware(ioc);

	/* wait for an active reset in progress to complete */
	if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
		do {
			ssleep(1);
		} while (ioc->shost_recovery == 1);
		dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
		    __func__));
		/* report the outcome of the reset we waited on */
		return ioc->ioc_reset_in_progress_status;
	}

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->shost_recovery = 1;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	/*
	 * If a trace diag buffer is registered and not yet released, a
	 * master trigger must fire after a successful reset; remember now
	 * whether the IOC was in FAULT so the correct trigger type is used.
	 */
	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
	    (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_RELEASED))) {
		is_trigger = 1;
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
			is_fault = 1;
	}
	_base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
	mpt3sas_wait_for_commands_to_complete(ioc);
	_base_mask_interrupts(ioc);
	r = _base_make_ioc_ready(ioc, type);
	if (r)
		goto out;
	_base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);

	/* If this hard reset is called while port enable is active, then
	 * there is no reason to call make_ioc_operational
	 */
	if (ioc->is_driver_loading && ioc->port_enable_failed) {
		ioc->remove_host = 1;
		r = -EFAULT;
		goto out;
	}
	r = _base_get_ioc_facts(ioc);
	if (r)
		goto out;

	/* RDPQ support cannot be lost across a reset with valid firmware */
	if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
		panic("%s: Issue occurred with flashing controller firmware."
		      "Please reboot the system and ensure that the correct"
		      " firmware version is running\n", ioc->name);

	r = _base_make_ioc_operational(ioc);
	if (!r)
		_base_reset_handler(ioc, MPT3_IOC_DONE_RESET);

 out:
	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
	    ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));

	/* publish the result before releasing waiters spinning on
	 * shost_recovery in the trylock path above */
	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->ioc_reset_in_progress_status = r;
	ioc->shost_recovery = 0;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->ioc_reset_count++;
	mutex_unlock(&ioc->reset_in_progress_mutex);

 out_unlocked:
	if ((r == 0) && is_trigger) {
		if (is_fault)
			mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
		else
			mpt3sas_trigger_master(ioc,
			    MASTER_TRIGGER_ADAPTER_RESET);
	}
	dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
	    __func__));
	return r;
}