ufshcd.c

  1. /*
  2. * Universal Flash Storage Host controller driver Core
  3. *
  4. * This code is based on drivers/scsi/ufs/ufshcd.c
  5. * Copyright (C) 2011-2013 Samsung India Software Operations
  6. * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  7. *
  8. * Authors:
  9. * Santosh Yaraganavi <santosh.sy@samsung.com>
  10. * Vinayak Holikatti <h.vinayak@samsung.com>
  11. *
  12. * This program is free software; you can redistribute it and/or
  13. * modify it under the terms of the GNU General Public License
  14. * as published by the Free Software Foundation; either version 2
  15. * of the License, or (at your option) any later version.
  16. * See the COPYING file in the top-level directory or visit
  17. * <http://www.gnu.org/licenses/gpl-2.0.html>
  18. *
  19. * This program is distributed in the hope that it will be useful,
  20. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  21. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  22. * GNU General Public License for more details.
  23. *
  24. * This program is provided "AS IS" and "WITH ALL FAULTS" and
  25. * without warranty of any kind. You are solely responsible for
  26. * determining the appropriateness of using and distributing
  27. * the program and assume all risks associated with your exercise
  28. * of rights with respect to the program, including but not limited
  29. * to infringement of third party rights, the risks and costs of
  30. * program errors, damage to or loss of data, programs or equipment,
  31. * and unavailability or interruption of operations. Under no
  32. * circumstances will the contributor of this Program be liable for
  33. * any damages of any kind arising from your use or distribution of
  34. * this program.
  35. *
  36. * The Linux Foundation chooses to take subject only to the GPLv2
  37. * license terms, and distributes only under these terms.
  38. */
  39. #include <linux/async.h>
  40. #include <linux/devfreq.h>
  41. #include "ufshcd.h"
  42. #include "unipro.h"
  43. #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
  44. UTP_TASK_REQ_COMPL |\
  45. UFSHCD_ERROR_MASK)
  46. /* UIC command timeout, unit: ms */
  47. #define UIC_CMD_TIMEOUT 500
  48. /* NOP OUT retries waiting for NOP IN response */
  49. #define NOP_OUT_RETRIES 10
  50. /* Timeout after 30 msecs if NOP OUT hangs without response */
  51. #define NOP_OUT_TIMEOUT 30 /* msecs */
  52. /* Query request retries */
  53. #define QUERY_REQ_RETRIES 10
  54. /* Query request timeout */
  55. #define QUERY_REQ_TIMEOUT 30 /* msec */
  56. /* Task management command timeout */
  57. #define TM_CMD_TIMEOUT 100 /* msecs */
  58. /* maximum number of link-startup retries */
  59. #define DME_LINKSTARTUP_RETRIES 3
  60. /* maximum number of reset retries before giving up */
  61. #define MAX_HOST_RESET_RETRIES 5
  62. /* Expose the flag value from utp_upiu_query.value */
  63. #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
  64. /* Interrupt aggregation default timeout, unit: 40us */
  65. #define INT_AGGR_DEF_TO 0x02
  66. #define ufshcd_toggle_vreg(_dev, _vreg, _on) \
  67. ({ \
  68. int _ret; \
  69. if (_on) \
  70. _ret = ufshcd_enable_vreg(_dev, _vreg); \
  71. else \
  72. _ret = ufshcd_disable_vreg(_dev, _vreg); \
  73. _ret; \
  74. })
  75. static u32 ufs_query_desc_max_size[] = {
  76. QUERY_DESC_DEVICE_MAX_SIZE,
  77. QUERY_DESC_CONFIGURAION_MAX_SIZE,
  78. QUERY_DESC_UNIT_MAX_SIZE,
  79. QUERY_DESC_RFU_MAX_SIZE,
  80. QUERY_DESC_INTERCONNECT_MAX_SIZE,
  81. QUERY_DESC_STRING_MAX_SIZE,
  82. QUERY_DESC_RFU_MAX_SIZE,
  83. QUERY_DESC_GEOMETRY_MAZ_SIZE,
  84. QUERY_DESC_POWER_MAX_SIZE,
  85. QUERY_DESC_RFU_MAX_SIZE,
  86. };
  87. enum {
  88. UFSHCD_MAX_CHANNEL = 0,
  89. UFSHCD_MAX_ID = 1,
  90. UFSHCD_CMD_PER_LUN = 32,
  91. UFSHCD_CAN_QUEUE = 32,
  92. };
  93. /* UFSHCD states */
  94. enum {
  95. UFSHCD_STATE_RESET,
  96. UFSHCD_STATE_ERROR,
  97. UFSHCD_STATE_OPERATIONAL,
  98. };
  99. /* UFSHCD error handling flags */
  100. enum {
  101. UFSHCD_EH_IN_PROGRESS = (1 << 0),
  102. };
  103. /* UFSHCD UIC layer error flags */
  104. enum {
  105. UFSHCD_UIC_DL_PA_INIT_ERROR = (1 << 0), /* Data link layer error */
  106. UFSHCD_UIC_NL_ERROR = (1 << 1), /* Network layer error */
  107. UFSHCD_UIC_TL_ERROR = (1 << 2), /* Transport Layer error */
  108. UFSHCD_UIC_DME_ERROR = (1 << 3), /* DME error */
  109. };
  110. /* Interrupt configuration options */
  111. enum {
  112. UFSHCD_INT_DISABLE,
  113. UFSHCD_INT_ENABLE,
  114. UFSHCD_INT_CLEAR,
  115. };
  116. #define ufshcd_set_eh_in_progress(h) \
  117. (h->eh_flags |= UFSHCD_EH_IN_PROGRESS)
  118. #define ufshcd_eh_in_progress(h) \
  119. (h->eh_flags & UFSHCD_EH_IN_PROGRESS)
  120. #define ufshcd_clear_eh_in_progress(h) \
  121. (h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
  122. #define ufshcd_set_ufs_dev_active(h) \
  123. ((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
  124. #define ufshcd_set_ufs_dev_sleep(h) \
  125. ((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
  126. #define ufshcd_set_ufs_dev_poweroff(h) \
  127. ((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
  128. #define ufshcd_is_ufs_dev_active(h) \
  129. ((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
  130. #define ufshcd_is_ufs_dev_sleep(h) \
  131. ((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
  132. #define ufshcd_is_ufs_dev_poweroff(h) \
  133. ((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
  134. static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
  135. {UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
  136. {UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
  137. {UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
  138. {UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
  139. {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
  140. {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
  141. };
  142. static inline enum ufs_dev_pwr_mode
  143. ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
  144. {
  145. return ufs_pm_lvl_states[lvl].dev_state;
  146. }
  147. static inline enum uic_link_state
  148. ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
  149. {
  150. return ufs_pm_lvl_states[lvl].link_state;
  151. }
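/*
 * Illustrative example (not part of the driver): for a pm level that
 * indexes entry 3 of ufs_pm_lvl_states[] above, the two helpers return
 * UFS_SLEEP_PWR_MODE and UIC_LINK_HIBERN8_STATE respectively.
 */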
  152. static void ufshcd_tmc_handler(struct ufs_hba *hba);
  153. static void ufshcd_async_scan(void *data, async_cookie_t cookie);
  154. static int ufshcd_reset_and_restore(struct ufs_hba *hba);
  155. static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
  156. static void ufshcd_hba_exit(struct ufs_hba *hba);
  157. static int ufshcd_probe_hba(struct ufs_hba *hba);
  158. static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
  159. bool skip_ref_clk);
  160. static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
  161. static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
  162. static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
  163. static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
  164. static irqreturn_t ufshcd_intr(int irq, void *__hba);
  165. static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
  166. struct ufs_pa_layer_attr *desired_pwr_mode);
  167. static inline int ufshcd_enable_irq(struct ufs_hba *hba)
  168. {
  169. int ret = 0;
  170. if (!hba->is_irq_enabled) {
  171. ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
  172. hba);
  173. if (ret)
  174. dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
  175. __func__, ret);
  176. hba->is_irq_enabled = true;
  177. }
  178. return ret;
  179. }
  180. static inline void ufshcd_disable_irq(struct ufs_hba *hba)
  181. {
  182. if (hba->is_irq_enabled) {
  183. free_irq(hba->irq, hba);
  184. hba->is_irq_enabled = false;
  185. }
  186. }
  187. /*
  188. * ufshcd_wait_for_register - wait for register value to change
  189. * @hba - per-adapter interface
  190. * @reg - mmio register offset
  191. * @mask - mask to apply to read register value
  192. * @val - wait condition
  193. * @interval_us - polling interval in microsecs
  194. * @timeout_ms - timeout in millisecs
  195. *
  196. * Returns -ETIMEDOUT on error, zero on success
  197. */
  198. static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
  199. u32 val, unsigned long interval_us, unsigned long timeout_ms)
  200. {
  201. int err = 0;
  202. unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
  203. /* ignore bits that we don't intend to wait on */
  204. val = val & mask;
  205. while ((ufshcd_readl(hba, reg) & mask) != val) {
  206. /* wakeup within 50us of expiry */
  207. usleep_range(interval_us, interval_us + 50);
  208. if (time_after(jiffies, timeout)) {
  209. if ((ufshcd_readl(hba, reg) & mask) != val)
  210. err = -ETIMEDOUT;
  211. break;
  212. }
  213. }
  214. return err;
  215. }
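/*
 * Illustrative sketch (not part of the driver): a caller can use
 * ufshcd_wait_for_register() to poll a doorbell bit until the controller
 * acknowledges a clear request. The helper name and the 1000us/1000ms
 * polling values below are assumptions for illustration only.
 */
static int __maybe_unused example_wait_doorbell_clear(struct ufs_hba *hba,
						      int tag)
{
	u32 mask = 1 << tag;

	/* wait until the doorbell bit for @tag reads back as zero */
	return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL,
					mask, ~mask, 1000, 1000);
}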
  216. /**
  217. * ufshcd_get_intr_mask - Get the interrupt bit mask
  218. * @hba - Pointer to adapter instance
  219. *
  220. * Returns interrupt bit mask per version
  221. */
  222. static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
  223. {
  224. if (hba->ufs_version == UFSHCI_VERSION_10)
  225. return INTERRUPT_MASK_ALL_VER_10;
  226. else
  227. return INTERRUPT_MASK_ALL_VER_11;
  228. }
  229. /**
  230. * ufshcd_get_ufs_version - Get the UFS version supported by the HBA
  231. * @hba - Pointer to adapter instance
  232. *
  233. * Returns UFSHCI version supported by the controller
  234. */
  235. static inline u32 ufshcd_get_ufs_version(struct ufs_hba *hba)
  236. {
  237. return ufshcd_readl(hba, REG_UFS_VERSION);
  238. }
  239. /**
  240. * ufshcd_is_device_present - Check if any device is connected to
  241. * the host controller
  242. * @hba: pointer to adapter instance
  243. *
  244. * Returns 1 if device present, 0 if no device detected
  245. */
  246. static inline int ufshcd_is_device_present(struct ufs_hba *hba)
  247. {
  248. return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
  249. DEVICE_PRESENT) ? 1 : 0;
  250. }
  251. /**
  252. * ufshcd_get_tr_ocs - Get the UTRD Overall Command Status
  253. * @lrb: pointer to local command reference block
  254. *
  255. * This function is used to get the OCS field from UTRD
  256. * Returns the OCS field in the UTRD
  257. */
  258. static inline int ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp)
  259. {
  260. return le32_to_cpu(lrbp->utr_descriptor_ptr->header.dword_2) & MASK_OCS;
  261. }
  262. /**
  263. * ufshcd_get_tmr_ocs - Get the UTMRD Overall Command Status
  264. * @task_req_descp: pointer to utp_task_req_desc structure
  265. *
  266. * This function is used to get the OCS field from UTMRD
  267. * Returns the OCS field in the UTMRD
  268. */
  269. static inline int
  270. ufshcd_get_tmr_ocs(struct utp_task_req_desc *task_req_descp)
  271. {
  272. return le32_to_cpu(task_req_descp->header.dword_2) & MASK_OCS;
  273. }
  274. /**
  275. * ufshcd_get_tm_free_slot - get a free slot for task management request
  276. * @hba: per adapter instance
  277. * @free_slot: pointer to variable with available slot value
  278. *
  279. * Get a free tag and lock it until ufshcd_put_tm_slot() is called.
  280. * Returns false if a free slot is not available, else returns true with the
  281. * tag value in @free_slot.
  282. */
  283. static bool ufshcd_get_tm_free_slot(struct ufs_hba *hba, int *free_slot)
  284. {
  285. int tag;
  286. bool ret = false;
  287. if (!free_slot)
  288. goto out;
  289. do {
  290. tag = find_first_zero_bit(&hba->tm_slots_in_use, hba->nutmrs);
  291. if (tag >= hba->nutmrs)
  292. goto out;
  293. } while (test_and_set_bit_lock(tag, &hba->tm_slots_in_use));
  294. *free_slot = tag;
  295. ret = true;
  296. out:
  297. return ret;
  298. }
  299. static inline void ufshcd_put_tm_slot(struct ufs_hba *hba, int slot)
  300. {
  301. clear_bit_unlock(slot, &hba->tm_slots_in_use);
  302. }
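/*
 * Illustrative sketch (assumption, not part of the driver): the intended
 * pairing of the two helpers above is get slot -> issue the task management
 * request -> put slot, so the bit in tm_slots_in_use stays locked only
 * while the slot is actually in use.
 */
static void __maybe_unused example_tm_slot_usage(struct ufs_hba *hba)
{
	int free_slot;

	if (!ufshcd_get_tm_free_slot(hba, &free_slot))
		return;	/* all task management slots are busy */

	/* ... build the UTMRD and ring the TM doorbell for @free_slot ... */

	ufshcd_put_tm_slot(hba, free_slot);
}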
  303. /**
  304. * ufshcd_utrl_clear - Clear a bit in UTRLCLR register
  305. * @hba: per adapter instance
  306. * @pos: position of the bit to be cleared
  307. */
  308. static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos)
  309. {
  310. ufshcd_writel(hba, ~(1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR);
  311. }
  312. /**
  313. * ufshcd_get_lists_status - Check UCRDY, UTRLRDY and UTMRLRDY
  314. * @reg: Register value of host controller status
  315. *
  316. * Returns 0 on success and a positive value on failure
  317. */
  318. static inline int ufshcd_get_lists_status(u32 reg)
  319. {
  320. /*
  321. * The mask 0xFF is for the following HCS register bits
  322. * Bit Description
  323. * 0 Device Present
  324. * 1 UTRLRDY
  325. * 2 UTMRLRDY
  326. * 3 UCRDY
  327. * 4 HEI
  328. * 5 DEI
  329. * 6-7 reserved
  330. */
  331. return (((reg) & (0xFF)) >> 1) ^ (0x07);
  332. }
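/*
 * Worked example for the helper above: with DP, UTRLRDY, UTMRLRDY and UCRDY
 * all set the register reads 0x0F, so ((0x0F & 0xFF) >> 1) ^ 0x07 == 0
 * (success); if UCRDY is still clear the register reads 0x07 and the helper
 * returns 0x03 ^ 0x07 == 0x04 (failure).
 */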
  333. /**
  334. * ufshcd_get_uic_cmd_result - Get the UIC command result
  335. * @hba: Pointer to adapter instance
  336. *
  337. * This function gets the result of UIC command completion
  338. * Returns 0 on success, non zero value on error
  339. */
  340. static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba)
  341. {
  342. return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2) &
  343. MASK_UIC_COMMAND_RESULT;
  344. }
  345. /**
  346. * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command
  347. * @hba: Pointer to adapter instance
  348. *
  349. * This function gets UIC command argument3
  350. * Returns the attribute value read from UIC command argument 3
  351. */
  352. static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba)
  353. {
  354. return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3);
  355. }
  356. /**
  357. * ufshcd_get_req_rsp - returns the TR response transaction type
  358. * @ucd_rsp_ptr: pointer to response UPIU
  359. */
  360. static inline int
  361. ufshcd_get_req_rsp(struct utp_upiu_rsp *ucd_rsp_ptr)
  362. {
  363. return be32_to_cpu(ucd_rsp_ptr->header.dword_0) >> 24;
  364. }
  365. /**
  366. * ufshcd_get_rsp_upiu_result - Get the result from response UPIU
  367. * @ucd_rsp_ptr: pointer to response UPIU
  368. *
  369. * This function gets the response status and scsi_status from response UPIU
  370. * Returns the response result code.
  371. */
  372. static inline int
  373. ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr)
  374. {
  375. return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT;
  376. }
  377. /*
  378. * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length
  379. * from response UPIU
  380. * @ucd_rsp_ptr: pointer to response UPIU
  381. *
  382. * Return the data segment length.
  383. */
  384. static inline unsigned int
  385. ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr)
  386. {
  387. return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
  388. MASK_RSP_UPIU_DATA_SEG_LEN;
  389. }
  390. /**
  391. * ufshcd_is_exception_event - Check if the device raised an exception event
  392. * @ucd_rsp_ptr: pointer to response UPIU
  393. *
  394. * The function checks if the device raised an exception event indicated in
  395. * the Device Information field of response UPIU.
  396. *
  397. * Returns true if exception is raised, false otherwise.
  398. */
  399. static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr)
  400. {
  401. return be32_to_cpu(ucd_rsp_ptr->header.dword_2) &
  402. MASK_RSP_EXCEPTION_EVENT ? true : false;
  403. }
  404. /**
  405. * ufshcd_reset_intr_aggr - Reset interrupt aggregation values.
  406. * @hba: per adapter instance
  407. */
  408. static inline void
  409. ufshcd_reset_intr_aggr(struct ufs_hba *hba)
  410. {
  411. ufshcd_writel(hba, INT_AGGR_ENABLE |
  412. INT_AGGR_COUNTER_AND_TIMER_RESET,
  413. REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
  414. }
  415. /**
  416. * ufshcd_config_intr_aggr - Configure interrupt aggregation values.
  417. * @hba: per adapter instance
  418. * @cnt: Interrupt aggregation counter threshold
  419. * @tmout: Interrupt aggregation timeout value
  420. */
  421. static inline void
  422. ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout)
  423. {
  424. ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE |
  425. INT_AGGR_COUNTER_THLD_VAL(cnt) |
  426. INT_AGGR_TIMEOUT_VAL(tmout),
  427. REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL);
  428. }
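/*
 * Illustrative sketch (assumption, not part of the driver): re-arming
 * interrupt aggregation with the default timeout defined at the top of this
 * file. Using "all transfer request slots but one" as the counter threshold
 * mirrors how the driver typically programs it, but the helper below is an
 * example only.
 */
static void __maybe_unused example_setup_intr_aggr(struct ufs_hba *hba)
{
	ufshcd_reset_intr_aggr(hba);
	ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
}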
  429. /**
  430. * ufshcd_enable_run_stop_reg - Enable run-stop registers.
  431. * When the run-stop registers are set to 1, the host controller
  432. * is allowed to start processing the pending requests
  433. * @hba: per adapter instance
  434. */
  435. static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
  436. {
  437. ufshcd_writel(hba, UTP_TASK_REQ_LIST_RUN_STOP_BIT,
  438. REG_UTP_TASK_REQ_LIST_RUN_STOP);
  439. ufshcd_writel(hba, UTP_TRANSFER_REQ_LIST_RUN_STOP_BIT,
  440. REG_UTP_TRANSFER_REQ_LIST_RUN_STOP);
  441. }
  442. /**
  443. * ufshcd_hba_start - Start controller initialization sequence
  444. * @hba: per adapter instance
  445. */
  446. static inline void ufshcd_hba_start(struct ufs_hba *hba)
  447. {
  448. ufshcd_writel(hba, CONTROLLER_ENABLE, REG_CONTROLLER_ENABLE);
  449. }
  450. /**
  451. * ufshcd_is_hba_active - Get controller state
  452. * @hba: per adapter instance
  453. *
  454. * Returns zero if controller is active, 1 otherwise
  455. */
  456. static inline int ufshcd_is_hba_active(struct ufs_hba *hba)
  457. {
  458. return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
  459. }
  460. static void ufshcd_ungate_work(struct work_struct *work)
  461. {
  462. int ret;
  463. unsigned long flags;
  464. struct ufs_hba *hba = container_of(work, struct ufs_hba,
  465. clk_gating.ungate_work);
  466. cancel_delayed_work_sync(&hba->clk_gating.gate_work);
  467. spin_lock_irqsave(hba->host->host_lock, flags);
  468. if (hba->clk_gating.state == CLKS_ON) {
  469. spin_unlock_irqrestore(hba->host->host_lock, flags);
  470. goto unblock_reqs;
  471. }
  472. spin_unlock_irqrestore(hba->host->host_lock, flags);
  473. ufshcd_setup_clocks(hba, true);
  474. /* Exit from hibern8 */
  475. if (ufshcd_can_hibern8_during_gating(hba)) {
  476. /* Prevent gating in this path */
  477. hba->clk_gating.is_suspended = true;
  478. if (ufshcd_is_link_hibern8(hba)) {
  479. ret = ufshcd_uic_hibern8_exit(hba);
  480. if (ret)
  481. dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
  482. __func__, ret);
  483. else
  484. ufshcd_set_link_active(hba);
  485. }
  486. hba->clk_gating.is_suspended = false;
  487. }
  488. unblock_reqs:
  489. if (ufshcd_is_clkscaling_enabled(hba))
  490. devfreq_resume_device(hba->devfreq);
  491. scsi_unblock_requests(hba->host);
  492. }
  493. /**
  494. * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
  495. * Also, exit from hibern8 mode and set the link as active.
  496. * @hba: per adapter instance
  497. * @async: This indicates whether caller should ungate clocks asynchronously.
  498. */
  499. int ufshcd_hold(struct ufs_hba *hba, bool async)
  500. {
  501. int rc = 0;
  502. unsigned long flags;
  503. if (!ufshcd_is_clkgating_allowed(hba))
  504. goto out;
  505. spin_lock_irqsave(hba->host->host_lock, flags);
  506. hba->clk_gating.active_reqs++;
  507. start:
  508. switch (hba->clk_gating.state) {
  509. case CLKS_ON:
  510. break;
  511. case REQ_CLKS_OFF:
  512. if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
  513. hba->clk_gating.state = CLKS_ON;
  514. break;
  515. }
  516. /*
  517. * If we are here, it means gating work is either done or
  518. * currently running. Hence, fall through to cancel gating
  519. * work and to enable clocks.
  520. */
  521. case CLKS_OFF:
  522. scsi_block_requests(hba->host);
  523. hba->clk_gating.state = REQ_CLKS_ON;
  524. schedule_work(&hba->clk_gating.ungate_work);
  525. /*
  526. * fall through to check if we should wait for this
  527. * work to be done or not.
  528. */
  529. case REQ_CLKS_ON:
  530. if (async) {
  531. rc = -EAGAIN;
  532. hba->clk_gating.active_reqs--;
  533. break;
  534. }
  535. spin_unlock_irqrestore(hba->host->host_lock, flags);
  536. flush_work(&hba->clk_gating.ungate_work);
  537. /* Make sure state is CLKS_ON before returning */
  538. spin_lock_irqsave(hba->host->host_lock, flags);
  539. goto start;
  540. default:
  541. dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
  542. __func__, hba->clk_gating.state);
  543. break;
  544. }
  545. spin_unlock_irqrestore(hba->host->host_lock, flags);
  546. out:
  547. return rc;
  548. }
  549. static void ufshcd_gate_work(struct work_struct *work)
  550. {
  551. struct ufs_hba *hba = container_of(work, struct ufs_hba,
  552. clk_gating.gate_work.work);
  553. unsigned long flags;
  554. spin_lock_irqsave(hba->host->host_lock, flags);
  555. if (hba->clk_gating.is_suspended) {
  556. hba->clk_gating.state = CLKS_ON;
  557. goto rel_lock;
  558. }
  559. if (hba->clk_gating.active_reqs
  560. || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
  561. || hba->lrb_in_use || hba->outstanding_tasks
  562. || hba->active_uic_cmd || hba->uic_async_done)
  563. goto rel_lock;
  564. spin_unlock_irqrestore(hba->host->host_lock, flags);
  565. /* put the link into hibern8 mode before turning off clocks */
  566. if (ufshcd_can_hibern8_during_gating(hba)) {
  567. if (ufshcd_uic_hibern8_enter(hba)) {
  568. hba->clk_gating.state = CLKS_ON;
  569. goto out;
  570. }
  571. ufshcd_set_link_hibern8(hba);
  572. }
  573. if (ufshcd_is_clkscaling_enabled(hba)) {
  574. devfreq_suspend_device(hba->devfreq);
  575. hba->clk_scaling.window_start_t = 0;
  576. }
  577. if (!ufshcd_is_link_active(hba))
  578. ufshcd_setup_clocks(hba, false);
  579. else
  580. /* If link is active, device ref_clk can't be switched off */
  581. __ufshcd_setup_clocks(hba, false, true);
  582. /*
  583. * In case you are here to cancel this work the gating state
  584. * would be marked as REQ_CLKS_ON. In this case keep the state
  585. * as REQ_CLKS_ON which would anyway imply that clocks are off
  586. * and a request to turn them on is pending. This way we keep the
  587. * state machine intact, which ultimately prevents the cancel work
  588. * from being done multiple times when new requests arrive before
  589. * the current cancel work is done.
  590. */
  591. spin_lock_irqsave(hba->host->host_lock, flags);
  592. if (hba->clk_gating.state == REQ_CLKS_OFF)
  593. hba->clk_gating.state = CLKS_OFF;
  594. rel_lock:
  595. spin_unlock_irqrestore(hba->host->host_lock, flags);
  596. out:
  597. return;
  598. }
  599. /* host lock must be held before calling this variant */
  600. static void __ufshcd_release(struct ufs_hba *hba)
  601. {
  602. if (!ufshcd_is_clkgating_allowed(hba))
  603. return;
  604. hba->clk_gating.active_reqs--;
  605. if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
  606. || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
  607. || hba->lrb_in_use || hba->outstanding_tasks
  608. || hba->active_uic_cmd || hba->uic_async_done)
  609. return;
  610. hba->clk_gating.state = REQ_CLKS_OFF;
  611. schedule_delayed_work(&hba->clk_gating.gate_work,
  612. msecs_to_jiffies(hba->clk_gating.delay_ms));
  613. }
  614. void ufshcd_release(struct ufs_hba *hba)
  615. {
  616. unsigned long flags;
  617. spin_lock_irqsave(hba->host->host_lock, flags);
  618. __ufshcd_release(hba);
  619. spin_unlock_irqrestore(hba->host->host_lock, flags);
  620. }
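/*
 * Illustrative sketch (assumption, not part of the driver): ufshcd_hold()
 * and ufshcd_release() are meant to bracket any path that touches the host
 * registers, so the gate work cannot switch the clocks off underneath it.
 * With async == false the hold blocks until the clocks are back on.
 */
static u32 __maybe_unused example_read_hcs(struct ufs_hba *hba)
{
	u32 hcs;

	ufshcd_hold(hba, false);
	hcs = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
	ufshcd_release(hba);	/* re-arms the clkgate_delay_ms idle timer */

	return hcs;
}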
  621. static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
  622. struct device_attribute *attr, char *buf)
  623. {
  624. struct ufs_hba *hba = dev_get_drvdata(dev);
  625. return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
  626. }
  627. static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
  628. struct device_attribute *attr, const char *buf, size_t count)
  629. {
  630. struct ufs_hba *hba = dev_get_drvdata(dev);
  631. unsigned long flags, value;
  632. if (kstrtoul(buf, 0, &value))
  633. return -EINVAL;
  634. spin_lock_irqsave(hba->host->host_lock, flags);
  635. hba->clk_gating.delay_ms = value;
  636. spin_unlock_irqrestore(hba->host->host_lock, flags);
  637. return count;
  638. }
  639. static void ufshcd_init_clk_gating(struct ufs_hba *hba)
  640. {
  641. if (!ufshcd_is_clkgating_allowed(hba))
  642. return;
  643. hba->clk_gating.delay_ms = 150;
  644. INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
  645. INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
  646. hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
  647. hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
  648. sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
  649. hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
  650. hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
  651. if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
  652. dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
  653. }
  654. static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
  655. {
  656. if (!ufshcd_is_clkgating_allowed(hba))
  657. return;
  658. device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
  659. }
  660. /* Must be called with host lock acquired */
  661. static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
  662. {
  663. if (!ufshcd_is_clkscaling_enabled(hba))
  664. return;
  665. if (!hba->clk_scaling.is_busy_started) {
  666. hba->clk_scaling.busy_start_t = ktime_get();
  667. hba->clk_scaling.is_busy_started = true;
  668. }
  669. }
  670. static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
  671. {
  672. struct ufs_clk_scaling *scaling = &hba->clk_scaling;
  673. if (!ufshcd_is_clkscaling_enabled(hba))
  674. return;
  675. if (!hba->outstanding_reqs && scaling->is_busy_started) {
  676. scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
  677. scaling->busy_start_t));
  678. scaling->busy_start_t = ktime_set(0, 0);
  679. scaling->is_busy_started = false;
  680. }
  681. }
  682. /**
  683. * ufshcd_send_command - Send SCSI or device management commands
  684. * @hba: per adapter instance
  685. * @task_tag: Task tag of the command
  686. */
  687. static inline
  688. void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
  689. {
  690. ufshcd_clk_scaling_start_busy(hba);
  691. __set_bit(task_tag, &hba->outstanding_reqs);
  692. ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
  693. }
  694. /**
  695. * ufshcd_copy_sense_data - Copy sense data in case of check condition
  696. * @lrb - pointer to local reference block
  697. */
  698. static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp)
  699. {
  700. int len;
  701. if (lrbp->sense_buffer &&
  702. ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) {
  703. len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len);
  704. memcpy(lrbp->sense_buffer,
  705. lrbp->ucd_rsp_ptr->sr.sense_data,
  706. min_t(int, len, SCSI_SENSE_BUFFERSIZE));
  707. }
  708. }
  709. /**
  710. * ufshcd_copy_query_response() - Copy the Query Response and the data
  711. * descriptor
  712. * @hba: per adapter instance
  713. * @lrb - pointer to local reference block
  714. */
  715. static
  716. int ufshcd_copy_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
  717. {
  718. struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
  719. memcpy(&query_res->upiu_res, &lrbp->ucd_rsp_ptr->qr, QUERY_OSF_SIZE);
  720. /* Get the descriptor */
  721. if (lrbp->ucd_rsp_ptr->qr.opcode == UPIU_QUERY_OPCODE_READ_DESC) {
  722. u8 *descp = (u8 *)lrbp->ucd_rsp_ptr +
  723. GENERAL_UPIU_REQUEST_SIZE;
  724. u16 resp_len;
  725. u16 buf_len;
  726. /* data segment length */
  727. resp_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) &
  728. MASK_QUERY_DATA_SEG_LEN;
  729. buf_len = be16_to_cpu(
  730. hba->dev_cmd.query.request.upiu_req.length);
  731. if (likely(buf_len >= resp_len)) {
  732. memcpy(hba->dev_cmd.query.descriptor, descp, resp_len);
  733. } else {
  734. dev_warn(hba->dev,
  735. "%s: Response size is bigger than buffer",
  736. __func__);
  737. return -EINVAL;
  738. }
  739. }
  740. return 0;
  741. }
  742. /**
  743. * ufshcd_hba_capabilities - Read controller capabilities
  744. * @hba: per adapter instance
  745. */
  746. static inline void ufshcd_hba_capabilities(struct ufs_hba *hba)
  747. {
  748. hba->capabilities = ufshcd_readl(hba, REG_CONTROLLER_CAPABILITIES);
  749. /* nutrs and nutmrs are 0 based values */
  750. hba->nutrs = (hba->capabilities & MASK_TRANSFER_REQUESTS_SLOTS) + 1;
  751. hba->nutmrs =
  752. ((hba->capabilities & MASK_TASK_MANAGEMENT_REQUEST_SLOTS) >> 16) + 1;
  753. }
  754. /**
  755. * ufshcd_ready_for_uic_cmd - Check if controller is ready
  756. * to accept UIC commands
  757. * @hba: per adapter instance
  758. * Return true on success, else false
  759. */
  760. static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba)
  761. {
  762. if (ufshcd_readl(hba, REG_CONTROLLER_STATUS) & UIC_COMMAND_READY)
  763. return true;
  764. else
  765. return false;
  766. }
  767. /**
  768. * ufshcd_get_upmcrs - Get the power mode change request status
  769. * @hba: Pointer to adapter instance
  770. *
  771. * This function gets the UPMCRS field of HCS register
  772. * Returns value of UPMCRS field
  773. */
  774. static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba)
  775. {
  776. return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7;
  777. }
  778. /**
  779. * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers
  780. * @hba: per adapter instance
  781. * @uic_cmd: UIC command
  782. *
  783. * Mutex must be held.
  784. */
  785. static inline void
  786. ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
  787. {
  788. WARN_ON(hba->active_uic_cmd);
  789. hba->active_uic_cmd = uic_cmd;
  790. /* Write Args */
  791. ufshcd_writel(hba, uic_cmd->argument1, REG_UIC_COMMAND_ARG_1);
  792. ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
  793. ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
  794. /* Write UIC Cmd */
  795. ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
  796. REG_UIC_COMMAND);
  797. }
  798. /**
  799. * ufshcd_wait_for_uic_cmd - Wait for completion of a UIC command
  800. * @hba: per adapter instance
  801. * @uic_command: UIC command
  802. *
  803. * Must be called with mutex held.
  804. * Returns 0 only if success.
  805. */
  806. static int
  807. ufshcd_wait_for_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
  808. {
  809. int ret;
  810. unsigned long flags;
  811. if (wait_for_completion_timeout(&uic_cmd->done,
  812. msecs_to_jiffies(UIC_CMD_TIMEOUT)))
  813. ret = uic_cmd->argument2 & MASK_UIC_COMMAND_RESULT;
  814. else
  815. ret = -ETIMEDOUT;
  816. spin_lock_irqsave(hba->host->host_lock, flags);
  817. hba->active_uic_cmd = NULL;
  818. spin_unlock_irqrestore(hba->host->host_lock, flags);
  819. return ret;
  820. }
  821. /**
  822. * __ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
  823. * @hba: per adapter instance
  824. * @uic_cmd: UIC command
  825. *
  826. * Identical to ufshcd_send_uic_cmd() except that it does not take the
  827. * mutex itself. Must be called with the mutex held and host_lock locked.
  828. * Returns 0 only if success.
  829. */
  830. static int
  831. __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
  832. {
  833. if (!ufshcd_ready_for_uic_cmd(hba)) {
  834. dev_err(hba->dev,
  835. "Controller not ready to accept UIC commands\n");
  836. return -EIO;
  837. }
  838. init_completion(&uic_cmd->done);
  839. ufshcd_dispatch_uic_cmd(hba, uic_cmd);
  840. return 0;
  841. }
  842. /**
  843. * ufshcd_send_uic_cmd - Send UIC commands and retrieve the result
  844. * @hba: per adapter instance
  845. * @uic_cmd: UIC command
  846. *
  847. * Returns 0 only if success.
  848. */
  849. static int
  850. ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
  851. {
  852. int ret;
  853. unsigned long flags;
  854. ufshcd_hold(hba, false);
  855. mutex_lock(&hba->uic_cmd_mutex);
  856. spin_lock_irqsave(hba->host->host_lock, flags);
  857. ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
  858. spin_unlock_irqrestore(hba->host->host_lock, flags);
  859. if (!ret)
  860. ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
  861. mutex_unlock(&hba->uic_cmd_mutex);
  862. ufshcd_release(hba);
  863. return ret;
  864. }
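/*
 * Illustrative sketch (assumption, not part of the driver): issuing a raw
 * DME_GET through ufshcd_send_uic_cmd(). UIC_CMD_DME_GET comes from the
 * UFSHCI header; the UIC completion handler (further down in this file)
 * copies the returned attribute value into argument3 before the command is
 * completed, which is what this example relies on.
 */
static int __maybe_unused example_dme_get(struct ufs_hba *hba, u32 attr_sel,
					  u32 *mib_val)
{
	struct uic_command uic_cmd = {0};
	int ret;

	uic_cmd.command = UIC_CMD_DME_GET;
	uic_cmd.argument1 = attr_sel;

	ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
	if (!ret && mib_val)
		*mib_val = uic_cmd.argument3;

	return ret;
}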
  865. /**
  866. * ufshcd_map_sg - Map scatter-gather list to prdt
  867. * @lrbp - pointer to local reference block
  868. *
  869. * Returns 0 in case of success, non-zero value in case of failure
  870. */
  871. static int ufshcd_map_sg(struct ufshcd_lrb *lrbp)
  872. {
  873. struct ufshcd_sg_entry *prd_table;
  874. struct scatterlist *sg;
  875. struct scsi_cmnd *cmd;
  876. int sg_segments;
  877. int i;
  878. cmd = lrbp->cmd;
  879. sg_segments = scsi_dma_map(cmd);
  880. if (sg_segments < 0)
  881. return sg_segments;
  882. if (sg_segments) {
  883. lrbp->utr_descriptor_ptr->prd_table_length =
  884. cpu_to_le16((u16) (sg_segments));
  885. prd_table = (struct ufshcd_sg_entry *)lrbp->ucd_prdt_ptr;
  886. scsi_for_each_sg(cmd, sg, sg_segments, i) {
  887. prd_table[i].size =
  888. cpu_to_le32(((u32) sg_dma_len(sg))-1);
  889. prd_table[i].base_addr =
  890. cpu_to_le32(lower_32_bits(sg->dma_address));
  891. prd_table[i].upper_addr =
  892. cpu_to_le32(upper_32_bits(sg->dma_address));
  893. }
  894. } else {
  895. lrbp->utr_descriptor_ptr->prd_table_length = 0;
  896. }
  897. return 0;
  898. }
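/*
 * Note on the helper above: the PRDT "size" field is the segment length
 * minus one, so a 4 KiB scatter-gather segment (sg_dma_len == 4096) is
 * stored as 0xFFF.
 */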
  899. /**
  900. * ufshcd_enable_intr - enable interrupts
  901. * @hba: per adapter instance
  902. * @intrs: interrupt bits
  903. */
  904. static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
  905. {
  906. u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
  907. if (hba->ufs_version == UFSHCI_VERSION_10) {
  908. u32 rw;
  909. rw = set & INTERRUPT_MASK_RW_VER_10;
  910. set = rw | ((set ^ intrs) & intrs);
  911. } else {
  912. set |= intrs;
  913. }
  914. ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
  915. }
  916. /**
  917. * ufshcd_disable_intr - disable interrupts
  918. * @hba: per adapter instance
  919. * @intrs: interrupt bits
  920. */
  921. static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
  922. {
  923. u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
  924. if (hba->ufs_version == UFSHCI_VERSION_10) {
  925. u32 rw;
  926. rw = (set & INTERRUPT_MASK_RW_VER_10) &
  927. ~(intrs & INTERRUPT_MASK_RW_VER_10);
  928. set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
  929. } else {
  930. set &= ~intrs;
  931. }
  932. ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
  933. }
  934. /**
  935. * ufshcd_prepare_req_desc_hdr() - Fill the request descriptor header
  936. * according to the request
  937. * @lrbp: pointer to local reference block
  938. * @upiu_flags: flags required in the header
  939. * @cmd_dir: requests data direction
  940. */
  941. static void ufshcd_prepare_req_desc_hdr(struct ufshcd_lrb *lrbp,
  942. u32 *upiu_flags, enum dma_data_direction cmd_dir)
  943. {
  944. struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
  945. u32 data_direction;
  946. u32 dword_0;
  947. if (cmd_dir == DMA_FROM_DEVICE) {
  948. data_direction = UTP_DEVICE_TO_HOST;
  949. *upiu_flags = UPIU_CMD_FLAGS_READ;
  950. } else if (cmd_dir == DMA_TO_DEVICE) {
  951. data_direction = UTP_HOST_TO_DEVICE;
  952. *upiu_flags = UPIU_CMD_FLAGS_WRITE;
  953. } else {
  954. data_direction = UTP_NO_DATA_TRANSFER;
  955. *upiu_flags = UPIU_CMD_FLAGS_NONE;
  956. }
  957. dword_0 = data_direction | (lrbp->command_type
  958. << UPIU_COMMAND_TYPE_OFFSET);
  959. if (lrbp->intr_cmd)
  960. dword_0 |= UTP_REQ_DESC_INT_CMD;
  961. /* Transfer request descriptor header fields */
  962. req_desc->header.dword_0 = cpu_to_le32(dword_0);
  963. /*
  964. * assigning invalid value for command status. Controller
  965. * updates OCS on command completion, with the command
  966. * status
  967. */
  968. req_desc->header.dword_2 =
  969. cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
  970. }
  971. /**
  972. * ufshcd_prepare_utp_scsi_cmd_upiu() - fills the utp_transfer_req_desc,
  973. * for scsi commands
974. * @lrbp: local reference block pointer
975. * @upiu_flags: flags
  976. */
  977. static
  978. void ufshcd_prepare_utp_scsi_cmd_upiu(struct ufshcd_lrb *lrbp, u32 upiu_flags)
  979. {
  980. struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
  981. /* command descriptor fields */
  982. ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
  983. UPIU_TRANSACTION_COMMAND, upiu_flags,
  984. lrbp->lun, lrbp->task_tag);
  985. ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
  986. UPIU_COMMAND_SET_TYPE_SCSI, 0, 0, 0);
  987. /* Total EHS length and Data segment length will be zero */
  988. ucd_req_ptr->header.dword_2 = 0;
  989. ucd_req_ptr->sc.exp_data_transfer_len =
  990. cpu_to_be32(lrbp->cmd->sdb.length);
  991. memcpy(ucd_req_ptr->sc.cdb, lrbp->cmd->cmnd,
  992. (min_t(unsigned short, lrbp->cmd->cmd_len, MAX_CDB_SIZE)));
  993. }
  994. /**
  995. * ufshcd_prepare_utp_query_req_upiu() - fills the utp_transfer_req_desc,
996. * for query requests
  997. * @hba: UFS hba
  998. * @lrbp: local reference block pointer
  999. * @upiu_flags: flags
  1000. */
  1001. static void ufshcd_prepare_utp_query_req_upiu(struct ufs_hba *hba,
  1002. struct ufshcd_lrb *lrbp, u32 upiu_flags)
  1003. {
  1004. struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
  1005. struct ufs_query *query = &hba->dev_cmd.query;
  1006. u16 len = be16_to_cpu(query->request.upiu_req.length);
  1007. u8 *descp = (u8 *)lrbp->ucd_req_ptr + GENERAL_UPIU_REQUEST_SIZE;
  1008. /* Query request header */
  1009. ucd_req_ptr->header.dword_0 = UPIU_HEADER_DWORD(
  1010. UPIU_TRANSACTION_QUERY_REQ, upiu_flags,
  1011. lrbp->lun, lrbp->task_tag);
  1012. ucd_req_ptr->header.dword_1 = UPIU_HEADER_DWORD(
  1013. 0, query->request.query_func, 0, 0);
  1014. /* Data segment length */
  1015. ucd_req_ptr->header.dword_2 = UPIU_HEADER_DWORD(
  1016. 0, 0, len >> 8, (u8)len);
  1017. /* Copy the Query Request buffer as is */
  1018. memcpy(&ucd_req_ptr->qr, &query->request.upiu_req,
  1019. QUERY_OSF_SIZE);
  1020. /* Copy the Descriptor */
  1021. if (query->request.upiu_req.opcode == UPIU_QUERY_OPCODE_WRITE_DESC)
  1022. memcpy(descp, query->descriptor, len);
  1023. }
  1024. static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
  1025. {
  1026. struct utp_upiu_req *ucd_req_ptr = lrbp->ucd_req_ptr;
  1027. memset(ucd_req_ptr, 0, sizeof(struct utp_upiu_req));
  1028. /* command descriptor fields */
  1029. ucd_req_ptr->header.dword_0 =
  1030. UPIU_HEADER_DWORD(
  1031. UPIU_TRANSACTION_NOP_OUT, 0, 0, lrbp->task_tag);
  1032. }
  1033. /**
  1034. * ufshcd_compose_upiu - form UFS Protocol Information Unit(UPIU)
1035. * @hba: per adapter instance
1036. * @lrbp: pointer to local reference block
  1037. */
  1038. static int ufshcd_compose_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
  1039. {
  1040. u32 upiu_flags;
  1041. int ret = 0;
  1042. switch (lrbp->command_type) {
  1043. case UTP_CMD_TYPE_SCSI:
  1044. if (likely(lrbp->cmd)) {
  1045. ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags,
  1046. lrbp->cmd->sc_data_direction);
  1047. ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
  1048. } else {
  1049. ret = -EINVAL;
  1050. }
  1051. break;
  1052. case UTP_CMD_TYPE_DEV_MANAGE:
  1053. ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, DMA_NONE);
  1054. if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
  1055. ufshcd_prepare_utp_query_req_upiu(
  1056. hba, lrbp, upiu_flags);
  1057. else if (hba->dev_cmd.type == DEV_CMD_TYPE_NOP)
  1058. ufshcd_prepare_utp_nop_upiu(lrbp);
  1059. else
  1060. ret = -EINVAL;
  1061. break;
  1062. case UTP_CMD_TYPE_UFS:
  1063. /* For UFS native command implementation */
  1064. ret = -ENOTSUPP;
  1065. dev_err(hba->dev, "%s: UFS native command are not supported\n",
  1066. __func__);
  1067. break;
  1068. default:
  1069. ret = -ENOTSUPP;
  1070. dev_err(hba->dev, "%s: unknown command type: 0x%x\n",
  1071. __func__, lrbp->command_type);
  1072. break;
  1073. } /* end of switch */
  1074. return ret;
  1075. }
  1076. /*
  1077. * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
  1078. * @scsi_lun: scsi LUN id
  1079. *
  1080. * Returns UPIU LUN id
  1081. */
  1082. static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
  1083. {
  1084. if (scsi_is_wlun(scsi_lun))
  1085. return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
  1086. | UFS_UPIU_WLUN_ID;
  1087. else
  1088. return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
  1089. }
  1090. /**
  1091. * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
1092. * @upiu_wlun_id: UPIU W-LUN id
  1093. *
  1094. * Returns SCSI W-LUN id
  1095. */
  1096. static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
  1097. {
  1098. return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
  1099. }
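/*
 * Worked example for the two LUN mappings above, assuming the usual constant
 * values UFS_UPIU_WLUN_ID == 0x80, UFS_UPIU_MAX_UNIT_NUM_ID == 0x7f and
 * SCSI_W_LUN_BASE == 0xc100 (illustrative only):
 *
 *	ufshcd_scsi_to_upiu_lun(0xc101)     = (0x01 | 0x80)   = 0x81
 *	ufshcd_scsi_to_upiu_lun(0x0003)     = 0x03
 *	ufshcd_upiu_wlun_to_scsi_wlun(0x81) = (0x01 | 0xc100) = 0xc101
 */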
  1100. /**
  1101. * ufshcd_queuecommand - main entry point for SCSI requests
1102. * @host: SCSI host pointer
1103. * @cmd: command from SCSI Midlayer
  1104. *
  1105. * Returns 0 for success, non-zero in case of failure
  1106. */
  1107. static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
  1108. {
  1109. struct ufshcd_lrb *lrbp;
  1110. struct ufs_hba *hba;
  1111. unsigned long flags;
  1112. int tag;
  1113. int err = 0;
  1114. hba = shost_priv(host);
  1115. tag = cmd->request->tag;
  1116. spin_lock_irqsave(hba->host->host_lock, flags);
  1117. switch (hba->ufshcd_state) {
  1118. case UFSHCD_STATE_OPERATIONAL:
  1119. break;
  1120. case UFSHCD_STATE_RESET:
  1121. err = SCSI_MLQUEUE_HOST_BUSY;
  1122. goto out_unlock;
  1123. case UFSHCD_STATE_ERROR:
  1124. set_host_byte(cmd, DID_ERROR);
  1125. cmd->scsi_done(cmd);
  1126. goto out_unlock;
  1127. default:
  1128. dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
  1129. __func__, hba->ufshcd_state);
  1130. set_host_byte(cmd, DID_BAD_TARGET);
  1131. cmd->scsi_done(cmd);
  1132. goto out_unlock;
  1133. }
  1134. spin_unlock_irqrestore(hba->host->host_lock, flags);
  1135. /* acquire the tag to make sure device cmds don't use it */
  1136. if (test_and_set_bit_lock(tag, &hba->lrb_in_use)) {
  1137. /*
  1138. * Dev manage command in progress, requeue the command.
  1139. * Requeuing the command helps in cases where the request *may*
1140. * find a different tag instead of waiting for dev manage command
  1141. * completion.
  1142. */
  1143. err = SCSI_MLQUEUE_HOST_BUSY;
  1144. goto out;
  1145. }
  1146. err = ufshcd_hold(hba, true);
  1147. if (err) {
  1148. err = SCSI_MLQUEUE_HOST_BUSY;
  1149. clear_bit_unlock(tag, &hba->lrb_in_use);
  1150. goto out;
  1151. }
  1152. WARN_ON(hba->clk_gating.state != CLKS_ON);
  1153. lrbp = &hba->lrb[tag];
  1154. WARN_ON(lrbp->cmd);
  1155. lrbp->cmd = cmd;
  1156. lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
  1157. lrbp->sense_buffer = cmd->sense_buffer;
  1158. lrbp->task_tag = tag;
  1159. lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
  1160. lrbp->intr_cmd = false;
  1161. lrbp->command_type = UTP_CMD_TYPE_SCSI;
  1162. /* form UPIU before issuing the command */
  1163. ufshcd_compose_upiu(hba, lrbp);
  1164. err = ufshcd_map_sg(lrbp);
  1165. if (err) {
  1166. lrbp->cmd = NULL;
  1167. clear_bit_unlock(tag, &hba->lrb_in_use);
  1168. goto out;
  1169. }
  1170. /* issue command to the controller */
  1171. spin_lock_irqsave(hba->host->host_lock, flags);
  1172. ufshcd_send_command(hba, tag);
  1173. out_unlock:
  1174. spin_unlock_irqrestore(hba->host->host_lock, flags);
  1175. out:
  1176. return err;
  1177. }
  1178. static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
  1179. struct ufshcd_lrb *lrbp, enum dev_cmd_type cmd_type, int tag)
  1180. {
  1181. lrbp->cmd = NULL;
  1182. lrbp->sense_bufflen = 0;
  1183. lrbp->sense_buffer = NULL;
  1184. lrbp->task_tag = tag;
  1185. lrbp->lun = 0; /* device management cmd is not specific to any LUN */
  1186. lrbp->command_type = UTP_CMD_TYPE_DEV_MANAGE;
  1187. lrbp->intr_cmd = true; /* No interrupt aggregation */
  1188. hba->dev_cmd.type = cmd_type;
  1189. return ufshcd_compose_upiu(hba, lrbp);
  1190. }
  1191. static int
  1192. ufshcd_clear_cmd(struct ufs_hba *hba, int tag)
  1193. {
  1194. int err = 0;
  1195. unsigned long flags;
  1196. u32 mask = 1 << tag;
  1197. /* clear outstanding transaction before retry */
  1198. spin_lock_irqsave(hba->host->host_lock, flags);
  1199. ufshcd_utrl_clear(hba, tag);
  1200. spin_unlock_irqrestore(hba->host->host_lock, flags);
  1201. /*
1202. * wait for h/w to clear the corresponding bit in the doorbell.
  1203. * max. wait is 1 sec.
  1204. */
  1205. err = ufshcd_wait_for_register(hba,
  1206. REG_UTP_TRANSFER_REQ_DOOR_BELL,
  1207. mask, ~mask, 1000, 1000);
  1208. return err;
  1209. }
  1210. static int
  1211. ufshcd_check_query_response(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
  1212. {
  1213. struct ufs_query_res *query_res = &hba->dev_cmd.query.response;
  1214. /* Get the UPIU response */
  1215. query_res->response = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr) >>
  1216. UPIU_RSP_CODE_OFFSET;
  1217. return query_res->response;
  1218. }
  1219. /**
  1220. * ufshcd_dev_cmd_completion() - handles device management command responses
  1221. * @hba: per adapter instance
  1222. * @lrbp: pointer to local reference block
  1223. */
  1224. static int
  1225. ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
  1226. {
  1227. int resp;
  1228. int err = 0;
  1229. resp = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
  1230. switch (resp) {
  1231. case UPIU_TRANSACTION_NOP_IN:
  1232. if (hba->dev_cmd.type != DEV_CMD_TYPE_NOP) {
  1233. err = -EINVAL;
  1234. dev_err(hba->dev, "%s: unexpected response %x\n",
  1235. __func__, resp);
  1236. }
  1237. break;
  1238. case UPIU_TRANSACTION_QUERY_RSP:
  1239. err = ufshcd_check_query_response(hba, lrbp);
  1240. if (!err)
  1241. err = ufshcd_copy_query_response(hba, lrbp);
  1242. break;
  1243. case UPIU_TRANSACTION_REJECT_UPIU:
  1244. /* TODO: handle Reject UPIU Response */
  1245. err = -EPERM;
  1246. dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n",
  1247. __func__);
  1248. break;
  1249. default:
  1250. err = -EINVAL;
  1251. dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n",
  1252. __func__, resp);
  1253. break;
  1254. }
  1255. return err;
  1256. }
  1257. static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba,
  1258. struct ufshcd_lrb *lrbp, int max_timeout)
  1259. {
  1260. int err = 0;
  1261. unsigned long time_left;
  1262. unsigned long flags;
  1263. time_left = wait_for_completion_timeout(hba->dev_cmd.complete,
  1264. msecs_to_jiffies(max_timeout));
  1265. spin_lock_irqsave(hba->host->host_lock, flags);
  1266. hba->dev_cmd.complete = NULL;
  1267. if (likely(time_left)) {
  1268. err = ufshcd_get_tr_ocs(lrbp);
  1269. if (!err)
  1270. err = ufshcd_dev_cmd_completion(hba, lrbp);
  1271. }
  1272. spin_unlock_irqrestore(hba->host->host_lock, flags);
  1273. if (!time_left) {
  1274. err = -ETIMEDOUT;
  1275. if (!ufshcd_clear_cmd(hba, lrbp->task_tag))
1276. /* successfully cleared the command, retry if needed */
  1277. err = -EAGAIN;
  1278. }
  1279. return err;
  1280. }
  1281. /**
  1282. * ufshcd_get_dev_cmd_tag - Get device management command tag
  1283. * @hba: per-adapter instance
1284. * @tag_out: pointer to variable with available slot value
  1285. *
  1286. * Get a free slot and lock it until device management command
  1287. * completes.
  1288. *
1289. * Returns false if a free slot is unavailable for locking, else
1290. * returns true with the tag value in @tag_out.
  1291. */
  1292. static bool ufshcd_get_dev_cmd_tag(struct ufs_hba *hba, int *tag_out)
  1293. {
  1294. int tag;
  1295. bool ret = false;
  1296. unsigned long tmp;
  1297. if (!tag_out)
  1298. goto out;
  1299. do {
  1300. tmp = ~hba->lrb_in_use;
  1301. tag = find_last_bit(&tmp, hba->nutrs);
  1302. if (tag >= hba->nutrs)
  1303. goto out;
  1304. } while (test_and_set_bit_lock(tag, &hba->lrb_in_use));
  1305. *tag_out = tag;
  1306. ret = true;
  1307. out:
  1308. return ret;
  1309. }
  1310. static inline void ufshcd_put_dev_cmd_tag(struct ufs_hba *hba, int tag)
  1311. {
  1312. clear_bit_unlock(tag, &hba->lrb_in_use);
  1313. }
  1314. /**
  1315. * ufshcd_exec_dev_cmd - API for sending device management requests
1316. * @hba: UFS hba
1317. * @cmd_type: specifies the type (NOP, Query...)
1318. * @timeout: timeout in milliseconds
  1319. *
  1320. * NOTE: Since there is only one available tag for device management commands,
  1321. * it is expected you hold the hba->dev_cmd.lock mutex.
  1322. */
  1323. static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
  1324. enum dev_cmd_type cmd_type, int timeout)
  1325. {
  1326. struct ufshcd_lrb *lrbp;
  1327. int err;
  1328. int tag;
  1329. struct completion wait;
  1330. unsigned long flags;
  1331. /*
  1332. * Get free slot, sleep if slots are unavailable.
  1333. * Even though we use wait_event() which sleeps indefinitely,
  1334. * the maximum wait time is bounded by SCSI request timeout.
  1335. */
  1336. wait_event(hba->dev_cmd.tag_wq, ufshcd_get_dev_cmd_tag(hba, &tag));
  1337. init_completion(&wait);
  1338. lrbp = &hba->lrb[tag];
  1339. WARN_ON(lrbp->cmd);
  1340. err = ufshcd_compose_dev_cmd(hba, lrbp, cmd_type, tag);
  1341. if (unlikely(err))
  1342. goto out_put_tag;
  1343. hba->dev_cmd.complete = &wait;
  1344. spin_lock_irqsave(hba->host->host_lock, flags);
  1345. ufshcd_send_command(hba, tag);
  1346. spin_unlock_irqrestore(hba->host->host_lock, flags);
  1347. err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
  1348. out_put_tag:
  1349. ufshcd_put_dev_cmd_tag(hba, tag);
  1350. wake_up(&hba->dev_cmd.tag_wq);
  1351. return err;
  1352. }
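/*
 * Minimal usage sketch for ufshcd_exec_dev_cmd() (illustrative; this mirrors
 * the pattern used by ufshcd_verify_dev_init() further below). The caller is
 * responsible for holding hba->dev_cmd.lock and for the clock hold/release:
 *
 *	ufshcd_hold(hba, false);
 *	mutex_lock(&hba->dev_cmd.lock);
 *	err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP, NOP_OUT_TIMEOUT);
 *	mutex_unlock(&hba->dev_cmd.lock);
 *	ufshcd_release(hba);
 */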
  1353. /**
  1354. * ufshcd_init_query() - init the query response and request parameters
  1355. * @hba: per-adapter instance
  1356. * @request: address of the request pointer to be initialized
  1357. * @response: address of the response pointer to be initialized
  1358. * @opcode: operation to perform
  1359. * @idn: flag idn to access
  1360. * @index: LU number to access
  1361. * @selector: query/flag/descriptor further identification
  1362. */
  1363. static inline void ufshcd_init_query(struct ufs_hba *hba,
  1364. struct ufs_query_req **request, struct ufs_query_res **response,
  1365. enum query_opcode opcode, u8 idn, u8 index, u8 selector)
  1366. {
  1367. *request = &hba->dev_cmd.query.request;
  1368. *response = &hba->dev_cmd.query.response;
  1369. memset(*request, 0, sizeof(struct ufs_query_req));
  1370. memset(*response, 0, sizeof(struct ufs_query_res));
  1371. (*request)->upiu_req.opcode = opcode;
  1372. (*request)->upiu_req.idn = idn;
  1373. (*request)->upiu_req.index = index;
  1374. (*request)->upiu_req.selector = selector;
  1375. }
  1376. /**
  1377. * ufshcd_query_flag() - API function for sending flag query requests
1378. * @hba: per-adapter instance
1379. * @opcode: flag query to perform
1380. * @idn: flag idn to access
1381. * @flag_res: the flag value after the query request completes
  1382. *
  1383. * Returns 0 for success, non-zero in case of failure
  1384. */
  1385. static int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
  1386. enum flag_idn idn, bool *flag_res)
  1387. {
  1388. struct ufs_query_req *request = NULL;
  1389. struct ufs_query_res *response = NULL;
  1390. int err, index = 0, selector = 0;
  1391. BUG_ON(!hba);
  1392. ufshcd_hold(hba, false);
  1393. mutex_lock(&hba->dev_cmd.lock);
  1394. ufshcd_init_query(hba, &request, &response, opcode, idn, index,
  1395. selector);
  1396. switch (opcode) {
  1397. case UPIU_QUERY_OPCODE_SET_FLAG:
  1398. case UPIU_QUERY_OPCODE_CLEAR_FLAG:
  1399. case UPIU_QUERY_OPCODE_TOGGLE_FLAG:
  1400. request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
  1401. break;
  1402. case UPIU_QUERY_OPCODE_READ_FLAG:
  1403. request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
  1404. if (!flag_res) {
  1405. /* No dummy reads */
  1406. dev_err(hba->dev, "%s: Invalid argument for read request\n",
  1407. __func__);
  1408. err = -EINVAL;
  1409. goto out_unlock;
  1410. }
  1411. break;
  1412. default:
  1413. dev_err(hba->dev,
  1414. "%s: Expected query flag opcode but got = %d\n",
  1415. __func__, opcode);
  1416. err = -EINVAL;
  1417. goto out_unlock;
  1418. }
  1419. err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
  1420. if (err) {
  1421. dev_err(hba->dev,
  1422. "%s: Sending flag query for idn %d failed, err = %d\n",
  1423. __func__, idn, err);
  1424. goto out_unlock;
  1425. }
  1426. if (flag_res)
  1427. *flag_res = (be32_to_cpu(response->upiu_res.value) &
  1428. MASK_QUERY_UPIU_FLAG_LOC) & 0x1;
  1429. out_unlock:
  1430. mutex_unlock(&hba->dev_cmd.lock);
  1431. ufshcd_release(hba);
  1432. return err;
  1433. }
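/*
 * Usage sketch for ufshcd_query_flag() (illustrative; matches the fDeviceInit
 * handling in ufshcd_complete_dev_init() below). Setting a flag needs no
 * read-back buffer, reading one requires a valid flag_res pointer:
 *
 *	bool flag_res;
 *
 *	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
 *				QUERY_FLAG_IDN_FDEVICEINIT, NULL);
 *	err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
 *				QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
 */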
  1434. /**
  1435. * ufshcd_query_attr - API function for sending attribute requests
1436. * @hba: per-adapter instance
1437. * @opcode: attribute opcode
1438. * @idn: attribute idn to access
1439. * @index: index field
1440. * @selector: selector field
1441. * @attr_val: the attribute value after the query request completes
  1442. *
  1443. * Returns 0 for success, non-zero in case of failure
  1444. */
  1445. static int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
  1446. enum attr_idn idn, u8 index, u8 selector, u32 *attr_val)
  1447. {
  1448. struct ufs_query_req *request = NULL;
  1449. struct ufs_query_res *response = NULL;
  1450. int err;
  1451. BUG_ON(!hba);
  1452. ufshcd_hold(hba, false);
  1453. if (!attr_val) {
  1454. dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
  1455. __func__, opcode);
  1456. err = -EINVAL;
  1457. goto out;
  1458. }
  1459. mutex_lock(&hba->dev_cmd.lock);
  1460. ufshcd_init_query(hba, &request, &response, opcode, idn, index,
  1461. selector);
  1462. switch (opcode) {
  1463. case UPIU_QUERY_OPCODE_WRITE_ATTR:
  1464. request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
  1465. request->upiu_req.value = cpu_to_be32(*attr_val);
  1466. break;
  1467. case UPIU_QUERY_OPCODE_READ_ATTR:
  1468. request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
  1469. break;
  1470. default:
  1471. dev_err(hba->dev, "%s: Expected query attr opcode but got = 0x%.2x\n",
  1472. __func__, opcode);
  1473. err = -EINVAL;
  1474. goto out_unlock;
  1475. }
  1476. err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
  1477. if (err) {
  1478. dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
  1479. __func__, opcode, idn, err);
  1480. goto out_unlock;
  1481. }
  1482. *attr_val = be32_to_cpu(response->upiu_res.value);
  1483. out_unlock:
  1484. mutex_unlock(&hba->dev_cmd.lock);
  1485. out:
  1486. ufshcd_release(hba);
  1487. return err;
  1488. }
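/*
 * Usage sketch for ufshcd_query_attr() (illustrative only; the attribute idn
 * QUERY_ATTR_IDN_BKOPS_STATUS is assumed to be defined in ufs.h):
 *
 *	u32 bkops_status;
 *
 *	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
 *			QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, &bkops_status);
 */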
  1489. /**
  1490. * ufshcd_query_descriptor - API function for sending descriptor requests
1491. * @hba: per-adapter instance
1492. * @opcode: descriptor opcode to perform
1493. * @idn: descriptor idn to access
1494. * @index: index field
1495. * @selector: selector field
1496. * @desc_buf: the buffer that contains the descriptor
1497. * @buf_len: length parameter passed to the device
  1498. *
  1499. * Returns 0 for success, non-zero in case of failure.
  1500. * The buf_len parameter will contain, on return, the length parameter
  1501. * received on the response.
  1502. */
  1503. static int ufshcd_query_descriptor(struct ufs_hba *hba,
  1504. enum query_opcode opcode, enum desc_idn idn, u8 index,
  1505. u8 selector, u8 *desc_buf, int *buf_len)
  1506. {
  1507. struct ufs_query_req *request = NULL;
  1508. struct ufs_query_res *response = NULL;
  1509. int err;
  1510. BUG_ON(!hba);
  1511. ufshcd_hold(hba, false);
  1512. if (!desc_buf) {
  1513. dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
  1514. __func__, opcode);
  1515. err = -EINVAL;
  1516. goto out;
  1517. }
  1518. if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
  1519. dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
  1520. __func__, *buf_len);
  1521. err = -EINVAL;
  1522. goto out;
  1523. }
  1524. mutex_lock(&hba->dev_cmd.lock);
  1525. ufshcd_init_query(hba, &request, &response, opcode, idn, index,
  1526. selector);
  1527. hba->dev_cmd.query.descriptor = desc_buf;
  1528. request->upiu_req.length = cpu_to_be16(*buf_len);
  1529. switch (opcode) {
  1530. case UPIU_QUERY_OPCODE_WRITE_DESC:
  1531. request->query_func = UPIU_QUERY_FUNC_STANDARD_WRITE_REQUEST;
  1532. break;
  1533. case UPIU_QUERY_OPCODE_READ_DESC:
  1534. request->query_func = UPIU_QUERY_FUNC_STANDARD_READ_REQUEST;
  1535. break;
  1536. default:
  1537. dev_err(hba->dev,
  1538. "%s: Expected query descriptor opcode but got = 0x%.2x\n",
  1539. __func__, opcode);
  1540. err = -EINVAL;
  1541. goto out_unlock;
  1542. }
  1543. err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_QUERY, QUERY_REQ_TIMEOUT);
  1544. if (err) {
  1545. dev_err(hba->dev, "%s: opcode 0x%.2x for idn %d failed, err = %d\n",
  1546. __func__, opcode, idn, err);
  1547. goto out_unlock;
  1548. }
  1549. hba->dev_cmd.query.descriptor = NULL;
  1550. *buf_len = be16_to_cpu(response->upiu_res.length);
  1551. out_unlock:
  1552. mutex_unlock(&hba->dev_cmd.lock);
  1553. out:
  1554. ufshcd_release(hba);
  1555. return err;
  1556. }
  1557. /**
  1558. * ufshcd_read_desc_param - read the specified descriptor parameter
  1559. * @hba: Pointer to adapter instance
  1560. * @desc_id: descriptor idn value
  1561. * @desc_index: descriptor index
  1562. * @param_offset: offset of the parameter to read
  1563. * @param_read_buf: pointer to buffer where parameter would be read
  1564. * @param_size: sizeof(param_read_buf)
  1565. *
  1566. * Return 0 in case of success, non-zero otherwise
  1567. */
  1568. static int ufshcd_read_desc_param(struct ufs_hba *hba,
  1569. enum desc_idn desc_id,
  1570. int desc_index,
  1571. u32 param_offset,
  1572. u8 *param_read_buf,
  1573. u32 param_size)
  1574. {
  1575. int ret;
  1576. u8 *desc_buf;
  1577. u32 buff_len;
  1578. bool is_kmalloc = true;
  1579. /* safety checks */
  1580. if (desc_id >= QUERY_DESC_IDN_MAX)
  1581. return -EINVAL;
  1582. buff_len = ufs_query_desc_max_size[desc_id];
  1583. if ((param_offset + param_size) > buff_len)
  1584. return -EINVAL;
  1585. if (!param_offset && (param_size == buff_len)) {
  1586. /* memory space already available to hold full descriptor */
  1587. desc_buf = param_read_buf;
  1588. is_kmalloc = false;
  1589. } else {
  1590. /* allocate memory to hold full descriptor */
  1591. desc_buf = kmalloc(buff_len, GFP_KERNEL);
  1592. if (!desc_buf)
  1593. return -ENOMEM;
  1594. }
  1595. ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
  1596. desc_id, desc_index, 0, desc_buf,
  1597. &buff_len);
  1598. if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
  1599. (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
  1600. ufs_query_desc_max_size[desc_id])
  1601. || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
  1602. dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
  1603. __func__, desc_id, param_offset, buff_len, ret);
  1604. if (!ret)
  1605. ret = -EINVAL;
  1606. goto out;
  1607. }
  1608. if (is_kmalloc)
  1609. memcpy(param_read_buf, &desc_buf[param_offset], param_size);
  1610. out:
  1611. if (is_kmalloc)
  1612. kfree(desc_buf);
  1613. return ret;
  1614. }
  1615. static inline int ufshcd_read_desc(struct ufs_hba *hba,
  1616. enum desc_idn desc_id,
  1617. int desc_index,
  1618. u8 *buf,
  1619. u32 size)
  1620. {
  1621. return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
  1622. }
  1623. static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
  1624. u8 *buf,
  1625. u32 size)
  1626. {
  1627. return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
  1628. }
  1629. /**
  1630. * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
  1631. * @hba: Pointer to adapter instance
  1632. * @lun: lun id
  1633. * @param_offset: offset of the parameter to read
  1634. * @param_read_buf: pointer to buffer where parameter would be read
  1635. * @param_size: sizeof(param_read_buf)
  1636. *
  1637. * Return 0 in case of success, non-zero otherwise
  1638. */
  1639. static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
  1640. int lun,
  1641. enum unit_desc_param param_offset,
  1642. u8 *param_read_buf,
  1643. u32 param_size)
  1644. {
  1645. /*
  1646. * Unit descriptors are only available for general purpose LUs (LUN id
  1647. * from 0 to 7) and RPMB Well known LU.
  1648. */
  1649. if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
  1650. return -EOPNOTSUPP;
  1651. return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
  1652. param_offset, param_read_buf, param_size);
  1653. }
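/*
 * Usage sketch (illustrative; mirrors ufshcd_set_queue_depth() below): read a
 * single byte, bLUQueueDepth, out of the unit descriptor of a given LUN:
 *
 *	u8 lun_qdepth;
 *
 *	err = ufshcd_read_unit_desc_param(hba,
 *			ufshcd_scsi_to_upiu_lun(sdev->lun),
 *			UNIT_DESC_PARAM_LU_Q_DEPTH,
 *			&lun_qdepth, sizeof(lun_qdepth));
 */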
  1654. /**
  1655. * ufshcd_memory_alloc - allocate memory for host memory space data structures
  1656. * @hba: per adapter instance
  1657. *
  1658. * 1. Allocate DMA memory for Command Descriptor array
1659. * Each command descriptor consists of Command UPIU, Response UPIU and PRDT
  1660. * 2. Allocate DMA memory for UTP Transfer Request Descriptor List (UTRDL).
  1661. * 3. Allocate DMA memory for UTP Task Management Request Descriptor List
  1662. * (UTMRDL)
  1663. * 4. Allocate memory for local reference block(lrb).
  1664. *
  1665. * Returns 0 for success, non-zero in case of failure
  1666. */
  1667. static int ufshcd_memory_alloc(struct ufs_hba *hba)
  1668. {
  1669. size_t utmrdl_size, utrdl_size, ucdl_size;
  1670. /* Allocate memory for UTP command descriptors */
  1671. ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
  1672. hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev,
  1673. ucdl_size,
  1674. &hba->ucdl_dma_addr,
  1675. GFP_KERNEL);
  1676. /*
  1677. * UFSHCI requires UTP command descriptor to be 128 byte aligned.
  1678. * make sure hba->ucdl_dma_addr is aligned to PAGE_SIZE
  1679. * if hba->ucdl_dma_addr is aligned to PAGE_SIZE, then it will
  1680. * be aligned to 128 bytes as well
  1681. */
  1682. if (!hba->ucdl_base_addr ||
  1683. WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
  1684. dev_err(hba->dev,
  1685. "Command Descriptor Memory allocation failed\n");
  1686. goto out;
  1687. }
  1688. /*
  1689. * Allocate memory for UTP Transfer descriptors
  1690. * UFSHCI requires 1024 byte alignment of UTRD
  1691. */
  1692. utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
  1693. hba->utrdl_base_addr = dmam_alloc_coherent(hba->dev,
  1694. utrdl_size,
  1695. &hba->utrdl_dma_addr,
  1696. GFP_KERNEL);
  1697. if (!hba->utrdl_base_addr ||
  1698. WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
  1699. dev_err(hba->dev,
  1700. "Transfer Descriptor Memory allocation failed\n");
  1701. goto out;
  1702. }
  1703. /*
  1704. * Allocate memory for UTP Task Management descriptors
  1705. * UFSHCI requires 1024 byte alignment of UTMRD
  1706. */
  1707. utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
  1708. hba->utmrdl_base_addr = dmam_alloc_coherent(hba->dev,
  1709. utmrdl_size,
  1710. &hba->utmrdl_dma_addr,
  1711. GFP_KERNEL);
  1712. if (!hba->utmrdl_base_addr ||
  1713. WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
  1714. dev_err(hba->dev,
  1715. "Task Management Descriptor Memory allocation failed\n");
  1716. goto out;
  1717. }
  1718. /* Allocate memory for local reference block */
  1719. hba->lrb = devm_kzalloc(hba->dev,
  1720. hba->nutrs * sizeof(struct ufshcd_lrb),
  1721. GFP_KERNEL);
  1722. if (!hba->lrb) {
  1723. dev_err(hba->dev, "LRB Memory allocation failed\n");
  1724. goto out;
  1725. }
  1726. return 0;
  1727. out:
  1728. return -ENOMEM;
  1729. }
  1730. /**
  1731. * ufshcd_host_memory_configure - configure local reference block with
  1732. * memory offsets
  1733. * @hba: per adapter instance
  1734. *
  1735. * Configure Host memory space
  1736. * 1. Update Corresponding UTRD.UCDBA and UTRD.UCDBAU with UCD DMA
  1737. * address.
  1738. * 2. Update each UTRD with Response UPIU offset, Response UPIU length
  1739. * and PRDT offset.
  1740. * 3. Save the corresponding addresses of UTRD, UCD.CMD, UCD.RSP and UCD.PRDT
  1741. * into local reference block.
  1742. */
  1743. static void ufshcd_host_memory_configure(struct ufs_hba *hba)
  1744. {
  1745. struct utp_transfer_cmd_desc *cmd_descp;
  1746. struct utp_transfer_req_desc *utrdlp;
  1747. dma_addr_t cmd_desc_dma_addr;
  1748. dma_addr_t cmd_desc_element_addr;
  1749. u16 response_offset;
  1750. u16 prdt_offset;
  1751. int cmd_desc_size;
  1752. int i;
  1753. utrdlp = hba->utrdl_base_addr;
  1754. cmd_descp = hba->ucdl_base_addr;
  1755. response_offset =
  1756. offsetof(struct utp_transfer_cmd_desc, response_upiu);
  1757. prdt_offset =
  1758. offsetof(struct utp_transfer_cmd_desc, prd_table);
  1759. cmd_desc_size = sizeof(struct utp_transfer_cmd_desc);
  1760. cmd_desc_dma_addr = hba->ucdl_dma_addr;
  1761. for (i = 0; i < hba->nutrs; i++) {
  1762. /* Configure UTRD with command descriptor base address */
  1763. cmd_desc_element_addr =
  1764. (cmd_desc_dma_addr + (cmd_desc_size * i));
  1765. utrdlp[i].command_desc_base_addr_lo =
  1766. cpu_to_le32(lower_32_bits(cmd_desc_element_addr));
  1767. utrdlp[i].command_desc_base_addr_hi =
  1768. cpu_to_le32(upper_32_bits(cmd_desc_element_addr));
  1769. /* Response upiu and prdt offset should be in double words */
  1770. utrdlp[i].response_upiu_offset =
  1771. cpu_to_le16((response_offset >> 2));
  1772. utrdlp[i].prd_table_offset =
  1773. cpu_to_le16((prdt_offset >> 2));
  1774. utrdlp[i].response_upiu_length =
  1775. cpu_to_le16(ALIGNED_UPIU_SIZE >> 2);
  1776. hba->lrb[i].utr_descriptor_ptr = (utrdlp + i);
  1777. hba->lrb[i].ucd_req_ptr =
  1778. (struct utp_upiu_req *)(cmd_descp + i);
  1779. hba->lrb[i].ucd_rsp_ptr =
  1780. (struct utp_upiu_rsp *)cmd_descp[i].response_upiu;
  1781. hba->lrb[i].ucd_prdt_ptr =
  1782. (struct ufshcd_sg_entry *)cmd_descp[i].prd_table;
  1783. }
  1784. }
  1785. /**
  1786. * ufshcd_dme_link_startup - Notify Unipro to perform link startup
  1787. * @hba: per adapter instance
  1788. *
  1789. * UIC_CMD_DME_LINK_STARTUP command must be issued to Unipro layer,
  1790. * in order to initialize the Unipro link startup procedure.
  1791. * Once the Unipro links are up, the device connected to the controller
  1792. * is detected.
  1793. *
  1794. * Returns 0 on success, non-zero value on failure
  1795. */
  1796. static int ufshcd_dme_link_startup(struct ufs_hba *hba)
  1797. {
  1798. struct uic_command uic_cmd = {0};
  1799. int ret;
  1800. uic_cmd.command = UIC_CMD_DME_LINK_STARTUP;
  1801. ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
  1802. if (ret)
  1803. dev_err(hba->dev,
  1804. "dme-link-startup: error code %d\n", ret);
  1805. return ret;
  1806. }
  1807. /**
  1808. * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET
  1809. * @hba: per adapter instance
  1810. * @attr_sel: uic command argument1
  1811. * @attr_set: attribute set type as uic command argument2
  1812. * @mib_val: setting value as uic command argument3
  1813. * @peer: indicate whether peer or local
  1814. *
  1815. * Returns 0 on success, non-zero value on failure
  1816. */
  1817. int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
  1818. u8 attr_set, u32 mib_val, u8 peer)
  1819. {
  1820. struct uic_command uic_cmd = {0};
  1821. static const char *const action[] = {
  1822. "dme-set",
  1823. "dme-peer-set"
  1824. };
  1825. const char *set = action[!!peer];
  1826. int ret;
  1827. uic_cmd.command = peer ?
  1828. UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET;
  1829. uic_cmd.argument1 = attr_sel;
  1830. uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set);
  1831. uic_cmd.argument3 = mib_val;
  1832. ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
  1833. if (ret)
  1834. dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n",
  1835. set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret);
  1836. return ret;
  1837. }
  1838. EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr);
  1839. /**
  1840. * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET
  1841. * @hba: per adapter instance
  1842. * @attr_sel: uic command argument1
  1843. * @mib_val: the value of the attribute as returned by the UIC command
  1844. * @peer: indicate whether peer or local
  1845. *
  1846. * Returns 0 on success, non-zero value on failure
  1847. */
  1848. int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
  1849. u32 *mib_val, u8 peer)
  1850. {
  1851. struct uic_command uic_cmd = {0};
  1852. static const char *const action[] = {
  1853. "dme-get",
  1854. "dme-peer-get"
  1855. };
  1856. const char *get = action[!!peer];
  1857. int ret;
  1858. uic_cmd.command = peer ?
  1859. UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET;
  1860. uic_cmd.argument1 = attr_sel;
  1861. ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
  1862. if (ret) {
  1863. dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n",
  1864. get, UIC_GET_ATTR_ID(attr_sel), ret);
  1865. goto out;
  1866. }
  1867. if (mib_val)
  1868. *mib_val = uic_cmd.argument3;
  1869. out:
  1870. return ret;
  1871. }
  1872. EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
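/*
 * Usage sketch (illustrative): callers in this file normally go through the
 * ufshcd_dme_set()/ufshcd_dme_get()/ufshcd_dme_peer_get() wrappers around the
 * two helpers above, e.g. when reading the connected lane count or selecting
 * the HS series (see ufshcd_get_max_pwr_mode() and ufshcd_change_power_mode()
 * below):
 *
 *	u32 lanes;
 *
 *	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes);
 *	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), PA_HS_MODE_B);
 */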
  1873. /**
  1874. * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
  1875. * state) and waits for it to take effect.
  1876. *
  1877. * @hba: per adapter instance
  1878. * @cmd: UIC command to execute
  1879. *
  1880. * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
1881. * DME_HIBERNATE_EXIT commands take some time to take effect on both host
1882. * and device UniPro links and hence their final completion would be indicated by
  1883. * dedicated status bits in Interrupt Status register (UPMS, UHES, UHXS) in
  1884. * addition to normal UIC command completion Status (UCCS). This function only
  1885. * returns after the relevant status bits indicate the completion.
  1886. *
  1887. * Returns 0 on success, non-zero value on failure
  1888. */
  1889. static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
  1890. {
  1891. struct completion uic_async_done;
  1892. unsigned long flags;
  1893. u8 status;
  1894. int ret;
  1895. mutex_lock(&hba->uic_cmd_mutex);
  1896. init_completion(&uic_async_done);
  1897. spin_lock_irqsave(hba->host->host_lock, flags);
  1898. hba->uic_async_done = &uic_async_done;
  1899. ret = __ufshcd_send_uic_cmd(hba, cmd);
  1900. spin_unlock_irqrestore(hba->host->host_lock, flags);
  1901. if (ret) {
  1902. dev_err(hba->dev,
  1903. "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
  1904. cmd->command, cmd->argument3, ret);
  1905. goto out;
  1906. }
  1907. ret = ufshcd_wait_for_uic_cmd(hba, cmd);
  1908. if (ret) {
  1909. dev_err(hba->dev,
  1910. "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
  1911. cmd->command, cmd->argument3, ret);
  1912. goto out;
  1913. }
  1914. if (!wait_for_completion_timeout(hba->uic_async_done,
  1915. msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
  1916. dev_err(hba->dev,
  1917. "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
  1918. cmd->command, cmd->argument3);
  1919. ret = -ETIMEDOUT;
  1920. goto out;
  1921. }
  1922. status = ufshcd_get_upmcrs(hba);
  1923. if (status != PWR_LOCAL) {
  1924. dev_err(hba->dev,
  1925. "pwr ctrl cmd 0x%0x failed, host umpcrs:0x%x\n",
  1926. cmd->command, status);
  1927. ret = (status != PWR_OK) ? status : -1;
  1928. }
  1929. out:
  1930. spin_lock_irqsave(hba->host->host_lock, flags);
  1931. hba->uic_async_done = NULL;
  1932. spin_unlock_irqrestore(hba->host->host_lock, flags);
  1933. mutex_unlock(&hba->uic_cmd_mutex);
  1934. return ret;
  1935. }
  1936. /**
1937. * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
1938. * using DME_SET primitives.
1939. * @hba: per adapter instance
1940. * @mode: power mode value
  1941. *
  1942. * Returns 0 on success, non-zero value on failure
  1943. */
  1944. static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
  1945. {
  1946. struct uic_command uic_cmd = {0};
  1947. int ret;
  1948. uic_cmd.command = UIC_CMD_DME_SET;
  1949. uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
  1950. uic_cmd.argument3 = mode;
  1951. ufshcd_hold(hba, false);
  1952. ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
  1953. ufshcd_release(hba);
  1954. return ret;
  1955. }
  1956. static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
  1957. {
  1958. struct uic_command uic_cmd = {0};
  1959. uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
  1960. return ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
  1961. }
  1962. static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
  1963. {
  1964. struct uic_command uic_cmd = {0};
  1965. int ret;
  1966. uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
  1967. ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
  1968. if (ret) {
  1969. ufshcd_set_link_off(hba);
  1970. ret = ufshcd_host_reset_and_restore(hba);
  1971. }
  1972. return ret;
  1973. }
  1974. /**
  1975. * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
  1976. * @hba: per-adapter instance
  1977. */
  1978. static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
  1979. {
  1980. struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
  1981. if (hba->max_pwr_info.is_valid)
  1982. return 0;
  1983. pwr_info->pwr_tx = FASTAUTO_MODE;
  1984. pwr_info->pwr_rx = FASTAUTO_MODE;
  1985. pwr_info->hs_rate = PA_HS_MODE_B;
  1986. /* Get the connected lane count */
  1987. ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
  1988. &pwr_info->lane_rx);
  1989. ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
  1990. &pwr_info->lane_tx);
  1991. if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
  1992. dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
  1993. __func__,
  1994. pwr_info->lane_rx,
  1995. pwr_info->lane_tx);
  1996. return -EINVAL;
  1997. }
  1998. /*
  1999. * First, get the maximum gears of HS speed.
  2000. * If a zero value, it means there is no HSGEAR capability.
  2001. * Then, get the maximum gears of PWM speed.
  2002. */
  2003. ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
  2004. if (!pwr_info->gear_rx) {
  2005. ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
  2006. &pwr_info->gear_rx);
  2007. if (!pwr_info->gear_rx) {
  2008. dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
  2009. __func__, pwr_info->gear_rx);
  2010. return -EINVAL;
  2011. }
  2012. pwr_info->pwr_rx = SLOWAUTO_MODE;
  2013. }
  2014. ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
  2015. &pwr_info->gear_tx);
  2016. if (!pwr_info->gear_tx) {
  2017. ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
  2018. &pwr_info->gear_tx);
  2019. if (!pwr_info->gear_tx) {
  2020. dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
  2021. __func__, pwr_info->gear_tx);
  2022. return -EINVAL;
  2023. }
  2024. pwr_info->pwr_tx = SLOWAUTO_MODE;
  2025. }
  2026. hba->max_pwr_info.is_valid = true;
  2027. return 0;
  2028. }
  2029. static int ufshcd_change_power_mode(struct ufs_hba *hba,
  2030. struct ufs_pa_layer_attr *pwr_mode)
  2031. {
  2032. int ret;
  2033. /* if already configured to the requested pwr_mode */
  2034. if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
  2035. pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
  2036. pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
  2037. pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
  2038. pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
  2039. pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
  2040. pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
  2041. dev_dbg(hba->dev, "%s: power already configured\n", __func__);
  2042. return 0;
  2043. }
  2044. /*
  2045. * Configure attributes for power mode change with below.
  2046. * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION,
  2047. * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
  2048. * - PA_HSSERIES
  2049. */
  2050. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
  2051. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
  2052. pwr_mode->lane_rx);
  2053. if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
  2054. pwr_mode->pwr_rx == FAST_MODE)
  2055. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
  2056. else
  2057. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
  2058. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
  2059. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
  2060. pwr_mode->lane_tx);
  2061. if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
  2062. pwr_mode->pwr_tx == FAST_MODE)
  2063. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
  2064. else
  2065. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
  2066. if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
  2067. pwr_mode->pwr_tx == FASTAUTO_MODE ||
  2068. pwr_mode->pwr_rx == FAST_MODE ||
  2069. pwr_mode->pwr_tx == FAST_MODE)
  2070. ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
  2071. pwr_mode->hs_rate);
  2072. ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
  2073. | pwr_mode->pwr_tx);
  2074. if (ret) {
  2075. dev_err(hba->dev,
  2076. "%s: power mode change failed %d\n", __func__, ret);
  2077. } else {
  2078. if (hba->vops && hba->vops->pwr_change_notify)
  2079. hba->vops->pwr_change_notify(hba,
  2080. POST_CHANGE, NULL, pwr_mode);
  2081. memcpy(&hba->pwr_info, pwr_mode,
  2082. sizeof(struct ufs_pa_layer_attr));
  2083. }
  2084. return ret;
  2085. }
  2086. /**
  2087. * ufshcd_config_pwr_mode - configure a new power mode
  2088. * @hba: per-adapter instance
  2089. * @desired_pwr_mode: desired power configuration
  2090. */
  2091. static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
  2092. struct ufs_pa_layer_attr *desired_pwr_mode)
  2093. {
  2094. struct ufs_pa_layer_attr final_params = { 0 };
  2095. int ret;
  2096. if (hba->vops && hba->vops->pwr_change_notify)
  2097. hba->vops->pwr_change_notify(hba,
  2098. PRE_CHANGE, desired_pwr_mode, &final_params);
  2099. else
  2100. memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
  2101. ret = ufshcd_change_power_mode(hba, &final_params);
  2102. return ret;
  2103. }
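/*
 * Usage sketch (illustrative): once ufshcd_get_max_pwr_mode() above has
 * populated hba->max_pwr_info, a caller could request that configuration
 * with:
 *
 *	if (!ufshcd_get_max_pwr_mode(hba))
 *		ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
 */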
  2104. /**
  2105. * ufshcd_complete_dev_init() - checks device readiness
2106. * @hba: per-adapter instance
  2107. *
  2108. * Set fDeviceInit flag and poll until device toggles it.
  2109. */
  2110. static int ufshcd_complete_dev_init(struct ufs_hba *hba)
  2111. {
  2112. int i, retries, err = 0;
  2113. bool flag_res = 1;
  2114. for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
  2115. /* Set the fDeviceInit flag */
  2116. err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
  2117. QUERY_FLAG_IDN_FDEVICEINIT, NULL);
  2118. if (!err || err == -ETIMEDOUT)
  2119. break;
  2120. dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
  2121. }
  2122. if (err) {
  2123. dev_err(hba->dev,
  2124. "%s setting fDeviceInit flag failed with error %d\n",
  2125. __func__, err);
  2126. goto out;
  2127. }
  2128. /* poll for max. 100 iterations for fDeviceInit flag to clear */
  2129. for (i = 0; i < 100 && !err && flag_res; i++) {
  2130. for (retries = QUERY_REQ_RETRIES; retries > 0; retries--) {
  2131. err = ufshcd_query_flag(hba,
  2132. UPIU_QUERY_OPCODE_READ_FLAG,
  2133. QUERY_FLAG_IDN_FDEVICEINIT, &flag_res);
  2134. if (!err || err == -ETIMEDOUT)
  2135. break;
  2136. dev_dbg(hba->dev, "%s: error %d retrying\n", __func__,
  2137. err);
  2138. }
  2139. }
  2140. if (err)
  2141. dev_err(hba->dev,
  2142. "%s reading fDeviceInit flag failed with error %d\n",
  2143. __func__, err);
  2144. else if (flag_res)
  2145. dev_err(hba->dev,
  2146. "%s fDeviceInit was not cleared by the device\n",
  2147. __func__);
  2148. out:
  2149. return err;
  2150. }
  2151. /**
  2152. * ufshcd_make_hba_operational - Make UFS controller operational
  2153. * @hba: per adapter instance
  2154. *
  2155. * To bring UFS host controller to operational state,
  2156. * 1. Enable required interrupts
  2157. * 2. Configure interrupt aggregation
2158. * 3. Program UTRL and UTMRL base addresses
  2159. * 4. Configure run-stop-registers
  2160. *
  2161. * Returns 0 on success, non-zero value on failure
  2162. */
  2163. static int ufshcd_make_hba_operational(struct ufs_hba *hba)
  2164. {
  2165. int err = 0;
  2166. u32 reg;
  2167. /* Enable required interrupts */
  2168. ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
  2169. /* Configure interrupt aggregation */
  2170. ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO);
  2171. /* Configure UTRL and UTMRL base address registers */
  2172. ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
  2173. REG_UTP_TRANSFER_REQ_LIST_BASE_L);
  2174. ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
  2175. REG_UTP_TRANSFER_REQ_LIST_BASE_H);
  2176. ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
  2177. REG_UTP_TASK_REQ_LIST_BASE_L);
  2178. ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
  2179. REG_UTP_TASK_REQ_LIST_BASE_H);
  2180. /*
  2181. * UCRDY, UTMRLDY and UTRLRDY bits must be 1
  2182. * DEI, HEI bits must be 0
  2183. */
  2184. reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
  2185. if (!(ufshcd_get_lists_status(reg))) {
  2186. ufshcd_enable_run_stop_reg(hba);
  2187. } else {
  2188. dev_err(hba->dev,
  2189. "Host controller not ready to process requests");
  2190. err = -EIO;
  2191. goto out;
  2192. }
  2193. out:
  2194. return err;
  2195. }
  2196. /**
  2197. * ufshcd_hba_enable - initialize the controller
  2198. * @hba: per adapter instance
  2199. *
2200. * The controller resets itself and the controller firmware initialization
2201. * sequence kicks off. When the controller is ready it will set
  2202. * the Host Controller Enable bit to 1.
  2203. *
  2204. * Returns 0 on success, non-zero value on failure
  2205. */
  2206. static int ufshcd_hba_enable(struct ufs_hba *hba)
  2207. {
  2208. int retry;
  2209. /*
  2210. * msleep of 1 and 5 used in this function might result in msleep(20),
  2211. * but it was necessary to send the UFS FPGA to reset mode during
  2212. * development and testing of this driver. msleep can be changed to
  2213. * mdelay and retry count can be reduced based on the controller.
  2214. */
  2215. if (!ufshcd_is_hba_active(hba)) {
  2216. /* change controller state to "reset state" */
  2217. ufshcd_hba_stop(hba);
  2218. /*
  2219. * This delay is based on the testing done with UFS host
  2220. * controller FPGA. The delay can be changed based on the
  2221. * host controller used.
  2222. */
  2223. msleep(5);
  2224. }
  2225. /* UniPro link is disabled at this point */
  2226. ufshcd_set_link_off(hba);
  2227. if (hba->vops && hba->vops->hce_enable_notify)
  2228. hba->vops->hce_enable_notify(hba, PRE_CHANGE);
  2229. /* start controller initialization sequence */
  2230. ufshcd_hba_start(hba);
  2231. /*
  2232. * To initialize a UFS host controller HCE bit must be set to 1.
  2233. * During initialization the HCE bit value changes from 1->0->1.
  2234. * When the host controller completes initialization sequence
  2235. * it sets the value of HCE bit to 1. The same HCE bit is read back
  2236. * to check if the controller has completed initialization sequence.
  2237. * So without this delay the value HCE = 1, set in the previous
  2238. * instruction might be read back.
  2239. * This delay can be changed based on the controller.
  2240. */
  2241. msleep(1);
  2242. /* wait for the host controller to complete initialization */
  2243. retry = 10;
  2244. while (ufshcd_is_hba_active(hba)) {
  2245. if (retry) {
  2246. retry--;
  2247. } else {
  2248. dev_err(hba->dev,
  2249. "Controller enable failed\n");
  2250. return -EIO;
  2251. }
  2252. msleep(5);
  2253. }
  2254. /* enable UIC related interrupts */
  2255. ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
  2256. if (hba->vops && hba->vops->hce_enable_notify)
  2257. hba->vops->hce_enable_notify(hba, POST_CHANGE);
  2258. return 0;
  2259. }
  2260. /**
  2261. * ufshcd_link_startup - Initialize unipro link startup
  2262. * @hba: per adapter instance
  2263. *
  2264. * Returns 0 for success, non-zero in case of failure
  2265. */
  2266. static int ufshcd_link_startup(struct ufs_hba *hba)
  2267. {
  2268. int ret;
  2269. int retries = DME_LINKSTARTUP_RETRIES;
  2270. do {
  2271. if (hba->vops && hba->vops->link_startup_notify)
  2272. hba->vops->link_startup_notify(hba, PRE_CHANGE);
  2273. ret = ufshcd_dme_link_startup(hba);
  2274. /* check if device is detected by inter-connect layer */
  2275. if (!ret && !ufshcd_is_device_present(hba)) {
  2276. dev_err(hba->dev, "%s: Device not present\n", __func__);
  2277. ret = -ENXIO;
  2278. goto out;
  2279. }
  2280. /*
  2281. * DME link lost indication is only received when link is up,
  2282. * but we can't be sure if the link is up until link startup
  2283. * succeeds. So reset the local Uni-Pro and try again.
  2284. */
  2285. if (ret && ufshcd_hba_enable(hba))
  2286. goto out;
  2287. } while (ret && retries--);
  2288. if (ret)
  2289. /* failed to get the link up... retire */
  2290. goto out;
  2291. /* Include any host controller configuration via UIC commands */
  2292. if (hba->vops && hba->vops->link_startup_notify) {
  2293. ret = hba->vops->link_startup_notify(hba, POST_CHANGE);
  2294. if (ret)
  2295. goto out;
  2296. }
  2297. ret = ufshcd_make_hba_operational(hba);
  2298. out:
  2299. if (ret)
  2300. dev_err(hba->dev, "link startup failed %d\n", ret);
  2301. return ret;
  2302. }
  2303. /**
  2304. * ufshcd_verify_dev_init() - Verify device initialization
  2305. * @hba: per-adapter instance
  2306. *
  2307. * Send NOP OUT UPIU and wait for NOP IN response to check whether the
  2308. * device Transport Protocol (UTP) layer is ready after a reset.
  2309. * If the UTP layer at the device side is not initialized, it may
  2310. * not respond with NOP IN UPIU within timeout of %NOP_OUT_TIMEOUT
  2311. * and we retry sending NOP OUT for %NOP_OUT_RETRIES iterations.
  2312. */
  2313. static int ufshcd_verify_dev_init(struct ufs_hba *hba)
  2314. {
  2315. int err = 0;
  2316. int retries;
  2317. ufshcd_hold(hba, false);
  2318. mutex_lock(&hba->dev_cmd.lock);
  2319. for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
  2320. err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
  2321. NOP_OUT_TIMEOUT);
  2322. if (!err || err == -ETIMEDOUT)
  2323. break;
  2324. dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
  2325. }
  2326. mutex_unlock(&hba->dev_cmd.lock);
  2327. ufshcd_release(hba);
  2328. if (err)
  2329. dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
  2330. return err;
  2331. }
  2332. /**
  2333. * ufshcd_set_queue_depth - set lun queue depth
  2334. * @sdev: pointer to SCSI device
  2335. *
  2336. * Read bLUQueueDepth value and activate scsi tagged command
  2337. * queueing. For WLUN, queue depth is set to 1. For best-effort
2338. * cases (bLUQueueDepth = 0) the queue depth is set to the maximum
2339. * value that the host can queue.
  2340. */
  2341. static void ufshcd_set_queue_depth(struct scsi_device *sdev)
  2342. {
  2343. int ret = 0;
  2344. u8 lun_qdepth;
  2345. struct ufs_hba *hba;
  2346. hba = shost_priv(sdev->host);
  2347. lun_qdepth = hba->nutrs;
  2348. ret = ufshcd_read_unit_desc_param(hba,
  2349. ufshcd_scsi_to_upiu_lun(sdev->lun),
  2350. UNIT_DESC_PARAM_LU_Q_DEPTH,
  2351. &lun_qdepth,
  2352. sizeof(lun_qdepth));
2353. /* Some WLUNs don't support unit descriptors */
  2354. if (ret == -EOPNOTSUPP)
  2355. lun_qdepth = 1;
  2356. else if (!lun_qdepth)
  2357. /* eventually, we can figure out the real queue depth */
  2358. lun_qdepth = hba->nutrs;
  2359. else
  2360. lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
  2361. dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
  2362. __func__, lun_qdepth);
  2363. if (sdev->tagged_supported)
  2364. scsi_adjust_queue_depth(sdev, lun_qdepth);
  2365. }
  2366. /*
  2367. * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
  2368. * @hba: per-adapter instance
  2369. * @lun: UFS device lun id
  2370. * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
  2371. *
  2372. * Returns 0 in case of success and b_lu_write_protect status would be returned
2373. * in the @b_lu_write_protect parameter.
  2374. * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
  2375. * Returns -EINVAL in case of invalid parameters passed to this function.
  2376. */
  2377. static int ufshcd_get_lu_wp(struct ufs_hba *hba,
  2378. u8 lun,
  2379. u8 *b_lu_write_protect)
  2380. {
  2381. int ret;
  2382. if (!b_lu_write_protect)
  2383. ret = -EINVAL;
  2384. /*
  2385. * According to UFS device spec, RPMB LU can't be write
  2386. * protected so skip reading bLUWriteProtect parameter for
  2387. * it. For other W-LUs, UNIT DESCRIPTOR is not available.
  2388. */
  2389. else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
  2390. ret = -ENOTSUPP;
  2391. else
  2392. ret = ufshcd_read_unit_desc_param(hba,
  2393. lun,
  2394. UNIT_DESC_PARAM_LU_WR_PROTECT,
  2395. b_lu_write_protect,
  2396. sizeof(*b_lu_write_protect));
  2397. return ret;
  2398. }
  2399. /**
  2400. * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
  2401. * status
  2402. * @hba: per-adapter instance
  2403. * @sdev: pointer to SCSI device
  2404. *
  2405. */
  2406. static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
  2407. struct scsi_device *sdev)
  2408. {
  2409. if (hba->dev_info.f_power_on_wp_en &&
  2410. !hba->dev_info.is_lu_power_on_wp) {
  2411. u8 b_lu_write_protect;
  2412. if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
  2413. &b_lu_write_protect) &&
  2414. (b_lu_write_protect == UFS_LU_POWER_ON_WP))
  2415. hba->dev_info.is_lu_power_on_wp = true;
  2416. }
  2417. }
  2418. /**
  2419. * ufshcd_slave_alloc - handle initial SCSI device configurations
  2420. * @sdev: pointer to SCSI device
  2421. *
  2422. * Returns success
  2423. */
  2424. static int ufshcd_slave_alloc(struct scsi_device *sdev)
  2425. {
  2426. struct ufs_hba *hba;
  2427. hba = shost_priv(sdev->host);
  2428. sdev->tagged_supported = 1;
  2429. /* Mode sense(6) is not supported by UFS, so use Mode sense(10) */
  2430. sdev->use_10_for_ms = 1;
  2431. scsi_set_tag_type(sdev, MSG_SIMPLE_TAG);
  2432. /* allow SCSI layer to restart the device in case of errors */
  2433. sdev->allow_restart = 1;
  2434. /* REPORT SUPPORTED OPERATION CODES is not supported */
  2435. sdev->no_report_opcodes = 1;
  2436. ufshcd_set_queue_depth(sdev);
  2437. ufshcd_get_lu_power_on_wp_status(hba, sdev);
  2438. return 0;
  2439. }
  2440. /**
  2441. * ufshcd_change_queue_depth - change queue depth
  2442. * @sdev: pointer to SCSI device
  2443. * @depth: required depth to set
  2444. * @reason: reason for changing the depth
  2445. *
  2446. * Change queue depth according to the reason and make sure
  2447. * the max. limits are not crossed.
  2448. */
  2449. static int ufshcd_change_queue_depth(struct scsi_device *sdev,
  2450. int depth, int reason)
  2451. {
  2452. struct ufs_hba *hba = shost_priv(sdev->host);
  2453. if (depth > hba->nutrs)
  2454. depth = hba->nutrs;
  2455. switch (reason) {
  2456. case SCSI_QDEPTH_DEFAULT:
  2457. case SCSI_QDEPTH_RAMP_UP:
  2458. if (!sdev->tagged_supported)
  2459. depth = 1;
  2460. scsi_adjust_queue_depth(sdev, depth);
  2461. break;
  2462. case SCSI_QDEPTH_QFULL:
  2463. scsi_track_queue_full(sdev, depth);
  2464. break;
  2465. default:
  2466. return -EOPNOTSUPP;
  2467. }
  2468. return depth;
  2469. }
  2470. /**
  2471. * ufshcd_slave_configure - adjust SCSI device configurations
  2472. * @sdev: pointer to SCSI device
  2473. */
  2474. static int ufshcd_slave_configure(struct scsi_device *sdev)
  2475. {
  2476. struct request_queue *q = sdev->request_queue;
  2477. blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
  2478. blk_queue_max_segment_size(q, PRDT_DATA_BYTE_COUNT_MAX);
  2479. return 0;
  2480. }
  2481. /**
  2482. * ufshcd_slave_destroy - remove SCSI device configurations
  2483. * @sdev: pointer to SCSI device
  2484. */
  2485. static void ufshcd_slave_destroy(struct scsi_device *sdev)
  2486. {
  2487. struct ufs_hba *hba;
  2488. hba = shost_priv(sdev->host);
  2489. /* Drop the reference as it won't be needed anymore */
  2490. if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN)
  2491. hba->sdev_ufs_device = NULL;
  2492. }
  2493. /**
  2494. * ufshcd_task_req_compl - handle task management request completion
  2495. * @hba: per adapter instance
  2496. * @index: index of the completed request
  2497. * @resp: task management service response
  2498. *
  2499. * Returns non-zero value on error, zero on success
  2500. */
  2501. static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index, u8 *resp)
  2502. {
  2503. struct utp_task_req_desc *task_req_descp;
  2504. struct utp_upiu_task_rsp *task_rsp_upiup;
  2505. unsigned long flags;
  2506. int ocs_value;
  2507. int task_result;
  2508. spin_lock_irqsave(hba->host->host_lock, flags);
  2509. /* Clear completed tasks from outstanding_tasks */
  2510. __clear_bit(index, &hba->outstanding_tasks);
  2511. task_req_descp = hba->utmrdl_base_addr;
  2512. ocs_value = ufshcd_get_tmr_ocs(&task_req_descp[index]);
  2513. if (ocs_value == OCS_SUCCESS) {
  2514. task_rsp_upiup = (struct utp_upiu_task_rsp *)
  2515. task_req_descp[index].task_rsp_upiu;
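/*
 * The task management service response is carried in the "Response"
 * field of the response UPIU header; MASK_TASK_RESPONSE selects that
 * byte from dword_1 below.
 */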
  2516. task_result = be32_to_cpu(task_rsp_upiup->header.dword_1);
  2517. task_result = ((task_result & MASK_TASK_RESPONSE) >> 8);
  2518. if (resp)
  2519. *resp = (u8)task_result;
  2520. } else {
  2521. dev_err(hba->dev, "%s: failed, ocs = 0x%x\n",
  2522. __func__, ocs_value);
  2523. }
  2524. spin_unlock_irqrestore(hba->host->host_lock, flags);
  2525. return ocs_value;
  2526. }
  2527. /**
  2528. * ufshcd_scsi_cmd_status - Update SCSI command result based on SCSI status
2529. * @lrbp: pointer to local reference block of completed command
2530. * @scsi_status: SCSI command status
2531. *
2532. * Returns value based on SCSI command status
  2533. */
  2534. static inline int
  2535. ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
  2536. {
  2537. int result = 0;
  2538. switch (scsi_status) {
  2539. case SAM_STAT_CHECK_CONDITION:
  2540. ufshcd_copy_sense_data(lrbp);
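/* fall through - report DID_OK along with the check condition status */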
  2541. case SAM_STAT_GOOD:
  2542. result |= DID_OK << 16 |
  2543. COMMAND_COMPLETE << 8 |
  2544. scsi_status;
  2545. break;
  2546. case SAM_STAT_TASK_SET_FULL:
  2547. case SAM_STAT_BUSY:
  2548. case SAM_STAT_TASK_ABORTED:
  2549. ufshcd_copy_sense_data(lrbp);
  2550. result |= scsi_status;
  2551. break;
  2552. default:
  2553. result |= DID_ERROR << 16;
  2554. break;
  2555. } /* end of switch */
  2556. return result;
  2557. }
  2558. /**
  2559. * ufshcd_transfer_rsp_status - Get overall status of the response
  2560. * @hba: per adapter instance
2561. * @lrbp: pointer to local reference block of completed command
  2562. *
  2563. * Returns result of the command to notify SCSI midlayer
  2564. */
  2565. static inline int
  2566. ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
  2567. {
  2568. int result = 0;
  2569. int scsi_status;
  2570. int ocs;
  2571. /* overall command status of utrd */
  2572. ocs = ufshcd_get_tr_ocs(lrbp);
  2573. switch (ocs) {
  2574. case OCS_SUCCESS:
  2575. result = ufshcd_get_req_rsp(lrbp->ucd_rsp_ptr);
  2576. switch (result) {
  2577. case UPIU_TRANSACTION_RESPONSE:
  2578. /*
  2579. * get the response UPIU result to extract
  2580. * the SCSI command status
  2581. */
  2582. result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr);
  2583. /*
  2584. * get the result based on SCSI status response
  2585. * to notify the SCSI midlayer of the command status
  2586. */
  2587. scsi_status = result & MASK_SCSI_STATUS;
  2588. result = ufshcd_scsi_cmd_status(lrbp, scsi_status);
  2589. if (ufshcd_is_exception_event(lrbp->ucd_rsp_ptr))
  2590. schedule_work(&hba->eeh_work);
  2591. break;
  2592. case UPIU_TRANSACTION_REJECT_UPIU:
  2593. /* TODO: handle Reject UPIU Response */
  2594. result = DID_ERROR << 16;
  2595. dev_err(hba->dev,
  2596. "Reject UPIU not fully implemented\n");
  2597. break;
2598. default:
2599. dev_err(hba->dev,
2600. "Unexpected request response code = %x\n",
2601. result);
2602. result = DID_ERROR << 16;
2603. break;
  2604. }
  2605. break;
  2606. case OCS_ABORTED:
  2607. result |= DID_ABORT << 16;
  2608. break;
  2609. case OCS_INVALID_COMMAND_STATUS:
  2610. result |= DID_REQUEUE << 16;
  2611. break;
  2612. case OCS_INVALID_CMD_TABLE_ATTR:
  2613. case OCS_INVALID_PRDT_ATTR:
  2614. case OCS_MISMATCH_DATA_BUF_SIZE:
  2615. case OCS_MISMATCH_RESP_UPIU_SIZE:
  2616. case OCS_PEER_COMM_FAILURE:
  2617. case OCS_FATAL_ERROR:
  2618. default:
  2619. result |= DID_ERROR << 16;
  2620. dev_err(hba->dev,
  2621. "OCS error from controller = %x\n", ocs);
  2622. break;
  2623. } /* end of switch */
  2624. return result;
  2625. }
  2626. /**
  2627. * ufshcd_uic_cmd_compl - handle completion of uic command
  2628. * @hba: per adapter instance
  2629. * @intr_status: interrupt status generated by the controller
  2630. */
  2631. static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
  2632. {
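/*
 * Two completion paths: a regular UIC command completes active_uic_cmd,
 * while a UIC power mode change (e.g. hibern8 entry/exit) signals the
 * waiter through uic_async_done.
 */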
  2633. if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) {
  2634. hba->active_uic_cmd->argument2 |=
  2635. ufshcd_get_uic_cmd_result(hba);
  2636. hba->active_uic_cmd->argument3 =
  2637. ufshcd_get_dme_attr_val(hba);
  2638. complete(&hba->active_uic_cmd->done);
  2639. }
  2640. if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
  2641. complete(hba->uic_async_done);
  2642. }
  2643. /**
  2644. * ufshcd_transfer_req_compl - handle SCSI and query command completion
  2645. * @hba: per adapter instance
  2646. */
  2647. static void ufshcd_transfer_req_compl(struct ufs_hba *hba)
  2648. {
  2649. struct ufshcd_lrb *lrbp;
  2650. struct scsi_cmnd *cmd;
  2651. unsigned long completed_reqs;
  2652. u32 tr_doorbell;
  2653. int result;
  2654. int index;
  2655. /* Resetting interrupt aggregation counters first and reading the
  2656. * DOOR_BELL afterward allows us to handle all the completed requests.
  2657. * In order to prevent other interrupts starvation the DB is read once
  2658. * after reset. The down side of this solution is the possibility of
  2659. * false interrupt if device completes another request after resetting
  2660. * aggregation and before reading the DB.
  2661. */
  2662. ufshcd_reset_intr_aggr(hba);
  2663. tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
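/*
 * Bits that are set in outstanding_reqs but already cleared in the
 * doorbell register correspond to completed requests, hence the XOR.
 */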
  2664. completed_reqs = tr_doorbell ^ hba->outstanding_reqs;
  2665. for_each_set_bit(index, &completed_reqs, hba->nutrs) {
  2666. lrbp = &hba->lrb[index];
  2667. cmd = lrbp->cmd;
  2668. if (cmd) {
  2669. result = ufshcd_transfer_rsp_status(hba, lrbp);
  2670. scsi_dma_unmap(cmd);
  2671. cmd->result = result;
  2672. /* Mark completed command as NULL in LRB */
  2673. lrbp->cmd = NULL;
  2674. clear_bit_unlock(index, &hba->lrb_in_use);
  2675. /* Do not touch lrbp after scsi done */
  2676. cmd->scsi_done(cmd);
  2677. __ufshcd_release(hba);
  2678. } else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
  2679. if (hba->dev_cmd.complete)
  2680. complete(hba->dev_cmd.complete);
  2681. }
  2682. }
  2683. /* clear corresponding bits of completed commands */
  2684. hba->outstanding_reqs ^= completed_reqs;
  2685. ufshcd_clk_scaling_update_busy(hba);
  2686. /* we might have free'd some tags above */
  2687. wake_up(&hba->dev_cmd.tag_wq);
  2688. }
  2689. /**
  2690. * ufshcd_disable_ee - disable exception event
  2691. * @hba: per-adapter instance
  2692. * @mask: exception event to disable
  2693. *
  2694. * Disables exception event in the device so that the EVENT_ALERT
  2695. * bit is not set.
  2696. *
  2697. * Returns zero on success, non-zero error value on failure.
  2698. */
  2699. static int ufshcd_disable_ee(struct ufs_hba *hba, u16 mask)
  2700. {
  2701. int err = 0;
  2702. u32 val;
  2703. if (!(hba->ee_ctrl_mask & mask))
  2704. goto out;
  2705. val = hba->ee_ctrl_mask & ~mask;
  2706. val &= 0xFFFF; /* 2 bytes */
  2707. err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
  2708. QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
  2709. if (!err)
  2710. hba->ee_ctrl_mask &= ~mask;
  2711. out:
  2712. return err;
  2713. }
  2714. /**
  2715. * ufshcd_enable_ee - enable exception event
  2716. * @hba: per-adapter instance
  2717. * @mask: exception event to enable
  2718. *
  2719. * Enable corresponding exception event in the device to allow
  2720. * device to alert host in critical scenarios.
  2721. *
  2722. * Returns zero on success, non-zero error value on failure.
  2723. */
  2724. static int ufshcd_enable_ee(struct ufs_hba *hba, u16 mask)
  2725. {
  2726. int err = 0;
  2727. u32 val;
  2728. if (hba->ee_ctrl_mask & mask)
  2729. goto out;
  2730. val = hba->ee_ctrl_mask | mask;
  2731. val &= 0xFFFF; /* 2 bytes */
  2732. err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
  2733. QUERY_ATTR_IDN_EE_CONTROL, 0, 0, &val);
  2734. if (!err)
  2735. hba->ee_ctrl_mask |= mask;
  2736. out:
  2737. return err;
  2738. }
  2739. /**
  2740. * ufshcd_enable_auto_bkops - Allow device managed BKOPS
  2741. * @hba: per-adapter instance
  2742. *
  2743. * Allow device to manage background operations on its own. Enabling
  2744. * this might lead to inconsistent latencies during normal data transfers
  2745. * as the device is allowed to manage its own way of handling background
  2746. * operations.
  2747. *
  2748. * Returns zero on success, non-zero on failure.
  2749. */
  2750. static int ufshcd_enable_auto_bkops(struct ufs_hba *hba)
  2751. {
  2752. int err = 0;
  2753. if (hba->auto_bkops_enabled)
  2754. goto out;
  2755. err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_SET_FLAG,
  2756. QUERY_FLAG_IDN_BKOPS_EN, NULL);
  2757. if (err) {
  2758. dev_err(hba->dev, "%s: failed to enable bkops %d\n",
  2759. __func__, err);
  2760. goto out;
  2761. }
  2762. hba->auto_bkops_enabled = true;
  2763. /* No need of URGENT_BKOPS exception from the device */
  2764. err = ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
  2765. if (err)
  2766. dev_err(hba->dev, "%s: failed to disable exception event %d\n",
  2767. __func__, err);
  2768. out:
  2769. return err;
  2770. }
  2771. /**
2772. * ufshcd_disable_auto_bkops - block the device from doing background operations
  2773. * @hba: per-adapter instance
  2774. *
  2775. * Disabling background operations improves command response latency but
  2776. * has drawback of device moving into critical state where the device is
  2777. * not-operable. Make sure to call ufshcd_enable_auto_bkops() whenever the
  2778. * host is idle so that BKOPS are managed effectively without any negative
  2779. * impacts.
  2780. *
  2781. * Returns zero on success, non-zero on failure.
  2782. */
  2783. static int ufshcd_disable_auto_bkops(struct ufs_hba *hba)
  2784. {
  2785. int err = 0;
  2786. if (!hba->auto_bkops_enabled)
  2787. goto out;
  2788. /*
  2789. * If host assisted BKOPs is to be enabled, make sure
  2790. * urgent bkops exception is allowed.
  2791. */
  2792. err = ufshcd_enable_ee(hba, MASK_EE_URGENT_BKOPS);
  2793. if (err) {
  2794. dev_err(hba->dev, "%s: failed to enable exception event %d\n",
  2795. __func__, err);
  2796. goto out;
  2797. }
  2798. err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
  2799. QUERY_FLAG_IDN_BKOPS_EN, NULL);
  2800. if (err) {
  2801. dev_err(hba->dev, "%s: failed to disable bkops %d\n",
  2802. __func__, err);
  2803. ufshcd_disable_ee(hba, MASK_EE_URGENT_BKOPS);
  2804. goto out;
  2805. }
  2806. hba->auto_bkops_enabled = false;
  2807. out:
  2808. return err;
  2809. }
  2810. /**
  2811. * ufshcd_force_reset_auto_bkops - force enable of auto bkops
  2812. * @hba: per adapter instance
  2813. *
  2814. * After a device reset the device may toggle the BKOPS_EN flag
  2815. * to default value. The s/w tracking variables should be updated
  2816. * as well. Do this by forcing enable of auto bkops.
  2817. */
  2818. static void ufshcd_force_reset_auto_bkops(struct ufs_hba *hba)
  2819. {
  2820. hba->auto_bkops_enabled = false;
  2821. hba->ee_ctrl_mask |= MASK_EE_URGENT_BKOPS;
  2822. ufshcd_enable_auto_bkops(hba);
  2823. }
  2824. static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
  2825. {
  2826. return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
  2827. QUERY_ATTR_IDN_BKOPS_STATUS, 0, 0, status);
  2828. }
  2829. /**
  2830. * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
  2831. * @hba: per-adapter instance
  2832. * @status: bkops_status value
  2833. *
  2834. * Read the bkops_status from the UFS device and Enable fBackgroundOpsEn
  2835. * flag in the device to permit background operations if the device
  2836. * bkops_status is greater than or equal to "status" argument passed to
  2837. * this function, disable otherwise.
  2838. *
  2839. * Returns 0 for success, non-zero in case of failure.
  2840. *
  2841. * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
  2842. * to know whether auto bkops is enabled or disabled after this function
  2843. * returns control to it.
  2844. */
  2845. static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
  2846. enum bkops_status status)
  2847. {
  2848. int err;
  2849. u32 curr_status = 0;
  2850. err = ufshcd_get_bkops_status(hba, &curr_status);
  2851. if (err) {
  2852. dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
  2853. __func__, err);
  2854. goto out;
  2855. } else if (curr_status > BKOPS_STATUS_MAX) {
  2856. dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
  2857. __func__, curr_status);
  2858. err = -EINVAL;
  2859. goto out;
  2860. }
  2861. if (curr_status >= status)
  2862. err = ufshcd_enable_auto_bkops(hba);
  2863. else
  2864. err = ufshcd_disable_auto_bkops(hba);
  2865. out:
  2866. return err;
  2867. }
  2868. /**
  2869. * ufshcd_urgent_bkops - handle urgent bkops exception event
  2870. * @hba: per-adapter instance
  2871. *
  2872. * Enable fBackgroundOpsEn flag in the device to permit background
  2873. * operations.
  2874. *
2875. * If BKOPS is enabled, this function returns 0; it returns 1 if BKOPS is
2876. * not enabled, and a negative error value for any other failure.
  2877. */
  2878. static int ufshcd_urgent_bkops(struct ufs_hba *hba)
  2879. {
  2880. return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
  2881. }
  2882. static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
  2883. {
  2884. return ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
  2885. QUERY_ATTR_IDN_EE_STATUS, 0, 0, status);
  2886. }
  2887. /**
  2888. * ufshcd_exception_event_handler - handle exceptions raised by device
  2889. * @work: pointer to work data
  2890. *
  2891. * Read bExceptionEventStatus attribute from the device and handle the
  2892. * exception event accordingly.
  2893. */
  2894. static void ufshcd_exception_event_handler(struct work_struct *work)
  2895. {
  2896. struct ufs_hba *hba;
  2897. int err;
  2898. u32 status = 0;
  2899. hba = container_of(work, struct ufs_hba, eeh_work);
  2900. pm_runtime_get_sync(hba->dev);
  2901. err = ufshcd_get_ee_status(hba, &status);
  2902. if (err) {
  2903. dev_err(hba->dev, "%s: failed to get exception status %d\n",
  2904. __func__, err);
  2905. goto out;
  2906. }
  2907. status &= hba->ee_ctrl_mask;
  2908. if (status & MASK_EE_URGENT_BKOPS) {
  2909. err = ufshcd_urgent_bkops(hba);
  2910. if (err < 0)
  2911. dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
  2912. __func__, err);
  2913. }
  2914. out:
  2915. pm_runtime_put_sync(hba->dev);
  2916. return;
  2917. }
  2918. /**
  2919. * ufshcd_err_handler - handle UFS errors that require s/w attention
  2920. * @work: pointer to work structure
  2921. */
  2922. static void ufshcd_err_handler(struct work_struct *work)
  2923. {
  2924. struct ufs_hba *hba;
  2925. unsigned long flags;
  2926. u32 err_xfer = 0;
  2927. u32 err_tm = 0;
  2928. int err = 0;
  2929. int tag;
  2930. hba = container_of(work, struct ufs_hba, eh_work);
  2931. pm_runtime_get_sync(hba->dev);
  2932. ufshcd_hold(hba, false);
  2933. spin_lock_irqsave(hba->host->host_lock, flags);
  2934. if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
  2935. spin_unlock_irqrestore(hba->host->host_lock, flags);
  2936. goto out;
  2937. }
  2938. hba->ufshcd_state = UFSHCD_STATE_RESET;
  2939. ufshcd_set_eh_in_progress(hba);
  2940. /* Complete requests that have door-bell cleared by h/w */
  2941. ufshcd_transfer_req_compl(hba);
  2942. ufshcd_tmc_handler(hba);
  2943. spin_unlock_irqrestore(hba->host->host_lock, flags);
  2944. /* Clear pending transfer requests */
  2945. for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs)
  2946. if (ufshcd_clear_cmd(hba, tag))
  2947. err_xfer |= 1 << tag;
  2948. /* Clear pending task management requests */
  2949. for_each_set_bit(tag, &hba->outstanding_tasks, hba->nutmrs)
  2950. if (ufshcd_clear_tm_cmd(hba, tag))
  2951. err_tm |= 1 << tag;
  2952. /* Complete the requests that are cleared by s/w */
  2953. spin_lock_irqsave(hba->host->host_lock, flags);
  2954. ufshcd_transfer_req_compl(hba);
  2955. ufshcd_tmc_handler(hba);
  2956. spin_unlock_irqrestore(hba->host->host_lock, flags);
  2957. /* Fatal errors need reset */
  2958. if (err_xfer || err_tm || (hba->saved_err & INT_FATAL_ERRORS) ||
  2959. ((hba->saved_err & UIC_ERROR) &&
  2960. (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR))) {
  2961. err = ufshcd_reset_and_restore(hba);
  2962. if (err) {
  2963. dev_err(hba->dev, "%s: reset and restore failed\n",
  2964. __func__);
  2965. hba->ufshcd_state = UFSHCD_STATE_ERROR;
  2966. }
  2967. /*
  2968. * Inform scsi mid-layer that we did reset and allow to handle
  2969. * Unit Attention properly.
  2970. */
  2971. scsi_report_bus_reset(hba->host, 0);
  2972. hba->saved_err = 0;
  2973. hba->saved_uic_err = 0;
  2974. }
  2975. ufshcd_clear_eh_in_progress(hba);
  2976. out:
  2977. scsi_unblock_requests(hba->host);
  2978. ufshcd_release(hba);
  2979. pm_runtime_put_sync(hba->dev);
  2980. }
  2981. /**
  2982. * ufshcd_update_uic_error - check and set fatal UIC error flags.
  2983. * @hba: per-adapter instance
  2984. */
  2985. static void ufshcd_update_uic_error(struct ufs_hba *hba)
  2986. {
  2987. u32 reg;
  2988. /* PA_INIT_ERROR is fatal and needs UIC reset */
  2989. reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DATA_LINK_LAYER);
  2990. if (reg & UIC_DATA_LINK_LAYER_ERROR_PA_INIT)
  2991. hba->uic_error |= UFSHCD_UIC_DL_PA_INIT_ERROR;
  2992. /* UIC NL/TL/DME errors needs software retry */
  2993. reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_NETWORK_LAYER);
  2994. if (reg)
  2995. hba->uic_error |= UFSHCD_UIC_NL_ERROR;
  2996. reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_TRANSPORT_LAYER);
  2997. if (reg)
  2998. hba->uic_error |= UFSHCD_UIC_TL_ERROR;
  2999. reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_DME);
  3000. if (reg)
  3001. hba->uic_error |= UFSHCD_UIC_DME_ERROR;
  3002. dev_dbg(hba->dev, "%s: UIC error flags = 0x%08x\n",
  3003. __func__, hba->uic_error);
  3004. }
  3005. /**
  3006. * ufshcd_check_errors - Check for errors that need s/w attention
  3007. * @hba: per-adapter instance
  3008. */
  3009. static void ufshcd_check_errors(struct ufs_hba *hba)
  3010. {
  3011. bool queue_eh_work = false;
  3012. if (hba->errors & INT_FATAL_ERRORS)
  3013. queue_eh_work = true;
  3014. if (hba->errors & UIC_ERROR) {
  3015. hba->uic_error = 0;
  3016. ufshcd_update_uic_error(hba);
  3017. if (hba->uic_error)
  3018. queue_eh_work = true;
  3019. }
  3020. if (queue_eh_work) {
  3021. /* handle fatal errors only when link is functional */
  3022. if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
  3023. /* block commands from scsi mid-layer */
  3024. scsi_block_requests(hba->host);
  3025. /* transfer error masks to sticky bits */
  3026. hba->saved_err |= hba->errors;
  3027. hba->saved_uic_err |= hba->uic_error;
  3028. hba->ufshcd_state = UFSHCD_STATE_ERROR;
  3029. schedule_work(&hba->eh_work);
  3030. }
  3031. }
  3032. /*
  3033. * if (!queue_eh_work) -
  3034. * Other errors are either non-fatal where host recovers
  3035. * itself without s/w intervention or errors that will be
  3036. * handled by the SCSI core layer.
  3037. */
  3038. }
  3039. /**
  3040. * ufshcd_tmc_handler - handle task management function completion
  3041. * @hba: per adapter instance
  3042. */
  3043. static void ufshcd_tmc_handler(struct ufs_hba *hba)
  3044. {
  3045. u32 tm_doorbell;
  3046. tm_doorbell = ufshcd_readl(hba, REG_UTP_TASK_REQ_DOOR_BELL);
  3047. hba->tm_condition = tm_doorbell ^ hba->outstanding_tasks;
  3048. wake_up(&hba->tm_wq);
  3049. }
  3050. /**
  3051. * ufshcd_sl_intr - Interrupt service routine
  3052. * @hba: per adapter instance
  3053. * @intr_status: contains interrupts generated by the controller
  3054. */
  3055. static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
  3056. {
  3057. hba->errors = UFSHCD_ERROR_MASK & intr_status;
  3058. if (hba->errors)
  3059. ufshcd_check_errors(hba);
  3060. if (intr_status & UFSHCD_UIC_MASK)
  3061. ufshcd_uic_cmd_compl(hba, intr_status);
  3062. if (intr_status & UTP_TASK_REQ_COMPL)
  3063. ufshcd_tmc_handler(hba);
  3064. if (intr_status & UTP_TRANSFER_REQ_COMPL)
  3065. ufshcd_transfer_req_compl(hba);
  3066. }
  3067. /**
  3068. * ufshcd_intr - Main interrupt service routine
  3069. * @irq: irq number
  3070. * @__hba: pointer to adapter instance
  3071. *
  3072. * Returns IRQ_HANDLED - If interrupt is valid
  3073. * IRQ_NONE - If invalid interrupt
  3074. */
  3075. static irqreturn_t ufshcd_intr(int irq, void *__hba)
  3076. {
  3077. u32 intr_status;
  3078. irqreturn_t retval = IRQ_NONE;
  3079. struct ufs_hba *hba = __hba;
  3080. spin_lock(hba->host->host_lock);
  3081. intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
  3082. if (intr_status) {
  3083. ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
  3084. ufshcd_sl_intr(hba, intr_status);
  3085. retval = IRQ_HANDLED;
  3086. }
  3087. spin_unlock(hba->host->host_lock);
  3088. return retval;
  3089. }
  3090. static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag)
  3091. {
  3092. int err = 0;
  3093. u32 mask = 1 << tag;
  3094. unsigned long flags;
  3095. if (!test_bit(tag, &hba->outstanding_tasks))
  3096. goto out;
  3097. spin_lock_irqsave(hba->host->host_lock, flags);
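/*
 * The UTMRLCLR register clears a slot when its bit is written as '0',
 * so write the inverted mask to clear only this task's doorbell bit.
 */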
  3098. ufshcd_writel(hba, ~(1 << tag), REG_UTP_TASK_REQ_LIST_CLEAR);
  3099. spin_unlock_irqrestore(hba->host->host_lock, flags);
  3100. /* poll for max. 1 sec to clear door bell register by h/w */
  3101. err = ufshcd_wait_for_register(hba,
  3102. REG_UTP_TASK_REQ_DOOR_BELL,
  3103. mask, 0, 1000, 1000);
  3104. out:
  3105. return err;
  3106. }
  3107. /**
  3108. * ufshcd_issue_tm_cmd - issues task management commands to controller
  3109. * @hba: per adapter instance
  3110. * @lun_id: LUN ID to which TM command is sent
  3111. * @task_id: task ID to which the TM command is applicable
  3112. * @tm_function: task management function opcode
  3113. * @tm_response: task management service response return value
  3114. *
  3115. * Returns non-zero value on error, zero on success.
  3116. */
  3117. static int ufshcd_issue_tm_cmd(struct ufs_hba *hba, int lun_id, int task_id,
  3118. u8 tm_function, u8 *tm_response)
  3119. {
  3120. struct utp_task_req_desc *task_req_descp;
  3121. struct utp_upiu_task_req *task_req_upiup;
  3122. struct Scsi_Host *host;
  3123. unsigned long flags;
  3124. int free_slot;
  3125. int err;
  3126. int task_tag;
  3127. host = hba->host;
  3128. /*
  3129. * Get free slot, sleep if slots are unavailable.
  3130. * Even though we use wait_event() which sleeps indefinitely,
  3131. * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
  3132. */
  3133. wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
  3134. ufshcd_hold(hba, false);
  3135. spin_lock_irqsave(host->host_lock, flags);
  3136. task_req_descp = hba->utmrdl_base_addr;
  3137. task_req_descp += free_slot;
  3138. /* Configure task request descriptor */
  3139. task_req_descp->header.dword_0 = cpu_to_le32(UTP_REQ_DESC_INT_CMD);
  3140. task_req_descp->header.dword_2 =
  3141. cpu_to_le32(OCS_INVALID_COMMAND_STATUS);
  3142. /* Configure task request UPIU */
  3143. task_req_upiup =
  3144. (struct utp_upiu_task_req *) task_req_descp->task_req_upiu;
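/*
 * Use a task tag beyond the transfer request range (0..nutrs-1) so a TM
 * request never shares a tag with an outstanding SCSI/query command.
 */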
  3145. task_tag = hba->nutrs + free_slot;
  3146. task_req_upiup->header.dword_0 =
  3147. UPIU_HEADER_DWORD(UPIU_TRANSACTION_TASK_REQ, 0,
  3148. lun_id, task_tag);
  3149. task_req_upiup->header.dword_1 =
  3150. UPIU_HEADER_DWORD(0, tm_function, 0, 0);
  3151. /*
  3152. * The host shall provide the same value for LUN field in the basic
  3153. * header and for Input Parameter.
  3154. */
  3155. task_req_upiup->input_param1 = cpu_to_be32(lun_id);
  3156. task_req_upiup->input_param2 = cpu_to_be32(task_id);
  3157. /* send command to the controller */
  3158. __set_bit(free_slot, &hba->outstanding_tasks);
  3159. ufshcd_writel(hba, 1 << free_slot, REG_UTP_TASK_REQ_DOOR_BELL);
  3160. spin_unlock_irqrestore(host->host_lock, flags);
  3161. /* wait until the task management command is completed */
  3162. err = wait_event_timeout(hba->tm_wq,
  3163. test_bit(free_slot, &hba->tm_condition),
  3164. msecs_to_jiffies(TM_CMD_TIMEOUT));
  3165. if (!err) {
  3166. dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
  3167. __func__, tm_function);
  3168. if (ufshcd_clear_tm_cmd(hba, free_slot))
3169. dev_WARN(hba->dev, "%s: unable to clear tm cmd (slot %d) after timeout\n",
  3170. __func__, free_slot);
  3171. err = -ETIMEDOUT;
  3172. } else {
  3173. err = ufshcd_task_req_compl(hba, free_slot, tm_response);
  3174. }
  3175. clear_bit(free_slot, &hba->tm_condition);
  3176. ufshcd_put_tm_slot(hba, free_slot);
  3177. wake_up(&hba->tm_tag_wq);
  3178. ufshcd_release(hba);
  3179. return err;
  3180. }
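/*
 * Usage sketch (illustrative only, not part of the driver): a logical
 * unit reset can be requested along these lines, as the device reset
 * handler below does:
 *
 *	u8 resp = 0xF;
 *	int err = ufshcd_issue_tm_cmd(hba, lun, 0, UFS_LOGICAL_RESET, &resp);
 *
 *	if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL)
 *		reset_succeeded = true;
 *
 * Any other combination of err/resp is treated as a failure.
 */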
  3181. /**
  3182. * ufshcd_eh_device_reset_handler - device reset handler registered to
  3183. * scsi layer.
  3184. * @cmd: SCSI command pointer
  3185. *
  3186. * Returns SUCCESS/FAILED
  3187. */
  3188. static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd)
  3189. {
  3190. struct Scsi_Host *host;
  3191. struct ufs_hba *hba;
  3192. unsigned int tag;
  3193. u32 pos;
  3194. int err;
  3195. u8 resp = 0xF;
  3196. struct ufshcd_lrb *lrbp;
  3197. unsigned long flags;
  3198. host = cmd->device->host;
  3199. hba = shost_priv(host);
  3200. tag = cmd->request->tag;
  3201. lrbp = &hba->lrb[tag];
  3202. err = ufshcd_issue_tm_cmd(hba, lrbp->lun, 0, UFS_LOGICAL_RESET, &resp);
  3203. if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
  3204. if (!err)
  3205. err = resp;
  3206. goto out;
  3207. }
  3208. /* clear the commands that were pending for corresponding LUN */
  3209. for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) {
  3210. if (hba->lrb[pos].lun == lrbp->lun) {
  3211. err = ufshcd_clear_cmd(hba, pos);
  3212. if (err)
  3213. break;
  3214. }
  3215. }
  3216. spin_lock_irqsave(host->host_lock, flags);
  3217. ufshcd_transfer_req_compl(hba);
  3218. spin_unlock_irqrestore(host->host_lock, flags);
  3219. out:
  3220. if (!err) {
  3221. err = SUCCESS;
  3222. } else {
  3223. dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
  3224. err = FAILED;
  3225. }
  3226. return err;
  3227. }
  3228. /**
  3229. * ufshcd_abort - abort a specific command
  3230. * @cmd: SCSI command pointer
  3231. *
  3232. * Abort the pending command in device by sending UFS_ABORT_TASK task management
3233. * command, and in host controller by clearing the door-bell register. There can
3234. * be a race when the controller sends the command to the device while the abort
3235. * is issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
  3236. * really issued and then try to abort it.
  3237. *
  3238. * Returns SUCCESS/FAILED
  3239. */
  3240. static int ufshcd_abort(struct scsi_cmnd *cmd)
  3241. {
  3242. struct Scsi_Host *host;
  3243. struct ufs_hba *hba;
  3244. unsigned long flags;
  3245. unsigned int tag;
  3246. int err = 0;
  3247. int poll_cnt;
  3248. u8 resp = 0xF;
  3249. struct ufshcd_lrb *lrbp;
  3250. u32 reg;
  3251. host = cmd->device->host;
  3252. hba = shost_priv(host);
  3253. tag = cmd->request->tag;
  3254. ufshcd_hold(hba, false);
  3255. /* If command is already aborted/completed, return SUCCESS */
  3256. if (!(test_bit(tag, &hba->outstanding_reqs)))
  3257. goto out;
  3258. reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
  3259. if (!(reg & (1 << tag))) {
  3260. dev_err(hba->dev,
  3261. "%s: cmd was completed, but without a notifying intr, tag = %d",
  3262. __func__, tag);
  3263. }
  3264. lrbp = &hba->lrb[tag];
  3265. for (poll_cnt = 100; poll_cnt; poll_cnt--) {
  3266. err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
  3267. UFS_QUERY_TASK, &resp);
  3268. if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
  3269. /* cmd pending in the device */
  3270. break;
  3271. } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
  3272. /*
  3273. * cmd not pending in the device, check if it is
  3274. * in transition.
  3275. */
  3276. reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
  3277. if (reg & (1 << tag)) {
  3278. /* sleep for max. 200us to stabilize */
  3279. usleep_range(100, 200);
  3280. continue;
  3281. }
  3282. /* command completed already */
  3283. goto out;
  3284. } else {
  3285. if (!err)
  3286. err = resp; /* service response error */
  3287. goto out;
  3288. }
  3289. }
  3290. if (!poll_cnt) {
  3291. err = -EBUSY;
  3292. goto out;
  3293. }
  3294. err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
  3295. UFS_ABORT_TASK, &resp);
  3296. if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
  3297. if (!err)
  3298. err = resp; /* service response error */
  3299. goto out;
  3300. }
  3301. err = ufshcd_clear_cmd(hba, tag);
  3302. if (err)
  3303. goto out;
  3304. scsi_dma_unmap(cmd);
  3305. spin_lock_irqsave(host->host_lock, flags);
  3306. __clear_bit(tag, &hba->outstanding_reqs);
  3307. hba->lrb[tag].cmd = NULL;
  3308. spin_unlock_irqrestore(host->host_lock, flags);
  3309. clear_bit_unlock(tag, &hba->lrb_in_use);
  3310. wake_up(&hba->dev_cmd.tag_wq);
  3311. out:
  3312. if (!err) {
  3313. err = SUCCESS;
  3314. } else {
  3315. dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
  3316. err = FAILED;
  3317. }
  3318. /*
  3319. * This ufshcd_release() corresponds to the original scsi cmd that got
  3320. * aborted here (as we won't get any IRQ for it).
  3321. */
  3322. ufshcd_release(hba);
  3323. return err;
  3324. }
  3325. /**
  3326. * ufshcd_host_reset_and_restore - reset and restore host controller
  3327. * @hba: per-adapter instance
  3328. *
  3329. * Note that host controller reset may issue DME_RESET to
  3330. * local and remote (device) Uni-Pro stack and the attributes
  3331. * are reset to default state.
  3332. *
  3333. * Returns zero on success, non-zero on failure
  3334. */
  3335. static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
  3336. {
  3337. int err;
  3338. unsigned long flags;
  3339. /* Reset the host controller */
  3340. spin_lock_irqsave(hba->host->host_lock, flags);
  3341. ufshcd_hba_stop(hba);
  3342. spin_unlock_irqrestore(hba->host->host_lock, flags);
  3343. err = ufshcd_hba_enable(hba);
  3344. if (err)
  3345. goto out;
  3346. /* Establish the link again and restore the device */
  3347. err = ufshcd_probe_hba(hba);
  3348. if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
  3349. err = -EIO;
  3350. out:
  3351. if (err)
  3352. dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
  3353. return err;
  3354. }
  3355. /**
  3356. * ufshcd_reset_and_restore - reset and re-initialize host/device
  3357. * @hba: per-adapter instance
  3358. *
  3359. * Reset and recover device, host and re-establish link. This
  3360. * is helpful to recover the communication in fatal error conditions.
  3361. *
  3362. * Returns zero on success, non-zero on failure
  3363. */
  3364. static int ufshcd_reset_and_restore(struct ufs_hba *hba)
  3365. {
  3366. int err = 0;
  3367. unsigned long flags;
  3368. int retries = MAX_HOST_RESET_RETRIES;
  3369. do {
  3370. err = ufshcd_host_reset_and_restore(hba);
  3371. } while (err && --retries);
  3372. /*
  3373. * After reset the door-bell might be cleared, complete
  3374. * outstanding requests in s/w here.
  3375. */
  3376. spin_lock_irqsave(hba->host->host_lock, flags);
  3377. ufshcd_transfer_req_compl(hba);
  3378. ufshcd_tmc_handler(hba);
  3379. spin_unlock_irqrestore(hba->host->host_lock, flags);
  3380. return err;
  3381. }
  3382. /**
  3383. * ufshcd_eh_host_reset_handler - host reset handler registered to scsi layer
3384. * @cmd: SCSI command pointer
  3385. *
  3386. * Returns SUCCESS/FAILED
  3387. */
  3388. static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
  3389. {
  3390. int err;
  3391. unsigned long flags;
  3392. struct ufs_hba *hba;
  3393. hba = shost_priv(cmd->device->host);
  3394. ufshcd_hold(hba, false);
  3395. /*
  3396. * Check if there is any race with fatal error handling.
  3397. * If so, wait for it to complete. Even though fatal error
  3398. * handling does reset and restore in some cases, don't assume
  3399. * anything out of it. We are just avoiding race here.
  3400. */
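/* Note: the loop below is left with host_lock held when it breaks out. */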
  3401. do {
  3402. spin_lock_irqsave(hba->host->host_lock, flags);
  3403. if (!(work_pending(&hba->eh_work) ||
  3404. hba->ufshcd_state == UFSHCD_STATE_RESET))
  3405. break;
  3406. spin_unlock_irqrestore(hba->host->host_lock, flags);
  3407. dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
  3408. flush_work(&hba->eh_work);
  3409. } while (1);
  3410. hba->ufshcd_state = UFSHCD_STATE_RESET;
  3411. ufshcd_set_eh_in_progress(hba);
  3412. spin_unlock_irqrestore(hba->host->host_lock, flags);
  3413. err = ufshcd_reset_and_restore(hba);
  3414. spin_lock_irqsave(hba->host->host_lock, flags);
  3415. if (!err) {
  3416. err = SUCCESS;
  3417. hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
  3418. } else {
  3419. err = FAILED;
  3420. hba->ufshcd_state = UFSHCD_STATE_ERROR;
  3421. }
  3422. ufshcd_clear_eh_in_progress(hba);
  3423. spin_unlock_irqrestore(hba->host->host_lock, flags);
  3424. ufshcd_release(hba);
  3425. return err;
  3426. }
  3427. /**
  3428. * ufshcd_get_max_icc_level - calculate the ICC level
  3429. * @sup_curr_uA: max. current supported by the regulator
  3430. * @start_scan: row at the desc table to start scan from
  3431. * @buff: power descriptor buffer
  3432. *
  3433. * Returns calculated max ICC level for specific regulator
  3434. */
  3435. static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
  3436. {
  3437. int i;
  3438. int curr_uA;
  3439. u16 data;
  3440. u16 unit;
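/*
 * Each 16-bit power descriptor entry encodes a unit (nA/uA/mA/A) and a
 * value; normalize the value to uA before comparing it with the
 * regulator's supported current. Scan from the highest level downwards
 * and return the first level that fits within sup_curr_uA.
 */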
  3441. for (i = start_scan; i >= 0; i--) {
  3442. data = be16_to_cpu(*((u16 *)(buff + 2*i)));
  3443. unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
  3444. ATTR_ICC_LVL_UNIT_OFFSET;
  3445. curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
  3446. switch (unit) {
  3447. case UFSHCD_NANO_AMP:
  3448. curr_uA = curr_uA / 1000;
  3449. break;
  3450. case UFSHCD_MILI_AMP:
  3451. curr_uA = curr_uA * 1000;
  3452. break;
  3453. case UFSHCD_AMP:
  3454. curr_uA = curr_uA * 1000 * 1000;
  3455. break;
  3456. case UFSHCD_MICRO_AMP:
  3457. default:
  3458. break;
  3459. }
  3460. if (sup_curr_uA >= curr_uA)
  3461. break;
  3462. }
  3463. if (i < 0) {
  3464. i = 0;
  3465. pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
  3466. }
  3467. return (u32)i;
  3468. }
  3469. /**
3470. * ufshcd_find_max_sup_active_icc_level - calculate the max ICC level
3471. * @hba: per-adapter instance
3472. * @desc_buf: power descriptor buffer to extract ICC levels from.
3473. * @len: length of desc_buf
3474. *
3475. * Returns the calculated ICC level. In case the regulators are not
3476. * initialized, 0 is returned.
  3477. */
  3478. static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
  3479. u8 *desc_buf, int len)
  3480. {
  3481. u32 icc_level = 0;
  3482. if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
  3483. !hba->vreg_info.vccq2) {
  3484. dev_err(hba->dev,
  3485. "%s: Regulator capability was not set, actvIccLevel=%d",
  3486. __func__, icc_level);
  3487. goto out;
  3488. }
  3489. if (hba->vreg_info.vcc)
  3490. icc_level = ufshcd_get_max_icc_level(
  3491. hba->vreg_info.vcc->max_uA,
  3492. POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
  3493. &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
  3494. if (hba->vreg_info.vccq)
  3495. icc_level = ufshcd_get_max_icc_level(
  3496. hba->vreg_info.vccq->max_uA,
  3497. icc_level,
  3498. &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
  3499. if (hba->vreg_info.vccq2)
  3500. icc_level = ufshcd_get_max_icc_level(
  3501. hba->vreg_info.vccq2->max_uA,
  3502. icc_level,
  3503. &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
  3504. out:
  3505. return icc_level;
  3506. }
  3507. static void ufshcd_init_icc_levels(struct ufs_hba *hba)
  3508. {
  3509. int ret;
  3510. int buff_len = QUERY_DESC_POWER_MAX_SIZE;
  3511. u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
  3512. ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
  3513. if (ret) {
  3514. dev_err(hba->dev,
  3515. "%s: Failed reading power descriptor.len = %d ret = %d",
  3516. __func__, buff_len, ret);
  3517. return;
  3518. }
  3519. hba->init_prefetch_data.icc_level =
  3520. ufshcd_find_max_sup_active_icc_level(hba,
  3521. desc_buf, buff_len);
  3522. dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
  3523. __func__, hba->init_prefetch_data.icc_level);
  3524. ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
  3525. QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
  3526. &hba->init_prefetch_data.icc_level);
  3527. if (ret)
  3528. dev_err(hba->dev,
  3529. "%s: Failed configuring bActiveICCLevel = %d ret = %d",
3530. __func__, hba->init_prefetch_data.icc_level, ret);
  3531. }
  3532. /**
  3533. * ufshcd_scsi_add_wlus - Adds required W-LUs
  3534. * @hba: per-adapter instance
  3535. *
  3536. * UFS device specification requires the UFS devices to support 4 well known
  3537. * logical units:
  3538. * "REPORT_LUNS" (address: 01h)
  3539. * "UFS Device" (address: 50h)
  3540. * "RPMB" (address: 44h)
  3541. * "BOOT" (address: 30h)
  3542. * UFS device's power management needs to be controlled by "POWER CONDITION"
  3543. * field of SSU (START STOP UNIT) command. But this "power condition" field
3544. * will take effect only when it's sent to "UFS device" well known logical unit
  3545. * hence we require the scsi_device instance to represent this logical unit in
  3546. * order for the UFS host driver to send the SSU command for power management.
  3547. * We also require the scsi_device instance for "RPMB" (Replay Protected Memory
  3548. * Block) LU so user space process can control this LU. User space may also
  3549. * want to have access to BOOT LU.
  3550. * This function adds scsi device instances for each of all well known LUs
  3551. * (except "REPORT LUNS" LU).
  3552. *
  3553. * Returns zero on success (all required W-LUs are added successfully),
  3554. * non-zero error value on failure (if failed to add any of the required W-LU).
  3555. */
  3556. static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
  3557. {
  3558. int ret = 0;
  3559. hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
  3560. ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
  3561. if (IS_ERR(hba->sdev_ufs_device)) {
  3562. ret = PTR_ERR(hba->sdev_ufs_device);
  3563. hba->sdev_ufs_device = NULL;
  3564. goto out;
  3565. }
  3566. hba->sdev_boot = __scsi_add_device(hba->host, 0, 0,
  3567. ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
  3568. if (IS_ERR(hba->sdev_boot)) {
  3569. ret = PTR_ERR(hba->sdev_boot);
  3570. hba->sdev_boot = NULL;
  3571. goto remove_sdev_ufs_device;
  3572. }
  3573. hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
  3574. ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
  3575. if (IS_ERR(hba->sdev_rpmb)) {
  3576. ret = PTR_ERR(hba->sdev_rpmb);
  3577. hba->sdev_rpmb = NULL;
  3578. goto remove_sdev_boot;
  3579. }
  3580. goto out;
  3581. remove_sdev_boot:
  3582. scsi_remove_device(hba->sdev_boot);
  3583. remove_sdev_ufs_device:
  3584. scsi_remove_device(hba->sdev_ufs_device);
  3585. out:
  3586. return ret;
  3587. }
  3588. /**
  3589. * ufshcd_scsi_remove_wlus - Removes the W-LUs which were added by
  3590. * ufshcd_scsi_add_wlus()
  3591. * @hba: per-adapter instance
  3592. *
  3593. */
  3594. static void ufshcd_scsi_remove_wlus(struct ufs_hba *hba)
  3595. {
  3596. if (hba->sdev_ufs_device) {
  3597. scsi_remove_device(hba->sdev_ufs_device);
  3598. hba->sdev_ufs_device = NULL;
  3599. }
  3600. if (hba->sdev_boot) {
  3601. scsi_remove_device(hba->sdev_boot);
  3602. hba->sdev_boot = NULL;
  3603. }
  3604. if (hba->sdev_rpmb) {
  3605. scsi_remove_device(hba->sdev_rpmb);
  3606. hba->sdev_rpmb = NULL;
  3607. }
  3608. }
  3609. /**
  3610. * ufshcd_probe_hba - probe hba to detect device and initialize
  3611. * @hba: per-adapter instance
  3612. *
  3613. * Execute link-startup and verify device initialization
  3614. */
  3615. static int ufshcd_probe_hba(struct ufs_hba *hba)
  3616. {
  3617. int ret;
  3618. ret = ufshcd_link_startup(hba);
  3619. if (ret)
  3620. goto out;
  3621. /* UniPro link is active now */
  3622. ufshcd_set_link_active(hba);
  3623. ret = ufshcd_verify_dev_init(hba);
  3624. if (ret)
  3625. goto out;
  3626. ret = ufshcd_complete_dev_init(hba);
  3627. if (ret)
  3628. goto out;
  3629. /* UFS device is also active now */
  3630. ufshcd_set_ufs_dev_active(hba);
  3631. ufshcd_force_reset_auto_bkops(hba);
  3632. hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
  3633. hba->wlun_dev_clr_ua = true;
  3634. if (ufshcd_get_max_pwr_mode(hba)) {
  3635. dev_err(hba->dev,
  3636. "%s: Failed getting max supported power mode\n",
  3637. __func__);
  3638. } else {
  3639. ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
  3640. if (ret)
  3641. dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
  3642. __func__, ret);
  3643. }
  3644. /*
  3645. * If we are in error handling context or in power management callbacks
  3646. * context, no need to scan the host
  3647. */
  3648. if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
  3649. bool flag;
  3650. /* clear any previous UFS device information */
  3651. memset(&hba->dev_info, 0, sizeof(hba->dev_info));
  3652. if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
  3653. QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
  3654. hba->dev_info.f_power_on_wp_en = flag;
  3655. if (!hba->is_init_prefetch)
  3656. ufshcd_init_icc_levels(hba);
  3657. /* Add required well known logical units to scsi mid layer */
  3658. if (ufshcd_scsi_add_wlus(hba))
  3659. goto out;
  3660. scsi_scan_host(hba->host);
  3661. pm_runtime_put_sync(hba->dev);
  3662. }
  3663. if (!hba->is_init_prefetch)
  3664. hba->is_init_prefetch = true;
  3665. /* Resume devfreq after UFS device is detected */
  3666. if (ufshcd_is_clkscaling_enabled(hba))
  3667. devfreq_resume_device(hba->devfreq);
  3668. out:
  3669. /*
  3670. * If we failed to initialize the device or the device is not
  3671. * present, turn off the power/clocks etc.
  3672. */
  3673. if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
  3674. pm_runtime_put_sync(hba->dev);
  3675. ufshcd_hba_exit(hba);
  3676. }
  3677. return ret;
  3678. }
  3679. /**
  3680. * ufshcd_async_scan - asynchronous execution for probing hba
  3681. * @data: data pointer to pass to this function
  3682. * @cookie: cookie data
  3683. */
  3684. static void ufshcd_async_scan(void *data, async_cookie_t cookie)
  3685. {
  3686. struct ufs_hba *hba = (struct ufs_hba *)data;
  3687. ufshcd_probe_hba(hba);
  3688. }
  3689. static struct scsi_host_template ufshcd_driver_template = {
  3690. .module = THIS_MODULE,
  3691. .name = UFSHCD,
  3692. .proc_name = UFSHCD,
  3693. .queuecommand = ufshcd_queuecommand,
  3694. .slave_alloc = ufshcd_slave_alloc,
  3695. .slave_configure = ufshcd_slave_configure,
  3696. .slave_destroy = ufshcd_slave_destroy,
  3697. .change_queue_depth = ufshcd_change_queue_depth,
  3698. .eh_abort_handler = ufshcd_abort,
  3699. .eh_device_reset_handler = ufshcd_eh_device_reset_handler,
  3700. .eh_host_reset_handler = ufshcd_eh_host_reset_handler,
  3701. .this_id = -1,
  3702. .sg_tablesize = SG_ALL,
  3703. .cmd_per_lun = UFSHCD_CMD_PER_LUN,
  3704. .can_queue = UFSHCD_CAN_QUEUE,
  3705. .max_host_blocked = 1,
  3706. .use_blk_tags = 1,
  3707. };
  3708. static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
  3709. int ua)
  3710. {
  3711. int ret = 0;
  3712. struct regulator *reg = vreg->reg;
  3713. const char *name = vreg->name;
  3714. BUG_ON(!vreg);
  3715. ret = regulator_set_optimum_mode(reg, ua);
  3716. if (ret >= 0) {
  3717. /*
  3718. * regulator_set_optimum_mode() returns new regulator
  3719. * mode upon success.
  3720. */
  3721. ret = 0;
  3722. } else {
  3723. dev_err(dev, "%s: %s set optimum mode(ua=%d) failed, err=%d\n",
  3724. __func__, name, ua, ret);
  3725. }
  3726. return ret;
  3727. }
  3728. static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
  3729. struct ufs_vreg *vreg)
  3730. {
  3731. return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
  3732. }
  3733. static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
  3734. struct ufs_vreg *vreg)
  3735. {
  3736. return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
  3737. }
  3738. static int ufshcd_config_vreg(struct device *dev,
  3739. struct ufs_vreg *vreg, bool on)
  3740. {
  3741. int ret = 0;
  3742. struct regulator *reg = vreg->reg;
  3743. const char *name = vreg->name;
  3744. int min_uV, uA_load;
  3745. BUG_ON(!vreg);
  3746. if (regulator_count_voltages(reg) > 0) {
  3747. min_uV = on ? vreg->min_uV : 0;
  3748. ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
  3749. if (ret) {
  3750. dev_err(dev, "%s: %s set voltage failed, err=%d\n",
  3751. __func__, name, ret);
  3752. goto out;
  3753. }
  3754. uA_load = on ? vreg->max_uA : 0;
  3755. ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
  3756. if (ret)
  3757. goto out;
  3758. }
  3759. out:
  3760. return ret;
  3761. }
  3762. static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
  3763. {
  3764. int ret = 0;
  3765. if (!vreg || vreg->enabled)
  3766. goto out;
  3767. ret = ufshcd_config_vreg(dev, vreg, true);
  3768. if (!ret)
  3769. ret = regulator_enable(vreg->reg);
  3770. if (!ret)
  3771. vreg->enabled = true;
  3772. else
  3773. dev_err(dev, "%s: %s enable failed, err=%d\n",
  3774. __func__, vreg->name, ret);
  3775. out:
  3776. return ret;
  3777. }
  3778. static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
  3779. {
  3780. int ret = 0;
  3781. if (!vreg || !vreg->enabled)
  3782. goto out;
  3783. ret = regulator_disable(vreg->reg);
  3784. if (!ret) {
  3785. /* ignore errors on applying disable config */
  3786. ufshcd_config_vreg(dev, vreg, false);
  3787. vreg->enabled = false;
  3788. } else {
  3789. dev_err(dev, "%s: %s disable failed, err=%d\n",
  3790. __func__, vreg->name, ret);
  3791. }
  3792. out:
  3793. return ret;
  3794. }
  3795. static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
  3796. {
  3797. int ret = 0;
  3798. struct device *dev = hba->dev;
  3799. struct ufs_vreg_info *info = &hba->vreg_info;
  3800. if (!info)
  3801. goto out;
  3802. ret = ufshcd_toggle_vreg(dev, info->vcc, on);
  3803. if (ret)
  3804. goto out;
  3805. ret = ufshcd_toggle_vreg(dev, info->vccq, on);
  3806. if (ret)
  3807. goto out;
  3808. ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
  3809. if (ret)
  3810. goto out;
  3811. out:
  3812. if (ret) {
  3813. ufshcd_toggle_vreg(dev, info->vccq2, false);
  3814. ufshcd_toggle_vreg(dev, info->vccq, false);
  3815. ufshcd_toggle_vreg(dev, info->vcc, false);
  3816. }
  3817. return ret;
  3818. }
  3819. static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
  3820. {
  3821. struct ufs_vreg_info *info = &hba->vreg_info;
  3822. if (info)
  3823. return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
  3824. return 0;
  3825. }
  3826. static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
  3827. {
  3828. int ret = 0;
  3829. if (!vreg)
  3830. goto out;
  3831. vreg->reg = devm_regulator_get(dev, vreg->name);
  3832. if (IS_ERR(vreg->reg)) {
  3833. ret = PTR_ERR(vreg->reg);
  3834. dev_err(dev, "%s: %s get failed, err=%d\n",
  3835. __func__, vreg->name, ret);
  3836. }
  3837. out:
  3838. return ret;
  3839. }
  3840. static int ufshcd_init_vreg(struct ufs_hba *hba)
  3841. {
  3842. int ret = 0;
  3843. struct device *dev = hba->dev;
  3844. struct ufs_vreg_info *info = &hba->vreg_info;
  3845. if (!info)
  3846. goto out;
  3847. ret = ufshcd_get_vreg(dev, info->vcc);
  3848. if (ret)
  3849. goto out;
  3850. ret = ufshcd_get_vreg(dev, info->vccq);
  3851. if (ret)
  3852. goto out;
  3853. ret = ufshcd_get_vreg(dev, info->vccq2);
  3854. out:
  3855. return ret;
  3856. }
  3857. static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
  3858. {
  3859. struct ufs_vreg_info *info = &hba->vreg_info;
  3860. if (info)
  3861. return ufshcd_get_vreg(hba->dev, info->vdd_hba);
  3862. return 0;
  3863. }
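/*
 * Helper to gate the host clocks on/off. When @skip_ref_clk is true,
 * every clock except the device reference clock ("ref_clk") is toggled;
 * the reference clock may need to stay on while the UFS link is kept
 * active (typically in the suspend path).
 */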
  3864. static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
  3865. bool skip_ref_clk)
  3866. {
  3867. int ret = 0;
  3868. struct ufs_clk_info *clki;
  3869. struct list_head *head = &hba->clk_list_head;
  3870. unsigned long flags;
  3871. if (!head || list_empty(head))
  3872. goto out;
  3873. list_for_each_entry(clki, head, list) {
  3874. if (!IS_ERR_OR_NULL(clki->clk)) {
  3875. if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
  3876. continue;
  3877. if (on && !clki->enabled) {
  3878. ret = clk_prepare_enable(clki->clk);
  3879. if (ret) {
  3880. dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
  3881. __func__, clki->name, ret);
  3882. goto out;
  3883. }
  3884. } else if (!on && clki->enabled) {
  3885. clk_disable_unprepare(clki->clk);
  3886. }
  3887. clki->enabled = on;
  3888. dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
  3889. clki->name, on ? "en" : "dis");
  3890. }
  3891. }
  3892. if (hba->vops && hba->vops->setup_clocks)
  3893. ret = hba->vops->setup_clocks(hba, on);
  3894. out:
  3895. if (ret) {
  3896. list_for_each_entry(clki, head, list) {
  3897. if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
  3898. clk_disable_unprepare(clki->clk);
  3899. }
  3900. } else if (!ret && on) {
  3901. spin_lock_irqsave(hba->host->host_lock, flags);
  3902. hba->clk_gating.state = CLKS_ON;
  3903. spin_unlock_irqrestore(hba->host->host_lock, flags);
  3904. }
  3905. return ret;
  3906. }
  3907. static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
  3908. {
  3909. return __ufshcd_setup_clocks(hba, on, false);
  3910. }
  3911. static int ufshcd_init_clocks(struct ufs_hba *hba)
  3912. {
  3913. int ret = 0;
  3914. struct ufs_clk_info *clki;
  3915. struct device *dev = hba->dev;
  3916. struct list_head *head = &hba->clk_list_head;
  3917. if (!head || list_empty(head))
  3918. goto out;
  3919. list_for_each_entry(clki, head, list) {
  3920. if (!clki->name)
  3921. continue;
  3922. clki->clk = devm_clk_get(dev, clki->name);
  3923. if (IS_ERR(clki->clk)) {
  3924. ret = PTR_ERR(clki->clk);
  3925. dev_err(dev, "%s: %s clk get failed, %d\n",
  3926. __func__, clki->name, ret);
  3927. goto out;
  3928. }
  3929. if (clki->max_freq) {
  3930. ret = clk_set_rate(clki->clk, clki->max_freq);
  3931. if (ret) {
  3932. dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
  3933. __func__, clki->name,
  3934. clki->max_freq, ret);
  3935. goto out;
  3936. }
  3937. clki->curr_freq = clki->max_freq;
  3938. }
  3939. dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
  3940. clki->name, clk_get_rate(clki->clk));
  3941. }
  3942. out:
  3943. return ret;
  3944. }
  3945. static int ufshcd_variant_hba_init(struct ufs_hba *hba)
  3946. {
  3947. int err = 0;
  3948. if (!hba->vops)
  3949. goto out;
  3950. if (hba->vops->init) {
  3951. err = hba->vops->init(hba);
  3952. if (err)
  3953. goto out;
  3954. }
  3955. if (hba->vops->setup_regulators) {
  3956. err = hba->vops->setup_regulators(hba, true);
  3957. if (err)
  3958. goto out_exit;
  3959. }
  3960. goto out;
  3961. out_exit:
  3962. if (hba->vops->exit)
  3963. hba->vops->exit(hba);
  3964. out:
  3965. if (err)
  3966. dev_err(hba->dev, "%s: variant %s init failed err %d\n",
  3967. __func__, hba->vops ? hba->vops->name : "", err);
  3968. return err;
  3969. }
  3970. static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
  3971. {
  3972. if (!hba->vops)
  3973. return;
  3974. if (hba->vops->setup_clocks)
  3975. hba->vops->setup_clocks(hba, false);
  3976. if (hba->vops->setup_regulators)
  3977. hba->vops->setup_regulators(hba, false);
  3978. if (hba->vops->exit)
  3979. hba->vops->exit(hba);
  3980. }
  3981. static int ufshcd_hba_init(struct ufs_hba *hba)
  3982. {
  3983. int err;
  3984. /*
  3985. * Handle host controller power separately from the UFS device power
  3986. * rails as it will help controlling the UFS host controller power
  3987. * collapse easily which is different than UFS device power collapse.
  3988. * Also, enable the host controller power before we go ahead with rest
  3989. * of the initialization here.
  3990. */
  3991. err = ufshcd_init_hba_vreg(hba);
  3992. if (err)
  3993. goto out;
  3994. err = ufshcd_setup_hba_vreg(hba, true);
  3995. if (err)
  3996. goto out;
  3997. err = ufshcd_init_clocks(hba);
  3998. if (err)
  3999. goto out_disable_hba_vreg;
  4000. err = ufshcd_setup_clocks(hba, true);
  4001. if (err)
  4002. goto out_disable_hba_vreg;
  4003. err = ufshcd_init_vreg(hba);
  4004. if (err)
  4005. goto out_disable_clks;
  4006. err = ufshcd_setup_vreg(hba, true);
  4007. if (err)
  4008. goto out_disable_clks;
  4009. err = ufshcd_variant_hba_init(hba);
  4010. if (err)
  4011. goto out_disable_vreg;
  4012. hba->is_powered = true;
  4013. goto out;
  4014. out_disable_vreg:
  4015. ufshcd_setup_vreg(hba, false);
  4016. out_disable_clks:
  4017. ufshcd_setup_clocks(hba, false);
  4018. out_disable_hba_vreg:
  4019. ufshcd_setup_hba_vreg(hba, false);
  4020. out:
  4021. return err;
  4022. }
  4023. static void ufshcd_hba_exit(struct ufs_hba *hba)
  4024. {
  4025. if (hba->is_powered) {
  4026. ufshcd_variant_hba_exit(hba);
  4027. ufshcd_setup_vreg(hba, false);
  4028. ufshcd_setup_clocks(hba, false);
  4029. ufshcd_setup_hba_vreg(hba, false);
  4030. hba->is_powered = false;
  4031. }
  4032. }
  4033. static int
  4034. ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
  4035. {
  4036. unsigned char cmd[6] = {REQUEST_SENSE,
  4037. 0,
  4038. 0,
  4039. 0,
  4040. SCSI_SENSE_BUFFERSIZE,
  4041. 0};
  4042. char *buffer;
  4043. int ret;
  4044. buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
  4045. if (!buffer) {
  4046. ret = -ENOMEM;
  4047. goto out;
  4048. }
  4049. ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
  4050. SCSI_SENSE_BUFFERSIZE, NULL,
  4051. msecs_to_jiffies(1000), 3, NULL, REQ_PM);
  4052. if (ret)
  4053. pr_err("%s: failed with err %d\n", __func__, ret);
  4054. kfree(buffer);
  4055. out:
  4056. return ret;
  4057. }
/**
 * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
 *			     power mode
 * @hba: per adapter instance
 * @pwr_mode: device power mode to set
 *
 * Returns 0 if requested power mode is set successfully
 * Returns non-zero if failed to set the requested power mode
 */
static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
				     enum ufs_dev_pwr_mode pwr_mode)
{
	unsigned char cmd[6] = { START_STOP };
	struct scsi_sense_hdr sshdr;
	struct scsi_device *sdp = hba->sdev_ufs_device;
	int ret;

	if (!sdp || !scsi_device_online(sdp))
		return -ENODEV;

	/*
	 * If scsi commands fail, the scsi mid-layer schedules scsi error-
	 * handling, which would wait for host to be resumed. Since we know
	 * we are functional while we are here, skip host resume in error
	 * handling context.
	 */
	hba->host->eh_noresume = 1;
	if (hba->wlun_dev_clr_ua) {
		ret = ufshcd_send_request_sense(hba, sdp);
		if (ret)
			goto out;
		/* Unit attention condition is cleared now */
		hba->wlun_dev_clr_ua = false;
	}

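	/*
	 * The requested UFS device power mode goes into the POWER CONDITION
	 * field, i.e. the upper nibble of byte 4 of the START STOP UNIT CDB.
	 */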
	cmd[4] = pwr_mode << 4;

	/*
	 * This function is generally called from the power management
	 * callbacks, hence set the REQ_PM flag so that it doesn't resume the
	 * already suspended children.
	 */
	ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
				     START_STOP_TIMEOUT, 0, NULL, REQ_PM);
	if (ret) {
		sdev_printk(KERN_WARNING, sdp,
			    "START_STOP failed for power mode: %d, result %x\n",
			    pwr_mode, ret);
		if (driver_byte(ret) & DRIVER_SENSE) {
			scsi_show_sense_hdr(sdp, NULL, &sshdr);
			scsi_show_extd_sense(sdp, NULL, sshdr.asc, sshdr.ascq);
		}
	}

	if (!ret)
		hba->curr_dev_pwr_mode = pwr_mode;
out:
	hba->host->eh_noresume = 0;
	return ret;
}

static int ufshcd_link_state_transition(struct ufs_hba *hba,
					enum uic_link_state req_link_state,
					int check_for_bkops)
{
	int ret = 0;

	if (req_link_state == hba->uic_link_state)
		return 0;

	if (req_link_state == UIC_LINK_HIBERN8_STATE) {
		ret = ufshcd_uic_hibern8_enter(hba);
		if (!ret)
			ufshcd_set_link_hibern8(hba);
		else
			goto out;
	}
	/*
	 * If autobkops is enabled, link can't be turned off because
	 * turning off the link would also turn off the device.
	 */
	else if ((req_link_state == UIC_LINK_OFF_STATE) &&
		 (!check_for_bkops || (check_for_bkops &&
		  !hba->auto_bkops_enabled))) {
		/*
		 * Change controller state to "reset state" which
		 * should also put the link in off/reset state
		 */
		ufshcd_hba_stop(hba);
		/*
		 * TODO: Check if we need any delay to make sure that
		 * controller is reset
		 */
		ufshcd_set_link_off(hba);
	}

out:
	return ret;
}

static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
{
	/*
	 * If the UFS device is in UFS_Sleep power mode, turn off the VCC rail
	 * to save some power.
	 *
	 * If UFS device and link is in OFF state, all power supplies (VCC,
	 * VCCQ, VCCQ2) can be turned off if power on write protect is not
	 * required. If UFS link is inactive (Hibern8 or OFF state) and device
	 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
	 *
	 * Ignore the error returned by ufshcd_toggle_vreg() as device is
	 * anyway in low power state which would save some power.
	 */
	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ufshcd_setup_vreg(hba, false);
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
		if (!ufshcd_is_link_active(hba)) {
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
		}
	}
}

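/*
 * Counterpart of ufshcd_vreg_set_lpm(): bring the UFS device rails back to
 * high power mode. On a partial failure, the rails that were already touched
 * are rolled back via the vccq_lpm/vcc_disable labels.
 */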
static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
{
	int ret = 0;

	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
	    !hba->dev_info.is_lu_power_on_wp) {
		ret = ufshcd_setup_vreg(hba, true);
	} else if (!ufshcd_is_ufs_dev_active(hba)) {
		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
		if (!ret && !ufshcd_is_link_active(hba)) {
			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
			if (ret)
				goto vcc_disable;
			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
			if (ret)
				goto vccq_lpm;
		}
	}
	goto out;

vccq_lpm:
	ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
vcc_disable:
	ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
out:
	return ret;
}

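/*
 * The host controller's own supply is toggled only when the UniPro link is
 * completely off; in any other link state (e.g. Hibern8) the rail is left on.
 */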
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba))
		ufshcd_setup_hba_vreg(hba, false);
}

static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
{
	if (ufshcd_is_link_off(hba))
		ufshcd_setup_hba_vreg(hba, true);
}

/**
 * ufshcd_suspend - helper function for suspend operations
 * @hba: per adapter instance
 * @pm_op: desired low power operation type
 *
 * This function will try to put the UFS device and link into low power
 * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
 * (System PM level).
 *
 * If this function is called during shutdown, it will make sure that
 * both UFS device and UFS link are powered off.
 *
 * NOTE: UFS device & link must be active before we enter this function.
 *
 * Returns 0 for success and non-zero for failure
 */
static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret = 0;
	enum ufs_pm_level pm_lvl;
	enum ufs_dev_pwr_mode req_dev_pwr_mode;
	enum uic_link_state req_link_state;

	hba->pm_op_in_progress = 1;
	if (!ufshcd_is_shutdown_pm(pm_op)) {
		pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
			 hba->rpm_lvl : hba->spm_lvl;
		req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
		req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
	} else {
		req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
		req_link_state = UIC_LINK_OFF_STATE;
	}

	/*
	 * If we can't transition into any of the low power modes
	 * just gate the clocks.
	 */
	ufshcd_hold(hba, false);
	hba->clk_gating.is_suspended = true;

	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
	    req_link_state == UIC_LINK_ACTIVE_STATE) {
		goto disable_clks;
	}

	if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
	    (req_link_state == hba->uic_link_state))
		goto out;

	/* UFS device & link must be active before we enter this function */
	if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
		ret = -EINVAL;
		goto out;
	}

	if (ufshcd_is_runtime_pm(pm_op)) {
		if (ufshcd_can_autobkops_during_suspend(hba)) {
			/*
			 * The device is idle with no requests in the queue,
			 * allow background operations if bkops status shows
			 * that performance might be impacted.
			 */
			ret = ufshcd_urgent_bkops(hba);
			if (ret)
				goto enable_gating;
		} else {
			/* make sure that auto bkops is disabled */
			ufshcd_disable_auto_bkops(hba);
		}
	}

	if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
	    ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
	     !ufshcd_is_runtime_pm(pm_op))) {
		/* ensure that bkops is disabled */
		ufshcd_disable_auto_bkops(hba);
		ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
		if (ret)
			goto enable_gating;
	}

	ret = ufshcd_link_state_transition(hba, req_link_state, 1);
	if (ret)
		goto set_dev_active;

	ufshcd_vreg_set_lpm(hba);

disable_clks:
	/*
	 * Clock scaling needs access to controller registers, hence wait
	 * for pending clock scaling work to be done before clocks are
	 * turned off.
	 */
	if (ufshcd_is_clkscaling_enabled(hba)) {
		devfreq_suspend_device(hba->devfreq);
		hba->clk_scaling.window_start_t = 0;
	}
	/*
	 * Call vendor specific suspend callback. As these callbacks may access
	 * vendor specific host controller register space, call them before the
	 * host clocks are turned off.
	 */
	if (hba->vops && hba->vops->suspend) {
		ret = hba->vops->suspend(hba, pm_op);
		if (ret)
			goto set_link_active;
	}

	if (hba->vops && hba->vops->setup_clocks) {
		ret = hba->vops->setup_clocks(hba, false);
		if (ret)
			goto vops_resume;
	}

	if (!ufshcd_is_link_active(hba))
		ufshcd_setup_clocks(hba, false);
	else
		/* If link is active, device ref_clk can't be switched off */
		__ufshcd_setup_clocks(hba, false, true);

	hba->clk_gating.state = CLKS_OFF;
	/*
	 * Disable the host irq as there won't be any host controller
	 * transaction expected till resume.
	 */
	ufshcd_disable_irq(hba);
	/* Put the host controller in low power mode if possible */
	ufshcd_hba_vreg_set_lpm(hba);
	goto out;

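	/*
	 * Error unwinding: undo the suspend steps in reverse order - resume
	 * the vendor ops, restore the regulators, bring the link back up and
	 * finally put the device back into active power mode.
	 */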
vops_resume:
	if (hba->vops && hba->vops->resume)
		hba->vops->resume(hba, pm_op);
set_link_active:
	ufshcd_vreg_set_hpm(hba);
	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
		ufshcd_set_link_active(hba);
	else if (ufshcd_is_link_off(hba))
		ufshcd_host_reset_and_restore(hba);
set_dev_active:
	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
		ufshcd_disable_auto_bkops(hba);
enable_gating:
	hba->clk_gating.is_suspended = false;
	ufshcd_release(hba);
out:
	hba->pm_op_in_progress = 0;
	return ret;
}

/**
 * ufshcd_resume - helper function for resume operations
 * @hba: per adapter instance
 * @pm_op: runtime PM or system PM
 *
 * This function basically brings the UFS device, UniPro link and controller
 * to active state.
 *
 * Returns 0 for success and non-zero for failure
 */
static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
	int ret;
	enum uic_link_state old_link_state;

	hba->pm_op_in_progress = 1;
	old_link_state = hba->uic_link_state;

	ufshcd_hba_vreg_set_hpm(hba);
	/* Make sure clocks are enabled before accessing controller */
	ret = ufshcd_setup_clocks(hba, true);
	if (ret)
		goto out;

	/* enable the host irq as host controller would be active soon */
	ret = ufshcd_enable_irq(hba);
	if (ret)
		goto disable_irq_and_vops_clks;

	ret = ufshcd_vreg_set_hpm(hba);
	if (ret)
		goto disable_irq_and_vops_clks;

	/*
	 * Call vendor specific resume callback. As these callbacks may access
	 * vendor specific host controller register space, call them when the
	 * host clocks are ON.
	 */
	if (hba->vops && hba->vops->resume) {
		ret = hba->vops->resume(hba, pm_op);
		if (ret)
			goto disable_vreg;
	}

	if (ufshcd_is_link_hibern8(hba)) {
		ret = ufshcd_uic_hibern8_exit(hba);
		if (!ret)
			ufshcd_set_link_active(hba);
		else
			goto vendor_suspend;
	} else if (ufshcd_is_link_off(hba)) {
		ret = ufshcd_host_reset_and_restore(hba);
		/*
		 * ufshcd_host_reset_and_restore() should have already
		 * set the link state as active
		 */
		if (ret || !ufshcd_is_link_active(hba))
			goto vendor_suspend;
	}

	if (!ufshcd_is_ufs_dev_active(hba)) {
		ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
		if (ret)
			goto set_old_link_state;
	}

	/*
	 * If BKOPs operations are urgently needed at this moment then
	 * keep auto-bkops enabled or else disable it.
	 */
	ufshcd_urgent_bkops(hba);
	hba->clk_gating.is_suspended = false;

	if (ufshcd_is_clkscaling_enabled(hba))
		devfreq_resume_device(hba->devfreq);

	/* Schedule clock gating in case of no access to UFS device yet */
	ufshcd_release(hba);
	goto out;

set_old_link_state:
	ufshcd_link_state_transition(hba, old_link_state, 0);
vendor_suspend:
	if (hba->vops && hba->vops->suspend)
		hba->vops->suspend(hba, pm_op);
disable_vreg:
	ufshcd_vreg_set_lpm(hba);
disable_irq_and_vops_clks:
	ufshcd_disable_irq(hba);
	ufshcd_setup_clocks(hba, false);
out:
	hba->pm_op_in_progress = 0;
	return ret;
}

/**
 * ufshcd_system_suspend - system suspend routine
 * @hba: per adapter instance
 *
 * Check the description of ufshcd_suspend() function for more details.
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_system_suspend(struct ufs_hba *hba)
{
	int ret = 0;

	if (!hba || !hba->is_powered)
		return 0;

	if (pm_runtime_suspended(hba->dev)) {
		if (hba->rpm_lvl == hba->spm_lvl)
			/*
			 * There is a possibility that the device may still be
			 * in active state during runtime suspend.
			 */
			if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
			    hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
				goto out;

		/*
		 * UFS device and/or UFS link low power states during runtime
		 * suspend seem to be different from what is expected during
		 * system suspend. Hence runtime resume the device & link and
		 * let the system suspend low power states take effect.
		 * TODO: If resume takes a long time, we might optimize this in
		 * the future by not resuming everything if possible.
		 */
		ret = ufshcd_runtime_resume(hba);
		if (ret)
			goto out;
	}

	ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
out:
	if (!ret)
		hba->is_sys_suspended = true;
	return ret;
}
EXPORT_SYMBOL(ufshcd_system_suspend);

/**
 * ufshcd_system_resume - system resume routine
 * @hba: per adapter instance
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_system_resume(struct ufs_hba *hba)
{
	if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
		/*
		 * Let the runtime resume take care of resuming
		 * if runtime suspended.
		 */
		return 0;

	return ufshcd_resume(hba, UFS_SYSTEM_PM);
}
EXPORT_SYMBOL(ufshcd_system_resume);

/**
 * ufshcd_runtime_suspend - runtime suspend routine
 * @hba: per adapter instance
 *
 * Check the description of ufshcd_suspend() function for more details.
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_runtime_suspend(struct ufs_hba *hba)
{
	if (!hba || !hba->is_powered)
		return 0;

	return ufshcd_suspend(hba, UFS_RUNTIME_PM);
}
EXPORT_SYMBOL(ufshcd_runtime_suspend);

/**
 * ufshcd_runtime_resume - runtime resume routine
 * @hba: per adapter instance
 *
 * This function basically brings the UFS device, UniPro link and controller
 * to active state. The following operations are done in this function:
 *
 * 1. Turn on all the controller related clocks
 * 2. Bring the UniPro link out of Hibernate state
 * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS
 *    device to active state.
 * 4. If auto-bkops is enabled on the device, disable it.
 *
 * So, following would be the possible power state after this function returns
 * successfully:
 *	S1: UFS device in Active state with VCC rail ON
 *	    UniPro link in Active state
 *	    All the UFS/UniPro controller clocks are ON
 *
 * Returns 0 for success and non-zero for failure
 */
int ufshcd_runtime_resume(struct ufs_hba *hba)
{
	if (!hba || !hba->is_powered)
		return 0;
	else
		return ufshcd_resume(hba, UFS_RUNTIME_PM);
}
EXPORT_SYMBOL(ufshcd_runtime_resume);

int ufshcd_runtime_idle(struct ufs_hba *hba)
{
	return 0;
}
EXPORT_SYMBOL(ufshcd_runtime_idle);

/**
 * ufshcd_shutdown - shutdown routine
 * @hba: per adapter instance
 *
 * This function would power off both UFS device and UFS link.
 *
 * Returns 0 always to allow force shutdown even in case of errors.
 */
int ufshcd_shutdown(struct ufs_hba *hba)
{
	int ret = 0;

	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
		goto out;

	if (pm_runtime_suspended(hba->dev)) {
		ret = ufshcd_runtime_resume(hba);
		if (ret)
			goto out;
	}

	ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
out:
	if (ret)
		dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
	/* allow force shutdown even in case of errors */
	return 0;
}
EXPORT_SYMBOL(ufshcd_shutdown);

/**
 * ufshcd_remove - de-allocate SCSI host and host memory space
 *		data structure memory
 * @hba - per adapter instance
 */
void ufshcd_remove(struct ufs_hba *hba)
{
	scsi_remove_host(hba->host);
	ufshcd_scsi_remove_wlus(hba);
	/* disable interrupts */
	ufshcd_disable_intr(hba, hba->intr_mask);
	ufshcd_hba_stop(hba);

	scsi_host_put(hba->host);
	ufshcd_exit_clk_gating(hba);
	if (ufshcd_is_clkscaling_enabled(hba))
		devfreq_remove_device(hba->devfreq);
	ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);

/**
 * ufshcd_set_dma_mask - Set dma mask based on the controller
 *			 addressing capability
 * @hba: per adapter instance
 *
 * Returns 0 for success, non-zero for failure
 */
static int ufshcd_set_dma_mask(struct ufs_hba *hba)
{
	if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT) {
		if (!dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(64)))
			return 0;
	}
	return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32));
}

/**
 * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
 * @dev: pointer to device handle
 * @hba_handle: driver private handle
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
{
	struct Scsi_Host *host;
	struct ufs_hba *hba;
	int err = 0;

	if (!dev) {
		dev_err(dev,
		"Invalid memory reference for dev is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	host = scsi_host_alloc(&ufshcd_driver_template,
				sizeof(struct ufs_hba));
	if (!host) {
		dev_err(dev, "scsi_host_alloc failed\n");
		err = -ENOMEM;
		goto out_error;
	}
	hba = shost_priv(host);
	hba->host = host;
	hba->dev = dev;
	*hba_handle = hba;

out_error:
	return err;
}
EXPORT_SYMBOL(ufshcd_alloc_host);

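/*
 * Walk the clock list and move every clock to its max_freq (scale up) or
 * min_freq (scale down), skipping clocks that are already at the target rate
 * or that don't define the corresponding frequency. The vendor driver is
 * notified once all clocks have been reprogrammed.
 */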
static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
{
	int ret = 0;
	struct ufs_clk_info *clki;
	struct list_head *head = &hba->clk_list_head;

	if (!head || list_empty(head))
		goto out;

	list_for_each_entry(clki, head, list) {
		if (!IS_ERR_OR_NULL(clki->clk)) {
			if (scale_up && clki->max_freq) {
				if (clki->curr_freq == clki->max_freq)
					continue;
				ret = clk_set_rate(clki->clk, clki->max_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->max_freq, ret);
					break;
				}
				clki->curr_freq = clki->max_freq;

			} else if (!scale_up && clki->min_freq) {
				if (clki->curr_freq == clki->min_freq)
					continue;
				ret = clk_set_rate(clki->clk, clki->min_freq);
				if (ret) {
					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
						__func__, clki->name,
						clki->min_freq, ret);
					break;
				}
				clki->curr_freq = clki->min_freq;
			}
		}
		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
				clki->name, clk_get_rate(clki->clk));
	}
	if (hba->vops && hba->vops->clk_scale_notify)
		hba->vops->clk_scale_notify(hba);
out:
	return ret;
}

static int ufshcd_devfreq_target(struct device *dev,
				unsigned long *freq, u32 flags)
{
	int err = 0;
	struct ufs_hba *hba = dev_get_drvdata(dev);

	if (!ufshcd_is_clkscaling_enabled(hba))
		return -EINVAL;

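	/*
	 * Only two operating points are supported: scale every clock fully up
	 * when devfreq requests the maximum frequency, and fully down when it
	 * requests 0; intermediate frequencies are ignored.
	 */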
	if (*freq == UINT_MAX)
		err = ufshcd_scale_clks(hba, true);
	else if (*freq == 0)
		err = ufshcd_scale_clks(hba, false);

	return err;
}

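/*
 * devfreq load reporting: busy_time is the accumulated time with requests
 * outstanding since the last poll, total_time is the length of that polling
 * window. A new window is started on every call.
 */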
static int ufshcd_devfreq_get_dev_status(struct device *dev,
		struct devfreq_dev_status *stat)
{
	struct ufs_hba *hba = dev_get_drvdata(dev);
	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
	unsigned long flags;

	if (!ufshcd_is_clkscaling_enabled(hba))
		return -EINVAL;

	memset(stat, 0, sizeof(*stat));

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (!scaling->window_start_t)
		goto start_window;

	if (scaling->is_busy_started)
		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
					scaling->busy_start_t));

	stat->total_time = jiffies_to_usecs((long)jiffies -
				(long)scaling->window_start_t);
	stat->busy_time = scaling->tot_busy_t;
start_window:
	scaling->window_start_t = jiffies;
	scaling->tot_busy_t = 0;

	if (hba->outstanding_reqs) {
		scaling->busy_start_t = ktime_get();
		scaling->is_busy_started = true;
	} else {
		scaling->busy_start_t = ktime_set(0, 0);
		scaling->is_busy_started = false;
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return 0;
}

static struct devfreq_dev_profile ufs_devfreq_profile = {
	.polling_ms	= 100,
	.target		= ufshcd_devfreq_target,
	.get_dev_status	= ufshcd_devfreq_get_dev_status,
};

/**
 * ufshcd_init - Driver initialization routine
 * @hba: per-adapter instance
 * @mmio_base: base register address
 * @irq: Interrupt line of device
 * Returns 0 on success, non-zero value on failure
 */
int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
{
	int err;
	struct Scsi_Host *host = hba->host;
	struct device *dev = hba->dev;

	if (!mmio_base) {
		dev_err(hba->dev,
		"Invalid memory reference for mmio_base is NULL\n");
		err = -ENODEV;
		goto out_error;
	}

	hba->mmio_base = mmio_base;
	hba->irq = irq;

	err = ufshcd_hba_init(hba);
	if (err)
		goto out_error;

	/* Read capabilities registers */
	ufshcd_hba_capabilities(hba);

	/* Get UFS version supported by the controller */
	hba->ufs_version = ufshcd_get_ufs_version(hba);

	/* Get Interrupt bit mask per version */
	hba->intr_mask = ufshcd_get_intr_mask(hba);

	err = ufshcd_set_dma_mask(hba);
	if (err) {
		dev_err(hba->dev, "set dma mask failed\n");
		goto out_disable;
	}

	/* Allocate memory for host memory space */
	err = ufshcd_memory_alloc(hba);
	if (err) {
		dev_err(hba->dev, "Memory allocation failed\n");
		goto out_disable;
	}

	/* Configure LRB */
	ufshcd_host_memory_configure(hba);

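	/*
	 * Advertise the controller's transfer request queue depth (nutrs UTP
	 * transfer request slots) to the SCSI midlayer, both as the overall
	 * queue depth and as the per-LUN limit.
	 */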
	host->can_queue = hba->nutrs;
	host->cmd_per_lun = hba->nutrs;
	host->max_id = UFSHCD_MAX_ID;
	host->max_lun = UFS_MAX_LUNS;
	host->max_channel = UFSHCD_MAX_CHANNEL;
	host->unique_id = host->host_no;
	host->max_cmd_len = MAX_CDB_SIZE;

	hba->max_pwr_info.is_valid = false;

	/* Initialize wait queue for task management */
	init_waitqueue_head(&hba->tm_wq);
	init_waitqueue_head(&hba->tm_tag_wq);

	/* Initialize work queues */
	INIT_WORK(&hba->eh_work, ufshcd_err_handler);
	INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);

	/* Initialize UIC command mutex */
	mutex_init(&hba->uic_cmd_mutex);

	/* Initialize mutex for device management commands */
	mutex_init(&hba->dev_cmd.lock);

	/* Initialize device management tag acquire wait queue */
	init_waitqueue_head(&hba->dev_cmd.tag_wq);

	ufshcd_init_clk_gating(hba);

	/* IRQ registration */
	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
	if (err) {
		dev_err(hba->dev, "request irq failed\n");
		goto exit_gating;
	} else {
		hba->is_irq_enabled = true;
	}

	/* Enable SCSI tag mapping */
	err = scsi_init_shared_tag_map(host, host->can_queue);
	if (err) {
		dev_err(hba->dev, "init shared queue failed\n");
		goto exit_gating;
	}

	err = scsi_add_host(host, hba->dev);
	if (err) {
		dev_err(hba->dev, "scsi_add_host failed\n");
		goto exit_gating;
	}

	/* Host controller enable */
	err = ufshcd_hba_enable(hba);
	if (err) {
		dev_err(hba->dev, "Host controller enable failed\n");
		goto out_remove_scsi_host;
	}

	if (ufshcd_is_clkscaling_enabled(hba)) {
		hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
						  "simple_ondemand", NULL);
		if (IS_ERR(hba->devfreq)) {
			dev_err(hba->dev, "Unable to register with devfreq %ld\n",
					PTR_ERR(hba->devfreq));
			err = PTR_ERR(hba->devfreq);
			goto out_remove_scsi_host;
		}
		/* Suspend devfreq until the UFS device is detected */
		devfreq_suspend_device(hba->devfreq);
		hba->clk_scaling.window_start_t = 0;
	}

	/* Hold auto suspend until async scan completes */
	pm_runtime_get_sync(dev);

	/*
	 * The device-initialize-sequence hasn't been invoked yet.
	 * Set the device to power-off state
	 */
	ufshcd_set_ufs_dev_poweroff(hba);

	async_schedule(ufshcd_async_scan, hba);

	return 0;

out_remove_scsi_host:
	scsi_remove_host(hba->host);
exit_gating:
	ufshcd_exit_clk_gating(hba);
out_disable:
	hba->is_irq_enabled = false;
	scsi_host_put(host);
	ufshcd_hba_exit(hba);
out_error:
	return err;
}
EXPORT_SYMBOL_GPL(ufshcd_init);

MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);