skd_main.c

  1. /* Copyright 2012 STEC, Inc.
  2. *
  3. * This file is licensed under the terms of the 3-clause
  4. * BSD License (http://opensource.org/licenses/BSD-3-Clause)
  5. * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
  6. * at your option. Both licenses are also available in the LICENSE file
  7. * distributed with this project. This file may not be copied, modified,
  8. * or distributed except in accordance with those terms.
  9. * Gordoni Waidhofer <gwaidhofer@stec-inc.com>
  10. * Initial Driver Design!
  11. * Thomas Swann <tswann@stec-inc.com>
  12. * Interrupt handling.
  13. * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com>
  14. * biomode implementation.
  15. * Akhil Bhansali <abhansali@stec-inc.com>
  16. * Added support for DISCARD / FLUSH and FUA.
  17. */
  18. #include <linux/kernel.h>
  19. #include <linux/module.h>
  20. #include <linux/init.h>
  21. #include <linux/pci.h>
  22. #include <linux/slab.h>
  23. #include <linux/spinlock.h>
  24. #include <linux/blkdev.h>
  25. #include <linux/sched.h>
  26. #include <linux/interrupt.h>
  27. #include <linux/compiler.h>
  28. #include <linux/workqueue.h>
  29. #include <linux/bitops.h>
  30. #include <linux/delay.h>
  31. #include <linux/time.h>
  32. #include <linux/hdreg.h>
  33. #include <linux/dma-mapping.h>
  34. #include <linux/completion.h>
  35. #include <linux/scatterlist.h>
  36. #include <linux/version.h>
  37. #include <linux/err.h>
  38. #include <linux/aer.h>
  39. #include <linux/ctype.h>
  40. #include <linux/wait.h>
  41. #include <linux/uio.h>
  42. #include <scsi/scsi.h>
  43. #include <scsi/sg.h>
  44. #include <linux/io.h>
  45. #include <linux/uaccess.h>
  46. #include <asm/unaligned.h>
  47. #include "skd_s1120.h"
  48. static int skd_dbg_level;
  49. static int skd_isr_comp_limit = 4;
  50. enum {
  51. STEC_LINK_2_5GTS = 0,
  52. STEC_LINK_5GTS = 1,
  53. STEC_LINK_8GTS = 2,
  54. STEC_LINK_UNKNOWN = 0xFF
  55. };
  56. enum {
  57. SKD_FLUSH_INITIALIZER,
  58. SKD_FLUSH_ZERO_SIZE_FIRST,
  59. SKD_FLUSH_DATA_SECOND,
  60. };
  61. #define SKD_ASSERT(expr) \
  62. do { \
  63. if (unlikely(!(expr))) { \
  64. pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
  65. # expr, __FILE__, __func__, __LINE__); \
  66. } \
  67. } while (0)
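/*
 * Note: SKD_ASSERT() only logs the failed expression via pr_err() and
 * keeps going; it never BUGs or halts the driver, so it is usable from
 * interrupt and request-queue context.
 */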
  68. #define DRV_NAME "skd"
  69. #define DRV_VERSION "2.2.1"
  70. #define DRV_BUILD_ID "0260"
  71. #define PFX DRV_NAME ": "
  72. #define DRV_BIN_VERSION 0x100
  73. #define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID
  74. MODULE_AUTHOR("bug-reports: support@stec-inc.com");
  75. MODULE_LICENSE("Dual BSD/GPL");
  76. MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
  77. MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
  78. #define PCI_VENDOR_ID_STEC 0x1B39
  79. #define PCI_DEVICE_ID_S1120 0x0001
  80. #define SKD_FUA_NV (1 << 1)
  81. #define SKD_MINORS_PER_DEVICE 16
  82. #define SKD_MAX_QUEUE_DEPTH 200u
  83. #define SKD_PAUSE_TIMEOUT (5 * 1000)
  84. #define SKD_N_FITMSG_BYTES (512u)
  85. #define SKD_N_SPECIAL_CONTEXT 32u
  86. #define SKD_N_SPECIAL_FITMSG_BYTES (128u)
  87. /* SG elements are 32 bytes, so we can make this 4096 and still be under the
  88. * 128KB limit. That allows 4096*4K = 16M xfer size
  89. */
  90. #define SKD_N_SG_PER_REQ_DEFAULT 256u
  91. #define SKD_N_SG_PER_SPECIAL 256u
  92. #define SKD_N_COMPLETION_ENTRY 256u
  93. #define SKD_N_READ_CAP_BYTES (8u)
  94. #define SKD_N_INTERNAL_BYTES (512u)
  95. /* 5 bits of uniquifier, 0xF800 */
  96. #define SKD_ID_INCR (0x400)
  97. #define SKD_ID_TABLE_MASK (3u << 8u)
  98. #define SKD_ID_RW_REQUEST (0u << 8u)
  99. #define SKD_ID_INTERNAL (1u << 8u)
  100. #define SKD_ID_SPECIAL_REQUEST (2u << 8u)
  101. #define SKD_ID_FIT_MSG (3u << 8u)
  102. #define SKD_ID_SLOT_MASK 0x00FFu
  103. #define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
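/*
 * Context IDs pack three fields: the low byte is a slot index into the
 * per-type table, bits 9:8 select the table (r/w request, internal,
 * special request, or FIT message), and the upper bits act as a
 * generation count that is bumped by SKD_ID_INCR each time a context is
 * reused, so a recycled context carries a fresh id.
 */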
  104. #define SKD_N_TIMEOUT_SLOT 4u
  105. #define SKD_TIMEOUT_SLOT_MASK 3u
  106. #define SKD_N_MAX_SECTORS 2048u
  107. #define SKD_MAX_RETRIES 2u
  108. #define SKD_TIMER_SECONDS(seconds) (seconds)
  109. #define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
  110. #define INQ_STD_NBYTES 36
  111. enum skd_drvr_state {
  112. SKD_DRVR_STATE_LOAD,
  113. SKD_DRVR_STATE_IDLE,
  114. SKD_DRVR_STATE_BUSY,
  115. SKD_DRVR_STATE_STARTING,
  116. SKD_DRVR_STATE_ONLINE,
  117. SKD_DRVR_STATE_PAUSING,
  118. SKD_DRVR_STATE_PAUSED,
  119. SKD_DRVR_STATE_DRAINING_TIMEOUT,
  120. SKD_DRVR_STATE_RESTARTING,
  121. SKD_DRVR_STATE_RESUMING,
  122. SKD_DRVR_STATE_STOPPING,
  123. SKD_DRVR_STATE_FAULT,
  124. SKD_DRVR_STATE_DISAPPEARED,
  125. SKD_DRVR_STATE_PROTOCOL_MISMATCH,
  126. SKD_DRVR_STATE_BUSY_ERASE,
  127. SKD_DRVR_STATE_BUSY_SANITIZE,
  128. SKD_DRVR_STATE_BUSY_IMMINENT,
  129. SKD_DRVR_STATE_WAIT_BOOT,
  130. SKD_DRVR_STATE_SYNCING,
  131. };
  132. #define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
  133. #define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
  134. #define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
  135. #define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u)
  136. #define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
  137. #define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
  138. #define SKD_START_WAIT_SECONDS 90u
  139. enum skd_req_state {
  140. SKD_REQ_STATE_IDLE,
  141. SKD_REQ_STATE_SETUP,
  142. SKD_REQ_STATE_BUSY,
  143. SKD_REQ_STATE_COMPLETED,
  144. SKD_REQ_STATE_TIMEOUT,
  145. SKD_REQ_STATE_ABORTED,
  146. };
  147. enum skd_fit_msg_state {
  148. SKD_MSG_STATE_IDLE,
  149. SKD_MSG_STATE_BUSY,
  150. };
  151. enum skd_check_status_action {
  152. SKD_CHECK_STATUS_REPORT_GOOD,
  153. SKD_CHECK_STATUS_REPORT_SMART_ALERT,
  154. SKD_CHECK_STATUS_REQUEUE_REQUEST,
  155. SKD_CHECK_STATUS_REPORT_ERROR,
  156. SKD_CHECK_STATUS_BUSY_IMMINENT,
  157. };
  158. struct skd_fitmsg_context {
  159. enum skd_fit_msg_state state;
  160. struct skd_fitmsg_context *next;
  161. u32 id;
  162. u16 outstanding;
  163. u32 length;
  164. u32 offset;
  165. u8 *msg_buf;
  166. dma_addr_t mb_dma_address;
  167. };
  168. struct skd_request_context {
  169. enum skd_req_state state;
  170. struct skd_request_context *next;
  171. u16 id;
  172. u32 fitmsg_id;
  173. struct request *req;
  174. u8 flush_cmd;
  175. u32 timeout_stamp;
  176. u8 sg_data_dir;
  177. struct scatterlist *sg;
  178. u32 n_sg;
  179. u32 sg_byte_count;
  180. struct fit_sg_descriptor *sksg_list;
  181. dma_addr_t sksg_dma_address;
  182. struct fit_completion_entry_v1 completion;
  183. struct fit_comp_error_info err_info;
  184. };
  185. #define SKD_DATA_DIR_HOST_TO_CARD 1
  186. #define SKD_DATA_DIR_CARD_TO_HOST 2
  187. struct skd_special_context {
  188. struct skd_request_context req;
  189. u8 orphaned;
  190. void *data_buf;
  191. dma_addr_t db_dma_address;
  192. u8 *msg_buf;
  193. dma_addr_t mb_dma_address;
  194. };
  195. struct skd_sg_io {
  196. fmode_t mode;
  197. void __user *argp;
  198. struct sg_io_hdr sg;
  199. u8 cdb[16];
  200. u32 dxfer_len;
  201. u32 iovcnt;
  202. struct sg_iovec *iov;
  203. struct sg_iovec no_iov_iov;
  204. struct skd_special_context *skspcl;
  205. };
  206. typedef enum skd_irq_type {
  207. SKD_IRQ_LEGACY,
  208. SKD_IRQ_MSI,
  209. SKD_IRQ_MSIX
  210. } skd_irq_type_t;
  211. #define SKD_MAX_BARS 2
  212. struct skd_device {
  213. volatile void __iomem *mem_map[SKD_MAX_BARS];
  214. resource_size_t mem_phys[SKD_MAX_BARS];
  215. u32 mem_size[SKD_MAX_BARS];
  216. struct skd_msix_entry *msix_entries;
  217. struct pci_dev *pdev;
  218. int pcie_error_reporting_is_enabled;
  219. spinlock_t lock;
  220. struct gendisk *disk;
  221. struct request_queue *queue;
  222. struct device *class_dev;
  223. int gendisk_on;
  224. int sync_done;
  225. atomic_t device_count;
  226. u32 devno;
  227. u32 major;
  228. char name[32];
  229. char isr_name[30];
  230. enum skd_drvr_state state;
  231. u32 drive_state;
  232. u32 in_flight;
  233. u32 cur_max_queue_depth;
  234. u32 queue_low_water_mark;
  235. u32 dev_max_queue_depth;
  236. u32 num_fitmsg_context;
  237. u32 num_req_context;
  238. u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
  239. u32 timeout_stamp;
  240. struct skd_fitmsg_context *skmsg_free_list;
  241. struct skd_fitmsg_context *skmsg_table;
  242. struct skd_request_context *skreq_free_list;
  243. struct skd_request_context *skreq_table;
  244. struct skd_special_context *skspcl_free_list;
  245. struct skd_special_context *skspcl_table;
  246. struct skd_special_context internal_skspcl;
  247. u32 read_cap_blocksize;
  248. u32 read_cap_last_lba;
  249. int read_cap_is_valid;
  250. int inquiry_is_valid;
  251. u8 inq_serial_num[13]; /*12 chars plus null term */
  252. u8 id_str[80]; /* holds a composite name (pci + sernum) */
  253. u8 skcomp_cycle;
  254. u32 skcomp_ix;
  255. struct fit_completion_entry_v1 *skcomp_table;
  256. struct fit_comp_error_info *skerr_table;
  257. dma_addr_t cq_dma_address;
  258. wait_queue_head_t waitq;
  259. struct timer_list timer;
  260. u32 timer_countdown;
  261. u32 timer_substate;
  262. int n_special;
  263. int sgs_per_request;
  264. u32 last_mtd;
  265. u32 proto_ver;
  266. int dbg_level;
  267. u32 connect_time_stamp;
  268. int connect_retries;
  269. #define SKD_MAX_CONNECT_RETRIES 16
  270. u32 drive_jiffies;
  271. u32 timo_slot;
  272. struct work_struct completion_worker;
  273. };
  274. #define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
  275. #define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF)
  276. #define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
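/*
 * Register accessors below target BAR 1 (mem_map[1]).  When dbg_level
 * is 2 or higher each access is bracketed with barrier() and logged via
 * pr_debug(); otherwise the tracing is skipped.
 */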
  277. static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
  278. {
  279. u32 val;
  280. if (likely(skdev->dbg_level < 2))
  281. return readl(skdev->mem_map[1] + offset);
  282. else {
  283. barrier();
  284. val = readl(skdev->mem_map[1] + offset);
  285. barrier();
  286. pr_debug("%s:%s:%d offset %x = %x\n",
  287. skdev->name, __func__, __LINE__, offset, val);
  288. return val;
  289. }
  290. }
  291. static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
  292. u32 offset)
  293. {
  294. if (likely(skdev->dbg_level < 2)) {
  295. writel(val, skdev->mem_map[1] + offset);
  296. barrier();
  297. } else {
  298. barrier();
  299. writel(val, skdev->mem_map[1] + offset);
  300. barrier();
  301. pr_debug("%s:%s:%d offset %x = %x\n",
  302. skdev->name, __func__, __LINE__, offset, val);
  303. }
  304. }
  305. static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
  306. u32 offset)
  307. {
  308. if (likely(skdev->dbg_level < 2)) {
  309. writeq(val, skdev->mem_map[1] + offset);
  310. barrier();
  311. } else {
  312. barrier();
  313. writeq(val, skdev->mem_map[1] + offset);
  314. barrier();
  315. pr_debug("%s:%s:%d offset %x = %016llx\n",
  316. skdev->name, __func__, __LINE__, offset, val);
  317. }
  318. }
  319. #define SKD_IRQ_DEFAULT SKD_IRQ_MSI
  320. static int skd_isr_type = SKD_IRQ_DEFAULT;
  321. module_param(skd_isr_type, int, 0444);
  322. MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
  323. " (0==legacy, 1==MSI, 2==MSI-X, default==1)");
  324. #define SKD_MAX_REQ_PER_MSG_DEFAULT 1
  325. static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
  326. module_param(skd_max_req_per_msg, int, 0444);
  327. MODULE_PARM_DESC(skd_max_req_per_msg,
  328. "Maximum SCSI requests packed in a single message."
  329. " (1-14, default==1)");
  330. #define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
  331. #define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
  332. static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
  333. module_param(skd_max_queue_depth, int, 0444);
  334. MODULE_PARM_DESC(skd_max_queue_depth,
  335. "Maximum SCSI requests issued to s1120."
  336. " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");
  337. static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
  338. module_param(skd_sgs_per_request, int, 0444);
  339. MODULE_PARM_DESC(skd_sgs_per_request,
  340. "Maximum SG elements per block request."
  341. " (1-4096, default==256)");
  342. static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
  343. module_param(skd_max_pass_thru, int, 0444);
  344. MODULE_PARM_DESC(skd_max_pass_thru,
  345. "Maximum SCSI pass-thru at a time." " (1-50, default==32)");
  346. module_param(skd_dbg_level, int, 0444);
  347. MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
  348. module_param(skd_isr_comp_limit, int, 0444);
  349. MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
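/*
 * All module parameters above use mode 0444: they are readable through
 * sysfs but can only be set on the module command line at load time.
 */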
  350. /* Major device number dynamically assigned. */
  351. static u32 skd_major;
  352. static void skd_destruct(struct skd_device *skdev);
  353. static const struct block_device_operations skd_blockdev_ops;
  354. static void skd_send_fitmsg(struct skd_device *skdev,
  355. struct skd_fitmsg_context *skmsg);
  356. static void skd_send_special_fitmsg(struct skd_device *skdev,
  357. struct skd_special_context *skspcl);
  358. static void skd_request_fn(struct request_queue *rq);
  359. static void skd_end_request(struct skd_device *skdev,
  360. struct skd_request_context *skreq, int error);
  361. static int skd_preop_sg_list(struct skd_device *skdev,
  362. struct skd_request_context *skreq);
  363. static void skd_postop_sg_list(struct skd_device *skdev,
  364. struct skd_request_context *skreq);
  365. static void skd_restart_device(struct skd_device *skdev);
  366. static int skd_quiesce_dev(struct skd_device *skdev);
  367. static int skd_unquiesce_dev(struct skd_device *skdev);
  368. static void skd_release_special(struct skd_device *skdev,
  369. struct skd_special_context *skspcl);
  370. static void skd_disable_interrupts(struct skd_device *skdev);
  371. static void skd_isr_fwstate(struct skd_device *skdev);
  372. static void skd_recover_requests(struct skd_device *skdev, int requeue);
  373. static void skd_soft_reset(struct skd_device *skdev);
  374. static const char *skd_name(struct skd_device *skdev);
  375. const char *skd_drive_state_to_str(int state);
  376. const char *skd_skdev_state_to_str(enum skd_drvr_state state);
  377. static void skd_log_skdev(struct skd_device *skdev, const char *event);
  378. static void skd_log_skmsg(struct skd_device *skdev,
  379. struct skd_fitmsg_context *skmsg, const char *event);
  380. static void skd_log_skreq(struct skd_device *skdev,
  381. struct skd_request_context *skreq, const char *event);
  382. /*
  383. *****************************************************************************
  384. * READ/WRITE REQUESTS
  385. *****************************************************************************
  386. */
  387. static void skd_fail_all_pending(struct skd_device *skdev)
  388. {
  389. struct request_queue *q = skdev->queue;
  390. struct request *req;
  391. for (;; ) {
  392. req = blk_peek_request(q);
  393. if (req == NULL)
  394. break;
  395. blk_start_request(req);
  396. __blk_end_request_all(req, -EIO);
  397. }
  398. }
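/*
 * Build a 10-byte SCSI CDB for the transfer: opcode 0x28 (READ(10)) or
 * 0x2a (WRITE(10)), big-endian 32-bit LBA in bytes 2-5 and big-endian
 * 16-bit transfer length in bytes 7-8.
 */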
  399. static void
  400. skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
  401. int data_dir, unsigned lba,
  402. unsigned count)
  403. {
  404. if (data_dir == READ)
  405. scsi_req->cdb[0] = 0x28;
  406. else
  407. scsi_req->cdb[0] = 0x2a;
  408. scsi_req->cdb[1] = 0;
  409. scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
  410. scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
  411. scsi_req->cdb[4] = (lba & 0xff00) >> 8;
  412. scsi_req->cdb[5] = (lba & 0xff);
  413. scsi_req->cdb[6] = 0;
  414. scsi_req->cdb[7] = (count & 0xff00) >> 8;
  415. scsi_req->cdb[8] = count & 0xff;
  416. scsi_req->cdb[9] = 0;
  417. }
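/*
 * Zero-size flush: opcode 0x35 is SYNCHRONIZE CACHE(10) with the LBA
 * and block count left at zero, i.e. flush the drive's entire write
 * cache.
 */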
  418. static void
  419. skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
  420. struct skd_request_context *skreq)
  421. {
  422. skreq->flush_cmd = 1;
  423. scsi_req->cdb[0] = 0x35;
  424. scsi_req->cdb[1] = 0;
  425. scsi_req->cdb[2] = 0;
  426. scsi_req->cdb[3] = 0;
  427. scsi_req->cdb[4] = 0;
  428. scsi_req->cdb[5] = 0;
  429. scsi_req->cdb[6] = 0;
  430. scsi_req->cdb[7] = 0;
  431. scsi_req->cdb[8] = 0;
  432. scsi_req->cdb[9] = 0;
  433. }
  434. static void skd_request_fn_not_online(struct request_queue *q);
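/*
 * Request-queue strategy function.  Pulls native requests off the block
 * queue, transcodes each one into a SoFIT SCSI request inside a FIT
 * message (coalescing up to skd_max_req_per_msg commands per message),
 * and submits full or trailing messages via skd_send_fitmsg().  If a
 * request is still pending when request contexts, FIT message buffers,
 * or queue depth run out, the queue is stopped with blk_stop_queue()
 * and restarted once the shortage clears.
 */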
  435. static void skd_request_fn(struct request_queue *q)
  436. {
  437. struct skd_device *skdev = q->queuedata;
  438. struct skd_fitmsg_context *skmsg = NULL;
  439. struct fit_msg_hdr *fmh = NULL;
  440. struct skd_request_context *skreq;
  441. struct request *req = NULL;
  442. struct skd_scsi_request *scsi_req;
  443. unsigned long io_flags;
  444. int error;
  445. u32 lba;
  446. u32 count;
  447. int data_dir;
  448. u32 be_lba;
  449. u32 be_count;
  450. u64 be_dmaa;
  451. u64 cmdctxt;
  452. u32 timo_slot;
  453. void *cmd_ptr;
  454. int flush, fua;
  455. if (skdev->state != SKD_DRVR_STATE_ONLINE) {
  456. skd_request_fn_not_online(q);
  457. return;
  458. }
  459. if (blk_queue_stopped(skdev->queue)) {
  460. if (skdev->skmsg_free_list == NULL ||
  461. skdev->skreq_free_list == NULL ||
  462. skdev->in_flight >= skdev->queue_low_water_mark)
  463. /* There is still some kind of shortage */
  464. return;
  465. queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
  466. }
  467. /*
  468. * Stop conditions:
  469. * - There are no more native requests
  470. * - There are already the maximum number of requests in progress
  471. * - There are no more skd_request_context entries
  472. * - There are no more FIT msg buffers
  473. */
  474. for (;; ) {
  475. flush = fua = 0;
  476. req = blk_peek_request(q);
  477. /* Are there any native requests to start? */
  478. if (req == NULL)
  479. break;
  480. lba = (u32)blk_rq_pos(req);
  481. count = blk_rq_sectors(req);
  482. data_dir = rq_data_dir(req);
  483. io_flags = req->cmd_flags;
  484. if (req_op(req) == REQ_OP_FLUSH)
  485. flush++;
  486. if (io_flags & REQ_FUA)
  487. fua++;
  488. pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
  489. "count=%u(0x%x) dir=%d\n",
  490. skdev->name, __func__, __LINE__,
  491. req, lba, lba, count, count, data_dir);
  492. /* At this point we know there is a request */
  493. /* Are too many requests already in progress? */
  494. if (skdev->in_flight >= skdev->cur_max_queue_depth) {
  495. pr_debug("%s:%s:%d qdepth %d, limit %d\n",
  496. skdev->name, __func__, __LINE__,
  497. skdev->in_flight, skdev->cur_max_queue_depth);
  498. break;
  499. }
  500. /* Is a skd_request_context available? */
  501. skreq = skdev->skreq_free_list;
  502. if (skreq == NULL) {
  503. pr_debug("%s:%s:%d Out of req=%p\n",
  504. skdev->name, __func__, __LINE__, q);
  505. break;
  506. }
  507. SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
  508. SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
  509. /* Now we check to see if we can get a fit msg */
  510. if (skmsg == NULL) {
  511. if (skdev->skmsg_free_list == NULL) {
  512. pr_debug("%s:%s:%d Out of msg\n",
  513. skdev->name, __func__, __LINE__);
  514. break;
  515. }
  516. }
  517. skreq->flush_cmd = 0;
  518. skreq->n_sg = 0;
  519. skreq->sg_byte_count = 0;
  520. /*
  521. * OK to now dequeue request from q.
  522. *
  523. * At this point we are committed to either start or reject
  524. * the native request. Note that skd_request_context is
  525. * available but is still at the head of the free list.
  526. */
  527. blk_start_request(req);
  528. skreq->req = req;
  529. skreq->fitmsg_id = 0;
  530. /* Either a FIT msg is in progress or we have to start one. */
  531. if (skmsg == NULL) {
  532. /* Are there any FIT msg buffers available? */
  533. skmsg = skdev->skmsg_free_list;
  534. if (skmsg == NULL) {
  535. pr_debug("%s:%s:%d Out of msg skdev=%p\n",
  536. skdev->name, __func__, __LINE__,
  537. skdev);
  538. break;
  539. }
  540. SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
  541. SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);
  542. skdev->skmsg_free_list = skmsg->next;
  543. skmsg->state = SKD_MSG_STATE_BUSY;
  544. skmsg->id += SKD_ID_INCR;
  545. /* Initialize the FIT msg header */
  546. fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
  547. memset(fmh, 0, sizeof(*fmh));
  548. fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
  549. skmsg->length = sizeof(*fmh);
  550. }
  551. skreq->fitmsg_id = skmsg->id;
  552. /*
  553. * Note that a FIT msg may have just been started
  554. * but contains no SoFIT requests yet.
  555. */
  556. /*
  557. * Transcode the request, checking as we go. The outcome of
  558. * the transcoding is represented by the error variable.
  559. */
  560. cmd_ptr = &skmsg->msg_buf[skmsg->length];
  561. memset(cmd_ptr, 0, 32);
  562. be_lba = cpu_to_be32(lba);
  563. be_count = cpu_to_be32(count);
  564. be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
  565. cmdctxt = skreq->id + SKD_ID_INCR;
  566. scsi_req = cmd_ptr;
  567. scsi_req->hdr.tag = cmdctxt;
  568. scsi_req->hdr.sg_list_dma_address = be_dmaa;
  569. if (data_dir == READ)
  570. skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
  571. else
  572. skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
  573. if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
  574. skd_prep_zerosize_flush_cdb(scsi_req, skreq);
  575. SKD_ASSERT(skreq->flush_cmd == 1);
  576. } else {
  577. skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
  578. }
  579. if (fua)
  580. scsi_req->cdb[1] |= SKD_FUA_NV;
  581. if (!req->bio)
  582. goto skip_sg;
  583. error = skd_preop_sg_list(skdev, skreq);
  584. if (error != 0) {
  585. /*
  586. * Complete the native request with error.
  587. * Note that the request context is still at the
  588. * head of the free list, and that the SoFIT request
  589. * was encoded into the FIT msg buffer but the FIT
  590. * msg length has not been updated. In short, the
  591. * only resource that has been allocated but might
  592. * go unused is the FIT msg, which could remain empty.
  593. */
  594. pr_debug("%s:%s:%d error Out\n",
  595. skdev->name, __func__, __LINE__);
  596. skd_end_request(skdev, skreq, error);
  597. continue;
  598. }
  599. skip_sg:
  600. scsi_req->hdr.sg_list_len_bytes =
  601. cpu_to_be32(skreq->sg_byte_count);
  602. /* Complete resource allocations. */
  603. skdev->skreq_free_list = skreq->next;
  604. skreq->state = SKD_REQ_STATE_BUSY;
  605. skreq->id += SKD_ID_INCR;
  606. skmsg->length += sizeof(struct skd_scsi_request);
  607. fmh->num_protocol_cmds_coalesced++;
  608. /*
  609. * Update the active request counts.
  610. * Capture the timeout timestamp.
  611. */
  612. skreq->timeout_stamp = skdev->timeout_stamp;
  613. timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
  614. skdev->timeout_slot[timo_slot]++;
  615. skdev->in_flight++;
  616. pr_debug("%s:%s:%d req=0x%x busy=%d\n",
  617. skdev->name, __func__, __LINE__,
  618. skreq->id, skdev->in_flight);
  619. /*
  620. * If the FIT msg buffer is full send it.
  621. */
  622. if (skmsg->length >= SKD_N_FITMSG_BYTES ||
  623. fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
  624. skd_send_fitmsg(skdev, skmsg);
  625. skmsg = NULL;
  626. fmh = NULL;
  627. }
  628. }
  629. /*
  630. * Is a FIT msg in progress? If it is empty put the buffer back
  631. * on the free list. If it is non-empty send what we got.
  632. * This minimizes latency when there are fewer requests than
  633. * what fits in a FIT msg.
  634. */
  635. if (skmsg != NULL) {
  636. /* Bigger than just a FIT msg header? */
  637. if (skmsg->length > sizeof(struct fit_msg_hdr)) {
  638. pr_debug("%s:%s:%d sending msg=%p, len %d\n",
  639. skdev->name, __func__, __LINE__,
  640. skmsg, skmsg->length);
  641. skd_send_fitmsg(skdev, skmsg);
  642. } else {
  643. /*
  644. * The FIT msg is empty. It means we got started
  645. * on the msg, but the requests were rejected.
  646. */
  647. skmsg->state = SKD_MSG_STATE_IDLE;
  648. skmsg->id += SKD_ID_INCR;
  649. skmsg->next = skdev->skmsg_free_list;
  650. skdev->skmsg_free_list = skmsg;
  651. }
  652. skmsg = NULL;
  653. fmh = NULL;
  654. }
  655. /*
  656. * If req is non-NULL it means there is something to do but
  657. * we are out of a resource.
  658. */
  659. if (req)
  660. blk_stop_queue(skdev->queue);
  661. }
  662. static void skd_end_request(struct skd_device *skdev,
  663. struct skd_request_context *skreq, int error)
  664. {
  665. if (unlikely(error)) {
  666. struct request *req = skreq->req;
  667. char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
  668. u32 lba = (u32)blk_rq_pos(req);
  669. u32 count = blk_rq_sectors(req);
  670. pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
  671. skd_name(skdev), cmd, lba, count, skreq->id);
  672. } else
  673. pr_debug("%s:%s:%d id=0x%x error=%d\n",
  674. skdev->name, __func__, __LINE__, skreq->id, error);
  675. __blk_end_request_all(skreq->req, error);
  676. }
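/*
 * Map the request's segments for DMA (blk_rq_map_sg() + pci_map_sg())
 * and mirror them into the preallocated FIT scatter-gather descriptor
 * list (sksg_list), accumulating sg_byte_count along the way.  Returns
 * -EINVAL if mapping yields no entries.
 */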
  677. static int skd_preop_sg_list(struct skd_device *skdev,
  678. struct skd_request_context *skreq)
  679. {
  680. struct request *req = skreq->req;
  681. int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
  682. int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
  683. struct scatterlist *sg = &skreq->sg[0];
  684. int n_sg;
  685. int i;
  686. skreq->sg_byte_count = 0;
  687. /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
  688. skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */
  689. n_sg = blk_rq_map_sg(skdev->queue, req, sg);
  690. if (n_sg <= 0)
  691. return -EINVAL;
  692. /*
  693. * Map scatterlist to PCI bus addresses.
  694. * Note PCI might change the number of entries.
  695. */
  696. n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
  697. if (n_sg <= 0)
  698. return -EINVAL;
  699. SKD_ASSERT(n_sg <= skdev->sgs_per_request);
  700. skreq->n_sg = n_sg;
  701. for (i = 0; i < n_sg; i++) {
  702. struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
  703. u32 cnt = sg_dma_len(&sg[i]);
  704. uint64_t dma_addr = sg_dma_address(&sg[i]);
  705. sgd->control = FIT_SGD_CONTROL_NOT_LAST;
  706. sgd->byte_count = cnt;
  707. skreq->sg_byte_count += cnt;
  708. sgd->host_side_addr = dma_addr;
  709. sgd->dev_side_addr = 0;
  710. }
  711. skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
  712. skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
  713. if (unlikely(skdev->dbg_level > 1)) {
  714. pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
  715. skdev->name, __func__, __LINE__,
  716. skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
  717. for (i = 0; i < n_sg; i++) {
  718. struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
  719. pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
  720. "addr=0x%llx next=0x%llx\n",
  721. skdev->name, __func__, __LINE__,
  722. i, sgd->byte_count, sgd->control,
  723. sgd->host_side_addr, sgd->next_desc_ptr);
  724. }
  725. }
  726. return 0;
  727. }
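/*
 * Undo skd_preop_sg_list(): re-chain the last FIT SG descriptor back
 * into the preallocated list and unmap the scatterlist now that the
 * transfer is done.
 */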
  728. static void skd_postop_sg_list(struct skd_device *skdev,
  729. struct skd_request_context *skreq)
  730. {
  731. int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
  732. int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
  733. /*
  734. * restore the next ptr for next IO request so we
  735. * don't have to set it every time.
  736. */
  737. skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
  738. skreq->sksg_dma_address +
  739. ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
  740. pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
  741. }
  742. static void skd_request_fn_not_online(struct request_queue *q)
  743. {
  744. struct skd_device *skdev = q->queuedata;
  745. int error;
  746. SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
  747. skd_log_skdev(skdev, "req_not_online");
  748. switch (skdev->state) {
  749. case SKD_DRVR_STATE_PAUSING:
  750. case SKD_DRVR_STATE_PAUSED:
  751. case SKD_DRVR_STATE_STARTING:
  752. case SKD_DRVR_STATE_RESTARTING:
  753. case SKD_DRVR_STATE_WAIT_BOOT:
  754. /* In case of starting, we haven't started the queue,
  755. * so we can't get here... but requests are
  756. * possibly hanging out waiting for us because we
  757. * reported the dev/skd0 already. They'll wait
  758. * forever if connect doesn't complete.
  759. * What to do??? delay dev/skd0 ??
  760. */
  761. case SKD_DRVR_STATE_BUSY:
  762. case SKD_DRVR_STATE_BUSY_IMMINENT:
  763. case SKD_DRVR_STATE_BUSY_ERASE:
  764. case SKD_DRVR_STATE_DRAINING_TIMEOUT:
  765. return;
  766. case SKD_DRVR_STATE_BUSY_SANITIZE:
  767. case SKD_DRVR_STATE_STOPPING:
  768. case SKD_DRVR_STATE_SYNCING:
  769. case SKD_DRVR_STATE_FAULT:
  770. case SKD_DRVR_STATE_DISAPPEARED:
  771. default:
  772. error = -EIO;
  773. break;
  774. }
  775. /* If we get here, terminate all pending block requests
  776. * with EIO and any scsi pass thru with appropriate sense
  777. */
  778. skd_fail_all_pending(skdev);
  779. }
  780. /*
  781. *****************************************************************************
  782. * TIMER
  783. *****************************************************************************
  784. */
  785. static void skd_timer_tick_not_online(struct skd_device *skdev);
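/*
 * One-second housekeeping timer (re-armed with jiffies + HZ).  It
 * re-reads the drive state, advances timeout_stamp, and checks the
 * timeout slot that is about to be reused; any requests still counted
 * there are overdue, so the queue is stopped and the driver enters
 * SKD_DRVR_STATE_DRAINING_TIMEOUT.
 */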
  786. static void skd_timer_tick(ulong arg)
  787. {
  788. struct skd_device *skdev = (struct skd_device *)arg;
  789. u32 timo_slot;
  790. u32 overdue_timestamp;
  791. unsigned long reqflags;
  792. u32 state;
  793. if (skdev->state == SKD_DRVR_STATE_FAULT)
  794. /* The driver has declared fault, and we want it to
  795. * stay that way until driver is reloaded.
  796. */
  797. return;
  798. spin_lock_irqsave(&skdev->lock, reqflags);
  799. state = SKD_READL(skdev, FIT_STATUS);
  800. state &= FIT_SR_DRIVE_STATE_MASK;
  801. if (state != skdev->drive_state)
  802. skd_isr_fwstate(skdev);
  803. if (skdev->state != SKD_DRVR_STATE_ONLINE) {
  804. skd_timer_tick_not_online(skdev);
  805. goto timer_func_out;
  806. }
  807. skdev->timeout_stamp++;
  808. timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
  809. /*
  810. * All requests that happened during the previous use of
  811. * this slot should be done by now. The previous use was
  812. * SKD_N_TIMEOUT_SLOT timer ticks (several seconds) ago.
  813. */
  814. if (skdev->timeout_slot[timo_slot] == 0)
  815. goto timer_func_out;
  816. /* Something is overdue */
  817. overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT;
  818. pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n",
  819. skdev->name, __func__, __LINE__,
  820. skdev->timeout_slot[timo_slot], skdev->in_flight);
  821. pr_err("(%s): Overdue IOs (%d), busy %d\n",
  822. skd_name(skdev), skdev->timeout_slot[timo_slot],
  823. skdev->in_flight);
  824. skdev->timer_countdown = SKD_DRAINING_TIMO;
  825. skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
  826. skdev->timo_slot = timo_slot;
  827. blk_stop_queue(skdev->queue);
  828. timer_func_out:
  829. mod_timer(&skdev->timer, (jiffies + HZ));
  830. spin_unlock_irqrestore(&skdev->lock, reqflags);
  831. }
  832. static void skd_timer_tick_not_online(struct skd_device *skdev)
  833. {
  834. switch (skdev->state) {
  835. case SKD_DRVR_STATE_IDLE:
  836. case SKD_DRVR_STATE_LOAD:
  837. break;
  838. case SKD_DRVR_STATE_BUSY_SANITIZE:
  839. pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
  840. skdev->name, __func__, __LINE__,
  841. skdev->drive_state, skdev->state);
  842. /* If we've been in sanitize for 3 seconds, we figure we're not
  844. * going to get any more completions, so recover requests now
  844. */
  845. if (skdev->timer_countdown > 0) {
  846. skdev->timer_countdown--;
  847. return;
  848. }
  849. skd_recover_requests(skdev, 0);
  850. break;
  851. case SKD_DRVR_STATE_BUSY:
  852. case SKD_DRVR_STATE_BUSY_IMMINENT:
  853. case SKD_DRVR_STATE_BUSY_ERASE:
  854. pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
  855. skdev->name, __func__, __LINE__,
  856. skdev->state, skdev->timer_countdown);
  857. if (skdev->timer_countdown > 0) {
  858. skdev->timer_countdown--;
  859. return;
  860. }
  861. pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.",
  862. skdev->name, __func__, __LINE__,
  863. skdev->state, skdev->timer_countdown);
  864. skd_restart_device(skdev);
  865. break;
  866. case SKD_DRVR_STATE_WAIT_BOOT:
  867. case SKD_DRVR_STATE_STARTING:
  868. if (skdev->timer_countdown > 0) {
  869. skdev->timer_countdown--;
  870. return;
  871. }
  872. /* For now, we fault the drive. Could attempt resets to
  873. * recover at some point. */
  874. skdev->state = SKD_DRVR_STATE_FAULT;
  875. pr_err("(%s): DriveFault Connect Timeout (%x)\n",
  876. skd_name(skdev), skdev->drive_state);
  877. /*start the queue so we can respond with error to requests */
  878. /* wakeup anyone waiting for startup complete */
  879. blk_start_queue(skdev->queue);
  880. skdev->gendisk_on = -1;
  881. wake_up_interruptible(&skdev->waitq);
  882. break;
  883. case SKD_DRVR_STATE_ONLINE:
  884. /* shouldn't get here. */
  885. break;
  886. case SKD_DRVR_STATE_PAUSING:
  887. case SKD_DRVR_STATE_PAUSED:
  888. break;
  889. case SKD_DRVR_STATE_DRAINING_TIMEOUT:
  890. pr_debug("%s:%s:%d "
  891. "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
  892. skdev->name, __func__, __LINE__,
  893. skdev->timo_slot,
  894. skdev->timer_countdown,
  895. skdev->in_flight,
  896. skdev->timeout_slot[skdev->timo_slot]);
  897. /* if the slot has cleared we can let the I/O continue */
  898. if (skdev->timeout_slot[skdev->timo_slot] == 0) {
  899. pr_debug("%s:%s:%d Slot drained, starting queue.\n",
  900. skdev->name, __func__, __LINE__);
  901. skdev->state = SKD_DRVR_STATE_ONLINE;
  902. blk_start_queue(skdev->queue);
  903. return;
  904. }
  905. if (skdev->timer_countdown > 0) {
  906. skdev->timer_countdown--;
  907. return;
  908. }
  909. skd_restart_device(skdev);
  910. break;
  911. case SKD_DRVR_STATE_RESTARTING:
  912. if (skdev->timer_countdown > 0) {
  913. skdev->timer_countdown--;
  914. return;
  915. }
  916. /* For now, we fault the drive. Could attempt resets to
  917. * recover at some point. */
  918. skdev->state = SKD_DRVR_STATE_FAULT;
  919. pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
  920. skd_name(skdev), skdev->drive_state);
  921. /*
  922. * Recovering does two things:
  923. * 1. completes IO with error
  924. * 2. reclaims dma resources
  925. * When is it safe to recover requests?
  926. * - if the drive state is faulted
  927. * - if the state is still soft reset after our timeout
  928. * - if the drive registers are dead (state = FF)
  929. * If it is "unsafe", we still need to recover, so we will
  930. * disable pci bus mastering and disable our interrupts.
  931. */
  932. if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
  933. (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
  934. (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
  935. /* It never came out of soft reset. Try to
  936. * recover the requests and then let them
  937. * fail. This is to mitigate hung processes. */
  938. skd_recover_requests(skdev, 0);
  939. else {
  940. pr_err("(%s): Disable BusMaster (%x)\n",
  941. skd_name(skdev), skdev->drive_state);
  942. pci_disable_device(skdev->pdev);
  943. skd_disable_interrupts(skdev);
  944. skd_recover_requests(skdev, 0);
  945. }
  946. /*start the queue so we can respond with error to requests */
  947. /* wakeup anyone waiting for startup complete */
  948. blk_start_queue(skdev->queue);
  949. skdev->gendisk_on = -1;
  950. wake_up_interruptible(&skdev->waitq);
  951. break;
  952. case SKD_DRVR_STATE_RESUMING:
  953. case SKD_DRVR_STATE_STOPPING:
  954. case SKD_DRVR_STATE_SYNCING:
  955. case SKD_DRVR_STATE_FAULT:
  956. case SKD_DRVR_STATE_DISAPPEARED:
  957. default:
  958. break;
  959. }
  960. }
  961. static int skd_start_timer(struct skd_device *skdev)
  962. {
  963. int rc;
  964. init_timer(&skdev->timer);
  965. setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
  966. rc = mod_timer(&skdev->timer, (jiffies + HZ));
  967. if (rc)
  968. pr_err("%s: failed to start timer %d\n",
  969. __func__, rc);
  970. return rc;
  971. }
  972. static void skd_kill_timer(struct skd_device *skdev)
  973. {
  974. del_timer_sync(&skdev->timer);
  975. }
  976. /*
  977. *****************************************************************************
  978. * IOCTL
  979. *****************************************************************************
  980. */
  981. static int skd_ioctl_sg_io(struct skd_device *skdev,
  982. fmode_t mode, void __user *argp);
  983. static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
  984. struct skd_sg_io *sksgio);
  985. static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
  986. struct skd_sg_io *sksgio);
  987. static int skd_sg_io_prep_buffering(struct skd_device *skdev,
  988. struct skd_sg_io *sksgio);
  989. static int skd_sg_io_copy_buffer(struct skd_device *skdev,
  990. struct skd_sg_io *sksgio, int dxfer_dir);
  991. static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
  992. struct skd_sg_io *sksgio);
  993. static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
  994. static int skd_sg_io_release_skspcl(struct skd_device *skdev,
  995. struct skd_sg_io *sksgio);
  996. static int skd_sg_io_put_status(struct skd_device *skdev,
  997. struct skd_sg_io *sksgio);
  998. static void skd_complete_special(struct skd_device *skdev,
  999. volatile struct fit_completion_entry_v1
  1000. *skcomp,
  1001. volatile struct fit_comp_error_info *skerr,
  1002. struct skd_special_context *skspcl);
  1003. static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
  1004. uint cmd_in, ulong arg)
  1005. {
  1006. static const int sg_version_num = 30527;
  1007. int rc = 0, timeout;
  1008. struct gendisk *disk = bdev->bd_disk;
  1009. struct skd_device *skdev = disk->private_data;
  1010. int __user *p = (int __user *)arg;
  1011. pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n",
  1012. skdev->name, __func__, __LINE__,
  1013. disk->disk_name, current->comm, mode, cmd_in, arg);
  1014. if (!capable(CAP_SYS_ADMIN))
  1015. return -EPERM;
  1016. switch (cmd_in) {
  1017. case SG_SET_TIMEOUT:
  1018. rc = get_user(timeout, p);
  1019. if (!rc)
  1020. disk->queue->sg_timeout = clock_t_to_jiffies(timeout);
  1021. break;
  1022. case SG_GET_TIMEOUT:
  1023. rc = jiffies_to_clock_t(disk->queue->sg_timeout);
  1024. break;
  1025. case SG_GET_VERSION_NUM:
  1026. rc = put_user(sg_version_num, p);
  1027. break;
  1028. case SG_IO:
  1029. rc = skd_ioctl_sg_io(skdev, mode, (void __user *)arg);
  1030. break;
  1031. default:
  1032. rc = -ENOTTY;
  1033. break;
  1034. }
  1035. pr_debug("%s:%s:%d %s: completion rc %d\n",
  1036. skdev->name, __func__, __LINE__, disk->disk_name, rc);
  1037. return rc;
  1038. }
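/*
 * Illustrative user-space sketch (not part of the driver): how the ioctls
 * handled above are typically driven through the generic SG_IO interface.
 * The device node name /dev/skd0 is an assumption for the example; the
 * sg_io_hdr fields mirror the checks in skd_sg_io_get_and_check_args().
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>
 *
 *	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };  (INQUIRY, 96 bytes)
 *	unsigned char data[96], sense[32];
 *	struct sg_io_hdr hdr = { 0 };
 *	int ver, fd = open("/dev/skd0", O_RDWR);          (assumed node name)
 *
 *	ioctl(fd, SG_GET_VERSION_NUM, &ver);              (returns 30527)
 *	hdr.interface_id    = 'S';                        (SG_INTERFACE_ID_ORIG)
 *	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
 *	hdr.cmd_len         = sizeof(cdb);
 *	hdr.cmdp            = cdb;
 *	hdr.dxfer_len       = sizeof(data);
 *	hdr.dxferp          = data;
 *	hdr.mx_sb_len       = sizeof(sense);
 *	hdr.sbp             = sense;
 *	hdr.timeout         = 5000;                       (milliseconds)
 *	if (ioctl(fd, SG_IO, &hdr) == 0 && hdr.masked_status == 0)
 *		... data now holds the INQUIRY response ...
 */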
  1039. static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
  1040. void __user *argp)
  1041. {
  1042. int rc;
  1043. struct skd_sg_io sksgio;
  1044. memset(&sksgio, 0, sizeof(sksgio));
  1045. sksgio.mode = mode;
  1046. sksgio.argp = argp;
  1047. sksgio.iov = &sksgio.no_iov_iov;
  1048. switch (skdev->state) {
  1049. case SKD_DRVR_STATE_ONLINE:
  1050. case SKD_DRVR_STATE_BUSY_IMMINENT:
  1051. break;
  1052. default:
  1053. pr_debug("%s:%s:%d drive not online\n",
  1054. skdev->name, __func__, __LINE__);
  1055. rc = -ENXIO;
  1056. goto out;
  1057. }
  1058. rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
  1059. if (rc)
  1060. goto out;
  1061. rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
  1062. if (rc)
  1063. goto out;
  1064. rc = skd_sg_io_prep_buffering(skdev, &sksgio);
  1065. if (rc)
  1066. goto out;
  1067. rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
  1068. if (rc)
  1069. goto out;
  1070. rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
  1071. if (rc)
  1072. goto out;
  1073. rc = skd_sg_io_await(skdev, &sksgio);
  1074. if (rc)
  1075. goto out;
  1076. rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
  1077. if (rc)
  1078. goto out;
  1079. rc = skd_sg_io_put_status(skdev, &sksgio);
  1080. if (rc)
  1081. goto out;
  1082. rc = 0;
  1083. out:
  1084. skd_sg_io_release_skspcl(skdev, &sksgio);
  1085. if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
  1086. kfree(sksgio.iov);
  1087. return rc;
  1088. }
  1089. static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
  1090. struct skd_sg_io *sksgio)
  1091. {
  1092. struct sg_io_hdr *sgp = &sksgio->sg;
  1093. int i, acc;
  1094. if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
  1095. pr_debug("%s:%s:%d access sg failed %p\n",
  1096. skdev->name, __func__, __LINE__, sksgio->argp);
  1097. return -EFAULT;
  1098. }
  1099. if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
  1100. pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
  1101. skdev->name, __func__, __LINE__, sksgio->argp);
  1102. return -EFAULT;
  1103. }
  1104. if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
  1105. pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
  1106. skdev->name, __func__, __LINE__, sgp->interface_id);
  1107. return -EINVAL;
  1108. }
  1109. if (sgp->cmd_len > sizeof(sksgio->cdb)) {
  1110. pr_debug("%s:%s:%d cmd_len invalid %d\n",
  1111. skdev->name, __func__, __LINE__, sgp->cmd_len);
  1112. return -EINVAL;
  1113. }
  1114. if (sgp->iovec_count > 256) {
  1115. pr_debug("%s:%s:%d iovec_count invalid %d\n",
  1116. skdev->name, __func__, __LINE__, sgp->iovec_count);
  1117. return -EINVAL;
  1118. }
  1119. if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
  1120. pr_debug("%s:%s:%d dxfer_len invalid %d\n",
  1121. skdev->name, __func__, __LINE__, sgp->dxfer_len);
  1122. return -EINVAL;
  1123. }
  1124. switch (sgp->dxfer_direction) {
  1125. case SG_DXFER_NONE:
  1126. acc = -1;
  1127. break;
  1128. case SG_DXFER_TO_DEV:
  1129. acc = VERIFY_READ;
  1130. break;
  1131. case SG_DXFER_FROM_DEV:
  1132. case SG_DXFER_TO_FROM_DEV:
  1133. acc = VERIFY_WRITE;
  1134. break;
  1135. default:
  1136. pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
  1137. skdev->name, __func__, __LINE__, sgp->dxfer_direction);
  1138. return -EINVAL;
  1139. }
  1140. if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
  1141. pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
  1142. skdev->name, __func__, __LINE__, sgp->cmdp);
  1143. return -EFAULT;
  1144. }
  1145. if (sgp->mx_sb_len != 0) {
  1146. if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
  1147. pr_debug("%s:%s:%d access sbp failed %p\n",
  1148. skdev->name, __func__, __LINE__, sgp->sbp);
  1149. return -EFAULT;
  1150. }
  1151. }
  1152. if (sgp->iovec_count == 0) {
  1153. sksgio->iov[0].iov_base = sgp->dxferp;
  1154. sksgio->iov[0].iov_len = sgp->dxfer_len;
  1155. sksgio->iovcnt = 1;
  1156. sksgio->dxfer_len = sgp->dxfer_len;
  1157. } else {
  1158. struct sg_iovec *iov;
  1159. uint nbytes = sizeof(*iov) * sgp->iovec_count;
  1160. size_t iov_data_len;
  1161. iov = kmalloc(nbytes, GFP_KERNEL);
  1162. if (iov == NULL) {
  1163. pr_debug("%s:%s:%d alloc iovec failed %d\n",
  1164. skdev->name, __func__, __LINE__,
  1165. sgp->iovec_count);
  1166. return -ENOMEM;
  1167. }
  1168. sksgio->iov = iov;
  1169. sksgio->iovcnt = sgp->iovec_count;
  1170. if (copy_from_user(iov, sgp->dxferp, nbytes)) {
  1171. pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
  1172. skdev->name, __func__, __LINE__, sgp->dxferp);
  1173. return -EFAULT;
  1174. }
  1175. /*
  1176. * Sum up the vecs, making sure they don't overflow
  1177. */
  1178. iov_data_len = 0;
  1179. for (i = 0; i < sgp->iovec_count; i++) {
  1180. if (iov_data_len + iov[i].iov_len < iov_data_len)
  1181. return -EINVAL;
  1182. iov_data_len += iov[i].iov_len;
  1183. }
  1184. /* SG_IO howto says that the shorter of the two wins */
  1185. if (sgp->dxfer_len < iov_data_len) {
  1186. sksgio->iovcnt = iov_shorten((struct iovec *)iov,
  1187. sgp->iovec_count,
  1188. sgp->dxfer_len);
  1189. sksgio->dxfer_len = sgp->dxfer_len;
  1190. } else
  1191. sksgio->dxfer_len = iov_data_len;
  1192. }
  1193. if (sgp->dxfer_direction != SG_DXFER_NONE) {
  1194. struct sg_iovec *iov = sksgio->iov;
  1195. for (i = 0; i < sksgio->iovcnt; i++, iov++) {
  1196. if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
  1197. pr_debug("%s:%s:%d access data failed %p/%d\n",
  1198. skdev->name, __func__, __LINE__,
  1199. iov->iov_base, (int)iov->iov_len);
  1200. return -EFAULT;
  1201. }
  1202. }
  1203. }
  1204. return 0;
  1205. }
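/*
 * Worked example for the iovec handling above (illustrative): with
 * iovec_count == 2, iov_len == { 4096, 4096 } and dxfer_len == 6000,
 * iov_data_len sums to 8192.  Per the SG_IO rule that the shorter of the
 * two wins, iov_shorten() trims the vector to cover only 6000 bytes and
 * sksgio->dxfer_len becomes 6000.  If dxfer_len were 16384 instead, the
 * transfer would be limited to the 8192 bytes the vectors describe.
 */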
  1206. static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
  1207. struct skd_sg_io *sksgio)
  1208. {
  1209. struct skd_special_context *skspcl = NULL;
  1210. int rc;
  1211. for (;;) {
  1212. ulong flags;
  1213. spin_lock_irqsave(&skdev->lock, flags);
  1214. skspcl = skdev->skspcl_free_list;
  1215. if (skspcl != NULL) {
  1216. skdev->skspcl_free_list =
  1217. (struct skd_special_context *)skspcl->req.next;
  1218. skspcl->req.id += SKD_ID_INCR;
  1219. skspcl->req.state = SKD_REQ_STATE_SETUP;
  1220. skspcl->orphaned = 0;
  1221. skspcl->req.n_sg = 0;
  1222. }
  1223. spin_unlock_irqrestore(&skdev->lock, flags);
  1224. if (skspcl != NULL) {
  1225. rc = 0;
  1226. break;
  1227. }
  1228. pr_debug("%s:%s:%d blocking\n",
  1229. skdev->name, __func__, __LINE__);
  1230. rc = wait_event_interruptible_timeout(
  1231. skdev->waitq,
  1232. (skdev->skspcl_free_list != NULL),
  1233. msecs_to_jiffies(sksgio->sg.timeout));
  1234. pr_debug("%s:%s:%d unblocking, rc=%d\n",
  1235. skdev->name, __func__, __LINE__, rc);
  1236. if (rc <= 0) {
  1237. if (rc == 0)
  1238. rc = -ETIMEDOUT;
  1239. else
  1240. rc = -EINTR;
  1241. break;
  1242. }
  1243. /*
1244. * If we get here rc > 0, meaning wait_event_interruptible_timeout()
1245. * returned with time to spare, so the awaited event -- a non-empty
1246. * free list -- occurred.
  1247. * Retry the allocation.
  1248. */
  1249. }
  1250. sksgio->skspcl = skspcl;
  1251. return rc;
  1252. }
  1253. static int skd_skreq_prep_buffering(struct skd_device *skdev,
  1254. struct skd_request_context *skreq,
  1255. u32 dxfer_len)
  1256. {
  1257. u32 resid = dxfer_len;
  1258. /*
  1259. * The DMA engine must have aligned addresses and byte counts.
  1260. */
  1261. resid += (-resid) & 3;
  1262. skreq->sg_byte_count = resid;
  1263. skreq->n_sg = 0;
  1264. while (resid > 0) {
  1265. u32 nbytes = PAGE_SIZE;
  1266. u32 ix = skreq->n_sg;
  1267. struct scatterlist *sg = &skreq->sg[ix];
  1268. struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
  1269. struct page *page;
  1270. if (nbytes > resid)
  1271. nbytes = resid;
  1272. page = alloc_page(GFP_KERNEL);
  1273. if (page == NULL)
  1274. return -ENOMEM;
  1275. sg_set_page(sg, page, nbytes, 0);
  1276. /* TODO: This should be going through a pci_???()
  1277. * routine to do proper mapping. */
  1278. sksg->control = FIT_SGD_CONTROL_NOT_LAST;
  1279. sksg->byte_count = nbytes;
  1280. sksg->host_side_addr = sg_phys(sg);
  1281. sksg->dev_side_addr = 0;
  1282. sksg->next_desc_ptr = skreq->sksg_dma_address +
  1283. (ix + 1) * sizeof(*sksg);
  1284. skreq->n_sg++;
  1285. resid -= nbytes;
  1286. }
  1287. if (skreq->n_sg > 0) {
  1288. u32 ix = skreq->n_sg - 1;
  1289. struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
  1290. sksg->control = FIT_SGD_CONTROL_LAST;
  1291. sksg->next_desc_ptr = 0;
  1292. }
  1293. if (unlikely(skdev->dbg_level > 1)) {
  1294. u32 i;
  1295. pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
  1296. skdev->name, __func__, __LINE__,
  1297. skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
  1298. for (i = 0; i < skreq->n_sg; i++) {
  1299. struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
  1300. pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
  1301. "addr=0x%llx next=0x%llx\n",
  1302. skdev->name, __func__, __LINE__,
  1303. i, sgd->byte_count, sgd->control,
  1304. sgd->host_side_addr, sgd->next_desc_ptr);
  1305. }
  1306. }
  1307. return 0;
  1308. }
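/*
 * Worked example of the round-up above (illustrative): resid += (-resid) & 3
 * pads the byte count to the next multiple of 4, so dxfer_len == 510 becomes
 * 512 (one page) and dxfer_len == 4097 becomes 4100, which takes two pages
 * (4096 + 4) assuming 4 KiB pages.
 */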
  1309. static int skd_sg_io_prep_buffering(struct skd_device *skdev,
  1310. struct skd_sg_io *sksgio)
  1311. {
  1312. struct skd_special_context *skspcl = sksgio->skspcl;
  1313. struct skd_request_context *skreq = &skspcl->req;
  1314. u32 dxfer_len = sksgio->dxfer_len;
  1315. int rc;
  1316. rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
  1317. /*
  1318. * Eventually, errors or not, skd_release_special() is called
  1319. * to recover allocations including partial allocations.
  1320. */
  1321. return rc;
  1322. }
  1323. static int skd_sg_io_copy_buffer(struct skd_device *skdev,
  1324. struct skd_sg_io *sksgio, int dxfer_dir)
  1325. {
  1326. struct skd_special_context *skspcl = sksgio->skspcl;
  1327. u32 iov_ix = 0;
  1328. struct sg_iovec curiov;
  1329. u32 sksg_ix = 0;
  1330. u8 *bufp = NULL;
  1331. u32 buf_len = 0;
  1332. u32 resid = sksgio->dxfer_len;
  1333. int rc;
  1334. curiov.iov_len = 0;
  1335. curiov.iov_base = NULL;
  1336. if (dxfer_dir != sksgio->sg.dxfer_direction) {
  1337. if (dxfer_dir != SG_DXFER_TO_DEV ||
  1338. sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
  1339. return 0;
  1340. }
  1341. while (resid > 0) {
  1342. u32 nbytes = PAGE_SIZE;
  1343. if (curiov.iov_len == 0) {
  1344. curiov = sksgio->iov[iov_ix++];
  1345. continue;
  1346. }
  1347. if (buf_len == 0) {
  1348. struct page *page;
  1349. page = sg_page(&skspcl->req.sg[sksg_ix++]);
  1350. bufp = page_address(page);
  1351. buf_len = PAGE_SIZE;
  1352. }
  1353. nbytes = min_t(u32, nbytes, resid);
  1354. nbytes = min_t(u32, nbytes, curiov.iov_len);
  1355. nbytes = min_t(u32, nbytes, buf_len);
  1356. if (dxfer_dir == SG_DXFER_TO_DEV)
  1357. rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
  1358. else
  1359. rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
  1360. if (rc)
  1361. return -EFAULT;
  1362. resid -= nbytes;
  1363. curiov.iov_len -= nbytes;
  1364. curiov.iov_base += nbytes;
  1365. buf_len -= nbytes;
  1366. }
  1367. return 0;
  1368. }
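/*
 * Worked example for the copy loop above (illustrative, 4 KiB pages): a
 * single 6000-byte iovec copied to the device is clamped on the first pass
 * to min(PAGE_SIZE, resid, iov_len, buf_len) == 4096 bytes, filling the
 * first driver page; the second pass copies the remaining 1904 bytes into
 * the next page.  The opposite skew (many small iovecs draining one page)
 * works the same way: whichever of the current iovec or current page runs
 * out first is refilled on the next iteration, until resid reaches zero.
 */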
  1369. static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
  1370. struct skd_sg_io *sksgio)
  1371. {
  1372. struct skd_special_context *skspcl = sksgio->skspcl;
  1373. struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
  1374. struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
  1375. memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
  1376. /* Initialize the FIT msg header */
  1377. fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
  1378. fmh->num_protocol_cmds_coalesced = 1;
  1379. /* Initialize the SCSI request */
  1380. if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
  1381. scsi_req->hdr.sg_list_dma_address =
  1382. cpu_to_be64(skspcl->req.sksg_dma_address);
  1383. scsi_req->hdr.tag = skspcl->req.id;
  1384. scsi_req->hdr.sg_list_len_bytes =
  1385. cpu_to_be32(skspcl->req.sg_byte_count);
  1386. memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
  1387. skspcl->req.state = SKD_REQ_STATE_BUSY;
  1388. skd_send_special_fitmsg(skdev, skspcl);
  1389. return 0;
  1390. }
  1391. static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
  1392. {
  1393. unsigned long flags;
  1394. int rc;
  1395. rc = wait_event_interruptible_timeout(skdev->waitq,
  1396. (sksgio->skspcl->req.state !=
  1397. SKD_REQ_STATE_BUSY),
  1398. msecs_to_jiffies(sksgio->sg.
  1399. timeout));
  1400. spin_lock_irqsave(&skdev->lock, flags);
  1401. if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
  1402. pr_debug("%s:%s:%d skspcl %p aborted\n",
  1403. skdev->name, __func__, __LINE__, sksgio->skspcl);
  1404. /* Build check cond, sense and let command finish. */
  1405. /* For a timeout, we must fabricate completion and sense
  1406. * data to complete the command */
  1407. sksgio->skspcl->req.completion.status =
  1408. SAM_STAT_CHECK_CONDITION;
  1409. memset(&sksgio->skspcl->req.err_info, 0,
  1410. sizeof(sksgio->skspcl->req.err_info));
  1411. sksgio->skspcl->req.err_info.type = 0x70;
  1412. sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
  1413. sksgio->skspcl->req.err_info.code = 0x44;
  1414. sksgio->skspcl->req.err_info.qual = 0;
  1415. rc = 0;
  1416. } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
  1417. /* No longer on the adapter. We finish. */
  1418. rc = 0;
  1419. else {
  1420. /* Something's gone wrong. Still busy. Timeout or
  1421. * user interrupted (control-C). Mark as an orphan
1422. * so it will be disposed of when it completes. */
  1423. sksgio->skspcl->orphaned = 1;
  1424. sksgio->skspcl = NULL;
  1425. if (rc == 0) {
  1426. pr_debug("%s:%s:%d timed out %p (%u ms)\n",
  1427. skdev->name, __func__, __LINE__,
  1428. sksgio, sksgio->sg.timeout);
  1429. rc = -ETIMEDOUT;
  1430. } else {
  1431. pr_debug("%s:%s:%d cntlc %p\n",
  1432. skdev->name, __func__, __LINE__, sksgio);
  1433. rc = -EINTR;
  1434. }
  1435. }
  1436. spin_unlock_irqrestore(&skdev->lock, flags);
  1437. return rc;
  1438. }
  1439. static int skd_sg_io_put_status(struct skd_device *skdev,
  1440. struct skd_sg_io *sksgio)
  1441. {
  1442. struct sg_io_hdr *sgp = &sksgio->sg;
  1443. struct skd_special_context *skspcl = sksgio->skspcl;
  1444. int resid = 0;
  1445. u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
  1446. sgp->status = skspcl->req.completion.status;
  1447. resid = sksgio->dxfer_len - nb;
  1448. sgp->masked_status = sgp->status & STATUS_MASK;
  1449. sgp->msg_status = 0;
  1450. sgp->host_status = 0;
  1451. sgp->driver_status = 0;
  1452. sgp->resid = resid;
  1453. if (sgp->masked_status || sgp->host_status || sgp->driver_status)
  1454. sgp->info |= SG_INFO_CHECK;
  1455. pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n",
  1456. skdev->name, __func__, __LINE__,
  1457. sgp->status, sgp->masked_status, sgp->resid);
  1458. if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
  1459. if (sgp->mx_sb_len > 0) {
  1460. struct fit_comp_error_info *ei = &skspcl->req.err_info;
  1461. u32 nbytes = sizeof(*ei);
  1462. nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
  1463. sgp->sb_len_wr = nbytes;
  1464. if (__copy_to_user(sgp->sbp, ei, nbytes)) {
  1465. pr_debug("%s:%s:%d copy_to_user sense failed %p\n",
  1466. skdev->name, __func__, __LINE__,
  1467. sgp->sbp);
  1468. return -EFAULT;
  1469. }
  1470. }
  1471. }
  1472. if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
  1473. pr_debug("%s:%s:%d copy_to_user sg failed %p\n",
  1474. skdev->name, __func__, __LINE__, sksgio->argp);
  1475. return -EFAULT;
  1476. }
  1477. return 0;
  1478. }
  1479. static int skd_sg_io_release_skspcl(struct skd_device *skdev,
  1480. struct skd_sg_io *sksgio)
  1481. {
  1482. struct skd_special_context *skspcl = sksgio->skspcl;
  1483. if (skspcl != NULL) {
  1484. ulong flags;
  1485. sksgio->skspcl = NULL;
  1486. spin_lock_irqsave(&skdev->lock, flags);
  1487. skd_release_special(skdev, skspcl);
  1488. spin_unlock_irqrestore(&skdev->lock, flags);
  1489. }
  1490. return 0;
  1491. }
  1492. /*
  1493. *****************************************************************************
  1494. * INTERNAL REQUESTS -- generated by driver itself
  1495. *****************************************************************************
  1496. */
  1497. static int skd_format_internal_skspcl(struct skd_device *skdev)
  1498. {
  1499. struct skd_special_context *skspcl = &skdev->internal_skspcl;
  1500. struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
  1501. struct fit_msg_hdr *fmh;
  1502. uint64_t dma_address;
  1503. struct skd_scsi_request *scsi;
  1504. fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
  1505. fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
  1506. fmh->num_protocol_cmds_coalesced = 1;
  1507. scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
  1508. memset(scsi, 0, sizeof(*scsi));
  1509. dma_address = skspcl->req.sksg_dma_address;
  1510. scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
  1511. sgd->control = FIT_SGD_CONTROL_LAST;
  1512. sgd->byte_count = 0;
  1513. sgd->host_side_addr = skspcl->db_dma_address;
  1514. sgd->dev_side_addr = 0;
  1515. sgd->next_desc_ptr = 0LL;
  1516. return 1;
  1517. }
  1518. #define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
  1519. static void skd_send_internal_skspcl(struct skd_device *skdev,
  1520. struct skd_special_context *skspcl,
  1521. u8 opcode)
  1522. {
  1523. struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
  1524. struct skd_scsi_request *scsi;
  1525. unsigned char *buf = skspcl->data_buf;
  1526. int i;
  1527. if (skspcl->req.state != SKD_REQ_STATE_IDLE)
  1528. /*
  1529. * A refresh is already in progress.
  1530. * Just wait for it to finish.
  1531. */
  1532. return;
  1533. SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
  1534. skspcl->req.state = SKD_REQ_STATE_BUSY;
  1535. skspcl->req.id += SKD_ID_INCR;
  1536. scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
  1537. scsi->hdr.tag = skspcl->req.id;
  1538. memset(scsi->cdb, 0, sizeof(scsi->cdb));
  1539. switch (opcode) {
  1540. case TEST_UNIT_READY:
  1541. scsi->cdb[0] = TEST_UNIT_READY;
  1542. sgd->byte_count = 0;
  1543. scsi->hdr.sg_list_len_bytes = 0;
  1544. break;
  1545. case READ_CAPACITY:
  1546. scsi->cdb[0] = READ_CAPACITY;
  1547. sgd->byte_count = SKD_N_READ_CAP_BYTES;
  1548. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  1549. break;
  1550. case INQUIRY:
  1551. scsi->cdb[0] = INQUIRY;
  1552. scsi->cdb[1] = 0x01; /* evpd */
  1553. scsi->cdb[2] = 0x80; /* serial number page */
  1554. scsi->cdb[4] = 0x10;
  1555. sgd->byte_count = 16;
  1556. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  1557. break;
  1558. case SYNCHRONIZE_CACHE:
  1559. scsi->cdb[0] = SYNCHRONIZE_CACHE;
  1560. sgd->byte_count = 0;
  1561. scsi->hdr.sg_list_len_bytes = 0;
  1562. break;
  1563. case WRITE_BUFFER:
  1564. scsi->cdb[0] = WRITE_BUFFER;
  1565. scsi->cdb[1] = 0x02;
  1566. scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
  1567. scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
  1568. sgd->byte_count = WR_BUF_SIZE;
  1569. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  1570. /* fill incrementing byte pattern */
  1571. for (i = 0; i < sgd->byte_count; i++)
  1572. buf[i] = i & 0xFF;
  1573. break;
  1574. case READ_BUFFER:
  1575. scsi->cdb[0] = READ_BUFFER;
  1576. scsi->cdb[1] = 0x02;
  1577. scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
  1578. scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
  1579. sgd->byte_count = WR_BUF_SIZE;
  1580. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  1581. memset(skspcl->data_buf, 0, sgd->byte_count);
  1582. break;
  1583. default:
  1584. SKD_ASSERT("Don't know what to send");
  1585. return;
  1586. }
  1587. skd_send_special_fitmsg(skdev, skspcl);
  1588. }
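/*
 * Worked example for the WRITE_BUFFER/READ_BUFFER setup above (illustrative,
 * assuming SKD_N_INTERNAL_BYTES were 4096): WR_BUF_SIZE == 0x1000, so
 * cdb[7] == 0x10 and cdb[8] == 0x00, i.e. the 16-bit parameter list length
 * is stored big-endian in CDB bytes 7-8 (cdb[1] == 0x02 selects "data"
 * mode).  The buffer is filled with the incrementing pattern
 * 00 01 02 ... ff 00 01 ... that skd_chk_read_buf() verifies after the
 * matching READ_BUFFER completes.
 */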
  1589. static void skd_refresh_device_data(struct skd_device *skdev)
  1590. {
  1591. struct skd_special_context *skspcl = &skdev->internal_skspcl;
  1592. skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
  1593. }
  1594. static int skd_chk_read_buf(struct skd_device *skdev,
  1595. struct skd_special_context *skspcl)
  1596. {
  1597. unsigned char *buf = skspcl->data_buf;
  1598. int i;
  1599. /* check for incrementing byte pattern */
  1600. for (i = 0; i < WR_BUF_SIZE; i++)
  1601. if (buf[i] != (i & 0xFF))
  1602. return 1;
  1603. return 0;
  1604. }
  1605. static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
  1606. u8 code, u8 qual, u8 fruc)
  1607. {
  1608. /* If the check condition is of special interest, log a message */
  1609. if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
  1610. && (code == 0x04) && (qual == 0x06)) {
  1611. pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
  1612. "ascq/fruc %02x/%02x/%02x/%02x\n",
  1613. skd_name(skdev), key, code, qual, fruc);
  1614. }
  1615. }
  1616. static void skd_complete_internal(struct skd_device *skdev,
  1617. volatile struct fit_completion_entry_v1
  1618. *skcomp,
  1619. volatile struct fit_comp_error_info *skerr,
  1620. struct skd_special_context *skspcl)
  1621. {
  1622. u8 *buf = skspcl->data_buf;
  1623. u8 status;
  1624. int i;
  1625. struct skd_scsi_request *scsi =
  1626. (struct skd_scsi_request *)&skspcl->msg_buf[64];
  1627. SKD_ASSERT(skspcl == &skdev->internal_skspcl);
  1628. pr_debug("%s:%s:%d complete internal %x\n",
  1629. skdev->name, __func__, __LINE__, scsi->cdb[0]);
  1630. skspcl->req.completion = *skcomp;
  1631. skspcl->req.state = SKD_REQ_STATE_IDLE;
  1632. skspcl->req.id += SKD_ID_INCR;
  1633. status = skspcl->req.completion.status;
  1634. skd_log_check_status(skdev, status, skerr->key, skerr->code,
  1635. skerr->qual, skerr->fruc);
  1636. switch (scsi->cdb[0]) {
  1637. case TEST_UNIT_READY:
  1638. if (status == SAM_STAT_GOOD)
  1639. skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
  1640. else if ((status == SAM_STAT_CHECK_CONDITION) &&
  1641. (skerr->key == MEDIUM_ERROR))
  1642. skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
  1643. else {
  1644. if (skdev->state == SKD_DRVR_STATE_STOPPING) {
  1645. pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n",
  1646. skdev->name, __func__, __LINE__,
  1647. skdev->state);
  1648. return;
  1649. }
  1650. pr_debug("%s:%s:%d **** TUR failed, retry skerr\n",
  1651. skdev->name, __func__, __LINE__);
  1652. skd_send_internal_skspcl(skdev, skspcl, 0x00);
  1653. }
  1654. break;
  1655. case WRITE_BUFFER:
  1656. if (status == SAM_STAT_GOOD)
  1657. skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
  1658. else {
  1659. if (skdev->state == SKD_DRVR_STATE_STOPPING) {
  1660. pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n",
  1661. skdev->name, __func__, __LINE__,
  1662. skdev->state);
  1663. return;
  1664. }
  1665. pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n",
  1666. skdev->name, __func__, __LINE__);
  1667. skd_send_internal_skspcl(skdev, skspcl, 0x00);
  1668. }
  1669. break;
  1670. case READ_BUFFER:
  1671. if (status == SAM_STAT_GOOD) {
  1672. if (skd_chk_read_buf(skdev, skspcl) == 0)
  1673. skd_send_internal_skspcl(skdev, skspcl,
  1674. READ_CAPACITY);
  1675. else {
  1676. pr_err(
  1677. "(%s):*** W/R Buffer mismatch %d ***\n",
  1678. skd_name(skdev), skdev->connect_retries);
  1679. if (skdev->connect_retries <
  1680. SKD_MAX_CONNECT_RETRIES) {
  1681. skdev->connect_retries++;
  1682. skd_soft_reset(skdev);
  1683. } else {
  1684. pr_err(
  1685. "(%s): W/R Buffer Connect Error\n",
  1686. skd_name(skdev));
  1687. return;
  1688. }
  1689. }
  1690. } else {
  1691. if (skdev->state == SKD_DRVR_STATE_STOPPING) {
  1692. pr_debug("%s:%s:%d "
  1693. "read buffer failed, don't send anymore state 0x%x\n",
  1694. skdev->name, __func__, __LINE__,
  1695. skdev->state);
  1696. return;
  1697. }
  1698. pr_debug("%s:%s:%d "
  1699. "**** read buffer failed, retry skerr\n",
  1700. skdev->name, __func__, __LINE__);
  1701. skd_send_internal_skspcl(skdev, skspcl, 0x00);
  1702. }
  1703. break;
  1704. case READ_CAPACITY:
  1705. skdev->read_cap_is_valid = 0;
  1706. if (status == SAM_STAT_GOOD) {
  1707. skdev->read_cap_last_lba =
  1708. (buf[0] << 24) | (buf[1] << 16) |
  1709. (buf[2] << 8) | buf[3];
  1710. skdev->read_cap_blocksize =
  1711. (buf[4] << 24) | (buf[5] << 16) |
  1712. (buf[6] << 8) | buf[7];
  1713. pr_debug("%s:%s:%d last lba %d, bs %d\n",
  1714. skdev->name, __func__, __LINE__,
  1715. skdev->read_cap_last_lba,
  1716. skdev->read_cap_blocksize);
  1717. set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
  1718. skdev->read_cap_is_valid = 1;
  1719. skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
  1720. } else if ((status == SAM_STAT_CHECK_CONDITION) &&
  1721. (skerr->key == MEDIUM_ERROR)) {
  1722. skdev->read_cap_last_lba = ~0;
  1723. set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
  1724. pr_debug("%s:%s:%d "
  1725. "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n",
  1726. skdev->name, __func__, __LINE__);
  1727. skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
  1728. } else {
  1729. pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n",
  1730. skdev->name, __func__, __LINE__);
  1731. skd_send_internal_skspcl(skdev, skspcl,
  1732. TEST_UNIT_READY);
  1733. }
  1734. break;
  1735. case INQUIRY:
  1736. skdev->inquiry_is_valid = 0;
  1737. if (status == SAM_STAT_GOOD) {
  1738. skdev->inquiry_is_valid = 1;
  1739. for (i = 0; i < 12; i++)
  1740. skdev->inq_serial_num[i] = buf[i + 4];
  1741. skdev->inq_serial_num[12] = 0;
  1742. }
  1743. if (skd_unquiesce_dev(skdev) < 0)
  1744. pr_debug("%s:%s:%d **** failed, to ONLINE device\n",
  1745. skdev->name, __func__, __LINE__);
  1746. /* connection is complete */
  1747. skdev->connect_retries = 0;
  1748. break;
  1749. case SYNCHRONIZE_CACHE:
  1750. if (status == SAM_STAT_GOOD)
  1751. skdev->sync_done = 1;
  1752. else
  1753. skdev->sync_done = -1;
  1754. wake_up_interruptible(&skdev->waitq);
  1755. break;
  1756. default:
  1757. SKD_ASSERT("we didn't send this");
  1758. }
  1759. }
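/*
 * For reference, the chain of internal requests driven by the handler above
 * (derived from the switch cases) is:
 *
 *	TEST_UNIT_READY -> WRITE_BUFFER -> READ_BUFFER (pattern checked by
 *	skd_chk_read_buf) -> READ_CAPACITY -> INQUIRY (VPD page 0x80, serial
 *	number) -> skd_unquiesce_dev() brings the device online.
 *
 * Recoverable failures restart the chain by resending opcode 0x00
 * (TEST_UNIT_READY).  READ CAPACITY data is decoded big-endian, e.g. last
 * LBA bytes 00 01 00 00 give 0x00010000 == 65536.
 */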
  1760. /*
  1761. *****************************************************************************
  1762. * FIT MESSAGES
  1763. *****************************************************************************
  1764. */
  1765. static void skd_send_fitmsg(struct skd_device *skdev,
  1766. struct skd_fitmsg_context *skmsg)
  1767. {
  1768. u64 qcmd;
  1769. struct fit_msg_hdr *fmh;
  1770. pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n",
  1771. skdev->name, __func__, __LINE__,
  1772. skmsg->mb_dma_address, skdev->in_flight);
  1773. pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n",
  1774. skdev->name, __func__, __LINE__,
  1775. skmsg->msg_buf, skmsg->offset);
  1776. qcmd = skmsg->mb_dma_address;
  1777. qcmd |= FIT_QCMD_QID_NORMAL;
  1778. fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
  1779. skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
  1780. if (unlikely(skdev->dbg_level > 1)) {
  1781. u8 *bp = (u8 *)skmsg->msg_buf;
  1782. int i;
  1783. for (i = 0; i < skmsg->length; i += 8) {
  1784. pr_debug("%s:%s:%d msg[%2d] %8ph\n",
  1785. skdev->name, __func__, __LINE__, i, &bp[i]);
  1786. if (i == 0)
  1787. i = 64 - 8;
  1788. }
  1789. }
  1790. if (skmsg->length > 256)
  1791. qcmd |= FIT_QCMD_MSGSIZE_512;
  1792. else if (skmsg->length > 128)
  1793. qcmd |= FIT_QCMD_MSGSIZE_256;
  1794. else if (skmsg->length > 64)
  1795. qcmd |= FIT_QCMD_MSGSIZE_128;
  1796. else
  1797. /*
  1798. * This makes no sense because the FIT msg header is
  1799. * 64 bytes. If the msg is only 64 bytes long it has
  1800. * no payload.
  1801. */
  1802. qcmd |= FIT_QCMD_MSGSIZE_64;
  1803. SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
  1804. }
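/*
 * Worked example of the size encoding above (illustrative): assuming each
 * coalesced request occupies one 64-byte slot, as the internal and special
 * messages in this driver do, a FIT message carrying three requests is one
 * 64-byte header plus 3 * 64 == 256 bytes.  256 is not > 256 but is > 128,
 * so FIT_QCMD_MSGSIZE_256 is OR'ed into qcmd along with the message's DMA
 * address and FIT_QCMD_QID_NORMAL.
 */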
  1805. static void skd_send_special_fitmsg(struct skd_device *skdev,
  1806. struct skd_special_context *skspcl)
  1807. {
  1808. u64 qcmd;
  1809. if (unlikely(skdev->dbg_level > 1)) {
  1810. u8 *bp = (u8 *)skspcl->msg_buf;
  1811. int i;
  1812. for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
  1813. pr_debug("%s:%s:%d spcl[%2d] %8ph\n",
  1814. skdev->name, __func__, __LINE__, i, &bp[i]);
  1815. if (i == 0)
  1816. i = 64 - 8;
  1817. }
  1818. pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
  1819. skdev->name, __func__, __LINE__,
  1820. skspcl, skspcl->req.id, skspcl->req.sksg_list,
  1821. skspcl->req.sksg_dma_address);
  1822. for (i = 0; i < skspcl->req.n_sg; i++) {
  1823. struct fit_sg_descriptor *sgd =
  1824. &skspcl->req.sksg_list[i];
  1825. pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
  1826. "addr=0x%llx next=0x%llx\n",
  1827. skdev->name, __func__, __LINE__,
  1828. i, sgd->byte_count, sgd->control,
  1829. sgd->host_side_addr, sgd->next_desc_ptr);
  1830. }
  1831. }
  1832. /*
  1833. * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
  1834. * and one 64-byte SSDI command.
  1835. */
  1836. qcmd = skspcl->mb_dma_address;
  1837. qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
  1838. SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
  1839. }
  1840. /*
  1841. *****************************************************************************
  1842. * COMPLETION QUEUE
  1843. *****************************************************************************
  1844. */
  1845. static void skd_complete_other(struct skd_device *skdev,
  1846. volatile struct fit_completion_entry_v1 *skcomp,
  1847. volatile struct fit_comp_error_info *skerr);
  1848. struct sns_info {
  1849. u8 type;
  1850. u8 stat;
  1851. u8 key;
  1852. u8 asc;
  1853. u8 ascq;
  1854. u8 mask;
  1855. enum skd_check_status_action action;
  1856. };
  1857. static struct sns_info skd_chkstat_table[] = {
  1858. /* Good */
  1859. { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
  1860. SKD_CHECK_STATUS_REPORT_GOOD },
  1861. /* Smart alerts */
  1862. { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */
  1863. SKD_CHECK_STATUS_REPORT_SMART_ALERT },
  1864. { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */
  1865. SKD_CHECK_STATUS_REPORT_SMART_ALERT },
  1866. { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
  1867. SKD_CHECK_STATUS_REPORT_SMART_ALERT },
  1868. /* Retry (with limits) */
  1869. { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */
  1870. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1871. { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */
  1872. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1873. { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */
  1874. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1875. { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */
  1876. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1877. /* Busy (or about to be) */
  1878. { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */
  1879. SKD_CHECK_STATUS_BUSY_IMMINENT },
  1880. };
  1881. /*
  1882. * Look up status and sense data to decide how to handle the error
  1883. * from the device.
  1884. * mask says which fields must match e.g., mask=0x18 means check
  1885. * type and stat, ignore key, asc, ascq.
  1886. */
  1887. static enum skd_check_status_action
  1888. skd_check_status(struct skd_device *skdev,
  1889. u8 cmp_status, volatile struct fit_comp_error_info *skerr)
  1890. {
  1891. int i, n;
  1892. pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
  1893. skd_name(skdev), skerr->key, skerr->code, skerr->qual,
  1894. skerr->fruc);
  1895. pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
  1896. skdev->name, __func__, __LINE__, skerr->type, cmp_status,
  1897. skerr->key, skerr->code, skerr->qual, skerr->fruc);
  1898. /* Does the info match an entry in the good category? */
1899. n = ARRAY_SIZE(skd_chkstat_table);
  1900. for (i = 0; i < n; i++) {
  1901. struct sns_info *sns = &skd_chkstat_table[i];
  1902. if (sns->mask & 0x10)
  1903. if (skerr->type != sns->type)
  1904. continue;
  1905. if (sns->mask & 0x08)
  1906. if (cmp_status != sns->stat)
  1907. continue;
  1908. if (sns->mask & 0x04)
  1909. if (skerr->key != sns->key)
  1910. continue;
  1911. if (sns->mask & 0x02)
  1912. if (skerr->code != sns->asc)
  1913. continue;
  1914. if (sns->mask & 0x01)
  1915. if (skerr->qual != sns->ascq)
  1916. continue;
  1917. if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
  1918. pr_err("(%s): SMART Alert: sense key/asc/ascq "
  1919. "%02x/%02x/%02x\n",
  1920. skd_name(skdev), skerr->key,
  1921. skerr->code, skerr->qual);
  1922. }
  1923. return sns->action;
  1924. }
  1925. /* No other match, so nonzero status means error,
  1926. * zero status means good
  1927. */
  1928. if (cmp_status) {
  1929. pr_debug("%s:%s:%d status check: error\n",
  1930. skdev->name, __func__, __LINE__);
  1931. return SKD_CHECK_STATUS_REPORT_ERROR;
  1932. }
  1933. pr_debug("%s:%s:%d status check good default\n",
  1934. skdev->name, __func__, __LINE__);
  1935. return SKD_CHECK_STATUS_REPORT_GOOD;
  1936. }
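/*
 * Worked example of the mask handling above (illustrative): the DMA-error
 * entry { 0x70, 0x02, 0x0B, 0, 0, 0x1C, ... } has mask 0x1C ==
 * 0x10|0x08|0x04, so only type, status and sense key must equal
 * 0x70/0x02/0x0B for it to match; asc and ascq are ignored and the request
 * is requeued.  An entry with mask 0x1F requires all five fields to match.
 */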
  1937. static void skd_resolve_req_exception(struct skd_device *skdev,
  1938. struct skd_request_context *skreq)
  1939. {
  1940. u8 cmp_status = skreq->completion.status;
  1941. switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
  1942. case SKD_CHECK_STATUS_REPORT_GOOD:
  1943. case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
  1944. skd_end_request(skdev, skreq, 0);
  1945. break;
  1946. case SKD_CHECK_STATUS_BUSY_IMMINENT:
  1947. skd_log_skreq(skdev, skreq, "retry(busy)");
  1948. blk_requeue_request(skdev->queue, skreq->req);
  1949. pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
  1950. skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
  1951. skdev->timer_countdown = SKD_TIMER_MINUTES(20);
  1952. skd_quiesce_dev(skdev);
  1953. break;
  1954. case SKD_CHECK_STATUS_REQUEUE_REQUEST:
  1955. if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
  1956. skd_log_skreq(skdev, skreq, "retry");
  1957. blk_requeue_request(skdev->queue, skreq->req);
  1958. break;
  1959. }
  1960. /* fall through to report error */
  1961. case SKD_CHECK_STATUS_REPORT_ERROR:
  1962. default:
  1963. skd_end_request(skdev, skreq, -EIO);
  1964. break;
  1965. }
  1966. }
  1967. /* assume spinlock is already held */
  1968. static void skd_release_skreq(struct skd_device *skdev,
  1969. struct skd_request_context *skreq)
  1970. {
  1971. u32 msg_slot;
  1972. struct skd_fitmsg_context *skmsg;
  1973. u32 timo_slot;
  1974. /*
  1975. * Reclaim the FIT msg buffer if this is
  1976. * the first of the requests it carried to
  1977. * be completed. The FIT msg buffer used to
  1978. * send this request cannot be reused until
  1979. * we are sure the s1120 card has copied
  1980. * it to its memory. The FIT msg might have
  1981. * contained several requests. As soon as
  1982. * any of them are completed we know that
  1983. * the entire FIT msg was transferred.
  1984. * Only the first completed request will
  1985. * match the FIT msg buffer id. The FIT
  1986. * msg buffer id is immediately updated.
  1987. * When subsequent requests complete the FIT
  1988. * msg buffer id won't match, so we know
  1989. * quite cheaply that it is already done.
  1990. */
  1991. msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
  1992. SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
  1993. skmsg = &skdev->skmsg_table[msg_slot];
  1994. if (skmsg->id == skreq->fitmsg_id) {
  1995. SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
  1996. SKD_ASSERT(skmsg->outstanding > 0);
  1997. skmsg->outstanding--;
  1998. if (skmsg->outstanding == 0) {
  1999. skmsg->state = SKD_MSG_STATE_IDLE;
  2000. skmsg->id += SKD_ID_INCR;
  2001. skmsg->next = skdev->skmsg_free_list;
  2002. skdev->skmsg_free_list = skmsg;
  2003. }
  2004. }
  2005. /*
  2006. * Decrease the number of active requests.
  2007. * Also decrements the count in the timeout slot.
  2008. */
  2009. SKD_ASSERT(skdev->in_flight > 0);
  2010. skdev->in_flight -= 1;
  2011. timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
  2012. SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
  2013. skdev->timeout_slot[timo_slot] -= 1;
  2014. /*
  2015. * Reset backpointer
  2016. */
  2017. skreq->req = NULL;
  2018. /*
  2019. * Reclaim the skd_request_context
  2020. */
  2021. skreq->state = SKD_REQ_STATE_IDLE;
  2022. skreq->id += SKD_ID_INCR;
  2023. skreq->next = skdev->skreq_free_list;
  2024. skdev->skreq_free_list = skreq;
  2025. }
  2026. #define DRIVER_INQ_EVPD_PAGE_CODE 0xDA
  2027. static void skd_do_inq_page_00(struct skd_device *skdev,
  2028. volatile struct fit_completion_entry_v1 *skcomp,
  2029. volatile struct fit_comp_error_info *skerr,
  2030. uint8_t *cdb, uint8_t *buf)
  2031. {
  2032. uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
  2033. /* Caller requested "supported pages". The driver needs to insert
  2034. * its page.
  2035. */
  2036. pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n",
  2037. skdev->name, __func__, __LINE__);
  2038. /* If the device rejected the request because the CDB was
  2039. * improperly formed, then just leave.
  2040. */
  2041. if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
  2042. skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
  2043. return;
  2044. /* Get the amount of space the caller allocated */
  2045. max_bytes = (cdb[3] << 8) | cdb[4];
  2046. /* Get the number of pages actually returned by the device */
  2047. drive_pages = (buf[2] << 8) | buf[3];
  2048. drive_bytes = drive_pages + 4;
  2049. new_size = drive_pages + 1;
  2050. /* Supported pages must be in numerical order, so find where
  2051. * the driver page needs to be inserted into the list of
  2052. * pages returned by the device.
  2053. */
  2054. for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
  2055. if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
2056. return; /* Device is using this page code; abort. */
  2057. else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
  2058. break;
  2059. }
  2060. if (insert_pt < max_bytes) {
  2061. uint16_t u;
  2062. /* Shift everything up one byte to make room. */
  2063. for (u = new_size + 3; u > insert_pt; u--)
  2064. buf[u] = buf[u - 1];
  2065. buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
2066. /* Bump num_returned_bytes by 1, keeping SCSI (big-endian) byte order */
2067. skcomp->num_returned_bytes =
2068. cpu_to_be32(be32_to_cpu(skcomp->num_returned_bytes) + 1);
  2071. }
  2072. /* update page length field to reflect the driver's page too */
  2073. buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
  2074. buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
  2075. }
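/*
 * Worked example for the page insertion above (illustrative): if the device
 * reports supported VPD pages 00 80 83, then drive_pages == 3 and the page
 * list occupies buf[4..6].  0xDA sorts after 0x83, so insert_pt ends up at
 * 7, nothing needs shifting, buf[7] becomes 0xDA (assuming the caller
 * allocated enough room, i.e. max_bytes > 7), new_size == 4 is written back
 * to the length field in buf[2..3], and num_returned_bytes is bumped by one.
 */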
  2076. static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
  2077. {
  2078. int pcie_reg;
  2079. u16 pci_bus_speed;
  2080. u8 pci_lanes;
  2081. pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
  2082. if (pcie_reg) {
  2083. u16 linksta;
  2084. pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
  2085. pci_bus_speed = linksta & 0xF;
  2086. pci_lanes = (linksta & 0x3F0) >> 4;
  2087. } else {
  2088. *speed = STEC_LINK_UNKNOWN;
  2089. *width = 0xFF;
  2090. return;
  2091. }
  2092. switch (pci_bus_speed) {
  2093. case 1:
  2094. *speed = STEC_LINK_2_5GTS;
  2095. break;
  2096. case 2:
  2097. *speed = STEC_LINK_5GTS;
  2098. break;
  2099. case 3:
  2100. *speed = STEC_LINK_8GTS;
  2101. break;
  2102. default:
  2103. *speed = STEC_LINK_UNKNOWN;
  2104. break;
  2105. }
  2106. if (pci_lanes <= 0x20)
  2107. *width = pci_lanes;
  2108. else
  2109. *width = 0xFF;
  2110. }
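/*
 * Worked example of the LNKSTA decode above (illustrative): a link status
 * value of 0x0042 has speed field 0x2 (bits 3:0) and width field 0x4
 * (bits 9:4), so the driver reports STEC_LINK_5GTS and 4 lanes.  A width
 * above 0x20 or a missing PCIe capability is reported as 0xFF/unknown.
 */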
  2111. static void skd_do_inq_page_da(struct skd_device *skdev,
  2112. volatile struct fit_completion_entry_v1 *skcomp,
  2113. volatile struct fit_comp_error_info *skerr,
  2114. uint8_t *cdb, uint8_t *buf)
  2115. {
  2116. struct pci_dev *pdev = skdev->pdev;
  2117. unsigned max_bytes;
  2118. struct driver_inquiry_data inq;
  2119. u16 val;
  2120. pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n",
  2121. skdev->name, __func__, __LINE__);
  2122. memset(&inq, 0, sizeof(inq));
  2123. inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
  2124. skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
  2125. inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
  2126. inq.pcie_device_number = PCI_SLOT(pdev->devfn);
  2127. inq.pcie_function_number = PCI_FUNC(pdev->devfn);
  2128. pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
  2129. inq.pcie_vendor_id = cpu_to_be16(val);
  2130. pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
  2131. inq.pcie_device_id = cpu_to_be16(val);
  2132. pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
  2133. inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
  2134. pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
  2135. inq.pcie_subsystem_device_id = cpu_to_be16(val);
2136. /* Driver version, fixed length, padded with spaces on the right */
  2137. inq.driver_version_length = sizeof(inq.driver_version);
  2138. memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
  2139. memcpy(inq.driver_version, DRV_VER_COMPL,
  2140. min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
  2141. inq.page_length = cpu_to_be16((sizeof(inq) - 4));
  2142. /* Clear the error set by the device */
  2143. skcomp->status = SAM_STAT_GOOD;
  2144. memset((void *)skerr, 0, sizeof(*skerr));
  2145. /* copy response into output buffer */
  2146. max_bytes = (cdb[3] << 8) | cdb[4];
  2147. memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
  2148. skcomp->num_returned_bytes =
2149. cpu_to_be32(min_t(uint16_t, max_bytes, sizeof(inq)));
  2150. }
  2151. static void skd_do_driver_inq(struct skd_device *skdev,
  2152. volatile struct fit_completion_entry_v1 *skcomp,
  2153. volatile struct fit_comp_error_info *skerr,
  2154. uint8_t *cdb, uint8_t *buf)
  2155. {
  2156. if (!buf)
  2157. return;
  2158. else if (cdb[0] != INQUIRY)
  2159. return; /* Not an INQUIRY */
  2160. else if ((cdb[1] & 1) == 0)
  2161. return; /* EVPD not set */
  2162. else if (cdb[2] == 0)
  2163. /* Need to add driver's page to supported pages list */
  2164. skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
  2165. else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
  2166. /* Caller requested driver's page */
  2167. skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
  2168. }
  2169. static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
  2170. {
  2171. if (!sg)
  2172. return NULL;
  2173. if (!sg_page(sg))
  2174. return NULL;
  2175. return sg_virt(sg);
  2176. }
  2177. static void skd_process_scsi_inq(struct skd_device *skdev,
  2178. volatile struct fit_completion_entry_v1
  2179. *skcomp,
  2180. volatile struct fit_comp_error_info *skerr,
  2181. struct skd_special_context *skspcl)
  2182. {
  2183. uint8_t *buf;
  2184. struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
  2185. struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
  2186. dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
  2187. skspcl->req.sg_data_dir);
  2188. buf = skd_sg_1st_page_ptr(skspcl->req.sg);
  2189. if (buf)
  2190. skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
  2191. }
  2192. static int skd_isr_completion_posted(struct skd_device *skdev,
  2193. int limit, int *enqueued)
  2194. {
  2195. volatile struct fit_completion_entry_v1 *skcmp = NULL;
  2196. volatile struct fit_comp_error_info *skerr;
  2197. u16 req_id;
  2198. u32 req_slot;
  2199. struct skd_request_context *skreq;
  2200. u16 cmp_cntxt = 0;
  2201. u8 cmp_status = 0;
  2202. u8 cmp_cycle = 0;
  2203. u32 cmp_bytes = 0;
  2204. int rc = 0;
  2205. int processed = 0;
  2206. for (;; ) {
  2207. SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
  2208. skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
  2209. cmp_cycle = skcmp->cycle;
  2210. cmp_cntxt = skcmp->tag;
  2211. cmp_status = skcmp->status;
  2212. cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
  2213. skerr = &skdev->skerr_table[skdev->skcomp_ix];
  2214. pr_debug("%s:%s:%d "
  2215. "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
  2216. "busy=%d rbytes=0x%x proto=%d\n",
  2217. skdev->name, __func__, __LINE__, skdev->skcomp_cycle,
  2218. skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status,
  2219. skdev->in_flight, cmp_bytes, skdev->proto_ver);
  2220. if (cmp_cycle != skdev->skcomp_cycle) {
  2221. pr_debug("%s:%s:%d end of completions\n",
  2222. skdev->name, __func__, __LINE__);
  2223. break;
  2224. }
  2225. /*
  2226. * Update the completion queue head index and possibly
  2227. * the completion cycle count. 8-bit wrap-around.
  2228. */
  2229. skdev->skcomp_ix++;
  2230. if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
  2231. skdev->skcomp_ix = 0;
  2232. skdev->skcomp_cycle++;
  2233. }
  2234. /*
  2235. * The command context is a unique 32-bit ID. The low order
  2236. * bits help locate the request. The request is usually a
  2237. * r/w request (see skd_start() above) or a special request.
  2238. */
  2239. req_id = cmp_cntxt;
  2240. req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
  2241. /* Is this other than a r/w request? */
  2242. if (req_slot >= skdev->num_req_context) {
  2243. /*
  2244. * This is not a completion for a r/w request.
  2245. */
  2246. skd_complete_other(skdev, skcmp, skerr);
  2247. continue;
  2248. }
  2249. skreq = &skdev->skreq_table[req_slot];
  2250. /*
  2251. * Make sure the request ID for the slot matches.
  2252. */
  2253. if (skreq->id != req_id) {
  2254. pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n",
  2255. skdev->name, __func__, __LINE__,
  2256. req_id, skreq->id);
  2257. {
  2258. u16 new_id = cmp_cntxt;
  2259. pr_err("(%s): Completion mismatch "
  2260. "comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
  2261. skd_name(skdev), req_id,
  2262. skreq->id, new_id);
  2263. continue;
  2264. }
  2265. }
  2266. SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
  2267. if (skreq->state == SKD_REQ_STATE_ABORTED) {
  2268. pr_debug("%s:%s:%d reclaim req %p id=%04x\n",
  2269. skdev->name, __func__, __LINE__,
  2270. skreq, skreq->id);
  2271. /* a previously timed out command can
  2272. * now be cleaned up */
  2273. skd_release_skreq(skdev, skreq);
  2274. continue;
  2275. }
  2276. skreq->completion = *skcmp;
  2277. if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
  2278. skreq->err_info = *skerr;
  2279. skd_log_check_status(skdev, cmp_status, skerr->key,
  2280. skerr->code, skerr->qual,
  2281. skerr->fruc);
  2282. }
  2283. /* Release DMA resources for the request. */
  2284. if (skreq->n_sg > 0)
  2285. skd_postop_sg_list(skdev, skreq);
  2286. if (!skreq->req) {
  2287. pr_debug("%s:%s:%d NULL backptr skdreq %p, "
  2288. "req=0x%x req_id=0x%x\n",
  2289. skdev->name, __func__, __LINE__,
  2290. skreq, skreq->id, req_id);
  2291. } else {
  2292. /*
  2293. * Capture the outcome and post it back to the
  2294. * native request.
  2295. */
  2296. if (likely(cmp_status == SAM_STAT_GOOD))
  2297. skd_end_request(skdev, skreq, 0);
  2298. else
  2299. skd_resolve_req_exception(skdev, skreq);
  2300. }
  2301. /*
  2302. * Release the skreq, its FIT msg (if one), timeout slot,
  2303. * and queue depth.
  2304. */
  2305. skd_release_skreq(skdev, skreq);
  2306. /* skd_isr_comp_limit equal zero means no limit */
  2307. if (limit) {
  2308. if (++processed >= limit) {
  2309. rc = 1;
  2310. break;
  2311. }
  2312. }
  2313. }
  2314. if ((skdev->state == SKD_DRVR_STATE_PAUSING)
  2315. && (skdev->in_flight) == 0) {
  2316. skdev->state = SKD_DRVR_STATE_PAUSED;
  2317. wake_up_interruptible(&skdev->waitq);
  2318. }
  2319. return rc;
  2320. }
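/*
 * Notes on the completion protocol above (derived from the code, for
 * reference): the consumer starts at skcomp_ix 0 with skcomp_cycle 1; the
 * device presumably stamps each entry it writes with its current pass
 * number, so an entry whose cycle field differs from skcomp_cycle has not
 * been written on this pass and marks the end of valid completions.  Each
 * time skcomp_ix wraps to 0 the expected cycle increments (8-bit wrap), so
 * a stale entry from the previous pass is never mistaken for a new one.
 * The low bits of the 16-bit tag select the skreq slot, and because
 * SKD_ID_INCR is added every time a slot is reused, a late completion for
 * a recycled slot fails the skreq->id == req_id check and is dropped with
 * the "Completion mismatch" message.
 */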
  2321. static void skd_complete_other(struct skd_device *skdev,
  2322. volatile struct fit_completion_entry_v1 *skcomp,
  2323. volatile struct fit_comp_error_info *skerr)
  2324. {
  2325. u32 req_id = 0;
  2326. u32 req_table;
  2327. u32 req_slot;
  2328. struct skd_special_context *skspcl;
  2329. req_id = skcomp->tag;
  2330. req_table = req_id & SKD_ID_TABLE_MASK;
  2331. req_slot = req_id & SKD_ID_SLOT_MASK;
  2332. pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n",
  2333. skdev->name, __func__, __LINE__,
  2334. req_table, req_id, req_slot);
  2335. /*
  2336. * Based on the request id, determine how to dispatch this completion.
2337. * This switch/case is finding the good cases and forwarding the
  2338. * completion entry. Errors are reported below the switch.
  2339. */
  2340. switch (req_table) {
  2341. case SKD_ID_RW_REQUEST:
  2342. /*
  2343. * The caller, skd_completion_posted_isr() above,
  2344. * handles r/w requests. The only way we get here
  2345. * is if the req_slot is out of bounds.
  2346. */
  2347. break;
  2348. case SKD_ID_SPECIAL_REQUEST:
  2349. /*
  2350. * Make sure the req_slot is in bounds and that the id
  2351. * matches.
  2352. */
  2353. if (req_slot < skdev->n_special) {
  2354. skspcl = &skdev->skspcl_table[req_slot];
  2355. if (skspcl->req.id == req_id &&
  2356. skspcl->req.state == SKD_REQ_STATE_BUSY) {
  2357. skd_complete_special(skdev,
  2358. skcomp, skerr, skspcl);
  2359. return;
  2360. }
  2361. }
  2362. break;
  2363. case SKD_ID_INTERNAL:
  2364. if (req_slot == 0) {
  2365. skspcl = &skdev->internal_skspcl;
  2366. if (skspcl->req.id == req_id &&
  2367. skspcl->req.state == SKD_REQ_STATE_BUSY) {
  2368. skd_complete_internal(skdev,
  2369. skcomp, skerr, skspcl);
  2370. return;
  2371. }
  2372. }
  2373. break;
  2374. case SKD_ID_FIT_MSG:
  2375. /*
  2376. * These id's should never appear in a completion record.
  2377. */
  2378. break;
  2379. default:
  2380. /*
2381. * These id's should never appear anywhere.
  2382. */
  2383. break;
  2384. }
  2385. /*
  2386. * If we get here it is a bad or stale id.
  2387. */
  2388. }
  2389. static void skd_complete_special(struct skd_device *skdev,
  2390. volatile struct fit_completion_entry_v1
  2391. *skcomp,
  2392. volatile struct fit_comp_error_info *skerr,
  2393. struct skd_special_context *skspcl)
  2394. {
  2395. pr_debug("%s:%s:%d completing special request %p\n",
  2396. skdev->name, __func__, __LINE__, skspcl);
  2397. if (skspcl->orphaned) {
  2398. /* Discard orphaned request */
  2399. /* ?: Can this release directly or does it need
  2400. * to use a worker? */
  2401. pr_debug("%s:%s:%d release orphaned %p\n",
  2402. skdev->name, __func__, __LINE__, skspcl);
  2403. skd_release_special(skdev, skspcl);
  2404. return;
  2405. }
  2406. skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
  2407. skspcl->req.state = SKD_REQ_STATE_COMPLETED;
  2408. skspcl->req.completion = *skcomp;
  2409. skspcl->req.err_info = *skerr;
  2410. skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
  2411. skerr->code, skerr->qual, skerr->fruc);
  2412. wake_up_interruptible(&skdev->waitq);
  2413. }
  2414. /* assume spinlock is already held */
  2415. static void skd_release_special(struct skd_device *skdev,
  2416. struct skd_special_context *skspcl)
  2417. {
  2418. int i, was_depleted;
  2419. for (i = 0; i < skspcl->req.n_sg; i++) {
  2420. struct page *page = sg_page(&skspcl->req.sg[i]);
  2421. __free_page(page);
  2422. }
  2423. was_depleted = (skdev->skspcl_free_list == NULL);
  2424. skspcl->req.state = SKD_REQ_STATE_IDLE;
  2425. skspcl->req.id += SKD_ID_INCR;
  2426. skspcl->req.next =
  2427. (struct skd_request_context *)skdev->skspcl_free_list;
  2428. skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
  2429. if (was_depleted) {
  2430. pr_debug("%s:%s:%d skspcl was depleted\n",
  2431. skdev->name, __func__, __LINE__);
2432. /* Free list was depleted. There might be waiters. */
  2433. wake_up_interruptible(&skdev->waitq);
  2434. }
  2435. }
  2436. static void skd_reset_skcomp(struct skd_device *skdev)
  2437. {
  2438. u32 nbytes;
  2439. struct fit_completion_entry_v1 *skcomp;
  2440. nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
  2441. nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
  2442. memset(skdev->skcomp_table, 0, nbytes);
  2443. skdev->skcomp_ix = 0;
  2444. skdev->skcomp_cycle = 1;
  2445. }
  2446. /*
  2447. *****************************************************************************
  2448. * INTERRUPTS
  2449. *****************************************************************************
  2450. */
  2451. static void skd_completion_worker(struct work_struct *work)
  2452. {
  2453. struct skd_device *skdev =
  2454. container_of(work, struct skd_device, completion_worker);
  2455. unsigned long flags;
  2456. int flush_enqueued = 0;
  2457. spin_lock_irqsave(&skdev->lock, flags);
  2458. /*
  2459. * pass in limit=0, which means no limit..
  2460. * process everything in compq
  2461. */
  2462. skd_isr_completion_posted(skdev, 0, &flush_enqueued);
  2463. skd_request_fn(skdev->queue);
  2464. spin_unlock_irqrestore(&skdev->lock, flags);
  2465. }
  2466. static void skd_isr_msg_from_dev(struct skd_device *skdev);
  2467. static irqreturn_t
  2468. skd_isr(int irq, void *ptr)
  2469. {
  2470. struct skd_device *skdev;
  2471. u32 intstat;
  2472. u32 ack;
  2473. int rc = 0;
  2474. int deferred = 0;
  2475. int flush_enqueued = 0;
  2476. skdev = (struct skd_device *)ptr;
  2477. spin_lock(&skdev->lock);
  2478. for (;; ) {
  2479. intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
  2480. ack = FIT_INT_DEF_MASK;
  2481. ack &= intstat;
  2482. pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n",
  2483. skdev->name, __func__, __LINE__, intstat, ack);
2484. /* As long as there is an interrupt pending on the device, keep
2485. * running the loop. When there are none, get out, but if we've never
2486. * done any processing, call the completion handler?
  2487. */
  2488. if (ack == 0) {
  2489. /* No interrupts on device, but run the completion
  2490. * processor anyway?
  2491. */
  2492. if (rc == 0)
  2493. if (likely (skdev->state
  2494. == SKD_DRVR_STATE_ONLINE))
  2495. deferred = 1;
  2496. break;
  2497. }
  2498. rc = IRQ_HANDLED;
  2499. SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
  2500. if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
  2501. (skdev->state != SKD_DRVR_STATE_STOPPING))) {
  2502. if (intstat & FIT_ISH_COMPLETION_POSTED) {
  2503. /*
  2504. * If we have already deferred completion
  2505. * processing, don't bother running it again
  2506. */
  2507. if (deferred == 0)
  2508. deferred =
  2509. skd_isr_completion_posted(skdev,
  2510. skd_isr_comp_limit, &flush_enqueued);
  2511. }
  2512. if (intstat & FIT_ISH_FW_STATE_CHANGE) {
  2513. skd_isr_fwstate(skdev);
  2514. if (skdev->state == SKD_DRVR_STATE_FAULT ||
  2515. skdev->state ==
  2516. SKD_DRVR_STATE_DISAPPEARED) {
  2517. spin_unlock(&skdev->lock);
  2518. return rc;
  2519. }
  2520. }
  2521. if (intstat & FIT_ISH_MSG_FROM_DEV)
  2522. skd_isr_msg_from_dev(skdev);
  2523. }
  2524. }
  2525. if (unlikely(flush_enqueued))
  2526. skd_request_fn(skdev->queue);
  2527. if (deferred)
  2528. schedule_work(&skdev->completion_worker);
  2529. else if (!flush_enqueued)
  2530. skd_request_fn(skdev->queue);
  2531. spin_unlock(&skdev->lock);
  2532. return rc;
  2533. }
  2534. static void skd_drive_fault(struct skd_device *skdev)
  2535. {
  2536. skdev->state = SKD_DRVR_STATE_FAULT;
  2537. pr_err("(%s): Drive FAULT\n", skd_name(skdev));
  2538. }
  2539. static void skd_drive_disappeared(struct skd_device *skdev)
  2540. {
  2541. skdev->state = SKD_DRVR_STATE_DISAPPEARED;
  2542. pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev));
  2543. }
  2544. static void skd_isr_fwstate(struct skd_device *skdev)
  2545. {
  2546. u32 sense;
  2547. u32 state;
  2548. u32 mtd;
  2549. int prev_driver_state = skdev->state;
  2550. sense = SKD_READL(skdev, FIT_STATUS);
  2551. state = sense & FIT_SR_DRIVE_STATE_MASK;
  2552. pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n",
  2553. skd_name(skdev),
  2554. skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
  2555. skd_drive_state_to_str(state), state);
  2556. skdev->drive_state = state;
  2557. switch (skdev->drive_state) {
  2558. case FIT_SR_DRIVE_INIT:
  2559. if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
  2560. skd_disable_interrupts(skdev);
  2561. break;
  2562. }
  2563. if (skdev->state == SKD_DRVR_STATE_RESTARTING)
  2564. skd_recover_requests(skdev, 0);
  2565. if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
  2566. skdev->timer_countdown = SKD_STARTING_TIMO;
  2567. skdev->state = SKD_DRVR_STATE_STARTING;
  2568. skd_soft_reset(skdev);
  2569. break;
  2570. }
  2571. mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
  2572. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2573. skdev->last_mtd = mtd;
  2574. break;
  2575. case FIT_SR_DRIVE_ONLINE:
  2576. skdev->cur_max_queue_depth = skd_max_queue_depth;
  2577. if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
  2578. skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
  2579. skdev->queue_low_water_mark =
  2580. skdev->cur_max_queue_depth * 2 / 3 + 1;
  2581. if (skdev->queue_low_water_mark < 1)
  2582. skdev->queue_low_water_mark = 1;
  2583. pr_info(
  2584. "(%s): Queue depth limit=%d dev=%d lowat=%d\n",
  2585. skd_name(skdev),
  2586. skdev->cur_max_queue_depth,
  2587. skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
  2588. skd_refresh_device_data(skdev);
  2589. break;
  2590. case FIT_SR_DRIVE_BUSY:
  2591. skdev->state = SKD_DRVR_STATE_BUSY;
  2592. skdev->timer_countdown = SKD_BUSY_TIMO;
  2593. skd_quiesce_dev(skdev);
  2594. break;
  2595. case FIT_SR_DRIVE_BUSY_SANITIZE:
2596. /* set timer for 3 seconds; we'll abort any unfinished
2597. * commands after it expires
  2598. */
  2599. skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
  2600. skdev->timer_countdown = SKD_TIMER_SECONDS(3);
  2601. blk_start_queue(skdev->queue);
  2602. break;
  2603. case FIT_SR_DRIVE_BUSY_ERASE:
  2604. skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
  2605. skdev->timer_countdown = SKD_BUSY_TIMO;
  2606. break;
  2607. case FIT_SR_DRIVE_OFFLINE:
  2608. skdev->state = SKD_DRVR_STATE_IDLE;
  2609. break;
  2610. case FIT_SR_DRIVE_SOFT_RESET:
  2611. switch (skdev->state) {
  2612. case SKD_DRVR_STATE_STARTING:
  2613. case SKD_DRVR_STATE_RESTARTING:
  2614. /* Expected by a caller of skd_soft_reset() */
  2615. break;
  2616. default:
  2617. skdev->state = SKD_DRVR_STATE_RESTARTING;
  2618. break;
  2619. }
  2620. break;
  2621. case FIT_SR_DRIVE_FW_BOOTING:
  2622. pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n",
  2623. skdev->name, __func__, __LINE__, skdev->name);
  2624. skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
  2625. skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
  2626. break;
  2627. case FIT_SR_DRIVE_DEGRADED:
  2628. case FIT_SR_PCIE_LINK_DOWN:
  2629. case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
  2630. break;
  2631. case FIT_SR_DRIVE_FAULT:
  2632. skd_drive_fault(skdev);
  2633. skd_recover_requests(skdev, 0);
  2634. blk_start_queue(skdev->queue);
  2635. break;
  2636. /* PCIe bus returned all Fs? */
  2637. case 0xFF:
  2638. pr_info("(%s): state=0x%x sense=0x%x\n",
  2639. skd_name(skdev), state, sense);
  2640. skd_drive_disappeared(skdev);
  2641. skd_recover_requests(skdev, 0);
  2642. blk_start_queue(skdev->queue);
  2643. break;
  2644. default:
  2645. /*
2646. * Unknown FW state. Wait for a state we recognize.
  2647. */
  2648. break;
  2649. }
  2650. pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
  2651. skd_name(skdev),
  2652. skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
  2653. skd_skdev_state_to_str(skdev->state), skdev->state);
  2654. }
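/*
 * Abort or requeue everything the device still owes us. Busy skreqs
 * have their DMA resources released and are either requeued (up to
 * SKD_MAX_RETRIES) or completed with -EIO; busy fitmsgs are salvaged;
 * orphaned special requests are released and busy, non-orphaned ones
 * are marked ABORTED. IDs are bumped by SKD_ID_INCR so stale
 * completions for the old incarnation no longer match, and the free
 * lists, timeout slots and in-flight count are reset.
 */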
  2655. static void skd_recover_requests(struct skd_device *skdev, int requeue)
  2656. {
  2657. int i;
  2658. for (i = 0; i < skdev->num_req_context; i++) {
  2659. struct skd_request_context *skreq = &skdev->skreq_table[i];
  2660. if (skreq->state == SKD_REQ_STATE_BUSY) {
  2661. skd_log_skreq(skdev, skreq, "recover");
  2662. SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
  2663. SKD_ASSERT(skreq->req != NULL);
  2664. /* Release DMA resources for the request. */
  2665. if (skreq->n_sg > 0)
  2666. skd_postop_sg_list(skdev, skreq);
  2667. if (requeue &&
  2668. (unsigned long) ++skreq->req->special <
  2669. SKD_MAX_RETRIES)
  2670. blk_requeue_request(skdev->queue, skreq->req);
  2671. else
  2672. skd_end_request(skdev, skreq, -EIO);
  2673. skreq->req = NULL;
  2674. skreq->state = SKD_REQ_STATE_IDLE;
  2675. skreq->id += SKD_ID_INCR;
  2676. }
  2677. if (i > 0)
  2678. skreq[-1].next = skreq;
  2679. skreq->next = NULL;
  2680. }
  2681. skdev->skreq_free_list = skdev->skreq_table;
  2682. for (i = 0; i < skdev->num_fitmsg_context; i++) {
  2683. struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
  2684. if (skmsg->state == SKD_MSG_STATE_BUSY) {
  2685. skd_log_skmsg(skdev, skmsg, "salvaged");
  2686. SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
  2687. skmsg->state = SKD_MSG_STATE_IDLE;
  2688. skmsg->id += SKD_ID_INCR;
  2689. }
  2690. if (i > 0)
  2691. skmsg[-1].next = skmsg;
  2692. skmsg->next = NULL;
  2693. }
  2694. skdev->skmsg_free_list = skdev->skmsg_table;
  2695. for (i = 0; i < skdev->n_special; i++) {
  2696. struct skd_special_context *skspcl = &skdev->skspcl_table[i];
  2697. /* If orphaned, reclaim it because it has already been reported
  2698. * to the process as an error (it was just waiting for
2699. * a completion that didn't come, and now it never will).
2700. * If busy, change to a state that will cause it to error
2701. * out in the wait routine and let it do the normal
2702. * reporting and reclaiming.
  2703. */
  2704. if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
  2705. if (skspcl->orphaned) {
  2706. pr_debug("%s:%s:%d orphaned %p\n",
  2707. skdev->name, __func__, __LINE__,
  2708. skspcl);
  2709. skd_release_special(skdev, skspcl);
  2710. } else {
  2711. pr_debug("%s:%s:%d not orphaned %p\n",
  2712. skdev->name, __func__, __LINE__,
  2713. skspcl);
  2714. skspcl->req.state = SKD_REQ_STATE_ABORTED;
  2715. }
  2716. }
  2717. }
  2718. skdev->skspcl_free_list = skdev->skspcl_table;
  2719. for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
  2720. skdev->timeout_slot[i] = 0;
  2721. skdev->in_flight = 0;
  2722. }
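/*
 * Handle the message-from-device doorbell. Each FIT_MSG_FROM_DEVICE is
 * an ack of the last FIT_MSG_TO_DEVICE we issued; the handshake walks
 * the init sequence one step per ack:
 *   FITFW_INIT -> GET_CMDQ_DEPTH -> SET_COMPQ_DEPTH -> SET_COMPQ_ADDR
 *   -> CMD_LOG_HOST_ID -> CMD_LOG_TIME_STAMP_LO -> CMD_LOG_TIME_STAMP_HI
 *   -> ARM_QUEUE
 * after which the drive is expected to report FIT_SR_DRIVE_ONLINE.
 */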
  2723. static void skd_isr_msg_from_dev(struct skd_device *skdev)
  2724. {
  2725. u32 mfd;
  2726. u32 mtd;
  2727. u32 data;
  2728. mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
  2729. pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n",
  2730. skdev->name, __func__, __LINE__, mfd, skdev->last_mtd);
  2731. /* ignore any mtd that is an ack for something we didn't send */
  2732. if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
  2733. return;
  2734. switch (FIT_MXD_TYPE(mfd)) {
  2735. case FIT_MTD_FITFW_INIT:
  2736. skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
  2737. if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
  2738. pr_err("(%s): protocol mismatch\n",
  2739. skdev->name);
  2740. pr_err("(%s): got=%d support=%d\n",
  2741. skdev->name, skdev->proto_ver,
  2742. FIT_PROTOCOL_VERSION_1);
  2743. pr_err("(%s): please upgrade driver\n",
  2744. skdev->name);
  2745. skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
  2746. skd_soft_reset(skdev);
  2747. break;
  2748. }
  2749. mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
  2750. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2751. skdev->last_mtd = mtd;
  2752. break;
  2753. case FIT_MTD_GET_CMDQ_DEPTH:
  2754. skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
  2755. mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
  2756. SKD_N_COMPLETION_ENTRY);
  2757. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2758. skdev->last_mtd = mtd;
  2759. break;
  2760. case FIT_MTD_SET_COMPQ_DEPTH:
  2761. SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
  2762. mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
  2763. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2764. skdev->last_mtd = mtd;
  2765. break;
  2766. case FIT_MTD_SET_COMPQ_ADDR:
  2767. skd_reset_skcomp(skdev);
  2768. mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
  2769. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2770. skdev->last_mtd = mtd;
  2771. break;
  2772. case FIT_MTD_CMD_LOG_HOST_ID:
  2773. skdev->connect_time_stamp = get_seconds();
  2774. data = skdev->connect_time_stamp & 0xFFFF;
  2775. mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
  2776. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2777. skdev->last_mtd = mtd;
  2778. break;
  2779. case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
  2780. skdev->drive_jiffies = FIT_MXD_DATA(mfd);
  2781. data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
  2782. mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
  2783. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2784. skdev->last_mtd = mtd;
  2785. break;
  2786. case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
  2787. skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
  2788. mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
  2789. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2790. skdev->last_mtd = mtd;
  2791. pr_err("(%s): Time sync driver=0x%x device=0x%x\n",
  2792. skd_name(skdev),
  2793. skdev->connect_time_stamp, skdev->drive_jiffies);
  2794. break;
  2795. case FIT_MTD_ARM_QUEUE:
  2796. skdev->last_mtd = 0;
  2797. /*
  2798. * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
  2799. */
  2800. break;
  2801. default:
  2802. break;
  2803. }
  2804. }
  2805. static void skd_disable_interrupts(struct skd_device *skdev)
  2806. {
  2807. u32 sense;
  2808. sense = SKD_READL(skdev, FIT_CONTROL);
  2809. sense &= ~FIT_CR_ENABLE_INTERRUPTS;
  2810. SKD_WRITEL(skdev, sense, FIT_CONTROL);
  2811. pr_debug("%s:%s:%d sense 0x%x\n",
  2812. skdev->name, __func__, __LINE__, sense);
2813. /* Note that all 1s are written. A 1 bit means
2814. * disable, a 0 means enable.
  2815. */
  2816. SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
  2817. }
  2818. static void skd_enable_interrupts(struct skd_device *skdev)
  2819. {
  2820. u32 val;
  2821. /* unmask interrupts first */
  2822. val = FIT_ISH_FW_STATE_CHANGE +
  2823. FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
2824. /* Note that the complement of the mask is written. A 1 bit means
2825. * disable, a 0 means enable. */
  2826. SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
  2827. pr_debug("%s:%s:%d interrupt mask=0x%x\n",
  2828. skdev->name, __func__, __LINE__, ~val);
  2829. val = SKD_READL(skdev, FIT_CONTROL);
  2830. val |= FIT_CR_ENABLE_INTERRUPTS;
  2831. pr_debug("%s:%s:%d control=0x%x\n",
  2832. skdev->name, __func__, __LINE__, val);
  2833. SKD_WRITEL(skdev, val, FIT_CONTROL);
  2834. }
  2835. /*
  2836. *****************************************************************************
  2837. * START, STOP, RESTART, QUIESCE, UNQUIESCE
  2838. *****************************************************************************
  2839. */
  2840. static void skd_soft_reset(struct skd_device *skdev)
  2841. {
  2842. u32 val;
  2843. val = SKD_READL(skdev, FIT_CONTROL);
  2844. val |= (FIT_CR_SOFT_RESET);
  2845. pr_debug("%s:%s:%d control=0x%x\n",
  2846. skdev->name, __func__, __LINE__, val);
  2847. SKD_WRITEL(skdev, val, FIT_CONTROL);
  2848. }
  2849. static void skd_start_device(struct skd_device *skdev)
  2850. {
  2851. unsigned long flags;
  2852. u32 sense;
  2853. u32 state;
  2854. spin_lock_irqsave(&skdev->lock, flags);
  2855. /* ack all ghost interrupts */
  2856. SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
  2857. sense = SKD_READL(skdev, FIT_STATUS);
  2858. pr_debug("%s:%s:%d initial status=0x%x\n",
  2859. skdev->name, __func__, __LINE__, sense);
  2860. state = sense & FIT_SR_DRIVE_STATE_MASK;
  2861. skdev->drive_state = state;
  2862. skdev->last_mtd = 0;
  2863. skdev->state = SKD_DRVR_STATE_STARTING;
  2864. skdev->timer_countdown = SKD_STARTING_TIMO;
  2865. skd_enable_interrupts(skdev);
  2866. switch (skdev->drive_state) {
  2867. case FIT_SR_DRIVE_OFFLINE:
  2868. pr_err("(%s): Drive offline...\n", skd_name(skdev));
  2869. break;
  2870. case FIT_SR_DRIVE_FW_BOOTING:
  2871. pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n",
  2872. skdev->name, __func__, __LINE__, skdev->name);
  2873. skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
  2874. skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
  2875. break;
  2876. case FIT_SR_DRIVE_BUSY_SANITIZE:
  2877. pr_info("(%s): Start: BUSY_SANITIZE\n",
  2878. skd_name(skdev));
  2879. skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
  2880. skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
  2881. break;
  2882. case FIT_SR_DRIVE_BUSY_ERASE:
  2883. pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev));
  2884. skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
  2885. skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
  2886. break;
  2887. case FIT_SR_DRIVE_INIT:
  2888. case FIT_SR_DRIVE_ONLINE:
  2889. skd_soft_reset(skdev);
  2890. break;
  2891. case FIT_SR_DRIVE_BUSY:
  2892. pr_err("(%s): Drive Busy...\n", skd_name(skdev));
  2893. skdev->state = SKD_DRVR_STATE_BUSY;
  2894. skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
  2895. break;
  2896. case FIT_SR_DRIVE_SOFT_RESET:
  2897. pr_err("(%s) drive soft reset in prog\n",
  2898. skd_name(skdev));
  2899. break;
  2900. case FIT_SR_DRIVE_FAULT:
2901. /* Fault state is bad: a soft reset won't recover it.
2902. * A hard reset might, but it is unclear the device supports one.
2903. * For now, just fault so the system doesn't hang.
  2904. */
  2905. skd_drive_fault(skdev);
2906. /* start the queue so we can respond with errors to requests */
  2907. pr_debug("%s:%s:%d starting %s queue\n",
  2908. skdev->name, __func__, __LINE__, skdev->name);
  2909. blk_start_queue(skdev->queue);
  2910. skdev->gendisk_on = -1;
  2911. wake_up_interruptible(&skdev->waitq);
  2912. break;
  2913. case 0xFF:
  2914. /* Most likely the device isn't there or isn't responding
  2915. * to the BAR1 addresses. */
  2916. skd_drive_disappeared(skdev);
2917. /* start the queue so we can respond with errors to requests */
  2918. pr_debug("%s:%s:%d starting %s queue to error-out reqs\n",
  2919. skdev->name, __func__, __LINE__, skdev->name);
  2920. blk_start_queue(skdev->queue);
  2921. skdev->gendisk_on = -1;
  2922. wake_up_interruptible(&skdev->waitq);
  2923. break;
  2924. default:
  2925. pr_err("(%s) Start: unknown state %x\n",
  2926. skd_name(skdev), skdev->drive_state);
  2927. break;
  2928. }
  2929. state = SKD_READL(skdev, FIT_CONTROL);
  2930. pr_debug("%s:%s:%d FIT Control Status=0x%x\n",
  2931. skdev->name, __func__, __LINE__, state);
  2932. state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
  2933. pr_debug("%s:%s:%d Intr Status=0x%x\n",
  2934. skdev->name, __func__, __LINE__, state);
  2935. state = SKD_READL(skdev, FIT_INT_MASK_HOST);
  2936. pr_debug("%s:%s:%d Intr Mask=0x%x\n",
  2937. skdev->name, __func__, __LINE__, state);
  2938. state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
  2939. pr_debug("%s:%s:%d Msg from Dev=0x%x\n",
  2940. skdev->name, __func__, __LINE__, state);
  2941. state = SKD_READL(skdev, FIT_HW_VERSION);
  2942. pr_debug("%s:%s:%d HW version=0x%x\n",
  2943. skdev->name, __func__, __LINE__, state);
  2944. spin_unlock_irqrestore(&skdev->lock, flags);
  2945. }
  2946. static void skd_stop_device(struct skd_device *skdev)
  2947. {
  2948. unsigned long flags;
  2949. struct skd_special_context *skspcl = &skdev->internal_skspcl;
  2950. u32 dev_state;
  2951. int i;
  2952. spin_lock_irqsave(&skdev->lock, flags);
  2953. if (skdev->state != SKD_DRVR_STATE_ONLINE) {
  2954. pr_err("(%s): skd_stop_device not online no sync\n",
  2955. skd_name(skdev));
  2956. goto stop_out;
  2957. }
  2958. if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
  2959. pr_err("(%s): skd_stop_device no special\n",
  2960. skd_name(skdev));
  2961. goto stop_out;
  2962. }
  2963. skdev->state = SKD_DRVR_STATE_SYNCING;
  2964. skdev->sync_done = 0;
  2965. skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
  2966. spin_unlock_irqrestore(&skdev->lock, flags);
  2967. wait_event_interruptible_timeout(skdev->waitq,
  2968. (skdev->sync_done), (10 * HZ));
  2969. spin_lock_irqsave(&skdev->lock, flags);
  2970. switch (skdev->sync_done) {
  2971. case 0:
  2972. pr_err("(%s): skd_stop_device no sync\n",
  2973. skd_name(skdev));
  2974. break;
  2975. case 1:
  2976. pr_err("(%s): skd_stop_device sync done\n",
  2977. skd_name(skdev));
  2978. break;
  2979. default:
  2980. pr_err("(%s): skd_stop_device sync error\n",
  2981. skd_name(skdev));
  2982. }
  2983. stop_out:
  2984. skdev->state = SKD_DRVR_STATE_STOPPING;
  2985. spin_unlock_irqrestore(&skdev->lock, flags);
  2986. skd_kill_timer(skdev);
  2987. spin_lock_irqsave(&skdev->lock, flags);
  2988. skd_disable_interrupts(skdev);
  2989. /* ensure all ints on device are cleared */
  2990. /* soft reset the device to unload with a clean slate */
  2991. SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
  2992. SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
  2993. spin_unlock_irqrestore(&skdev->lock, flags);
  2994. /* poll every 100ms, 1 second timeout */
  2995. for (i = 0; i < 10; i++) {
  2996. dev_state =
  2997. SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
  2998. if (dev_state == FIT_SR_DRIVE_INIT)
  2999. break;
  3000. set_current_state(TASK_INTERRUPTIBLE);
  3001. schedule_timeout(msecs_to_jiffies(100));
  3002. }
  3003. if (dev_state != FIT_SR_DRIVE_INIT)
  3004. pr_err("(%s): skd_stop_device state error 0x%02x\n",
  3005. skd_name(skdev), dev_state);
  3006. }
  3007. /* assume spinlock is held */
  3008. static void skd_restart_device(struct skd_device *skdev)
  3009. {
  3010. u32 state;
  3011. /* ack all ghost interrupts */
  3012. SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
  3013. state = SKD_READL(skdev, FIT_STATUS);
  3014. pr_debug("%s:%s:%d drive status=0x%x\n",
  3015. skdev->name, __func__, __LINE__, state);
  3016. state &= FIT_SR_DRIVE_STATE_MASK;
  3017. skdev->drive_state = state;
  3018. skdev->last_mtd = 0;
  3019. skdev->state = SKD_DRVR_STATE_RESTARTING;
  3020. skdev->timer_countdown = SKD_RESTARTING_TIMO;
  3021. skd_soft_reset(skdev);
  3022. }
  3023. /* assume spinlock is held */
  3024. static int skd_quiesce_dev(struct skd_device *skdev)
  3025. {
  3026. int rc = 0;
  3027. switch (skdev->state) {
  3028. case SKD_DRVR_STATE_BUSY:
  3029. case SKD_DRVR_STATE_BUSY_IMMINENT:
  3030. pr_debug("%s:%s:%d stopping %s queue\n",
  3031. skdev->name, __func__, __LINE__, skdev->name);
  3032. blk_stop_queue(skdev->queue);
  3033. break;
  3034. case SKD_DRVR_STATE_ONLINE:
  3035. case SKD_DRVR_STATE_STOPPING:
  3036. case SKD_DRVR_STATE_SYNCING:
  3037. case SKD_DRVR_STATE_PAUSING:
  3038. case SKD_DRVR_STATE_PAUSED:
  3039. case SKD_DRVR_STATE_STARTING:
  3040. case SKD_DRVR_STATE_RESTARTING:
  3041. case SKD_DRVR_STATE_RESUMING:
  3042. default:
  3043. rc = -EINVAL;
  3044. pr_debug("%s:%s:%d state [%d] not implemented\n",
  3045. skdev->name, __func__, __LINE__, skdev->state);
  3046. }
  3047. return rc;
  3048. }
  3049. /* assume spinlock is held */
  3050. static int skd_unquiesce_dev(struct skd_device *skdev)
  3051. {
  3052. int prev_driver_state = skdev->state;
  3053. skd_log_skdev(skdev, "unquiesce");
  3054. if (skdev->state == SKD_DRVR_STATE_ONLINE) {
  3055. pr_debug("%s:%s:%d **** device already ONLINE\n",
  3056. skdev->name, __func__, __LINE__);
  3057. return 0;
  3058. }
  3059. if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
  3060. /*
3061. * If there has been a state change to something other than
3062. * ONLINE, we will rely on the controller state change
3063. * to come back online and restart the queue.
3064. * The BUSY state means the driver is ready to
3065. * continue normal processing but is waiting for the controller
3066. * to become available.
  3067. */
  3068. skdev->state = SKD_DRVR_STATE_BUSY;
  3069. pr_debug("%s:%s:%d drive BUSY state\n",
  3070. skdev->name, __func__, __LINE__);
  3071. return 0;
  3072. }
  3073. /*
3074. * Drive has just come online. The driver is either in startup,
3075. * paused performing a task, or busy waiting for hardware.
  3076. */
  3077. switch (skdev->state) {
  3078. case SKD_DRVR_STATE_PAUSED:
  3079. case SKD_DRVR_STATE_BUSY:
  3080. case SKD_DRVR_STATE_BUSY_IMMINENT:
  3081. case SKD_DRVR_STATE_BUSY_ERASE:
  3082. case SKD_DRVR_STATE_STARTING:
  3083. case SKD_DRVR_STATE_RESTARTING:
  3084. case SKD_DRVR_STATE_FAULT:
  3085. case SKD_DRVR_STATE_IDLE:
  3086. case SKD_DRVR_STATE_LOAD:
  3087. skdev->state = SKD_DRVR_STATE_ONLINE;
  3088. pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
  3089. skd_name(skdev),
  3090. skd_skdev_state_to_str(prev_driver_state),
  3091. prev_driver_state, skd_skdev_state_to_str(skdev->state),
  3092. skdev->state);
  3093. pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n",
  3094. skdev->name, __func__, __LINE__);
  3095. pr_debug("%s:%s:%d starting %s queue\n",
  3096. skdev->name, __func__, __LINE__, skdev->name);
  3097. pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
  3098. blk_start_queue(skdev->queue);
  3099. skdev->gendisk_on = 1;
  3100. wake_up_interruptible(&skdev->waitq);
  3101. break;
  3102. case SKD_DRVR_STATE_DISAPPEARED:
  3103. default:
  3104. pr_debug("%s:%s:%d **** driver state %d, not implemented \n",
  3105. skdev->name, __func__, __LINE__,
  3106. skdev->state);
  3107. return -EBUSY;
  3108. }
  3109. return 0;
  3110. }
  3111. /*
  3112. *****************************************************************************
  3113. * PCIe MSI/MSI-X INTERRUPT HANDLERS
  3114. *****************************************************************************
  3115. */
  3116. static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
  3117. {
  3118. struct skd_device *skdev = skd_host_data;
  3119. unsigned long flags;
  3120. spin_lock_irqsave(&skdev->lock, flags);
  3121. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3122. skdev->name, __func__, __LINE__,
  3123. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3124. pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev),
  3125. irq, SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3126. SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
  3127. spin_unlock_irqrestore(&skdev->lock, flags);
  3128. return IRQ_HANDLED;
  3129. }
  3130. static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
  3131. {
  3132. struct skd_device *skdev = skd_host_data;
  3133. unsigned long flags;
  3134. spin_lock_irqsave(&skdev->lock, flags);
  3135. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3136. skdev->name, __func__, __LINE__,
  3137. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3138. SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
  3139. skd_isr_fwstate(skdev);
  3140. spin_unlock_irqrestore(&skdev->lock, flags);
  3141. return IRQ_HANDLED;
  3142. }
  3143. static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
  3144. {
  3145. struct skd_device *skdev = skd_host_data;
  3146. unsigned long flags;
  3147. int flush_enqueued = 0;
  3148. int deferred;
  3149. spin_lock_irqsave(&skdev->lock, flags);
  3150. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3151. skdev->name, __func__, __LINE__,
  3152. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3153. SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
  3154. deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
  3155. &flush_enqueued);
  3156. if (flush_enqueued)
  3157. skd_request_fn(skdev->queue);
  3158. if (deferred)
  3159. schedule_work(&skdev->completion_worker);
  3160. else if (!flush_enqueued)
  3161. skd_request_fn(skdev->queue);
  3162. spin_unlock_irqrestore(&skdev->lock, flags);
  3163. return IRQ_HANDLED;
  3164. }
  3165. static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
  3166. {
  3167. struct skd_device *skdev = skd_host_data;
  3168. unsigned long flags;
  3169. spin_lock_irqsave(&skdev->lock, flags);
  3170. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3171. skdev->name, __func__, __LINE__,
  3172. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3173. SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
  3174. skd_isr_msg_from_dev(skdev);
  3175. spin_unlock_irqrestore(&skdev->lock, flags);
  3176. return IRQ_HANDLED;
  3177. }
  3178. static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
  3179. {
  3180. struct skd_device *skdev = skd_host_data;
  3181. unsigned long flags;
  3182. spin_lock_irqsave(&skdev->lock, flags);
  3183. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3184. skdev->name, __func__, __LINE__,
  3185. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3186. SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
  3187. spin_unlock_irqrestore(&skdev->lock, flags);
  3188. return IRQ_HANDLED;
  3189. }
  3190. /*
  3191. *****************************************************************************
  3192. * PCIe MSI/MSI-X SETUP
  3193. *****************************************************************************
  3194. */
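/*
 * In MSI-X mode the device exposes SKD_MAX_MSIX_COUNT (13) vectors;
 * msix_entries[] maps each vector index to its handler. The state
 * change, completion queue and message vectors have dedicated
 * handlers; the DMA and reserved vectors are simply acked by
 * skd_reserved_isr, and the queue-full vectors by skd_qfull_isr.
 */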
  3195. struct skd_msix_entry {
  3196. char isr_name[30];
  3197. };
  3198. struct skd_init_msix_entry {
  3199. const char *name;
  3200. irq_handler_t handler;
  3201. };
  3202. #define SKD_MAX_MSIX_COUNT 13
  3203. #define SKD_MIN_MSIX_COUNT 7
  3204. #define SKD_BASE_MSIX_IRQ 4
  3205. static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
  3206. { "(DMA 0)", skd_reserved_isr },
  3207. { "(DMA 1)", skd_reserved_isr },
  3208. { "(DMA 2)", skd_reserved_isr },
  3209. { "(DMA 3)", skd_reserved_isr },
  3210. { "(State Change)", skd_statec_isr },
  3211. { "(COMPL_Q)", skd_comp_q },
  3212. { "(MSG)", skd_msg_isr },
  3213. { "(Reserved)", skd_reserved_isr },
  3214. { "(Reserved)", skd_reserved_isr },
  3215. { "(Queue Full 0)", skd_qfull_isr },
  3216. { "(Queue Full 1)", skd_qfull_isr },
  3217. { "(Queue Full 2)", skd_qfull_isr },
  3218. { "(Queue Full 3)", skd_qfull_isr },
  3219. };
  3220. static int skd_acquire_msix(struct skd_device *skdev)
  3221. {
  3222. int i, rc;
  3223. struct pci_dev *pdev = skdev->pdev;
  3224. rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
  3225. PCI_IRQ_MSIX);
  3226. if (rc < 0) {
  3227. pr_err("(%s): failed to enable MSI-X %d\n",
  3228. skd_name(skdev), rc);
  3229. goto out;
  3230. }
  3231. skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
  3232. sizeof(struct skd_msix_entry), GFP_KERNEL);
  3233. if (!skdev->msix_entries) {
  3234. rc = -ENOMEM;
  3235. pr_err("(%s): msix table allocation error\n",
  3236. skd_name(skdev));
  3237. goto out;
  3238. }
  3239. /* Enable MSI-X vectors for the base queue */
  3240. for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
  3241. struct skd_msix_entry *qentry = &skdev->msix_entries[i];
  3242. snprintf(qentry->isr_name, sizeof(qentry->isr_name),
  3243. "%s%d-msix %s", DRV_NAME, skdev->devno,
  3244. msix_entries[i].name);
  3245. rc = devm_request_irq(&skdev->pdev->dev,
  3246. pci_irq_vector(skdev->pdev, i),
  3247. msix_entries[i].handler, 0,
  3248. qentry->isr_name, skdev);
  3249. if (rc) {
  3250. pr_err("(%s): Unable to register(%d) MSI-X "
  3251. "handler %d: %s\n",
  3252. skd_name(skdev), rc, i, qentry->isr_name);
  3253. goto msix_out;
  3254. }
  3255. }
  3256. pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
  3257. skdev->name, __func__, __LINE__,
  3258. pci_name(pdev), skdev->name, SKD_MAX_MSIX_COUNT);
  3259. return 0;
  3260. msix_out:
  3261. while (--i >= 0)
  3262. devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
  3263. out:
  3264. kfree(skdev->msix_entries);
  3265. skdev->msix_entries = NULL;
  3266. return rc;
  3267. }
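/*
 * Interrupt setup: try MSI-X first when skd_isr_type requests it, then
 * fall back to a single vector (MSI unless legacy was forced) serviced
 * by skd_isr. The single-vector case uses IRQF_SHARED only for legacy
 * INTx, since MSI interrupts are never shared.
 */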
  3268. static int skd_acquire_irq(struct skd_device *skdev)
  3269. {
  3270. struct pci_dev *pdev = skdev->pdev;
  3271. unsigned int irq_flag = PCI_IRQ_LEGACY;
  3272. int rc;
  3273. if (skd_isr_type == SKD_IRQ_MSIX) {
  3274. rc = skd_acquire_msix(skdev);
  3275. if (!rc)
  3276. return 0;
  3277. pr_err("(%s): failed to enable MSI-X, re-trying with MSI %d\n",
  3278. skd_name(skdev), rc);
  3279. }
  3280. snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
  3281. skdev->devno);
  3282. if (skd_isr_type != SKD_IRQ_LEGACY)
  3283. irq_flag |= PCI_IRQ_MSI;
  3284. rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
  3285. if (rc < 0) {
  3286. pr_err("(%s): failed to allocate the MSI interrupt %d\n",
  3287. skd_name(skdev), rc);
  3288. return rc;
  3289. }
  3290. rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
  3291. pdev->msi_enabled ? 0 : IRQF_SHARED,
  3292. skdev->isr_name, skdev);
  3293. if (rc) {
  3294. pci_free_irq_vectors(pdev);
  3295. pr_err("(%s): failed to allocate interrupt %d\n",
  3296. skd_name(skdev), rc);
  3297. return rc;
  3298. }
  3299. return 0;
  3300. }
  3301. static void skd_release_irq(struct skd_device *skdev)
  3302. {
  3303. struct pci_dev *pdev = skdev->pdev;
  3304. if (skdev->msix_entries) {
  3305. int i;
  3306. for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
  3307. devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
  3308. skdev);
  3309. }
  3310. kfree(skdev->msix_entries);
  3311. skdev->msix_entries = NULL;
  3312. } else {
  3313. devm_free_irq(&pdev->dev, pdev->irq, skdev);
  3314. }
  3315. pci_free_irq_vectors(pdev);
  3316. }
  3317. /*
  3318. *****************************************************************************
  3319. * CONSTRUCT
  3320. *****************************************************************************
  3321. */
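/*
 * The completion area is a single coherent DMA allocation: an array of
 * SKD_N_COMPLETION_ENTRY completion entries followed immediately by an
 * equally sized array of error-info entries, with skerr_table pointing
 * at the latter.
 */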
  3322. static int skd_cons_skcomp(struct skd_device *skdev)
  3323. {
  3324. int rc = 0;
  3325. struct fit_completion_entry_v1 *skcomp;
  3326. u32 nbytes;
  3327. nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
  3328. nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
  3329. pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n",
  3330. skdev->name, __func__, __LINE__,
  3331. nbytes, SKD_N_COMPLETION_ENTRY);
  3332. skcomp = pci_zalloc_consistent(skdev->pdev, nbytes,
  3333. &skdev->cq_dma_address);
  3334. if (skcomp == NULL) {
  3335. rc = -ENOMEM;
  3336. goto err_out;
  3337. }
  3338. skdev->skcomp_table = skcomp;
  3339. skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
  3340. sizeof(*skcomp) *
  3341. SKD_N_COMPLETION_ENTRY);
  3342. err_out:
  3343. return rc;
  3344. }
  3345. static int skd_cons_skmsg(struct skd_device *skdev)
  3346. {
  3347. int rc = 0;
  3348. u32 i;
  3349. pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n",
  3350. skdev->name, __func__, __LINE__,
  3351. sizeof(struct skd_fitmsg_context),
  3352. skdev->num_fitmsg_context,
  3353. sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
  3354. skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
  3355. *skdev->num_fitmsg_context, GFP_KERNEL);
  3356. if (skdev->skmsg_table == NULL) {
  3357. rc = -ENOMEM;
  3358. goto err_out;
  3359. }
  3360. for (i = 0; i < skdev->num_fitmsg_context; i++) {
  3361. struct skd_fitmsg_context *skmsg;
  3362. skmsg = &skdev->skmsg_table[i];
  3363. skmsg->id = i + SKD_ID_FIT_MSG;
  3364. skmsg->state = SKD_MSG_STATE_IDLE;
  3365. skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
  3366. SKD_N_FITMSG_BYTES + 64,
  3367. &skmsg->mb_dma_address);
  3368. if (skmsg->msg_buf == NULL) {
  3369. rc = -ENOMEM;
  3370. goto err_out;
  3371. }
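/* The FIT command register only carries the bits covered by
 * FIT_QCMD_BASE_ADDRESS_MASK, so the message buffer must start on
 * that boundary (presumably 64 bytes, matching the extra 64 bytes
 * allocated above). Round the CPU pointer and DMA address up to the
 * boundary and remember the discarded low bits in skmsg->offset so
 * skd_free_skmsg() can undo the adjustment before freeing.
 */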
  3372. skmsg->offset = (u32)((u64)skmsg->msg_buf &
  3373. (~FIT_QCMD_BASE_ADDRESS_MASK));
  3374. skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
  3375. skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
  3376. FIT_QCMD_BASE_ADDRESS_MASK);
  3377. skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
  3378. skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
  3379. memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
  3380. skmsg->next = &skmsg[1];
  3381. }
  3382. /* Free list is in order starting with the 0th entry. */
  3383. skdev->skmsg_table[i - 1].next = NULL;
  3384. skdev->skmsg_free_list = skdev->skmsg_table;
  3385. err_out:
  3386. return rc;
  3387. }
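/*
 * Allocate a coherent array of n_sg FIT SG descriptors and pre-link
 * them: each entry's next_desc_ptr holds the bus address of the next
 * descriptor, and the final entry is terminated with 0.
 */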
  3388. static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
  3389. u32 n_sg,
  3390. dma_addr_t *ret_dma_addr)
  3391. {
  3392. struct fit_sg_descriptor *sg_list;
  3393. u32 nbytes;
  3394. nbytes = sizeof(*sg_list) * n_sg;
  3395. sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
  3396. if (sg_list != NULL) {
  3397. uint64_t dma_address = *ret_dma_addr;
  3398. u32 i;
  3399. memset(sg_list, 0, nbytes);
  3400. for (i = 0; i < n_sg - 1; i++) {
  3401. uint64_t ndp_off;
  3402. ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
  3403. sg_list[i].next_desc_ptr = dma_address + ndp_off;
  3404. }
  3405. sg_list[i].next_desc_ptr = 0LL;
  3406. }
  3407. return sg_list;
  3408. }
  3409. static int skd_cons_skreq(struct skd_device *skdev)
  3410. {
  3411. int rc = 0;
  3412. u32 i;
  3413. pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n",
  3414. skdev->name, __func__, __LINE__,
  3415. sizeof(struct skd_request_context),
  3416. skdev->num_req_context,
  3417. sizeof(struct skd_request_context) * skdev->num_req_context);
  3418. skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
  3419. * skdev->num_req_context, GFP_KERNEL);
  3420. if (skdev->skreq_table == NULL) {
  3421. rc = -ENOMEM;
  3422. goto err_out;
  3423. }
  3424. pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
  3425. skdev->name, __func__, __LINE__,
  3426. skdev->sgs_per_request, sizeof(struct scatterlist),
  3427. skdev->sgs_per_request * sizeof(struct scatterlist));
  3428. for (i = 0; i < skdev->num_req_context; i++) {
  3429. struct skd_request_context *skreq;
  3430. skreq = &skdev->skreq_table[i];
  3431. skreq->id = i + SKD_ID_RW_REQUEST;
  3432. skreq->state = SKD_REQ_STATE_IDLE;
  3433. skreq->sg = kzalloc(sizeof(struct scatterlist) *
  3434. skdev->sgs_per_request, GFP_KERNEL);
  3435. if (skreq->sg == NULL) {
  3436. rc = -ENOMEM;
  3437. goto err_out;
  3438. }
  3439. sg_init_table(skreq->sg, skdev->sgs_per_request);
  3440. skreq->sksg_list = skd_cons_sg_list(skdev,
  3441. skdev->sgs_per_request,
  3442. &skreq->sksg_dma_address);
  3443. if (skreq->sksg_list == NULL) {
  3444. rc = -ENOMEM;
  3445. goto err_out;
  3446. }
  3447. skreq->next = &skreq[1];
  3448. }
  3449. /* Free list is in order starting with the 0th entry. */
  3450. skdev->skreq_table[i - 1].next = NULL;
  3451. skdev->skreq_free_list = skdev->skreq_table;
  3452. err_out:
  3453. return rc;
  3454. }
  3455. static int skd_cons_skspcl(struct skd_device *skdev)
  3456. {
  3457. int rc = 0;
  3458. u32 i, nbytes;
  3459. pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n",
  3460. skdev->name, __func__, __LINE__,
  3461. sizeof(struct skd_special_context),
  3462. skdev->n_special,
  3463. sizeof(struct skd_special_context) * skdev->n_special);
  3464. skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
  3465. * skdev->n_special, GFP_KERNEL);
  3466. if (skdev->skspcl_table == NULL) {
  3467. rc = -ENOMEM;
  3468. goto err_out;
  3469. }
  3470. for (i = 0; i < skdev->n_special; i++) {
  3471. struct skd_special_context *skspcl;
  3472. skspcl = &skdev->skspcl_table[i];
  3473. skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
  3474. skspcl->req.state = SKD_REQ_STATE_IDLE;
  3475. skspcl->req.next = &skspcl[1].req;
  3476. nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
  3477. skspcl->msg_buf =
  3478. pci_zalloc_consistent(skdev->pdev, nbytes,
  3479. &skspcl->mb_dma_address);
  3480. if (skspcl->msg_buf == NULL) {
  3481. rc = -ENOMEM;
  3482. goto err_out;
  3483. }
  3484. skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
  3485. SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
  3486. if (skspcl->req.sg == NULL) {
  3487. rc = -ENOMEM;
  3488. goto err_out;
  3489. }
  3490. skspcl->req.sksg_list = skd_cons_sg_list(skdev,
  3491. SKD_N_SG_PER_SPECIAL,
  3492. &skspcl->req.
  3493. sksg_dma_address);
  3494. if (skspcl->req.sksg_list == NULL) {
  3495. rc = -ENOMEM;
  3496. goto err_out;
  3497. }
  3498. }
  3499. /* Free list is in order starting with the 0th entry. */
  3500. skdev->skspcl_table[i - 1].req.next = NULL;
  3501. skdev->skspcl_free_list = skdev->skspcl_table;
  3502. return rc;
  3503. err_out:
  3504. return rc;
  3505. }
  3506. static int skd_cons_sksb(struct skd_device *skdev)
  3507. {
  3508. int rc = 0;
  3509. struct skd_special_context *skspcl;
  3510. u32 nbytes;
  3511. skspcl = &skdev->internal_skspcl;
  3512. skspcl->req.id = 0 + SKD_ID_INTERNAL;
  3513. skspcl->req.state = SKD_REQ_STATE_IDLE;
  3514. nbytes = SKD_N_INTERNAL_BYTES;
  3515. skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
  3516. &skspcl->db_dma_address);
  3517. if (skspcl->data_buf == NULL) {
  3518. rc = -ENOMEM;
  3519. goto err_out;
  3520. }
  3521. nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
  3522. skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
  3523. &skspcl->mb_dma_address);
  3524. if (skspcl->msg_buf == NULL) {
  3525. rc = -ENOMEM;
  3526. goto err_out;
  3527. }
  3528. skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
  3529. &skspcl->req.sksg_dma_address);
  3530. if (skspcl->req.sksg_list == NULL) {
  3531. rc = -ENOMEM;
  3532. goto err_out;
  3533. }
  3534. if (!skd_format_internal_skspcl(skdev)) {
  3535. rc = -EINVAL;
  3536. goto err_out;
  3537. }
  3538. err_out:
  3539. return rc;
  3540. }
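/*
 * Gendisk/request-queue setup: a traditional request_fn queue driven
 * by skd_request_fn, advertising a volatile write cache with FUA,
 * sgs_per_request segments, an 8K optimal I/O size and non-rotational
 * media. The queue is left stopped until the drive reports ONLINE.
 */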
  3541. static int skd_cons_disk(struct skd_device *skdev)
  3542. {
  3543. int rc = 0;
  3544. struct gendisk *disk;
  3545. struct request_queue *q;
  3546. unsigned long flags;
  3547. disk = alloc_disk(SKD_MINORS_PER_DEVICE);
  3548. if (!disk) {
  3549. rc = -ENOMEM;
  3550. goto err_out;
  3551. }
  3552. skdev->disk = disk;
  3553. sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
  3554. disk->major = skdev->major;
  3555. disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
  3556. disk->fops = &skd_blockdev_ops;
  3557. disk->private_data = skdev;
  3558. q = blk_init_queue(skd_request_fn, &skdev->lock);
  3559. if (!q) {
  3560. rc = -ENOMEM;
  3561. goto err_out;
  3562. }
  3563. skdev->queue = q;
  3564. disk->queue = q;
  3565. q->queuedata = skdev;
  3566. blk_queue_write_cache(q, true, true);
  3567. blk_queue_max_segments(q, skdev->sgs_per_request);
  3568. blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
3569. /* set sysfs optimal_io_size to 8K */
  3570. blk_queue_io_opt(q, 8192);
  3571. queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
  3572. queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
  3573. spin_lock_irqsave(&skdev->lock, flags);
  3574. pr_debug("%s:%s:%d stopping %s queue\n",
  3575. skdev->name, __func__, __LINE__, skdev->name);
  3576. blk_stop_queue(skdev->queue);
  3577. spin_unlock_irqrestore(&skdev->lock, flags);
  3578. err_out:
  3579. return rc;
  3580. }
  3581. #define SKD_N_DEV_TABLE 16u
  3582. static u32 skd_next_devno;
  3583. static struct skd_device *skd_construct(struct pci_dev *pdev)
  3584. {
  3585. struct skd_device *skdev;
  3586. int blk_major = skd_major;
  3587. int rc;
  3588. skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
  3589. if (!skdev) {
  3590. pr_err(PFX "(%s): memory alloc failure\n",
  3591. pci_name(pdev));
  3592. return NULL;
  3593. }
  3594. skdev->state = SKD_DRVR_STATE_LOAD;
  3595. skdev->pdev = pdev;
  3596. skdev->devno = skd_next_devno++;
  3597. skdev->major = blk_major;
  3598. sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
  3599. skdev->dev_max_queue_depth = 0;
  3600. skdev->num_req_context = skd_max_queue_depth;
  3601. skdev->num_fitmsg_context = skd_max_queue_depth;
  3602. skdev->n_special = skd_max_pass_thru;
  3603. skdev->cur_max_queue_depth = 1;
  3604. skdev->queue_low_water_mark = 1;
  3605. skdev->proto_ver = 99;
  3606. skdev->sgs_per_request = skd_sgs_per_request;
  3607. skdev->dbg_level = skd_dbg_level;
  3608. atomic_set(&skdev->device_count, 0);
  3609. spin_lock_init(&skdev->lock);
  3610. INIT_WORK(&skdev->completion_worker, skd_completion_worker);
  3611. pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
  3612. rc = skd_cons_skcomp(skdev);
  3613. if (rc < 0)
  3614. goto err_out;
  3615. pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
  3616. rc = skd_cons_skmsg(skdev);
  3617. if (rc < 0)
  3618. goto err_out;
  3619. pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
  3620. rc = skd_cons_skreq(skdev);
  3621. if (rc < 0)
  3622. goto err_out;
  3623. pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
  3624. rc = skd_cons_skspcl(skdev);
  3625. if (rc < 0)
  3626. goto err_out;
  3627. pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
  3628. rc = skd_cons_sksb(skdev);
  3629. if (rc < 0)
  3630. goto err_out;
  3631. pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
  3632. rc = skd_cons_disk(skdev);
  3633. if (rc < 0)
  3634. goto err_out;
  3635. pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__);
  3636. return skdev;
  3637. err_out:
  3638. pr_debug("%s:%s:%d construct failed\n",
  3639. skdev->name, __func__, __LINE__);
  3640. skd_destruct(skdev);
  3641. return NULL;
  3642. }
  3643. /*
  3644. *****************************************************************************
  3645. * DESTRUCT (FREE)
  3646. *****************************************************************************
  3647. */
  3648. static void skd_free_skcomp(struct skd_device *skdev)
  3649. {
  3650. if (skdev->skcomp_table != NULL) {
  3651. u32 nbytes;
  3652. nbytes = sizeof(skdev->skcomp_table[0]) *
  3653. SKD_N_COMPLETION_ENTRY;
  3654. pci_free_consistent(skdev->pdev, nbytes,
  3655. skdev->skcomp_table, skdev->cq_dma_address);
  3656. }
  3657. skdev->skcomp_table = NULL;
  3658. skdev->cq_dma_address = 0;
  3659. }
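/*
 * Free the FIT message buffers. The pointer and DMA address were
 * rounded up at allocation time, so add back the saved offset before
 * handing them to pci_free_consistent().
 */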
  3660. static void skd_free_skmsg(struct skd_device *skdev)
  3661. {
  3662. u32 i;
  3663. if (skdev->skmsg_table == NULL)
  3664. return;
  3665. for (i = 0; i < skdev->num_fitmsg_context; i++) {
  3666. struct skd_fitmsg_context *skmsg;
  3667. skmsg = &skdev->skmsg_table[i];
  3668. if (skmsg->msg_buf != NULL) {
  3669. skmsg->msg_buf += skmsg->offset;
  3670. skmsg->mb_dma_address += skmsg->offset;
  3671. pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
  3672. skmsg->msg_buf,
  3673. skmsg->mb_dma_address);
  3674. }
  3675. skmsg->msg_buf = NULL;
  3676. skmsg->mb_dma_address = 0;
  3677. }
  3678. kfree(skdev->skmsg_table);
  3679. skdev->skmsg_table = NULL;
  3680. }
  3681. static void skd_free_sg_list(struct skd_device *skdev,
  3682. struct fit_sg_descriptor *sg_list,
  3683. u32 n_sg, dma_addr_t dma_addr)
  3684. {
  3685. if (sg_list != NULL) {
  3686. u32 nbytes;
  3687. nbytes = sizeof(*sg_list) * n_sg;
  3688. pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
  3689. }
  3690. }
  3691. static void skd_free_skreq(struct skd_device *skdev)
  3692. {
  3693. u32 i;
  3694. if (skdev->skreq_table == NULL)
  3695. return;
  3696. for (i = 0; i < skdev->num_req_context; i++) {
  3697. struct skd_request_context *skreq;
  3698. skreq = &skdev->skreq_table[i];
  3699. skd_free_sg_list(skdev, skreq->sksg_list,
  3700. skdev->sgs_per_request,
  3701. skreq->sksg_dma_address);
  3702. skreq->sksg_list = NULL;
  3703. skreq->sksg_dma_address = 0;
  3704. kfree(skreq->sg);
  3705. }
  3706. kfree(skdev->skreq_table);
  3707. skdev->skreq_table = NULL;
  3708. }
  3709. static void skd_free_skspcl(struct skd_device *skdev)
  3710. {
  3711. u32 i;
  3712. u32 nbytes;
  3713. if (skdev->skspcl_table == NULL)
  3714. return;
  3715. for (i = 0; i < skdev->n_special; i++) {
  3716. struct skd_special_context *skspcl;
  3717. skspcl = &skdev->skspcl_table[i];
  3718. if (skspcl->msg_buf != NULL) {
  3719. nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
  3720. pci_free_consistent(skdev->pdev, nbytes,
  3721. skspcl->msg_buf,
  3722. skspcl->mb_dma_address);
  3723. }
  3724. skspcl->msg_buf = NULL;
  3725. skspcl->mb_dma_address = 0;
  3726. skd_free_sg_list(skdev, skspcl->req.sksg_list,
  3727. SKD_N_SG_PER_SPECIAL,
  3728. skspcl->req.sksg_dma_address);
  3729. skspcl->req.sksg_list = NULL;
  3730. skspcl->req.sksg_dma_address = 0;
  3731. kfree(skspcl->req.sg);
  3732. }
  3733. kfree(skdev->skspcl_table);
  3734. skdev->skspcl_table = NULL;
  3735. }
  3736. static void skd_free_sksb(struct skd_device *skdev)
  3737. {
  3738. struct skd_special_context *skspcl;
  3739. u32 nbytes;
  3740. skspcl = &skdev->internal_skspcl;
  3741. if (skspcl->data_buf != NULL) {
  3742. nbytes = SKD_N_INTERNAL_BYTES;
  3743. pci_free_consistent(skdev->pdev, nbytes,
  3744. skspcl->data_buf, skspcl->db_dma_address);
  3745. }
  3746. skspcl->data_buf = NULL;
  3747. skspcl->db_dma_address = 0;
  3748. if (skspcl->msg_buf != NULL) {
  3749. nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
  3750. pci_free_consistent(skdev->pdev, nbytes,
  3751. skspcl->msg_buf, skspcl->mb_dma_address);
  3752. }
  3753. skspcl->msg_buf = NULL;
  3754. skspcl->mb_dma_address = 0;
  3755. skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
  3756. skspcl->req.sksg_dma_address);
  3757. skspcl->req.sksg_list = NULL;
  3758. skspcl->req.sksg_dma_address = 0;
  3759. }
  3760. static void skd_free_disk(struct skd_device *skdev)
  3761. {
  3762. struct gendisk *disk = skdev->disk;
  3763. if (disk != NULL) {
  3764. struct request_queue *q = disk->queue;
  3765. if (disk->flags & GENHD_FL_UP)
  3766. del_gendisk(disk);
  3767. if (q)
  3768. blk_cleanup_queue(q);
  3769. put_disk(disk);
  3770. }
  3771. skdev->disk = NULL;
  3772. }
  3773. static void skd_destruct(struct skd_device *skdev)
  3774. {
  3775. if (skdev == NULL)
  3776. return;
  3777. pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
  3778. skd_free_disk(skdev);
  3779. pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
  3780. skd_free_sksb(skdev);
  3781. pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
  3782. skd_free_skspcl(skdev);
  3783. pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
  3784. skd_free_skreq(skdev);
  3785. pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
  3786. skd_free_skmsg(skdev);
  3787. pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
  3788. skd_free_skcomp(skdev);
  3789. pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__);
  3790. kfree(skdev);
  3791. }
  3792. /*
  3793. *****************************************************************************
  3794. * BLOCK DEVICE (BDEV) GLUE
  3795. *****************************************************************************
  3796. */
  3797. static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
  3798. {
  3799. struct skd_device *skdev;
  3800. u64 capacity;
  3801. skdev = bdev->bd_disk->private_data;
  3802. pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n",
  3803. skdev->name, __func__, __LINE__,
  3804. bdev->bd_disk->disk_name, current->comm);
  3805. if (skdev->read_cap_is_valid) {
  3806. capacity = get_capacity(skdev->disk);
  3807. geo->heads = 64;
  3808. geo->sectors = 255;
  3809. geo->cylinders = (capacity) / (255 * 64);
  3810. return 0;
  3811. }
  3812. return -EIO;
  3813. }
  3814. static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
  3815. {
  3816. pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
  3817. device_add_disk(parent, skdev->disk);
  3818. return 0;
  3819. }
  3820. static const struct block_device_operations skd_blockdev_ops = {
  3821. .owner = THIS_MODULE,
  3822. .ioctl = skd_bdev_ioctl,
  3823. .getgeo = skd_bdev_getgeo,
  3824. };
  3825. /*
  3826. *****************************************************************************
  3827. * PCIe DRIVER GLUE
  3828. *****************************************************************************
  3829. */
  3830. static const struct pci_device_id skd_pci_tbl[] = {
  3831. { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
  3832. PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
  3833. { 0 } /* terminate list */
  3834. };
  3835. MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
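/*
 * Build a short description of the PCIe link. Offset 0x12 into the
 * PCIe capability is the Link Status register: bits 3:0 encode the
 * link speed (1 = 2.5GT/s, 2 = 5.0GT/s) and bits 9:4 the negotiated
 * link width.
 */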
  3836. static char *skd_pci_info(struct skd_device *skdev, char *str)
  3837. {
  3838. int pcie_reg;
  3839. strcpy(str, "PCIe (");
  3840. pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
  3841. if (pcie_reg) {
  3842. char lwstr[6];
  3843. uint16_t pcie_lstat, lspeed, lwidth;
  3844. pcie_reg += 0x12;
  3845. pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
  3846. lspeed = pcie_lstat & (0xF);
  3847. lwidth = (pcie_lstat & 0x3F0) >> 4;
  3848. if (lspeed == 1)
  3849. strcat(str, "2.5GT/s ");
  3850. else if (lspeed == 2)
  3851. strcat(str, "5.0GT/s ");
  3852. else
  3853. strcat(str, "<unknown> ");
  3854. snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
  3855. strcat(str, lwstr);
  3856. }
  3857. return str;
  3858. }
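/*
 * Probe: enable the device, claim its regions, set a 64-bit DMA mask
 * (falling back to 32-bit), register the block major on first use,
 * construct the skd_device, map the BARs, acquire interrupts, start
 * the timer and the device, then wait up to SKD_START_WAIT_SECONDS
 * for the drive to come online before attaching the gendisk.
 */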
  3859. static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  3860. {
  3861. int i;
  3862. int rc = 0;
  3863. char pci_str[32];
  3864. struct skd_device *skdev;
  3865. pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
  3866. DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
  3867. pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
  3868. pci_name(pdev), pdev->vendor, pdev->device);
  3869. rc = pci_enable_device(pdev);
  3870. if (rc)
  3871. return rc;
  3872. rc = pci_request_regions(pdev, DRV_NAME);
  3873. if (rc)
  3874. goto err_out;
  3875. rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
  3876. if (!rc) {
  3877. if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
  3878. pr_err("(%s): consistent DMA mask error %d\n",
  3879. pci_name(pdev), rc);
  3880. }
  3881. } else {
3882. rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
  3883. if (rc) {
  3884. pr_err("(%s): DMA mask error %d\n",
  3885. pci_name(pdev), rc);
  3886. goto err_out_regions;
  3887. }
  3888. }
  3889. if (!skd_major) {
  3890. rc = register_blkdev(0, DRV_NAME);
  3891. if (rc < 0)
  3892. goto err_out_regions;
  3893. BUG_ON(!rc);
  3894. skd_major = rc;
  3895. }
  3896. skdev = skd_construct(pdev);
  3897. if (skdev == NULL) {
  3898. rc = -ENOMEM;
  3899. goto err_out_regions;
  3900. }
  3901. skd_pci_info(skdev, pci_str);
  3902. pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);
  3903. pci_set_master(pdev);
  3904. rc = pci_enable_pcie_error_reporting(pdev);
  3905. if (rc) {
  3906. pr_err(
  3907. "(%s): bad enable of PCIe error reporting rc=%d\n",
  3908. skd_name(skdev), rc);
  3909. skdev->pcie_error_reporting_is_enabled = 0;
  3910. } else
  3911. skdev->pcie_error_reporting_is_enabled = 1;
  3912. pci_set_drvdata(pdev, skdev);
  3913. for (i = 0; i < SKD_MAX_BARS; i++) {
  3914. skdev->mem_phys[i] = pci_resource_start(pdev, i);
  3915. skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
  3916. skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
  3917. skdev->mem_size[i]);
  3918. if (!skdev->mem_map[i]) {
  3919. pr_err("(%s): Unable to map adapter memory!\n",
  3920. skd_name(skdev));
  3921. rc = -ENODEV;
  3922. goto err_out_iounmap;
  3923. }
  3924. pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
  3925. skdev->name, __func__, __LINE__,
  3926. skdev->mem_map[i],
  3927. (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
  3928. }
  3929. rc = skd_acquire_irq(skdev);
  3930. if (rc) {
  3931. pr_err("(%s): interrupt resource error %d\n",
  3932. skd_name(skdev), rc);
  3933. goto err_out_iounmap;
  3934. }
  3935. rc = skd_start_timer(skdev);
  3936. if (rc)
  3937. goto err_out_timer;
  3938. init_waitqueue_head(&skdev->waitq);
  3939. skd_start_device(skdev);
  3940. rc = wait_event_interruptible_timeout(skdev->waitq,
  3941. (skdev->gendisk_on),
  3942. (SKD_START_WAIT_SECONDS * HZ));
  3943. if (skdev->gendisk_on > 0) {
  3944. /* device came on-line after reset */
  3945. skd_bdev_attach(&pdev->dev, skdev);
  3946. rc = 0;
  3947. } else {
3948. /* we timed out, something is wrong with the device;
3949. * don't add the disk structure */
  3950. pr_err(
  3951. "(%s): error: waiting for s1120 timed out %d!\n",
  3952. skd_name(skdev), rc);
3953. /* in case of no error, we time out with -ENXIO */
  3954. if (!rc)
  3955. rc = -ENXIO;
  3956. goto err_out_timer;
  3957. }
  3958. #ifdef SKD_VMK_POLL_HANDLER
  3959. if (skdev->irq_type == SKD_IRQ_MSIX) {
  3960. /* MSIX completion handler is being used for coredump */
  3961. vmklnx_scsi_register_poll_handler(skdev->scsi_host,
  3962. skdev->msix_entries[5].vector,
  3963. skd_comp_q, skdev);
  3964. } else {
  3965. vmklnx_scsi_register_poll_handler(skdev->scsi_host,
  3966. skdev->pdev->irq, skd_isr,
  3967. skdev);
  3968. }
  3969. #endif /* SKD_VMK_POLL_HANDLER */
  3970. return rc;
  3971. err_out_timer:
  3972. skd_stop_device(skdev);
  3973. skd_release_irq(skdev);
  3974. err_out_iounmap:
  3975. for (i = 0; i < SKD_MAX_BARS; i++)
  3976. if (skdev->mem_map[i])
  3977. iounmap(skdev->mem_map[i]);
  3978. if (skdev->pcie_error_reporting_is_enabled)
  3979. pci_disable_pcie_error_reporting(pdev);
  3980. skd_destruct(skdev);
  3981. err_out_regions:
  3982. pci_release_regions(pdev);
  3983. err_out:
  3984. pci_disable_device(pdev);
  3985. pci_set_drvdata(pdev, NULL);
  3986. return rc;
  3987. }
  3988. static void skd_pci_remove(struct pci_dev *pdev)
  3989. {
  3990. int i;
  3991. struct skd_device *skdev;
  3992. skdev = pci_get_drvdata(pdev);
  3993. if (!skdev) {
  3994. pr_err("%s: no device data for PCI\n", pci_name(pdev));
  3995. return;
  3996. }
  3997. skd_stop_device(skdev);
  3998. skd_release_irq(skdev);
  3999. for (i = 0; i < SKD_MAX_BARS; i++)
  4000. if (skdev->mem_map[i])
  4001. iounmap((u32 *)skdev->mem_map[i]);
  4002. if (skdev->pcie_error_reporting_is_enabled)
  4003. pci_disable_pcie_error_reporting(pdev);
  4004. skd_destruct(skdev);
  4005. pci_release_regions(pdev);
  4006. pci_disable_device(pdev);
  4007. pci_set_drvdata(pdev, NULL);
  4008. return;
  4009. }
  4010. static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
  4011. {
  4012. int i;
  4013. struct skd_device *skdev;
  4014. skdev = pci_get_drvdata(pdev);
  4015. if (!skdev) {
  4016. pr_err("%s: no device data for PCI\n", pci_name(pdev));
  4017. return -EIO;
  4018. }
  4019. skd_stop_device(skdev);
  4020. skd_release_irq(skdev);
  4021. for (i = 0; i < SKD_MAX_BARS; i++)
  4022. if (skdev->mem_map[i])
  4023. iounmap((u32 *)skdev->mem_map[i]);
  4024. if (skdev->pcie_error_reporting_is_enabled)
  4025. pci_disable_pcie_error_reporting(pdev);
  4026. pci_release_regions(pdev);
  4027. pci_save_state(pdev);
  4028. pci_disable_device(pdev);
  4029. pci_set_power_state(pdev, pci_choose_state(pdev, state));
  4030. return 0;
  4031. }
  4032. static int skd_pci_resume(struct pci_dev *pdev)
  4033. {
  4034. int i;
  4035. int rc = 0;
  4036. struct skd_device *skdev;
  4037. skdev = pci_get_drvdata(pdev);
  4038. if (!skdev) {
  4039. pr_err("%s: no device data for PCI\n", pci_name(pdev));
  4040. return -1;
  4041. }
  4042. pci_set_power_state(pdev, PCI_D0);
  4043. pci_enable_wake(pdev, PCI_D0, 0);
  4044. pci_restore_state(pdev);
  4045. rc = pci_enable_device(pdev);
  4046. if (rc)
  4047. return rc;
  4048. rc = pci_request_regions(pdev, DRV_NAME);
  4049. if (rc)
  4050. goto err_out;
  4051. rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
  4052. if (!rc) {
  4053. if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
  4054. pr_err("(%s): consistent DMA mask error %d\n",
  4055. pci_name(pdev), rc);
  4056. }
  4057. } else {
  4058. rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
  4059. if (rc) {
  4060. pr_err("(%s): DMA mask error %d\n",
  4061. pci_name(pdev), rc);
  4062. goto err_out_regions;
  4063. }
  4064. }
	pci_set_master(pdev);

	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc) {
		pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
		       skdev->name, rc);
		skdev->pcie_error_reporting_is_enabled = 0;
	} else
		skdev->pcie_error_reporting_is_enabled = 1;

	for (i = 0; i < SKD_MAX_BARS; i++) {
		skdev->mem_phys[i] = pci_resource_start(pdev, i);
		skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
		skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
					    skdev->mem_size[i]);
		if (!skdev->mem_map[i]) {
			pr_err("(%s): Unable to map adapter memory!\n",
			       skd_name(skdev));
			rc = -ENODEV;
			goto err_out_iounmap;
		}
		pr_debug("%s:%s:%d mem_map=%p, phys=%016llx, size=%d\n",
			 skdev->name, __func__, __LINE__,
			 skdev->mem_map[i],
			 (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
	}

	rc = skd_acquire_irq(skdev);
	if (rc) {
		pr_err("(%s): interrupt resource error %d\n",
		       pci_name(pdev), rc);
		goto err_out_iounmap;
	}

	rc = skd_start_timer(skdev);
	if (rc)
		goto err_out_timer;

	init_waitqueue_head(&skdev->waitq);

	skd_start_device(skdev);

	return rc;

err_out_timer:
	skd_stop_device(skdev);
	skd_release_irq(skdev);

err_out_iounmap:
	for (i = 0; i < SKD_MAX_BARS; i++)
		if (skdev->mem_map[i])
			iounmap(skdev->mem_map[i]);

	if (skdev->pcie_error_reporting_is_enabled)
		pci_disable_pcie_error_reporting(pdev);

err_out_regions:
	pci_release_regions(pdev);

err_out:
	pci_disable_device(pdev);
	return rc;
}

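/*
 * skd_pci_shutdown - called on system shutdown/reboot.
 * Only stops the device so no DMA or interrupts are in flight when the
 * machine goes down; no resources are freed here.
 */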
static void skd_pci_shutdown(struct pci_dev *pdev)
{
	struct skd_device *skdev;

	pr_err("skd_pci_shutdown called\n");

	skdev = pci_get_drvdata(pdev);
	if (!skdev) {
		pr_err("%s: no device data for PCI\n", pci_name(pdev));
		return;
	}

	pr_err("%s: calling stop\n", skd_name(skdev));
	skd_stop_device(skdev);
}

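/*
 * PCI driver glue. Note that this uses the legacy .suspend/.resume
 * callbacks rather than a struct dev_pm_ops.
 */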
static struct pci_driver skd_driver = {
	.name		= DRV_NAME,
	.id_table	= skd_pci_tbl,
	.probe		= skd_pci_probe,
	.remove		= skd_pci_remove,
	.suspend	= skd_pci_suspend,
	.resume		= skd_pci_resume,
	.shutdown	= skd_pci_shutdown,
};

/*
 *****************************************************************************
 * LOGGING SUPPORT
 *****************************************************************************
 */
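/*
 * skd_name - format a "name:serial:[pci-address]" identifier for log
 * messages, falling back to "??" for the serial number until INQUIRY data
 * is valid. The string is rebuilt in skdev->id_str on every call, so the
 * returned pointer is only stable until the next call for this device.
 */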
static const char *skd_name(struct skd_device *skdev)
{
	memset(skdev->id_str, 0, sizeof(skdev->id_str));

	if (skdev->inquiry_is_valid)
		snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
			 skdev->name, skdev->inq_serial_num,
			 pci_name(skdev->pdev));
	else
		snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
			 skdev->name, pci_name(skdev->pdev));

	return skdev->id_str;
}

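/*
 * The *_to_str() helpers below translate the drive firmware state, driver
 * state, FIT message state and request state enums into short strings for
 * log and debug output; unrecognized values map to "???".
 */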
const char *skd_drive_state_to_str(int state)
{
	switch (state) {
	case FIT_SR_DRIVE_OFFLINE:
		return "OFFLINE";
	case FIT_SR_DRIVE_INIT:
		return "INIT";
	case FIT_SR_DRIVE_ONLINE:
		return "ONLINE";
	case FIT_SR_DRIVE_BUSY:
		return "BUSY";
	case FIT_SR_DRIVE_FAULT:
		return "FAULT";
	case FIT_SR_DRIVE_DEGRADED:
		return "DEGRADED";
	case FIT_SR_PCIE_LINK_DOWN:
		return "LINK_DOWN";
	case FIT_SR_DRIVE_SOFT_RESET:
		return "SOFT_RESET";
	case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
		return "NEED_FW";
	case FIT_SR_DRIVE_INIT_FAULT:
		return "INIT_FAULT";
	case FIT_SR_DRIVE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case FIT_SR_DRIVE_BUSY_ERASE:
		return "BUSY_ERASE";
	case FIT_SR_DRIVE_FW_BOOTING:
		return "FW_BOOTING";
	default:
		return "???";
	}
}

const char *skd_skdev_state_to_str(enum skd_drvr_state state)
{
	switch (state) {
	case SKD_DRVR_STATE_LOAD:
		return "LOAD";
	case SKD_DRVR_STATE_IDLE:
		return "IDLE";
	case SKD_DRVR_STATE_BUSY:
		return "BUSY";
	case SKD_DRVR_STATE_STARTING:
		return "STARTING";
	case SKD_DRVR_STATE_ONLINE:
		return "ONLINE";
	case SKD_DRVR_STATE_PAUSING:
		return "PAUSING";
	case SKD_DRVR_STATE_PAUSED:
		return "PAUSED";
	case SKD_DRVR_STATE_DRAINING_TIMEOUT:
		return "DRAINING_TIMEOUT";
	case SKD_DRVR_STATE_RESTARTING:
		return "RESTARTING";
	case SKD_DRVR_STATE_RESUMING:
		return "RESUMING";
	case SKD_DRVR_STATE_STOPPING:
		return "STOPPING";
	case SKD_DRVR_STATE_SYNCING:
		return "SYNCING";
	case SKD_DRVR_STATE_FAULT:
		return "FAULT";
	case SKD_DRVR_STATE_DISAPPEARED:
		return "DISAPPEARED";
	case SKD_DRVR_STATE_BUSY_ERASE:
		return "BUSY_ERASE";
	case SKD_DRVR_STATE_BUSY_SANITIZE:
		return "BUSY_SANITIZE";
	case SKD_DRVR_STATE_BUSY_IMMINENT:
		return "BUSY_IMMINENT";
	case SKD_DRVR_STATE_WAIT_BOOT:
		return "WAIT_BOOT";
	default:
		return "???";
	}
}

static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
{
	switch (state) {
	case SKD_MSG_STATE_IDLE:
		return "IDLE";
	case SKD_MSG_STATE_BUSY:
		return "BUSY";
	default:
		return "???";
	}
}

static const char *skd_skreq_state_to_str(enum skd_req_state state)
{
	switch (state) {
	case SKD_REQ_STATE_IDLE:
		return "IDLE";
	case SKD_REQ_STATE_SETUP:
		return "SETUP";
	case SKD_REQ_STATE_BUSY:
		return "BUSY";
	case SKD_REQ_STATE_COMPLETED:
		return "COMPLETED";
	case SKD_REQ_STATE_TIMEOUT:
		return "TIMEOUT";
	case SKD_REQ_STATE_ABORTED:
		return "ABORTED";
	default:
		return "???";
	}
}

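/*
 * skd_log_skdev/skd_log_skmsg/skd_log_skreq dump the interesting fields of
 * the device, FIT message and request contexts via pr_debug(), so they only
 * produce output when pr_debug() is enabled (e.g. through dynamic debug).
 */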
static void skd_log_skdev(struct skd_device *skdev, const char *event)
{
	pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n",
		 skdev->name, __func__, __LINE__, skdev->name, skdev, event);
	pr_debug("%s:%s:%d drive_state=%s(%d) driver_state=%s(%d)\n",
		 skdev->name, __func__, __LINE__,
		 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
		 skd_skdev_state_to_str(skdev->state), skdev->state);
	pr_debug("%s:%s:%d busy=%d limit=%d dev=%d lowat=%d\n",
		 skdev->name, __func__, __LINE__,
		 skdev->in_flight, skdev->cur_max_queue_depth,
		 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
	pr_debug("%s:%s:%d timestamp=0x%x cycle=%d cycle_ix=%d\n",
		 skdev->name, __func__, __LINE__,
		 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
}

static void skd_log_skmsg(struct skd_device *skdev,
			  struct skd_fitmsg_context *skmsg, const char *event)
{
	pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n",
		 skdev->name, __func__, __LINE__, skdev->name, skmsg, event);
	pr_debug("%s:%s:%d state=%s(%d) id=0x%04x length=%d\n",
		 skdev->name, __func__, __LINE__,
		 skd_skmsg_state_to_str(skmsg->state), skmsg->state,
		 skmsg->id, skmsg->length);
}

static void skd_log_skreq(struct skd_device *skdev,
			  struct skd_request_context *skreq, const char *event)
{
	pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n",
		 skdev->name, __func__, __LINE__, skdev->name, skreq, event);
	pr_debug("%s:%s:%d state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
		 skdev->name, __func__, __LINE__,
		 skd_skreq_state_to_str(skreq->state), skreq->state,
		 skreq->id, skreq->fitmsg_id);
	pr_debug("%s:%s:%d timo=0x%x sg_dir=%d n_sg=%d\n",
		 skdev->name, __func__, __LINE__,
		 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);

	if (skreq->req != NULL) {
		struct request *req = skreq->req;
		u32 lba = (u32)blk_rq_pos(req);
		u32 count = blk_rq_sectors(req);

		pr_debug("%s:%s:%d "
			 "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
			 skdev->name, __func__, __LINE__,
			 req, lba, lba, count, count,
			 (int)rq_data_dir(req));
	} else
		pr_debug("%s:%s:%d req=NULL\n",
			 skdev->name, __func__, __LINE__);
}

/*
 *****************************************************************************
 * MODULE GLUE
 *****************************************************************************
 */
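/*
 * skd_init - validate the module parameters, clamping anything out of
 * range back to its default, then register the PCI driver. Assuming the
 * parameters are exported under these names via module_param() earlier in
 * this file, a load might look like:
 *
 *   modprobe skd skd_max_queue_depth=64 skd_sgs_per_request=256
 *
 * (illustrative values only; both are clamped to the ranges checked below).
 */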
static int __init skd_init(void)
{
	pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);

	switch (skd_isr_type) {
	case SKD_IRQ_LEGACY:
	case SKD_IRQ_MSI:
	case SKD_IRQ_MSIX:
		break;
	default:
		pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
		       skd_isr_type, SKD_IRQ_DEFAULT);
		skd_isr_type = SKD_IRQ_DEFAULT;
	}

	if (skd_max_queue_depth < 1 ||
	    skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
		pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
		       skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
		skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
	}

	if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
		pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
		       skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
		skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
	}

	if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
		pr_err(PFX "skd_sgs_per_request %d invalid, re-set to %d\n",
		       skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
		skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
	}

	if (skd_dbg_level < 0 || skd_dbg_level > 2) {
		pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
		       skd_dbg_level, 0);
		skd_dbg_level = 0;
	}

	if (skd_isr_comp_limit < 0) {
		pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
		       skd_isr_comp_limit, 0);
		skd_isr_comp_limit = 0;
	}

	if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
		pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
		       skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
		skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
	}

	return pci_register_driver(&skd_driver);
}

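/*
 * skd_exit - unregister the PCI driver and, if a block major was
 * allocated during load, give it back.
 */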
static void __exit skd_exit(void)
{
	pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);

	pci_unregister_driver(&skd_driver);

	if (skd_major)
		unregister_blkdev(skd_major, DRV_NAME);
}

module_init(skd_init);
module_exit(skd_exit);