skd_main.c

  1. /* Copyright 2012 STEC, Inc.
  2. *
  3. * This file is licensed under the terms of the 3-clause
  4. * BSD License (http://opensource.org/licenses/BSD-3-Clause)
  5. * or the GNU GPL-2.0 (http://www.gnu.org/licenses/gpl-2.0.html),
  6. * at your option. Both licenses are also available in the LICENSE file
  7. * distributed with this project. This file may not be copied, modified,
  8. * or distributed except in accordance with those terms.
  9. * Gordoni Waidhofer <gwaidhofer@stec-inc.com>
  10. * Initial Driver Design!
  11. * Thomas Swann <tswann@stec-inc.com>
  12. * Interrupt handling.
  13. * Ramprasad Chinthekindi <rchinthekindi@stec-inc.com>
  14. * biomode implementation.
  15. * Akhil Bhansali <abhansali@stec-inc.com>
  16. * Added support for DISCARD / FLUSH and FUA.
  17. */
  18. #include <linux/kernel.h>
  19. #include <linux/module.h>
  20. #include <linux/init.h>
  21. #include <linux/pci.h>
  22. #include <linux/slab.h>
  23. #include <linux/spinlock.h>
  24. #include <linux/blkdev.h>
  25. #include <linux/sched.h>
  26. #include <linux/interrupt.h>
  27. #include <linux/compiler.h>
  28. #include <linux/workqueue.h>
  29. #include <linux/bitops.h>
  30. #include <linux/delay.h>
  31. #include <linux/time.h>
  32. #include <linux/hdreg.h>
  33. #include <linux/dma-mapping.h>
  34. #include <linux/completion.h>
  35. #include <linux/scatterlist.h>
  36. #include <linux/version.h>
  37. #include <linux/err.h>
  38. #include <linux/aer.h>
  39. #include <linux/ctype.h>
  40. #include <linux/wait.h>
  41. #include <linux/uio.h>
  42. #include <scsi/scsi.h>
  43. #include <scsi/sg.h>
  44. #include <linux/io.h>
  45. #include <linux/uaccess.h>
  46. #include <asm/unaligned.h>
  47. #include "skd_s1120.h"
  48. static int skd_dbg_level;
  49. static int skd_isr_comp_limit = 4;
  50. enum {
  51. STEC_LINK_2_5GTS = 0,
  52. STEC_LINK_5GTS = 1,
  53. STEC_LINK_8GTS = 2,
  54. STEC_LINK_UNKNOWN = 0xFF
  55. };
  56. enum {
  57. SKD_FLUSH_INITIALIZER,
  58. SKD_FLUSH_ZERO_SIZE_FIRST,
  59. SKD_FLUSH_DATA_SECOND,
  60. };
  61. #define SKD_ASSERT(expr) \
  62. do { \
  63. if (unlikely(!(expr))) { \
  64. pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
  65. # expr, __FILE__, __func__, __LINE__); \
  66. } \
  67. } while (0)
  68. #define DRV_NAME "skd"
  69. #define DRV_VERSION "2.2.1"
  70. #define DRV_BUILD_ID "0260"
  71. #define PFX DRV_NAME ": "
  72. #define DRV_BIN_VERSION 0x100
  73. #define DRV_VER_COMPL "2.2.1." DRV_BUILD_ID
  74. MODULE_AUTHOR("bug-reports: support@stec-inc.com");
  75. MODULE_LICENSE("Dual BSD/GPL");
  76. MODULE_DESCRIPTION("STEC s1120 PCIe SSD block driver (b" DRV_BUILD_ID ")");
  77. MODULE_VERSION(DRV_VERSION "-" DRV_BUILD_ID);
  78. #define PCI_VENDOR_ID_STEC 0x1B39
  79. #define PCI_DEVICE_ID_S1120 0x0001
  80. #define SKD_FUA_NV (1 << 1)
  81. #define SKD_MINORS_PER_DEVICE 16
  82. #define SKD_MAX_QUEUE_DEPTH 200u
  83. #define SKD_PAUSE_TIMEOUT (5 * 1000)
  84. #define SKD_N_FITMSG_BYTES (512u)
  85. #define SKD_N_SPECIAL_CONTEXT 32u
  86. #define SKD_N_SPECIAL_FITMSG_BYTES (128u)
  87. /* SG elements are 32 bytes, so we can make this 4096 and still be under the
  88. * 128KB limit. That allows 4096*4K = 16M xfer size
  89. */
  90. #define SKD_N_SG_PER_REQ_DEFAULT 256u
  91. #define SKD_N_SG_PER_SPECIAL 256u
  92. #define SKD_N_COMPLETION_ENTRY 256u
  93. #define SKD_N_READ_CAP_BYTES (8u)
  94. #define SKD_N_INTERNAL_BYTES (512u)
  95. /* 5 bits of uniquifier, 0xF800 */
  96. #define SKD_ID_INCR (0x400)
  97. #define SKD_ID_TABLE_MASK (3u << 8u)
  98. #define SKD_ID_RW_REQUEST (0u << 8u)
  99. #define SKD_ID_INTERNAL (1u << 8u)
  100. #define SKD_ID_SPECIAL_REQUEST (2u << 8u)
  101. #define SKD_ID_FIT_MSG (3u << 8u)
  102. #define SKD_ID_SLOT_MASK 0x00FFu
  103. #define SKD_ID_SLOT_AND_TABLE_MASK 0x03FFu
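/*
 * Context id layout, as used by the masks above: the low byte selects a
 * slot within a table, bits 8-9 select the table (r/w request, internal,
 * special request, FIT msg), and the bits above
 * SKD_ID_SLOT_AND_TABLE_MASK form a generation count that is bumped by
 * SKD_ID_INCR whenever a context is taken from or returned to a free
 * list. As the SKD_ASSERT checks below expect, a context sitting on a
 * free list has the SKD_ID_INCR bit clear.
 */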
  104. #define SKD_N_TIMEOUT_SLOT 4u
  105. #define SKD_TIMEOUT_SLOT_MASK 3u
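/*
 * Timeout bookkeeping: each issued request is stamped with the current
 * skdev->timeout_stamp and counted in one of the SKD_N_TIMEOUT_SLOT
 * buckets. The one-second timer advances the stamp; if the bucket it
 * wraps back onto is still non-zero, requests issued a full revolution
 * of the wheel ago have not completed and are treated as overdue (see
 * skd_timer_tick).
 */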
  106. #define SKD_N_MAX_SECTORS 2048u
  107. #define SKD_MAX_RETRIES 2u
  108. #define SKD_TIMER_SECONDS(seconds) (seconds)
  109. #define SKD_TIMER_MINUTES(minutes) ((minutes) * (60))
  110. #define INQ_STD_NBYTES 36
  111. enum skd_drvr_state {
  112. SKD_DRVR_STATE_LOAD,
  113. SKD_DRVR_STATE_IDLE,
  114. SKD_DRVR_STATE_BUSY,
  115. SKD_DRVR_STATE_STARTING,
  116. SKD_DRVR_STATE_ONLINE,
  117. SKD_DRVR_STATE_PAUSING,
  118. SKD_DRVR_STATE_PAUSED,
  119. SKD_DRVR_STATE_DRAINING_TIMEOUT,
  120. SKD_DRVR_STATE_RESTARTING,
  121. SKD_DRVR_STATE_RESUMING,
  122. SKD_DRVR_STATE_STOPPING,
  123. SKD_DRVR_STATE_FAULT,
  124. SKD_DRVR_STATE_DISAPPEARED,
  125. SKD_DRVR_STATE_PROTOCOL_MISMATCH,
  126. SKD_DRVR_STATE_BUSY_ERASE,
  127. SKD_DRVR_STATE_BUSY_SANITIZE,
  128. SKD_DRVR_STATE_BUSY_IMMINENT,
  129. SKD_DRVR_STATE_WAIT_BOOT,
  130. SKD_DRVR_STATE_SYNCING,
  131. };
  132. #define SKD_WAIT_BOOT_TIMO SKD_TIMER_SECONDS(90u)
  133. #define SKD_STARTING_TIMO SKD_TIMER_SECONDS(8u)
  134. #define SKD_RESTARTING_TIMO SKD_TIMER_MINUTES(4u)
  135. #define SKD_DRAINING_TIMO SKD_TIMER_SECONDS(6u)
  136. #define SKD_BUSY_TIMO SKD_TIMER_MINUTES(20u)
  137. #define SKD_STARTED_BUSY_TIMO SKD_TIMER_SECONDS(60u)
  138. #define SKD_START_WAIT_SECONDS 90u
  139. enum skd_req_state {
  140. SKD_REQ_STATE_IDLE,
  141. SKD_REQ_STATE_SETUP,
  142. SKD_REQ_STATE_BUSY,
  143. SKD_REQ_STATE_COMPLETED,
  144. SKD_REQ_STATE_TIMEOUT,
  145. SKD_REQ_STATE_ABORTED,
  146. };
  147. enum skd_fit_msg_state {
  148. SKD_MSG_STATE_IDLE,
  149. SKD_MSG_STATE_BUSY,
  150. };
  151. enum skd_check_status_action {
  152. SKD_CHECK_STATUS_REPORT_GOOD,
  153. SKD_CHECK_STATUS_REPORT_SMART_ALERT,
  154. SKD_CHECK_STATUS_REQUEUE_REQUEST,
  155. SKD_CHECK_STATUS_REPORT_ERROR,
  156. SKD_CHECK_STATUS_BUSY_IMMINENT,
  157. };
  158. struct skd_fitmsg_context {
  159. enum skd_fit_msg_state state;
  160. struct skd_fitmsg_context *next;
  161. u32 id;
  162. u16 outstanding;
  163. u32 length;
  164. u32 offset;
  165. u8 *msg_buf;
  166. dma_addr_t mb_dma_address;
  167. };
  168. struct skd_request_context {
  169. enum skd_req_state state;
  170. struct skd_request_context *next;
  171. u16 id;
  172. u32 fitmsg_id;
  173. struct request *req;
  174. u8 flush_cmd;
  175. u32 timeout_stamp;
  176. u8 sg_data_dir;
  177. struct scatterlist *sg;
  178. u32 n_sg;
  179. u32 sg_byte_count;
  180. struct fit_sg_descriptor *sksg_list;
  181. dma_addr_t sksg_dma_address;
  182. struct fit_completion_entry_v1 completion;
  183. struct fit_comp_error_info err_info;
  184. };
  185. #define SKD_DATA_DIR_HOST_TO_CARD 1
  186. #define SKD_DATA_DIR_CARD_TO_HOST 2
  187. struct skd_special_context {
  188. struct skd_request_context req;
  189. u8 orphaned;
  190. void *data_buf;
  191. dma_addr_t db_dma_address;
  192. u8 *msg_buf;
  193. dma_addr_t mb_dma_address;
  194. };
  195. struct skd_sg_io {
  196. fmode_t mode;
  197. void __user *argp;
  198. struct sg_io_hdr sg;
  199. u8 cdb[16];
  200. u32 dxfer_len;
  201. u32 iovcnt;
  202. struct sg_iovec *iov;
  203. struct sg_iovec no_iov_iov;
  204. struct skd_special_context *skspcl;
  205. };
  206. typedef enum skd_irq_type {
  207. SKD_IRQ_LEGACY,
  208. SKD_IRQ_MSI,
  209. SKD_IRQ_MSIX
  210. } skd_irq_type_t;
  211. #define SKD_MAX_BARS 2
  212. struct skd_device {
  213. volatile void __iomem *mem_map[SKD_MAX_BARS];
  214. resource_size_t mem_phys[SKD_MAX_BARS];
  215. u32 mem_size[SKD_MAX_BARS];
  216. struct skd_msix_entry *msix_entries;
  217. struct pci_dev *pdev;
  218. int pcie_error_reporting_is_enabled;
  219. spinlock_t lock;
  220. struct gendisk *disk;
  221. struct request_queue *queue;
  222. struct device *class_dev;
  223. int gendisk_on;
  224. int sync_done;
  225. atomic_t device_count;
  226. u32 devno;
  227. u32 major;
  228. char name[32];
  229. char isr_name[30];
  230. enum skd_drvr_state state;
  231. u32 drive_state;
  232. u32 in_flight;
  233. u32 cur_max_queue_depth;
  234. u32 queue_low_water_mark;
  235. u32 dev_max_queue_depth;
  236. u32 num_fitmsg_context;
  237. u32 num_req_context;
  238. u32 timeout_slot[SKD_N_TIMEOUT_SLOT];
  239. u32 timeout_stamp;
  240. struct skd_fitmsg_context *skmsg_free_list;
  241. struct skd_fitmsg_context *skmsg_table;
  242. struct skd_request_context *skreq_free_list;
  243. struct skd_request_context *skreq_table;
  244. struct skd_special_context *skspcl_free_list;
  245. struct skd_special_context *skspcl_table;
  246. struct skd_special_context internal_skspcl;
  247. u32 read_cap_blocksize;
  248. u32 read_cap_last_lba;
  249. int read_cap_is_valid;
  250. int inquiry_is_valid;
  251. u8 inq_serial_num[13]; /*12 chars plus null term */
  252. u8 id_str[80]; /* holds a composite name (pci + sernum) */
  253. u8 skcomp_cycle;
  254. u32 skcomp_ix;
  255. struct fit_completion_entry_v1 *skcomp_table;
  256. struct fit_comp_error_info *skerr_table;
  257. dma_addr_t cq_dma_address;
  258. wait_queue_head_t waitq;
  259. struct timer_list timer;
  260. u32 timer_countdown;
  261. u32 timer_substate;
  262. int n_special;
  263. int sgs_per_request;
  264. u32 last_mtd;
  265. u32 proto_ver;
  266. int dbg_level;
  267. u32 connect_time_stamp;
  268. int connect_retries;
  269. #define SKD_MAX_CONNECT_RETRIES 16
  270. u32 drive_jiffies;
  271. u32 timo_slot;
  272. struct work_struct completion_worker;
  273. };
  274. #define SKD_WRITEL(DEV, VAL, OFF) skd_reg_write32(DEV, VAL, OFF)
  275. #define SKD_READL(DEV, OFF) skd_reg_read32(DEV, OFF)
  276. #define SKD_WRITEQ(DEV, VAL, OFF) skd_reg_write64(DEV, VAL, OFF)
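/*
 * All device register accesses go through BAR 1 (mem_map[1]). When
 * skd_dbg_level >= 2 the accessors below bracket each access with
 * barriers and emit a pr_debug trace; otherwise they reduce to plain
 * readl/writel/writeq (the writes keep a trailing barrier()).
 */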
  277. static inline u32 skd_reg_read32(struct skd_device *skdev, u32 offset)
  278. {
  279. u32 val;
  280. if (likely(skdev->dbg_level < 2))
  281. return readl(skdev->mem_map[1] + offset);
  282. else {
  283. barrier();
  284. val = readl(skdev->mem_map[1] + offset);
  285. barrier();
  286. pr_debug("%s:%s:%d offset %x = %x\n",
  287. skdev->name, __func__, __LINE__, offset, val);
  288. return val;
  289. }
  290. }
  291. static inline void skd_reg_write32(struct skd_device *skdev, u32 val,
  292. u32 offset)
  293. {
  294. if (likely(skdev->dbg_level < 2)) {
  295. writel(val, skdev->mem_map[1] + offset);
  296. barrier();
  297. } else {
  298. barrier();
  299. writel(val, skdev->mem_map[1] + offset);
  300. barrier();
  301. pr_debug("%s:%s:%d offset %x = %x\n",
  302. skdev->name, __func__, __LINE__, offset, val);
  303. }
  304. }
  305. static inline void skd_reg_write64(struct skd_device *skdev, u64 val,
  306. u32 offset)
  307. {
  308. if (likely(skdev->dbg_level < 2)) {
  309. writeq(val, skdev->mem_map[1] + offset);
  310. barrier();
  311. } else {
  312. barrier();
  313. writeq(val, skdev->mem_map[1] + offset);
  314. barrier();
  315. pr_debug("%s:%s:%d offset %x = %016llx\n",
  316. skdev->name, __func__, __LINE__, offset, val);
  317. }
  318. }
  319. #define SKD_IRQ_DEFAULT SKD_IRQ_MSI
  320. static int skd_isr_type = SKD_IRQ_DEFAULT;
  321. module_param(skd_isr_type, int, 0444);
  322. MODULE_PARM_DESC(skd_isr_type, "Interrupt type capability."
  323. " (0==legacy, 1==MSI, 2==MSI-X, default==1)");
  324. #define SKD_MAX_REQ_PER_MSG_DEFAULT 1
  325. static int skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
  326. module_param(skd_max_req_per_msg, int, 0444);
  327. MODULE_PARM_DESC(skd_max_req_per_msg,
  328. "Maximum SCSI requests packed in a single message."
  329. " (1-14, default==1)");
  330. #define SKD_MAX_QUEUE_DEPTH_DEFAULT 64
  331. #define SKD_MAX_QUEUE_DEPTH_DEFAULT_STR "64"
  332. static int skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
  333. module_param(skd_max_queue_depth, int, 0444);
  334. MODULE_PARM_DESC(skd_max_queue_depth,
  335. "Maximum SCSI requests issued to s1120."
  336. " (1-200, default==" SKD_MAX_QUEUE_DEPTH_DEFAULT_STR ")");
  337. static int skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
  338. module_param(skd_sgs_per_request, int, 0444);
  339. MODULE_PARM_DESC(skd_sgs_per_request,
  340. "Maximum SG elements per block request."
  341. " (1-4096, default==256)");
  342. static int skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
  343. module_param(skd_max_pass_thru, int, 0444);
  344. MODULE_PARM_DESC(skd_max_pass_thru,
  345. "Maximum SCSI pass-thru at a time." " (1-50, default==32)");
  346. module_param(skd_dbg_level, int, 0444);
  347. MODULE_PARM_DESC(skd_dbg_level, "s1120 debug level (0,1,2)");
  348. module_param(skd_isr_comp_limit, int, 0444);
  349. MODULE_PARM_DESC(skd_isr_comp_limit, "s1120 isr comp limit (0=none) default=4");
  350. /* Major device number dynamically assigned. */
  351. static u32 skd_major;
  352. static void skd_destruct(struct skd_device *skdev);
  353. static const struct block_device_operations skd_blockdev_ops;
  354. static void skd_send_fitmsg(struct skd_device *skdev,
  355. struct skd_fitmsg_context *skmsg);
  356. static void skd_send_special_fitmsg(struct skd_device *skdev,
  357. struct skd_special_context *skspcl);
  358. static void skd_request_fn(struct request_queue *rq);
  359. static void skd_end_request(struct skd_device *skdev,
  360. struct skd_request_context *skreq, blk_status_t status);
  361. static bool skd_preop_sg_list(struct skd_device *skdev,
  362. struct skd_request_context *skreq);
  363. static void skd_postop_sg_list(struct skd_device *skdev,
  364. struct skd_request_context *skreq);
  365. static void skd_restart_device(struct skd_device *skdev);
  366. static int skd_quiesce_dev(struct skd_device *skdev);
  367. static int skd_unquiesce_dev(struct skd_device *skdev);
  368. static void skd_release_special(struct skd_device *skdev,
  369. struct skd_special_context *skspcl);
  370. static void skd_disable_interrupts(struct skd_device *skdev);
  371. static void skd_isr_fwstate(struct skd_device *skdev);
  372. static void skd_recover_requests(struct skd_device *skdev, int requeue);
  373. static void skd_soft_reset(struct skd_device *skdev);
  374. static const char *skd_name(struct skd_device *skdev);
  375. const char *skd_drive_state_to_str(int state);
  376. const char *skd_skdev_state_to_str(enum skd_drvr_state state);
  377. static void skd_log_skdev(struct skd_device *skdev, const char *event);
  378. static void skd_log_skmsg(struct skd_device *skdev,
  379. struct skd_fitmsg_context *skmsg, const char *event);
  380. static void skd_log_skreq(struct skd_device *skdev,
  381. struct skd_request_context *skreq, const char *event);
  382. /*
  383. *****************************************************************************
  384. * READ/WRITE REQUESTS
  385. *****************************************************************************
  386. */
  387. static void skd_fail_all_pending(struct skd_device *skdev)
  388. {
  389. struct request_queue *q = skdev->queue;
  390. struct request *req;
  391. for (;; ) {
  392. req = blk_peek_request(q);
  393. if (req == NULL)
  394. break;
  395. blk_start_request(req);
  396. __blk_end_request_all(req, BLK_STS_IOERR);
  397. }
  398. }
  399. static void
  400. skd_prep_rw_cdb(struct skd_scsi_request *scsi_req,
  401. int data_dir, unsigned lba,
  402. unsigned count)
  403. {
  404. if (data_dir == READ)
  405. scsi_req->cdb[0] = 0x28;
  406. else
  407. scsi_req->cdb[0] = 0x2a;
  408. scsi_req->cdb[1] = 0;
  409. scsi_req->cdb[2] = (lba & 0xff000000) >> 24;
  410. scsi_req->cdb[3] = (lba & 0xff0000) >> 16;
  411. scsi_req->cdb[4] = (lba & 0xff00) >> 8;
  412. scsi_req->cdb[5] = (lba & 0xff);
  413. scsi_req->cdb[6] = 0;
  414. scsi_req->cdb[7] = (count & 0xff00) >> 8;
  415. scsi_req->cdb[8] = count & 0xff;
  416. scsi_req->cdb[9] = 0;
  417. }
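/*
 * skd_prep_rw_cdb() builds a standard 10-byte SCSI READ(10)/WRITE(10)
 * CDB: opcode 0x28 or 0x2a, big-endian LBA in bytes 2-5, and a
 * big-endian transfer length (in sectors) in bytes 7-8.
 */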
  418. static void
  419. skd_prep_zerosize_flush_cdb(struct skd_scsi_request *scsi_req,
  420. struct skd_request_context *skreq)
  421. {
  422. skreq->flush_cmd = 1;
  423. scsi_req->cdb[0] = 0x35;
  424. scsi_req->cdb[1] = 0;
  425. scsi_req->cdb[2] = 0;
  426. scsi_req->cdb[3] = 0;
  427. scsi_req->cdb[4] = 0;
  428. scsi_req->cdb[5] = 0;
  429. scsi_req->cdb[6] = 0;
  430. scsi_req->cdb[7] = 0;
  431. scsi_req->cdb[8] = 0;
  432. scsi_req->cdb[9] = 0;
  433. }
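/*
 * Opcode 0x35 is SYNCHRONIZE CACHE(10); with every other CDB byte zero
 * it asks the device to flush everything. skreq->flush_cmd tags the
 * request as a flush for the rest of the driver.
 */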
  434. static void skd_request_fn_not_online(struct request_queue *q);
  435. static void skd_request_fn(struct request_queue *q)
  436. {
  437. struct skd_device *skdev = q->queuedata;
  438. struct skd_fitmsg_context *skmsg = NULL;
  439. struct fit_msg_hdr *fmh = NULL;
  440. struct skd_request_context *skreq;
  441. struct request *req = NULL;
  442. struct skd_scsi_request *scsi_req;
  443. unsigned long io_flags;
  444. u32 lba;
  445. u32 count;
  446. int data_dir;
  447. u32 be_lba;
  448. u32 be_count;
  449. u64 be_dmaa;
  450. u64 cmdctxt;
  451. u32 timo_slot;
  452. void *cmd_ptr;
  453. int flush, fua;
  454. if (skdev->state != SKD_DRVR_STATE_ONLINE) {
  455. skd_request_fn_not_online(q);
  456. return;
  457. }
  458. if (blk_queue_stopped(skdev->queue)) {
  459. if (skdev->skmsg_free_list == NULL ||
  460. skdev->skreq_free_list == NULL ||
  461. skdev->in_flight >= skdev->queue_low_water_mark)
  462. /* There is still some kind of shortage */
  463. return;
  464. queue_flag_clear(QUEUE_FLAG_STOPPED, skdev->queue);
  465. }
  466. /*
  467. * Stop conditions:
  468. * - There are no more native requests
  469. * - There are already the maximum number of requests in progress
  470. * - There are no more skd_request_context entries
  471. * - There are no more FIT msg buffers
  472. */
  473. for (;; ) {
  474. flush = fua = 0;
  475. req = blk_peek_request(q);
  476. /* Are there any native requests to start? */
  477. if (req == NULL)
  478. break;
  479. lba = (u32)blk_rq_pos(req);
  480. count = blk_rq_sectors(req);
  481. data_dir = rq_data_dir(req);
  482. io_flags = req->cmd_flags;
  483. if (req_op(req) == REQ_OP_FLUSH)
  484. flush++;
  485. if (io_flags & REQ_FUA)
  486. fua++;
  487. pr_debug("%s:%s:%d new req=%p lba=%u(0x%x) "
  488. "count=%u(0x%x) dir=%d\n",
  489. skdev->name, __func__, __LINE__,
  490. req, lba, lba, count, count, data_dir);
  491. /* At this point we know there is a request */
  492. /* Are too many requests already in progress? */
  493. if (skdev->in_flight >= skdev->cur_max_queue_depth) {
  494. pr_debug("%s:%s:%d qdepth %d, limit %d\n",
  495. skdev->name, __func__, __LINE__,
  496. skdev->in_flight, skdev->cur_max_queue_depth);
  497. break;
  498. }
  499. /* Is a skd_request_context available? */
  500. skreq = skdev->skreq_free_list;
  501. if (skreq == NULL) {
  502. pr_debug("%s:%s:%d Out of req=%p\n",
  503. skdev->name, __func__, __LINE__, q);
  504. break;
  505. }
  506. SKD_ASSERT(skreq->state == SKD_REQ_STATE_IDLE);
  507. SKD_ASSERT((skreq->id & SKD_ID_INCR) == 0);
  508. /* Now we check to see if we can get a fit msg */
  509. if (skmsg == NULL) {
  510. if (skdev->skmsg_free_list == NULL) {
  511. pr_debug("%s:%s:%d Out of msg\n",
  512. skdev->name, __func__, __LINE__);
  513. break;
  514. }
  515. }
  516. skreq->flush_cmd = 0;
  517. skreq->n_sg = 0;
  518. skreq->sg_byte_count = 0;
  519. /*
  520. * OK to now dequeue request from q.
  521. *
  522. * At this point we are committed to either start or reject
  523. * the native request. Note that skd_request_context is
  524. * available but is still at the head of the free list.
  525. */
  526. blk_start_request(req);
  527. skreq->req = req;
  528. skreq->fitmsg_id = 0;
  529. /* Either a FIT msg is in progress or we have to start one. */
  530. if (skmsg == NULL) {
  531. /* Are there any FIT msg buffers available? */
  532. skmsg = skdev->skmsg_free_list;
  533. if (skmsg == NULL) {
  534. pr_debug("%s:%s:%d Out of msg skdev=%p\n",
  535. skdev->name, __func__, __LINE__,
  536. skdev);
  537. break;
  538. }
  539. SKD_ASSERT(skmsg->state == SKD_MSG_STATE_IDLE);
  540. SKD_ASSERT((skmsg->id & SKD_ID_INCR) == 0);
  541. skdev->skmsg_free_list = skmsg->next;
  542. skmsg->state = SKD_MSG_STATE_BUSY;
  543. skmsg->id += SKD_ID_INCR;
  544. /* Initialize the FIT msg header */
  545. fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
  546. memset(fmh, 0, sizeof(*fmh));
  547. fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
  548. skmsg->length = sizeof(*fmh);
  549. }
  550. skreq->fitmsg_id = skmsg->id;
  551. /*
  552. * Note that a FIT msg may have just been started
  553. * but contains no SoFIT requests yet.
  554. */
  555. /*
  556. * Transcode the request, checking as we go. The outcome of
  557. * the transcoding is represented by the error variable.
  558. */
  559. cmd_ptr = &skmsg->msg_buf[skmsg->length];
  560. memset(cmd_ptr, 0, 32);
  561. be_lba = cpu_to_be32(lba);
  562. be_count = cpu_to_be32(count);
  563. be_dmaa = cpu_to_be64((u64)skreq->sksg_dma_address);
  564. cmdctxt = skreq->id + SKD_ID_INCR;
  565. scsi_req = cmd_ptr;
  566. scsi_req->hdr.tag = cmdctxt;
  567. scsi_req->hdr.sg_list_dma_address = be_dmaa;
  568. if (data_dir == READ)
  569. skreq->sg_data_dir = SKD_DATA_DIR_CARD_TO_HOST;
  570. else
  571. skreq->sg_data_dir = SKD_DATA_DIR_HOST_TO_CARD;
  572. if (flush == SKD_FLUSH_ZERO_SIZE_FIRST) {
  573. skd_prep_zerosize_flush_cdb(scsi_req, skreq);
  574. SKD_ASSERT(skreq->flush_cmd == 1);
  575. } else {
  576. skd_prep_rw_cdb(scsi_req, data_dir, lba, count);
  577. }
  578. if (fua)
  579. scsi_req->cdb[1] |= SKD_FUA_NV;
  580. if (!req->bio)
  581. goto skip_sg;
  582. if (!skd_preop_sg_list(skdev, skreq)) {
  583. /*
  584. * Complete the native request with error.
  585. * Note that the request context is still at the
  586. * head of the free list, and that the SoFIT request
  587. * was encoded into the FIT msg buffer but the FIT
  588. * msg length has not been updated. In short, the
  589. * only resource that has been allocated but might
  590. * not be used is that the FIT msg could be empty.
  591. */
  592. pr_debug("%s:%s:%d error Out\n",
  593. skdev->name, __func__, __LINE__);
  594. skd_end_request(skdev, skreq, BLK_STS_RESOURCE);
  595. continue;
  596. }
  597. skip_sg:
  598. scsi_req->hdr.sg_list_len_bytes =
  599. cpu_to_be32(skreq->sg_byte_count);
  600. /* Complete resource allocations. */
  601. skdev->skreq_free_list = skreq->next;
  602. skreq->state = SKD_REQ_STATE_BUSY;
  603. skreq->id += SKD_ID_INCR;
  604. skmsg->length += sizeof(struct skd_scsi_request);
  605. fmh->num_protocol_cmds_coalesced++;
  606. /*
  607. * Update the active request counts.
  608. * Capture the timeout timestamp.
  609. */
  610. skreq->timeout_stamp = skdev->timeout_stamp;
  611. timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
  612. skdev->timeout_slot[timo_slot]++;
  613. skdev->in_flight++;
  614. pr_debug("%s:%s:%d req=0x%x busy=%d\n",
  615. skdev->name, __func__, __LINE__,
  616. skreq->id, skdev->in_flight);
  617. /*
  618. * If the FIT msg buffer is full send it.
  619. */
  620. if (skmsg->length >= SKD_N_FITMSG_BYTES ||
  621. fmh->num_protocol_cmds_coalesced >= skd_max_req_per_msg) {
  622. skd_send_fitmsg(skdev, skmsg);
  623. skmsg = NULL;
  624. fmh = NULL;
  625. }
  626. }
  627. /*
  628. * Is a FIT msg in progress? If it is empty put the buffer back
  629. * on the free list. If it is non-empty send what we got.
  630. * This minimizes latency when there are fewer requests than
  631. * what fits in a FIT msg.
  632. */
  633. if (skmsg != NULL) {
  634. /* Bigger than just a FIT msg header? */
  635. if (skmsg->length > sizeof(struct fit_msg_hdr)) {
  636. pr_debug("%s:%s:%d sending msg=%p, len %d\n",
  637. skdev->name, __func__, __LINE__,
  638. skmsg, skmsg->length);
  639. skd_send_fitmsg(skdev, skmsg);
  640. } else {
  641. /*
  642. * The FIT msg is empty. It means we got started
  643. * on the msg, but the requests were rejected.
  644. */
  645. skmsg->state = SKD_MSG_STATE_IDLE;
  646. skmsg->id += SKD_ID_INCR;
  647. skmsg->next = skdev->skmsg_free_list;
  648. skdev->skmsg_free_list = skmsg;
  649. }
  650. skmsg = NULL;
  651. fmh = NULL;
  652. }
  653. /*
  654. * If req is non-NULL it means there is something to do but
  655. * we are out of a resource.
  656. */
  657. if (req)
  658. blk_stop_queue(skdev->queue);
  659. }
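/*
 * Flow control: when skd_request_fn() runs out of FIT message buffers,
 * request contexts, or queue depth, it leaves the request at the head
 * of the block queue and calls blk_stop_queue(). The check at the top
 * of skd_request_fn() clears QUEUE_FLAG_STOPPED again only once the
 * free lists are non-empty and in_flight has dropped below
 * queue_low_water_mark.
 */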
  660. static void skd_end_request(struct skd_device *skdev,
  661. struct skd_request_context *skreq, blk_status_t error)
  662. {
  663. if (unlikely(error)) {
  664. struct request *req = skreq->req;
  665. char *cmd = (rq_data_dir(req) == READ) ? "read" : "write";
  666. u32 lba = (u32)blk_rq_pos(req);
  667. u32 count = blk_rq_sectors(req);
  668. pr_err("(%s): Error cmd=%s sect=%u count=%u id=0x%x\n",
  669. skd_name(skdev), cmd, lba, count, skreq->id);
  670. } else
  671. pr_debug("%s:%s:%d id=0x%x error=%d\n",
  672. skdev->name, __func__, __LINE__, skreq->id, error);
  673. __blk_end_request_all(skreq->req, error);
  674. }
  675. static bool skd_preop_sg_list(struct skd_device *skdev,
  676. struct skd_request_context *skreq)
  677. {
  678. struct request *req = skreq->req;
  679. int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
  680. int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
  681. struct scatterlist *sg = &skreq->sg[0];
  682. int n_sg;
  683. int i;
  684. skreq->sg_byte_count = 0;
  685. /* SKD_ASSERT(skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD ||
  686. skreq->sg_data_dir == SKD_DATA_DIR_CARD_TO_HOST); */
  687. n_sg = blk_rq_map_sg(skdev->queue, req, sg);
  688. if (n_sg <= 0)
  689. return false;
  690. /*
  691. * Map scatterlist to PCI bus addresses.
  692. * Note PCI might change the number of entries.
  693. */
  694. n_sg = pci_map_sg(skdev->pdev, sg, n_sg, pci_dir);
  695. if (n_sg <= 0)
  696. return false;
  697. SKD_ASSERT(n_sg <= skdev->sgs_per_request);
  698. skreq->n_sg = n_sg;
  699. for (i = 0; i < n_sg; i++) {
  700. struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
  701. u32 cnt = sg_dma_len(&sg[i]);
  702. uint64_t dma_addr = sg_dma_address(&sg[i]);
  703. sgd->control = FIT_SGD_CONTROL_NOT_LAST;
  704. sgd->byte_count = cnt;
  705. skreq->sg_byte_count += cnt;
  706. sgd->host_side_addr = dma_addr;
  707. sgd->dev_side_addr = 0;
  708. }
  709. skreq->sksg_list[n_sg - 1].next_desc_ptr = 0LL;
  710. skreq->sksg_list[n_sg - 1].control = FIT_SGD_CONTROL_LAST;
  711. if (unlikely(skdev->dbg_level > 1)) {
  712. pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
  713. skdev->name, __func__, __LINE__,
  714. skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
  715. for (i = 0; i < n_sg; i++) {
  716. struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
  717. pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
  718. "addr=0x%llx next=0x%llx\n",
  719. skdev->name, __func__, __LINE__,
  720. i, sgd->byte_count, sgd->control,
  721. sgd->host_side_addr, sgd->next_desc_ptr);
  722. }
  723. }
  724. return true;
  725. }
  726. static void skd_postop_sg_list(struct skd_device *skdev,
  727. struct skd_request_context *skreq)
  728. {
  729. int writing = skreq->sg_data_dir == SKD_DATA_DIR_HOST_TO_CARD;
  730. int pci_dir = writing ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE;
  731. /*
  732. * restore the next ptr for next IO request so we
  733. * don't have to set it every time.
  734. */
  735. skreq->sksg_list[skreq->n_sg - 1].next_desc_ptr =
  736. skreq->sksg_dma_address +
  737. ((skreq->n_sg) * sizeof(struct fit_sg_descriptor));
  738. pci_unmap_sg(skdev->pdev, &skreq->sg[0], skreq->n_sg, pci_dir);
  739. }
  740. static void skd_request_fn_not_online(struct request_queue *q)
  741. {
  742. struct skd_device *skdev = q->queuedata;
  743. int error;
  744. SKD_ASSERT(skdev->state != SKD_DRVR_STATE_ONLINE);
  745. skd_log_skdev(skdev, "req_not_online");
  746. switch (skdev->state) {
  747. case SKD_DRVR_STATE_PAUSING:
  748. case SKD_DRVR_STATE_PAUSED:
  749. case SKD_DRVR_STATE_STARTING:
  750. case SKD_DRVR_STATE_RESTARTING:
  751. case SKD_DRVR_STATE_WAIT_BOOT:
  752. /* In case of starting, we haven't started the queue,
  753. * so we can't get here... but requests are
  754. * possibly hanging out waiting for us because we
  755. * reported the dev/skd0 already. They'll wait
  756. * forever if connect doesn't complete.
  757. * What to do??? delay dev/skd0 ??
  758. */
  759. case SKD_DRVR_STATE_BUSY:
  760. case SKD_DRVR_STATE_BUSY_IMMINENT:
  761. case SKD_DRVR_STATE_BUSY_ERASE:
  762. case SKD_DRVR_STATE_DRAINING_TIMEOUT:
  763. return;
  764. case SKD_DRVR_STATE_BUSY_SANITIZE:
  765. case SKD_DRVR_STATE_STOPPING:
  766. case SKD_DRVR_STATE_SYNCING:
  767. case SKD_DRVR_STATE_FAULT:
  768. case SKD_DRVR_STATE_DISAPPEARED:
  769. default:
  770. error = -EIO;
  771. break;
  772. }
  773. /* If we get here, terminate all pending block requests
  774. * with EIO and any scsi pass thru with appropriate sense
  775. */
  776. skd_fail_all_pending(skdev);
  777. }
  778. /*
  779. *****************************************************************************
  780. * TIMER
  781. *****************************************************************************
  782. */
  783. static void skd_timer_tick_not_online(struct skd_device *skdev);
  784. static void skd_timer_tick(ulong arg)
  785. {
  786. struct skd_device *skdev = (struct skd_device *)arg;
  787. u32 timo_slot;
  788. u32 overdue_timestamp;
  789. unsigned long reqflags;
  790. u32 state;
  791. if (skdev->state == SKD_DRVR_STATE_FAULT)
  792. /* The driver has declared fault, and we want it to
  793. * stay that way until driver is reloaded.
  794. */
  795. return;
  796. spin_lock_irqsave(&skdev->lock, reqflags);
  797. state = SKD_READL(skdev, FIT_STATUS);
  798. state &= FIT_SR_DRIVE_STATE_MASK;
  799. if (state != skdev->drive_state)
  800. skd_isr_fwstate(skdev);
  801. if (skdev->state != SKD_DRVR_STATE_ONLINE) {
  802. skd_timer_tick_not_online(skdev);
  803. goto timer_func_out;
  804. }
  805. skdev->timeout_stamp++;
  806. timo_slot = skdev->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
  807. /*
  808. * All requests that happened during the previous use of
  809. * this slot should be done by now. The previous use was
  810. * over 7 seconds ago.
  811. */
  812. if (skdev->timeout_slot[timo_slot] == 0)
  813. goto timer_func_out;
  814. /* Something is overdue */
  815. overdue_timestamp = skdev->timeout_stamp - SKD_N_TIMEOUT_SLOT;
  816. pr_debug("%s:%s:%d found %d timeouts, draining busy=%d\n",
  817. skdev->name, __func__, __LINE__,
  818. skdev->timeout_slot[timo_slot], skdev->in_flight);
  819. pr_err("(%s): Overdue IOs (%d), busy %d\n",
  820. skd_name(skdev), skdev->timeout_slot[timo_slot],
  821. skdev->in_flight);
  822. skdev->timer_countdown = SKD_DRAINING_TIMO;
  823. skdev->state = SKD_DRVR_STATE_DRAINING_TIMEOUT;
  824. skdev->timo_slot = timo_slot;
  825. blk_stop_queue(skdev->queue);
  826. timer_func_out:
  827. mod_timer(&skdev->timer, (jiffies + HZ));
  828. spin_unlock_irqrestore(&skdev->lock, reqflags);
  829. }
  830. static void skd_timer_tick_not_online(struct skd_device *skdev)
  831. {
  832. switch (skdev->state) {
  833. case SKD_DRVR_STATE_IDLE:
  834. case SKD_DRVR_STATE_LOAD:
  835. break;
  836. case SKD_DRVR_STATE_BUSY_SANITIZE:
  837. pr_debug("%s:%s:%d drive busy sanitize[%x], driver[%x]\n",
  838. skdev->name, __func__, __LINE__,
  839. skdev->drive_state, skdev->state);
  840. /* If we've been in sanitize for 3 seconds, we figure we're not
  841. * going to get any more completions, so recover requests now
  842. */
  843. if (skdev->timer_countdown > 0) {
  844. skdev->timer_countdown--;
  845. return;
  846. }
  847. skd_recover_requests(skdev, 0);
  848. break;
  849. case SKD_DRVR_STATE_BUSY:
  850. case SKD_DRVR_STATE_BUSY_IMMINENT:
  851. case SKD_DRVR_STATE_BUSY_ERASE:
  852. pr_debug("%s:%s:%d busy[%x], countdown=%d\n",
  853. skdev->name, __func__, __LINE__,
  854. skdev->state, skdev->timer_countdown);
  855. if (skdev->timer_countdown > 0) {
  856. skdev->timer_countdown--;
  857. return;
  858. }
  859. pr_debug("%s:%s:%d busy[%x], timedout=%d, restarting device.",
  860. skdev->name, __func__, __LINE__,
  861. skdev->state, skdev->timer_countdown);
  862. skd_restart_device(skdev);
  863. break;
  864. case SKD_DRVR_STATE_WAIT_BOOT:
  865. case SKD_DRVR_STATE_STARTING:
  866. if (skdev->timer_countdown > 0) {
  867. skdev->timer_countdown--;
  868. return;
  869. }
  870. /* For now, we fault the drive. Could attempt resets to
  871. * recover at some point. */
  872. skdev->state = SKD_DRVR_STATE_FAULT;
  873. pr_err("(%s): DriveFault Connect Timeout (%x)\n",
  874. skd_name(skdev), skdev->drive_state);
  875. /* start the queue so we can respond with error to requests */
  876. /* wakeup anyone waiting for startup complete */
  877. blk_start_queue(skdev->queue);
  878. skdev->gendisk_on = -1;
  879. wake_up_interruptible(&skdev->waitq);
  880. break;
  881. case SKD_DRVR_STATE_ONLINE:
  882. /* shouldn't get here. */
  883. break;
  884. case SKD_DRVR_STATE_PAUSING:
  885. case SKD_DRVR_STATE_PAUSED:
  886. break;
  887. case SKD_DRVR_STATE_DRAINING_TIMEOUT:
  888. pr_debug("%s:%s:%d "
  889. "draining busy [%d] tick[%d] qdb[%d] tmls[%d]\n",
  890. skdev->name, __func__, __LINE__,
  891. skdev->timo_slot,
  892. skdev->timer_countdown,
  893. skdev->in_flight,
  894. skdev->timeout_slot[skdev->timo_slot]);
  895. /* if the slot has cleared we can let the I/O continue */
  896. if (skdev->timeout_slot[skdev->timo_slot] == 0) {
  897. pr_debug("%s:%s:%d Slot drained, starting queue.\n",
  898. skdev->name, __func__, __LINE__);
  899. skdev->state = SKD_DRVR_STATE_ONLINE;
  900. blk_start_queue(skdev->queue);
  901. return;
  902. }
  903. if (skdev->timer_countdown > 0) {
  904. skdev->timer_countdown--;
  905. return;
  906. }
  907. skd_restart_device(skdev);
  908. break;
  909. case SKD_DRVR_STATE_RESTARTING:
  910. if (skdev->timer_countdown > 0) {
  911. skdev->timer_countdown--;
  912. return;
  913. }
  914. /* For now, we fault the drive. Could attempt resets to
  915. * recover at some point. */
  916. skdev->state = SKD_DRVR_STATE_FAULT;
  917. pr_err("(%s): DriveFault Reconnect Timeout (%x)\n",
  918. skd_name(skdev), skdev->drive_state);
  919. /*
  920. * Recovering does two things:
  921. * 1. completes IO with error
  922. * 2. reclaims dma resources
  923. * When is it safe to recover requests?
  924. * - if the drive state is faulted
  925. * - if the state is still soft reset after our timeout
  926. * - if the drive registers are dead (state = FF)
  927. * If it is "unsafe", we still need to recover, so we will
  928. * disable pci bus mastering and disable our interrupts.
  929. */
  930. if ((skdev->drive_state == FIT_SR_DRIVE_SOFT_RESET) ||
  931. (skdev->drive_state == FIT_SR_DRIVE_FAULT) ||
  932. (skdev->drive_state == FIT_SR_DRIVE_STATE_MASK))
  933. /* It never came out of soft reset. Try to
  934. * recover the requests and then let them
  935. * fail. This is to mitigate hung processes. */
  936. skd_recover_requests(skdev, 0);
  937. else {
  938. pr_err("(%s): Disable BusMaster (%x)\n",
  939. skd_name(skdev), skdev->drive_state);
  940. pci_disable_device(skdev->pdev);
  941. skd_disable_interrupts(skdev);
  942. skd_recover_requests(skdev, 0);
  943. }
  944. /* start the queue so we can respond with error to requests */
  945. /* wakeup anyone waiting for startup complete */
  946. blk_start_queue(skdev->queue);
  947. skdev->gendisk_on = -1;
  948. wake_up_interruptible(&skdev->waitq);
  949. break;
  950. case SKD_DRVR_STATE_RESUMING:
  951. case SKD_DRVR_STATE_STOPPING:
  952. case SKD_DRVR_STATE_SYNCING:
  953. case SKD_DRVR_STATE_FAULT:
  954. case SKD_DRVR_STATE_DISAPPEARED:
  955. default:
  956. break;
  957. }
  958. }
  959. static int skd_start_timer(struct skd_device *skdev)
  960. {
  961. int rc;
  962. init_timer(&skdev->timer);
  963. setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
  964. rc = mod_timer(&skdev->timer, (jiffies + HZ));
  965. if (rc)
  966. pr_err("%s: failed to start timer %d\n",
  967. __func__, rc);
  968. return rc;
  969. }
  970. static void skd_kill_timer(struct skd_device *skdev)
  971. {
  972. del_timer_sync(&skdev->timer);
  973. }
  974. /*
  975. *****************************************************************************
  976. * IOCTL
  977. *****************************************************************************
  978. */
  979. static int skd_ioctl_sg_io(struct skd_device *skdev,
  980. fmode_t mode, void __user *argp);
  981. static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
  982. struct skd_sg_io *sksgio);
  983. static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
  984. struct skd_sg_io *sksgio);
  985. static int skd_sg_io_prep_buffering(struct skd_device *skdev,
  986. struct skd_sg_io *sksgio);
  987. static int skd_sg_io_copy_buffer(struct skd_device *skdev,
  988. struct skd_sg_io *sksgio, int dxfer_dir);
  989. static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
  990. struct skd_sg_io *sksgio);
  991. static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio);
  992. static int skd_sg_io_release_skspcl(struct skd_device *skdev,
  993. struct skd_sg_io *sksgio);
  994. static int skd_sg_io_put_status(struct skd_device *skdev,
  995. struct skd_sg_io *sksgio);
  996. static void skd_complete_special(struct skd_device *skdev,
  997. volatile struct fit_completion_entry_v1
  998. *skcomp,
  999. volatile struct fit_comp_error_info *skerr,
  1000. struct skd_special_context *skspcl);
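/*
 * skd_bdev_ioctl() exposes a small subset of the SCSI generic (sg)
 * ioctls on the block node: SG_SET_TIMEOUT, SG_GET_TIMEOUT,
 * SG_GET_VERSION_NUM and SG_IO.  For orientation only, a minimal
 * userspace SG_IO caller might look like the hypothetical sketch below
 * (not part of this driver; headers and error handling omitted).  The
 * interface_id must be 'S' (SG_INTERFACE_ID_ORIG), 0x12 is the INQUIRY
 * opcode, and timeout is in milliseconds.
 *
 *	struct sg_io_hdr hdr = { 0 };
 *	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };
 *	unsigned char buf[96], sense[32];
 *
 *	hdr.interface_id    = 'S';
 *	hdr.cmd_len         = sizeof(cdb);
 *	hdr.cmdp            = cdb;
 *	hdr.dxfer_direction = SG_DXFER_FROM_DEV;
 *	hdr.dxfer_len       = sizeof(buf);
 *	hdr.dxferp          = buf;
 *	hdr.mx_sb_len       = sizeof(sense);
 *	hdr.sbp             = sense;
 *	hdr.timeout         = 20000;
 *	ioctl(fd, SG_IO, &hdr);
 */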
  1001. static int skd_bdev_ioctl(struct block_device *bdev, fmode_t mode,
  1002. uint cmd_in, ulong arg)
  1003. {
  1004. static const int sg_version_num = 30527;
  1005. int rc = 0, timeout;
  1006. struct gendisk *disk = bdev->bd_disk;
  1007. struct skd_device *skdev = disk->private_data;
  1008. int __user *p = (int __user *)arg;
  1009. pr_debug("%s:%s:%d %s: CMD[%s] ioctl mode 0x%x, cmd 0x%x arg %0lx\n",
  1010. skdev->name, __func__, __LINE__,
  1011. disk->disk_name, current->comm, mode, cmd_in, arg);
  1012. if (!capable(CAP_SYS_ADMIN))
  1013. return -EPERM;
  1014. switch (cmd_in) {
  1015. case SG_SET_TIMEOUT:
  1016. rc = get_user(timeout, p);
  1017. if (!rc)
  1018. disk->queue->sg_timeout = clock_t_to_jiffies(timeout);
  1019. break;
  1020. case SG_GET_TIMEOUT:
  1021. rc = jiffies_to_clock_t(disk->queue->sg_timeout);
  1022. break;
  1023. case SG_GET_VERSION_NUM:
  1024. rc = put_user(sg_version_num, p);
  1025. break;
  1026. case SG_IO:
  1027. rc = skd_ioctl_sg_io(skdev, mode, (void __user *)arg);
  1028. break;
  1029. default:
  1030. rc = -ENOTTY;
  1031. break;
  1032. }
  1033. pr_debug("%s:%s:%d %s: completion rc %d\n",
  1034. skdev->name, __func__, __LINE__, disk->disk_name, rc);
  1035. return rc;
  1036. }
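/*
 * skd_ioctl_sg_io() runs an SG_IO request through a fixed pipeline:
 * validate and copy in the sg_io_hdr and iovecs, obtain a free special
 * context (blocking up to sg.timeout for one), build a page-sized bounce
 * buffer list, copy user data in for writes, send the FIT message, wait
 * for completion, copy data back out for reads, and finally return status
 * and sense to the caller.  The out: path always releases the special
 * context and any iovec allocation, whichever stage failed.
 */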
  1037. static int skd_ioctl_sg_io(struct skd_device *skdev, fmode_t mode,
  1038. void __user *argp)
  1039. {
  1040. int rc;
  1041. struct skd_sg_io sksgio;
  1042. memset(&sksgio, 0, sizeof(sksgio));
  1043. sksgio.mode = mode;
  1044. sksgio.argp = argp;
  1045. sksgio.iov = &sksgio.no_iov_iov;
  1046. switch (skdev->state) {
  1047. case SKD_DRVR_STATE_ONLINE:
  1048. case SKD_DRVR_STATE_BUSY_IMMINENT:
  1049. break;
  1050. default:
  1051. pr_debug("%s:%s:%d drive not online\n",
  1052. skdev->name, __func__, __LINE__);
  1053. rc = -ENXIO;
  1054. goto out;
  1055. }
  1056. rc = skd_sg_io_get_and_check_args(skdev, &sksgio);
  1057. if (rc)
  1058. goto out;
  1059. rc = skd_sg_io_obtain_skspcl(skdev, &sksgio);
  1060. if (rc)
  1061. goto out;
  1062. rc = skd_sg_io_prep_buffering(skdev, &sksgio);
  1063. if (rc)
  1064. goto out;
  1065. rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_TO_DEV);
  1066. if (rc)
  1067. goto out;
  1068. rc = skd_sg_io_send_fitmsg(skdev, &sksgio);
  1069. if (rc)
  1070. goto out;
  1071. rc = skd_sg_io_await(skdev, &sksgio);
  1072. if (rc)
  1073. goto out;
  1074. rc = skd_sg_io_copy_buffer(skdev, &sksgio, SG_DXFER_FROM_DEV);
  1075. if (rc)
  1076. goto out;
  1077. rc = skd_sg_io_put_status(skdev, &sksgio);
  1078. if (rc)
  1079. goto out;
  1080. rc = 0;
  1081. out:
  1082. skd_sg_io_release_skspcl(skdev, &sksgio);
  1083. if (sksgio.iov != NULL && sksgio.iov != &sksgio.no_iov_iov)
  1084. kfree(sksgio.iov);
  1085. return rc;
  1086. }
  1087. static int skd_sg_io_get_and_check_args(struct skd_device *skdev,
  1088. struct skd_sg_io *sksgio)
  1089. {
  1090. struct sg_io_hdr *sgp = &sksgio->sg;
  1091. int i, acc;
  1092. if (!access_ok(VERIFY_WRITE, sksgio->argp, sizeof(sg_io_hdr_t))) {
  1093. pr_debug("%s:%s:%d access sg failed %p\n",
  1094. skdev->name, __func__, __LINE__, sksgio->argp);
  1095. return -EFAULT;
  1096. }
  1097. if (__copy_from_user(sgp, sksgio->argp, sizeof(sg_io_hdr_t))) {
  1098. pr_debug("%s:%s:%d copy_from_user sg failed %p\n",
  1099. skdev->name, __func__, __LINE__, sksgio->argp);
  1100. return -EFAULT;
  1101. }
  1102. if (sgp->interface_id != SG_INTERFACE_ID_ORIG) {
  1103. pr_debug("%s:%s:%d interface_id invalid 0x%x\n",
  1104. skdev->name, __func__, __LINE__, sgp->interface_id);
  1105. return -EINVAL;
  1106. }
  1107. if (sgp->cmd_len > sizeof(sksgio->cdb)) {
  1108. pr_debug("%s:%s:%d cmd_len invalid %d\n",
  1109. skdev->name, __func__, __LINE__, sgp->cmd_len);
  1110. return -EINVAL;
  1111. }
  1112. if (sgp->iovec_count > 256) {
  1113. pr_debug("%s:%s:%d iovec_count invalid %d\n",
  1114. skdev->name, __func__, __LINE__, sgp->iovec_count);
  1115. return -EINVAL;
  1116. }
  1117. if (sgp->dxfer_len > (PAGE_SIZE * SKD_N_SG_PER_SPECIAL)) {
  1118. pr_debug("%s:%s:%d dxfer_len invalid %d\n",
  1119. skdev->name, __func__, __LINE__, sgp->dxfer_len);
  1120. return -EINVAL;
  1121. }
  1122. switch (sgp->dxfer_direction) {
  1123. case SG_DXFER_NONE:
  1124. acc = -1;
  1125. break;
  1126. case SG_DXFER_TO_DEV:
  1127. acc = VERIFY_READ;
  1128. break;
  1129. case SG_DXFER_FROM_DEV:
  1130. case SG_DXFER_TO_FROM_DEV:
  1131. acc = VERIFY_WRITE;
  1132. break;
  1133. default:
  1134. pr_debug("%s:%s:%d dxfer_dir invalid %d\n",
  1135. skdev->name, __func__, __LINE__, sgp->dxfer_direction);
  1136. return -EINVAL;
  1137. }
  1138. if (copy_from_user(sksgio->cdb, sgp->cmdp, sgp->cmd_len)) {
  1139. pr_debug("%s:%s:%d copy_from_user cmdp failed %p\n",
  1140. skdev->name, __func__, __LINE__, sgp->cmdp);
  1141. return -EFAULT;
  1142. }
  1143. if (sgp->mx_sb_len != 0) {
  1144. if (!access_ok(VERIFY_WRITE, sgp->sbp, sgp->mx_sb_len)) {
  1145. pr_debug("%s:%s:%d access sbp failed %p\n",
  1146. skdev->name, __func__, __LINE__, sgp->sbp);
  1147. return -EFAULT;
  1148. }
  1149. }
  1150. if (sgp->iovec_count == 0) {
  1151. sksgio->iov[0].iov_base = sgp->dxferp;
  1152. sksgio->iov[0].iov_len = sgp->dxfer_len;
  1153. sksgio->iovcnt = 1;
  1154. sksgio->dxfer_len = sgp->dxfer_len;
  1155. } else {
  1156. struct sg_iovec *iov;
  1157. uint nbytes = sizeof(*iov) * sgp->iovec_count;
  1158. size_t iov_data_len;
  1159. iov = kmalloc(nbytes, GFP_KERNEL);
  1160. if (iov == NULL) {
  1161. pr_debug("%s:%s:%d alloc iovec failed %d\n",
  1162. skdev->name, __func__, __LINE__,
  1163. sgp->iovec_count);
  1164. return -ENOMEM;
  1165. }
  1166. sksgio->iov = iov;
  1167. sksgio->iovcnt = sgp->iovec_count;
  1168. if (copy_from_user(iov, sgp->dxferp, nbytes)) {
  1169. pr_debug("%s:%s:%d copy_from_user iovec failed %p\n",
  1170. skdev->name, __func__, __LINE__, sgp->dxferp);
  1171. return -EFAULT;
  1172. }
  1173. /*
  1174. * Sum up the vecs, making sure they don't overflow
  1175. */
  1176. iov_data_len = 0;
  1177. for (i = 0; i < sgp->iovec_count; i++) {
  1178. if (iov_data_len + iov[i].iov_len < iov_data_len)
  1179. return -EINVAL;
  1180. iov_data_len += iov[i].iov_len;
  1181. }
  1182. /* SG_IO howto says that the shorter of the two wins */
  1183. if (sgp->dxfer_len < iov_data_len) {
  1184. sksgio->iovcnt = iov_shorten((struct iovec *)iov,
  1185. sgp->iovec_count,
  1186. sgp->dxfer_len);
  1187. sksgio->dxfer_len = sgp->dxfer_len;
  1188. } else
  1189. sksgio->dxfer_len = iov_data_len;
  1190. }
  1191. if (sgp->dxfer_direction != SG_DXFER_NONE) {
  1192. struct sg_iovec *iov = sksgio->iov;
  1193. for (i = 0; i < sksgio->iovcnt; i++, iov++) {
  1194. if (!access_ok(acc, iov->iov_base, iov->iov_len)) {
  1195. pr_debug("%s:%s:%d access data failed %p/%d\n",
  1196. skdev->name, __func__, __LINE__,
  1197. iov->iov_base, (int)iov->iov_len);
  1198. return -EFAULT;
  1199. }
  1200. }
  1201. }
  1202. return 0;
  1203. }
  1204. static int skd_sg_io_obtain_skspcl(struct skd_device *skdev,
  1205. struct skd_sg_io *sksgio)
  1206. {
  1207. struct skd_special_context *skspcl = NULL;
  1208. int rc;
  1209. for (;;) {
  1210. ulong flags;
  1211. spin_lock_irqsave(&skdev->lock, flags);
  1212. skspcl = skdev->skspcl_free_list;
  1213. if (skspcl != NULL) {
  1214. skdev->skspcl_free_list =
  1215. (struct skd_special_context *)skspcl->req.next;
  1216. skspcl->req.id += SKD_ID_INCR;
  1217. skspcl->req.state = SKD_REQ_STATE_SETUP;
  1218. skspcl->orphaned = 0;
  1219. skspcl->req.n_sg = 0;
  1220. }
  1221. spin_unlock_irqrestore(&skdev->lock, flags);
  1222. if (skspcl != NULL) {
  1223. rc = 0;
  1224. break;
  1225. }
  1226. pr_debug("%s:%s:%d blocking\n",
  1227. skdev->name, __func__, __LINE__);
  1228. rc = wait_event_interruptible_timeout(
  1229. skdev->waitq,
  1230. (skdev->skspcl_free_list != NULL),
  1231. msecs_to_jiffies(sksgio->sg.timeout));
  1232. pr_debug("%s:%s:%d unblocking, rc=%d\n",
  1233. skdev->name, __func__, __LINE__, rc);
  1234. if (rc <= 0) {
  1235. if (rc == 0)
  1236. rc = -ETIMEDOUT;
  1237. else
  1238. rc = -EINTR;
  1239. break;
  1240. }
  1241. /*
1242. * If we get here, rc > 0, meaning wait_event_interruptible_timeout()
1243. * returned with time to spare, i.e. the awaited event -- a non-empty
1244. * free list -- occurred.
  1245. * Retry the allocation.
  1246. */
  1247. }
  1248. sksgio->skspcl = skspcl;
  1249. return rc;
  1250. }
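/*
 * skd_skreq_prep_buffering() sets up bounce buffering for an SG_IO
 * transfer.  The byte count is first rounded up to a multiple of 4
 * (resid += (-resid) & 3) because the DMA engine requires aligned
 * lengths; one page is then allocated per PAGE_SIZE chunk and chained
 * into both the scatterlist and the FIT SG descriptor list, with the
 * last descriptor marked FIT_SGD_CONTROL_LAST and given a null next
 * pointer.  As the TODO inside notes, host_side_addr is currently taken
 * from sg_phys() rather than from a proper DMA mapping call.
 */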
  1251. static int skd_skreq_prep_buffering(struct skd_device *skdev,
  1252. struct skd_request_context *skreq,
  1253. u32 dxfer_len)
  1254. {
  1255. u32 resid = dxfer_len;
  1256. /*
  1257. * The DMA engine must have aligned addresses and byte counts.
  1258. */
  1259. resid += (-resid) & 3;
  1260. skreq->sg_byte_count = resid;
  1261. skreq->n_sg = 0;
  1262. while (resid > 0) {
  1263. u32 nbytes = PAGE_SIZE;
  1264. u32 ix = skreq->n_sg;
  1265. struct scatterlist *sg = &skreq->sg[ix];
  1266. struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
  1267. struct page *page;
  1268. if (nbytes > resid)
  1269. nbytes = resid;
  1270. page = alloc_page(GFP_KERNEL);
  1271. if (page == NULL)
  1272. return -ENOMEM;
  1273. sg_set_page(sg, page, nbytes, 0);
  1274. /* TODO: This should be going through a pci_???()
  1275. * routine to do proper mapping. */
  1276. sksg->control = FIT_SGD_CONTROL_NOT_LAST;
  1277. sksg->byte_count = nbytes;
  1278. sksg->host_side_addr = sg_phys(sg);
  1279. sksg->dev_side_addr = 0;
  1280. sksg->next_desc_ptr = skreq->sksg_dma_address +
  1281. (ix + 1) * sizeof(*sksg);
  1282. skreq->n_sg++;
  1283. resid -= nbytes;
  1284. }
  1285. if (skreq->n_sg > 0) {
  1286. u32 ix = skreq->n_sg - 1;
  1287. struct fit_sg_descriptor *sksg = &skreq->sksg_list[ix];
  1288. sksg->control = FIT_SGD_CONTROL_LAST;
  1289. sksg->next_desc_ptr = 0;
  1290. }
  1291. if (unlikely(skdev->dbg_level > 1)) {
  1292. u32 i;
  1293. pr_debug("%s:%s:%d skreq=%x sksg_list=%p sksg_dma=%llx\n",
  1294. skdev->name, __func__, __LINE__,
  1295. skreq->id, skreq->sksg_list, skreq->sksg_dma_address);
  1296. for (i = 0; i < skreq->n_sg; i++) {
  1297. struct fit_sg_descriptor *sgd = &skreq->sksg_list[i];
  1298. pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
  1299. "addr=0x%llx next=0x%llx\n",
  1300. skdev->name, __func__, __LINE__,
  1301. i, sgd->byte_count, sgd->control,
  1302. sgd->host_side_addr, sgd->next_desc_ptr);
  1303. }
  1304. }
  1305. return 0;
  1306. }
  1307. static int skd_sg_io_prep_buffering(struct skd_device *skdev,
  1308. struct skd_sg_io *sksgio)
  1309. {
  1310. struct skd_special_context *skspcl = sksgio->skspcl;
  1311. struct skd_request_context *skreq = &skspcl->req;
  1312. u32 dxfer_len = sksgio->dxfer_len;
  1313. int rc;
  1314. rc = skd_skreq_prep_buffering(skdev, skreq, dxfer_len);
  1315. /*
1316. * Eventually, whether or not there were errors, skd_release_special()
1317. * is called to recover allocations, including partial allocations.
  1318. */
  1319. return rc;
  1320. }
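/*
 * skd_sg_io_copy_buffer() moves data between the user iovecs and the
 * bounce pages allocated above, walking both in lockstep and copying
 * min(remaining transfer, remaining iovec, remaining page) bytes per
 * iteration.  The direction check at the top makes the routine a no-op
 * when the requested pass does not match the user's dxfer_direction;
 * the one exception is SG_DXFER_TO_FROM_DEV, whose data is also copied
 * in on the SG_DXFER_TO_DEV pass.
 */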
  1321. static int skd_sg_io_copy_buffer(struct skd_device *skdev,
  1322. struct skd_sg_io *sksgio, int dxfer_dir)
  1323. {
  1324. struct skd_special_context *skspcl = sksgio->skspcl;
  1325. u32 iov_ix = 0;
  1326. struct sg_iovec curiov;
  1327. u32 sksg_ix = 0;
  1328. u8 *bufp = NULL;
  1329. u32 buf_len = 0;
  1330. u32 resid = sksgio->dxfer_len;
  1331. int rc;
  1332. curiov.iov_len = 0;
  1333. curiov.iov_base = NULL;
  1334. if (dxfer_dir != sksgio->sg.dxfer_direction) {
  1335. if (dxfer_dir != SG_DXFER_TO_DEV ||
  1336. sksgio->sg.dxfer_direction != SG_DXFER_TO_FROM_DEV)
  1337. return 0;
  1338. }
  1339. while (resid > 0) {
  1340. u32 nbytes = PAGE_SIZE;
  1341. if (curiov.iov_len == 0) {
  1342. curiov = sksgio->iov[iov_ix++];
  1343. continue;
  1344. }
  1345. if (buf_len == 0) {
  1346. struct page *page;
  1347. page = sg_page(&skspcl->req.sg[sksg_ix++]);
  1348. bufp = page_address(page);
  1349. buf_len = PAGE_SIZE;
  1350. }
  1351. nbytes = min_t(u32, nbytes, resid);
  1352. nbytes = min_t(u32, nbytes, curiov.iov_len);
  1353. nbytes = min_t(u32, nbytes, buf_len);
  1354. if (dxfer_dir == SG_DXFER_TO_DEV)
  1355. rc = __copy_from_user(bufp, curiov.iov_base, nbytes);
  1356. else
  1357. rc = __copy_to_user(curiov.iov_base, bufp, nbytes);
  1358. if (rc)
  1359. return -EFAULT;
  1360. resid -= nbytes;
  1361. curiov.iov_len -= nbytes;
  1362. curiov.iov_base += nbytes;
  1363. buf_len -= nbytes;
  1364. }
  1365. return 0;
  1366. }
  1367. static int skd_sg_io_send_fitmsg(struct skd_device *skdev,
  1368. struct skd_sg_io *sksgio)
  1369. {
  1370. struct skd_special_context *skspcl = sksgio->skspcl;
  1371. struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
  1372. struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
  1373. memset(skspcl->msg_buf, 0, SKD_N_SPECIAL_FITMSG_BYTES);
  1374. /* Initialize the FIT msg header */
  1375. fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
  1376. fmh->num_protocol_cmds_coalesced = 1;
  1377. /* Initialize the SCSI request */
  1378. if (sksgio->sg.dxfer_direction != SG_DXFER_NONE)
  1379. scsi_req->hdr.sg_list_dma_address =
  1380. cpu_to_be64(skspcl->req.sksg_dma_address);
  1381. scsi_req->hdr.tag = skspcl->req.id;
  1382. scsi_req->hdr.sg_list_len_bytes =
  1383. cpu_to_be32(skspcl->req.sg_byte_count);
  1384. memcpy(scsi_req->cdb, sksgio->cdb, sizeof(scsi_req->cdb));
  1385. skspcl->req.state = SKD_REQ_STATE_BUSY;
  1386. skd_send_special_fitmsg(skdev, skspcl);
  1387. return 0;
  1388. }
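/*
 * skd_sg_io_await() sleeps until the special request leaves
 * SKD_REQ_STATE_BUSY or the user-supplied timeout expires.  If the
 * request was aborted, a CHECK CONDITION is fabricated with sense
 * format 0x70, key ABORTED_COMMAND and asc 0x44 so the caller still
 * gets status back.  If the request is somehow still busy (timeout or
 * signal), it is marked orphaned so completion processing can release
 * it later, and -ETIMEDOUT or -EINTR is returned.
 */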
  1389. static int skd_sg_io_await(struct skd_device *skdev, struct skd_sg_io *sksgio)
  1390. {
  1391. unsigned long flags;
  1392. int rc;
  1393. rc = wait_event_interruptible_timeout(skdev->waitq,
  1394. (sksgio->skspcl->req.state !=
  1395. SKD_REQ_STATE_BUSY),
  1396. msecs_to_jiffies(sksgio->sg.
  1397. timeout));
  1398. spin_lock_irqsave(&skdev->lock, flags);
  1399. if (sksgio->skspcl->req.state == SKD_REQ_STATE_ABORTED) {
  1400. pr_debug("%s:%s:%d skspcl %p aborted\n",
  1401. skdev->name, __func__, __LINE__, sksgio->skspcl);
1402. /* Build a check condition with sense data and let the command finish. */
1403. /* For a timeout, we must fabricate completion and sense
1404. * data to complete the command. */
  1405. sksgio->skspcl->req.completion.status =
  1406. SAM_STAT_CHECK_CONDITION;
  1407. memset(&sksgio->skspcl->req.err_info, 0,
  1408. sizeof(sksgio->skspcl->req.err_info));
  1409. sksgio->skspcl->req.err_info.type = 0x70;
  1410. sksgio->skspcl->req.err_info.key = ABORTED_COMMAND;
  1411. sksgio->skspcl->req.err_info.code = 0x44;
  1412. sksgio->skspcl->req.err_info.qual = 0;
  1413. rc = 0;
  1414. } else if (sksgio->skspcl->req.state != SKD_REQ_STATE_BUSY)
  1415. /* No longer on the adapter. We finish. */
  1416. rc = 0;
  1417. else {
  1418. /* Something's gone wrong. Still busy. Timeout or
  1419. * user interrupted (control-C). Mark as an orphan
1420. * so it will be disposed of when it completes. */
  1421. sksgio->skspcl->orphaned = 1;
  1422. sksgio->skspcl = NULL;
  1423. if (rc == 0) {
  1424. pr_debug("%s:%s:%d timed out %p (%u ms)\n",
  1425. skdev->name, __func__, __LINE__,
  1426. sksgio, sksgio->sg.timeout);
  1427. rc = -ETIMEDOUT;
  1428. } else {
  1429. pr_debug("%s:%s:%d cntlc %p\n",
  1430. skdev->name, __func__, __LINE__, sksgio);
  1431. rc = -EINTR;
  1432. }
  1433. }
  1434. spin_unlock_irqrestore(&skdev->lock, flags);
  1435. return rc;
  1436. }
  1437. static int skd_sg_io_put_status(struct skd_device *skdev,
  1438. struct skd_sg_io *sksgio)
  1439. {
  1440. struct sg_io_hdr *sgp = &sksgio->sg;
  1441. struct skd_special_context *skspcl = sksgio->skspcl;
  1442. int resid = 0;
  1443. u32 nb = be32_to_cpu(skspcl->req.completion.num_returned_bytes);
  1444. sgp->status = skspcl->req.completion.status;
  1445. resid = sksgio->dxfer_len - nb;
  1446. sgp->masked_status = sgp->status & STATUS_MASK;
  1447. sgp->msg_status = 0;
  1448. sgp->host_status = 0;
  1449. sgp->driver_status = 0;
  1450. sgp->resid = resid;
  1451. if (sgp->masked_status || sgp->host_status || sgp->driver_status)
  1452. sgp->info |= SG_INFO_CHECK;
  1453. pr_debug("%s:%s:%d status %x masked %x resid 0x%x\n",
  1454. skdev->name, __func__, __LINE__,
  1455. sgp->status, sgp->masked_status, sgp->resid);
  1456. if (sgp->masked_status == SAM_STAT_CHECK_CONDITION) {
  1457. if (sgp->mx_sb_len > 0) {
  1458. struct fit_comp_error_info *ei = &skspcl->req.err_info;
  1459. u32 nbytes = sizeof(*ei);
  1460. nbytes = min_t(u32, nbytes, sgp->mx_sb_len);
  1461. sgp->sb_len_wr = nbytes;
  1462. if (__copy_to_user(sgp->sbp, ei, nbytes)) {
  1463. pr_debug("%s:%s:%d copy_to_user sense failed %p\n",
  1464. skdev->name, __func__, __LINE__,
  1465. sgp->sbp);
  1466. return -EFAULT;
  1467. }
  1468. }
  1469. }
  1470. if (__copy_to_user(sksgio->argp, sgp, sizeof(sg_io_hdr_t))) {
  1471. pr_debug("%s:%s:%d copy_to_user sg failed %p\n",
  1472. skdev->name, __func__, __LINE__, sksgio->argp);
  1473. return -EFAULT;
  1474. }
  1475. return 0;
  1476. }
  1477. static int skd_sg_io_release_skspcl(struct skd_device *skdev,
  1478. struct skd_sg_io *sksgio)
  1479. {
  1480. struct skd_special_context *skspcl = sksgio->skspcl;
  1481. if (skspcl != NULL) {
  1482. ulong flags;
  1483. sksgio->skspcl = NULL;
  1484. spin_lock_irqsave(&skdev->lock, flags);
  1485. skd_release_special(skdev, skspcl);
  1486. spin_unlock_irqrestore(&skdev->lock, flags);
  1487. }
  1488. return 0;
  1489. }
  1490. /*
  1491. *****************************************************************************
  1492. * INTERNAL REQUESTS -- generated by driver itself
  1493. *****************************************************************************
  1494. */
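/*
 * The internal special context is used to bring the drive online.  On
 * the successful path skd_complete_internal() below chains the commands
 * TEST_UNIT_READY -> WRITE_BUFFER -> READ_BUFFER (pattern verify) ->
 * READ_CAPACITY -> INQUIRY (serial number page), after which
 * skd_unquiesce_dev() is called and connect_retries is reset.
 * SYNCHRONIZE_CACHE is issued separately and signals waiters through
 * sync_done.
 */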
  1495. static int skd_format_internal_skspcl(struct skd_device *skdev)
  1496. {
  1497. struct skd_special_context *skspcl = &skdev->internal_skspcl;
  1498. struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
  1499. struct fit_msg_hdr *fmh;
  1500. uint64_t dma_address;
  1501. struct skd_scsi_request *scsi;
  1502. fmh = (struct fit_msg_hdr *)&skspcl->msg_buf[0];
  1503. fmh->protocol_id = FIT_PROTOCOL_ID_SOFIT;
  1504. fmh->num_protocol_cmds_coalesced = 1;
  1505. scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
  1506. memset(scsi, 0, sizeof(*scsi));
  1507. dma_address = skspcl->req.sksg_dma_address;
  1508. scsi->hdr.sg_list_dma_address = cpu_to_be64(dma_address);
  1509. sgd->control = FIT_SGD_CONTROL_LAST;
  1510. sgd->byte_count = 0;
  1511. sgd->host_side_addr = skspcl->db_dma_address;
  1512. sgd->dev_side_addr = 0;
  1513. sgd->next_desc_ptr = 0LL;
  1514. return 1;
  1515. }
  1516. #define WR_BUF_SIZE SKD_N_INTERNAL_BYTES
  1517. static void skd_send_internal_skspcl(struct skd_device *skdev,
  1518. struct skd_special_context *skspcl,
  1519. u8 opcode)
  1520. {
  1521. struct fit_sg_descriptor *sgd = &skspcl->req.sksg_list[0];
  1522. struct skd_scsi_request *scsi;
  1523. unsigned char *buf = skspcl->data_buf;
  1524. int i;
  1525. if (skspcl->req.state != SKD_REQ_STATE_IDLE)
  1526. /*
  1527. * A refresh is already in progress.
  1528. * Just wait for it to finish.
  1529. */
  1530. return;
  1531. SKD_ASSERT((skspcl->req.id & SKD_ID_INCR) == 0);
  1532. skspcl->req.state = SKD_REQ_STATE_BUSY;
  1533. skspcl->req.id += SKD_ID_INCR;
  1534. scsi = (struct skd_scsi_request *)&skspcl->msg_buf[64];
  1535. scsi->hdr.tag = skspcl->req.id;
  1536. memset(scsi->cdb, 0, sizeof(scsi->cdb));
  1537. switch (opcode) {
  1538. case TEST_UNIT_READY:
  1539. scsi->cdb[0] = TEST_UNIT_READY;
  1540. sgd->byte_count = 0;
  1541. scsi->hdr.sg_list_len_bytes = 0;
  1542. break;
  1543. case READ_CAPACITY:
  1544. scsi->cdb[0] = READ_CAPACITY;
  1545. sgd->byte_count = SKD_N_READ_CAP_BYTES;
  1546. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  1547. break;
  1548. case INQUIRY:
  1549. scsi->cdb[0] = INQUIRY;
  1550. scsi->cdb[1] = 0x01; /* evpd */
  1551. scsi->cdb[2] = 0x80; /* serial number page */
  1552. scsi->cdb[4] = 0x10;
  1553. sgd->byte_count = 16;
  1554. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  1555. break;
  1556. case SYNCHRONIZE_CACHE:
  1557. scsi->cdb[0] = SYNCHRONIZE_CACHE;
  1558. sgd->byte_count = 0;
  1559. scsi->hdr.sg_list_len_bytes = 0;
  1560. break;
  1561. case WRITE_BUFFER:
  1562. scsi->cdb[0] = WRITE_BUFFER;
  1563. scsi->cdb[1] = 0x02;
  1564. scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
  1565. scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
  1566. sgd->byte_count = WR_BUF_SIZE;
  1567. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  1568. /* fill incrementing byte pattern */
  1569. for (i = 0; i < sgd->byte_count; i++)
  1570. buf[i] = i & 0xFF;
  1571. break;
  1572. case READ_BUFFER:
  1573. scsi->cdb[0] = READ_BUFFER;
  1574. scsi->cdb[1] = 0x02;
  1575. scsi->cdb[7] = (WR_BUF_SIZE & 0xFF00) >> 8;
  1576. scsi->cdb[8] = WR_BUF_SIZE & 0xFF;
  1577. sgd->byte_count = WR_BUF_SIZE;
  1578. scsi->hdr.sg_list_len_bytes = cpu_to_be32(sgd->byte_count);
  1579. memset(skspcl->data_buf, 0, sgd->byte_count);
  1580. break;
  1581. default:
  1582. SKD_ASSERT("Don't know what to send");
  1583. return;
  1584. }
  1585. skd_send_special_fitmsg(skdev, skspcl);
  1586. }
  1587. static void skd_refresh_device_data(struct skd_device *skdev)
  1588. {
  1589. struct skd_special_context *skspcl = &skdev->internal_skspcl;
  1590. skd_send_internal_skspcl(skdev, skspcl, TEST_UNIT_READY);
  1591. }
  1592. static int skd_chk_read_buf(struct skd_device *skdev,
  1593. struct skd_special_context *skspcl)
  1594. {
  1595. unsigned char *buf = skspcl->data_buf;
  1596. int i;
  1597. /* check for incrementing byte pattern */
  1598. for (i = 0; i < WR_BUF_SIZE; i++)
  1599. if (buf[i] != (i & 0xFF))
  1600. return 1;
  1601. return 0;
  1602. }
  1603. static void skd_log_check_status(struct skd_device *skdev, u8 status, u8 key,
  1604. u8 code, u8 qual, u8 fruc)
  1605. {
  1606. /* If the check condition is of special interest, log a message */
  1607. if ((status == SAM_STAT_CHECK_CONDITION) && (key == 0x02)
  1608. && (code == 0x04) && (qual == 0x06)) {
  1609. pr_err("(%s): *** LOST_WRITE_DATA ERROR *** key/asc/"
  1610. "ascq/fruc %02x/%02x/%02x/%02x\n",
  1611. skd_name(skdev), key, code, qual, fruc);
  1612. }
  1613. }
  1614. static void skd_complete_internal(struct skd_device *skdev,
  1615. volatile struct fit_completion_entry_v1
  1616. *skcomp,
  1617. volatile struct fit_comp_error_info *skerr,
  1618. struct skd_special_context *skspcl)
  1619. {
  1620. u8 *buf = skspcl->data_buf;
  1621. u8 status;
  1622. int i;
  1623. struct skd_scsi_request *scsi =
  1624. (struct skd_scsi_request *)&skspcl->msg_buf[64];
  1625. SKD_ASSERT(skspcl == &skdev->internal_skspcl);
  1626. pr_debug("%s:%s:%d complete internal %x\n",
  1627. skdev->name, __func__, __LINE__, scsi->cdb[0]);
  1628. skspcl->req.completion = *skcomp;
  1629. skspcl->req.state = SKD_REQ_STATE_IDLE;
  1630. skspcl->req.id += SKD_ID_INCR;
  1631. status = skspcl->req.completion.status;
  1632. skd_log_check_status(skdev, status, skerr->key, skerr->code,
  1633. skerr->qual, skerr->fruc);
  1634. switch (scsi->cdb[0]) {
  1635. case TEST_UNIT_READY:
  1636. if (status == SAM_STAT_GOOD)
  1637. skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
  1638. else if ((status == SAM_STAT_CHECK_CONDITION) &&
  1639. (skerr->key == MEDIUM_ERROR))
  1640. skd_send_internal_skspcl(skdev, skspcl, WRITE_BUFFER);
  1641. else {
  1642. if (skdev->state == SKD_DRVR_STATE_STOPPING) {
  1643. pr_debug("%s:%s:%d TUR failed, don't send anymore state 0x%x\n",
  1644. skdev->name, __func__, __LINE__,
  1645. skdev->state);
  1646. return;
  1647. }
  1648. pr_debug("%s:%s:%d **** TUR failed, retry skerr\n",
  1649. skdev->name, __func__, __LINE__);
  1650. skd_send_internal_skspcl(skdev, skspcl, 0x00);
  1651. }
  1652. break;
  1653. case WRITE_BUFFER:
  1654. if (status == SAM_STAT_GOOD)
  1655. skd_send_internal_skspcl(skdev, skspcl, READ_BUFFER);
  1656. else {
  1657. if (skdev->state == SKD_DRVR_STATE_STOPPING) {
  1658. pr_debug("%s:%s:%d write buffer failed, don't send anymore state 0x%x\n",
  1659. skdev->name, __func__, __LINE__,
  1660. skdev->state);
  1661. return;
  1662. }
  1663. pr_debug("%s:%s:%d **** write buffer failed, retry skerr\n",
  1664. skdev->name, __func__, __LINE__);
  1665. skd_send_internal_skspcl(skdev, skspcl, 0x00);
  1666. }
  1667. break;
  1668. case READ_BUFFER:
  1669. if (status == SAM_STAT_GOOD) {
  1670. if (skd_chk_read_buf(skdev, skspcl) == 0)
  1671. skd_send_internal_skspcl(skdev, skspcl,
  1672. READ_CAPACITY);
  1673. else {
  1674. pr_err(
  1675. "(%s):*** W/R Buffer mismatch %d ***\n",
  1676. skd_name(skdev), skdev->connect_retries);
  1677. if (skdev->connect_retries <
  1678. SKD_MAX_CONNECT_RETRIES) {
  1679. skdev->connect_retries++;
  1680. skd_soft_reset(skdev);
  1681. } else {
  1682. pr_err(
  1683. "(%s): W/R Buffer Connect Error\n",
  1684. skd_name(skdev));
  1685. return;
  1686. }
  1687. }
  1688. } else {
  1689. if (skdev->state == SKD_DRVR_STATE_STOPPING) {
  1690. pr_debug("%s:%s:%d "
  1691. "read buffer failed, don't send anymore state 0x%x\n",
  1692. skdev->name, __func__, __LINE__,
  1693. skdev->state);
  1694. return;
  1695. }
  1696. pr_debug("%s:%s:%d "
  1697. "**** read buffer failed, retry skerr\n",
  1698. skdev->name, __func__, __LINE__);
  1699. skd_send_internal_skspcl(skdev, skspcl, 0x00);
  1700. }
  1701. break;
  1702. case READ_CAPACITY:
  1703. skdev->read_cap_is_valid = 0;
  1704. if (status == SAM_STAT_GOOD) {
  1705. skdev->read_cap_last_lba =
  1706. (buf[0] << 24) | (buf[1] << 16) |
  1707. (buf[2] << 8) | buf[3];
  1708. skdev->read_cap_blocksize =
  1709. (buf[4] << 24) | (buf[5] << 16) |
  1710. (buf[6] << 8) | buf[7];
  1711. pr_debug("%s:%s:%d last lba %d, bs %d\n",
  1712. skdev->name, __func__, __LINE__,
  1713. skdev->read_cap_last_lba,
  1714. skdev->read_cap_blocksize);
  1715. set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
  1716. skdev->read_cap_is_valid = 1;
  1717. skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
  1718. } else if ((status == SAM_STAT_CHECK_CONDITION) &&
  1719. (skerr->key == MEDIUM_ERROR)) {
  1720. skdev->read_cap_last_lba = ~0;
  1721. set_capacity(skdev->disk, skdev->read_cap_last_lba + 1);
  1722. pr_debug("%s:%s:%d "
  1723. "**** MEDIUM ERROR caused READCAP to fail, ignore failure and continue to inquiry\n",
  1724. skdev->name, __func__, __LINE__);
  1725. skd_send_internal_skspcl(skdev, skspcl, INQUIRY);
  1726. } else {
  1727. pr_debug("%s:%s:%d **** READCAP failed, retry TUR\n",
  1728. skdev->name, __func__, __LINE__);
  1729. skd_send_internal_skspcl(skdev, skspcl,
  1730. TEST_UNIT_READY);
  1731. }
  1732. break;
  1733. case INQUIRY:
  1734. skdev->inquiry_is_valid = 0;
  1735. if (status == SAM_STAT_GOOD) {
  1736. skdev->inquiry_is_valid = 1;
  1737. for (i = 0; i < 12; i++)
  1738. skdev->inq_serial_num[i] = buf[i + 4];
  1739. skdev->inq_serial_num[12] = 0;
  1740. }
  1741. if (skd_unquiesce_dev(skdev) < 0)
  1742. pr_debug("%s:%s:%d **** failed, to ONLINE device\n",
  1743. skdev->name, __func__, __LINE__);
  1744. /* connection is complete */
  1745. skdev->connect_retries = 0;
  1746. break;
  1747. case SYNCHRONIZE_CACHE:
  1748. if (status == SAM_STAT_GOOD)
  1749. skdev->sync_done = 1;
  1750. else
  1751. skdev->sync_done = -1;
  1752. wake_up_interruptible(&skdev->waitq);
  1753. break;
  1754. default:
  1755. SKD_ASSERT("we didn't send this");
  1756. }
  1757. }
  1758. /*
  1759. *****************************************************************************
  1760. * FIT MESSAGES
  1761. *****************************************************************************
  1762. */
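/*
 * A FIT message is issued by writing a single 64-bit value to the
 * FIT_Q_COMMAND register: the DMA address of the message buffer OR'd
 * with a queue id and a message-size code.  skd_send_fitmsg() picks the
 * smallest size code (64/128/256/512 bytes) that covers the message,
 * while special messages always use FIT_QCMD_MSGSIZE_128 since they
 * consist of one 64-byte header plus one 64-byte command.
 */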
  1763. static void skd_send_fitmsg(struct skd_device *skdev,
  1764. struct skd_fitmsg_context *skmsg)
  1765. {
  1766. u64 qcmd;
  1767. struct fit_msg_hdr *fmh;
  1768. pr_debug("%s:%s:%d dma address 0x%llx, busy=%d\n",
  1769. skdev->name, __func__, __LINE__,
  1770. skmsg->mb_dma_address, skdev->in_flight);
  1771. pr_debug("%s:%s:%d msg_buf 0x%p, offset %x\n",
  1772. skdev->name, __func__, __LINE__,
  1773. skmsg->msg_buf, skmsg->offset);
  1774. qcmd = skmsg->mb_dma_address;
  1775. qcmd |= FIT_QCMD_QID_NORMAL;
  1776. fmh = (struct fit_msg_hdr *)skmsg->msg_buf;
  1777. skmsg->outstanding = fmh->num_protocol_cmds_coalesced;
  1778. if (unlikely(skdev->dbg_level > 1)) {
  1779. u8 *bp = (u8 *)skmsg->msg_buf;
  1780. int i;
  1781. for (i = 0; i < skmsg->length; i += 8) {
  1782. pr_debug("%s:%s:%d msg[%2d] %8ph\n",
  1783. skdev->name, __func__, __LINE__, i, &bp[i]);
  1784. if (i == 0)
  1785. i = 64 - 8;
  1786. }
  1787. }
  1788. if (skmsg->length > 256)
  1789. qcmd |= FIT_QCMD_MSGSIZE_512;
  1790. else if (skmsg->length > 128)
  1791. qcmd |= FIT_QCMD_MSGSIZE_256;
  1792. else if (skmsg->length > 64)
  1793. qcmd |= FIT_QCMD_MSGSIZE_128;
  1794. else
  1795. /*
  1796. * This makes no sense because the FIT msg header is
  1797. * 64 bytes. If the msg is only 64 bytes long it has
  1798. * no payload.
  1799. */
  1800. qcmd |= FIT_QCMD_MSGSIZE_64;
  1801. SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
  1802. }
  1803. static void skd_send_special_fitmsg(struct skd_device *skdev,
  1804. struct skd_special_context *skspcl)
  1805. {
  1806. u64 qcmd;
  1807. if (unlikely(skdev->dbg_level > 1)) {
  1808. u8 *bp = (u8 *)skspcl->msg_buf;
  1809. int i;
  1810. for (i = 0; i < SKD_N_SPECIAL_FITMSG_BYTES; i += 8) {
  1811. pr_debug("%s:%s:%d spcl[%2d] %8ph\n",
  1812. skdev->name, __func__, __LINE__, i, &bp[i]);
  1813. if (i == 0)
  1814. i = 64 - 8;
  1815. }
  1816. pr_debug("%s:%s:%d skspcl=%p id=%04x sksg_list=%p sksg_dma=%llx\n",
  1817. skdev->name, __func__, __LINE__,
  1818. skspcl, skspcl->req.id, skspcl->req.sksg_list,
  1819. skspcl->req.sksg_dma_address);
  1820. for (i = 0; i < skspcl->req.n_sg; i++) {
  1821. struct fit_sg_descriptor *sgd =
  1822. &skspcl->req.sksg_list[i];
  1823. pr_debug("%s:%s:%d sg[%d] count=%u ctrl=0x%x "
  1824. "addr=0x%llx next=0x%llx\n",
  1825. skdev->name, __func__, __LINE__,
  1826. i, sgd->byte_count, sgd->control,
  1827. sgd->host_side_addr, sgd->next_desc_ptr);
  1828. }
  1829. }
  1830. /*
  1831. * Special FIT msgs are always 128 bytes: a 64-byte FIT hdr
  1832. * and one 64-byte SSDI command.
  1833. */
  1834. qcmd = skspcl->mb_dma_address;
  1835. qcmd |= FIT_QCMD_QID_NORMAL + FIT_QCMD_MSGSIZE_128;
  1836. SKD_WRITEQ(skdev, qcmd, FIT_Q_COMMAND);
  1837. }
  1838. /*
  1839. *****************************************************************************
  1840. * COMPLETION QUEUE
  1841. *****************************************************************************
  1842. */
  1843. static void skd_complete_other(struct skd_device *skdev,
  1844. volatile struct fit_completion_entry_v1 *skcomp,
  1845. volatile struct fit_comp_error_info *skerr);
  1846. struct sns_info {
  1847. u8 type;
  1848. u8 stat;
  1849. u8 key;
  1850. u8 asc;
  1851. u8 ascq;
  1852. u8 mask;
  1853. enum skd_check_status_action action;
  1854. };
  1855. static struct sns_info skd_chkstat_table[] = {
  1856. /* Good */
  1857. { 0x70, 0x02, RECOVERED_ERROR, 0, 0, 0x1c,
  1858. SKD_CHECK_STATUS_REPORT_GOOD },
  1859. /* Smart alerts */
  1860. { 0x70, 0x02, NO_SENSE, 0x0B, 0x00, 0x1E, /* warnings */
  1861. SKD_CHECK_STATUS_REPORT_SMART_ALERT },
  1862. { 0x70, 0x02, NO_SENSE, 0x5D, 0x00, 0x1E, /* thresholds */
  1863. SKD_CHECK_STATUS_REPORT_SMART_ALERT },
  1864. { 0x70, 0x02, RECOVERED_ERROR, 0x0B, 0x01, 0x1F, /* temperature over trigger */
  1865. SKD_CHECK_STATUS_REPORT_SMART_ALERT },
  1866. /* Retry (with limits) */
  1867. { 0x70, 0x02, 0x0B, 0, 0, 0x1C, /* This one is for DMA ERROR */
  1868. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1869. { 0x70, 0x02, 0x06, 0x0B, 0x00, 0x1E, /* warnings */
  1870. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1871. { 0x70, 0x02, 0x06, 0x5D, 0x00, 0x1E, /* thresholds */
  1872. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1873. { 0x70, 0x02, 0x06, 0x80, 0x30, 0x1F, /* backup power */
  1874. SKD_CHECK_STATUS_REQUEUE_REQUEST },
  1875. /* Busy (or about to be) */
  1876. { 0x70, 0x02, 0x06, 0x3f, 0x01, 0x1F, /* fw changed */
  1877. SKD_CHECK_STATUS_BUSY_IMMINENT },
  1878. };
  1879. /*
  1880. * Look up status and sense data to decide how to handle the error
  1881. * from the device.
1882. * mask says which fields must match, e.g. mask=0x18 means check
1883. * type and stat and ignore key, asc, ascq.
  1884. */
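/*
 * For example, the first table entry above has mask 0x1c, so only type,
 * stat and key are compared: any RECOVERED_ERROR check condition with
 * sense format 0x70 is reported as good regardless of asc/ascq.
 */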
  1885. static enum skd_check_status_action
  1886. skd_check_status(struct skd_device *skdev,
  1887. u8 cmp_status, volatile struct fit_comp_error_info *skerr)
  1888. {
  1889. int i, n;
  1890. pr_err("(%s): key/asc/ascq/fruc %02x/%02x/%02x/%02x\n",
  1891. skd_name(skdev), skerr->key, skerr->code, skerr->qual,
  1892. skerr->fruc);
  1893. pr_debug("%s:%s:%d stat: t=%02x stat=%02x k=%02x c=%02x q=%02x fruc=%02x\n",
  1894. skdev->name, __func__, __LINE__, skerr->type, cmp_status,
  1895. skerr->key, skerr->code, skerr->qual, skerr->fruc);
  1896. /* Does the info match an entry in the good category? */
  1897. n = sizeof(skd_chkstat_table) / sizeof(skd_chkstat_table[0]);
  1898. for (i = 0; i < n; i++) {
  1899. struct sns_info *sns = &skd_chkstat_table[i];
  1900. if (sns->mask & 0x10)
  1901. if (skerr->type != sns->type)
  1902. continue;
  1903. if (sns->mask & 0x08)
  1904. if (cmp_status != sns->stat)
  1905. continue;
  1906. if (sns->mask & 0x04)
  1907. if (skerr->key != sns->key)
  1908. continue;
  1909. if (sns->mask & 0x02)
  1910. if (skerr->code != sns->asc)
  1911. continue;
  1912. if (sns->mask & 0x01)
  1913. if (skerr->qual != sns->ascq)
  1914. continue;
  1915. if (sns->action == SKD_CHECK_STATUS_REPORT_SMART_ALERT) {
  1916. pr_err("(%s): SMART Alert: sense key/asc/ascq "
  1917. "%02x/%02x/%02x\n",
  1918. skd_name(skdev), skerr->key,
  1919. skerr->code, skerr->qual);
  1920. }
  1921. return sns->action;
  1922. }
  1923. /* No other match, so nonzero status means error,
  1924. * zero status means good
  1925. */
  1926. if (cmp_status) {
  1927. pr_debug("%s:%s:%d status check: error\n",
  1928. skdev->name, __func__, __LINE__);
  1929. return SKD_CHECK_STATUS_REPORT_ERROR;
  1930. }
  1931. pr_debug("%s:%s:%d status check good default\n",
  1932. skdev->name, __func__, __LINE__);
  1933. return SKD_CHECK_STATUS_REPORT_GOOD;
  1934. }
  1935. static void skd_resolve_req_exception(struct skd_device *skdev,
  1936. struct skd_request_context *skreq)
  1937. {
  1938. u8 cmp_status = skreq->completion.status;
  1939. switch (skd_check_status(skdev, cmp_status, &skreq->err_info)) {
  1940. case SKD_CHECK_STATUS_REPORT_GOOD:
  1941. case SKD_CHECK_STATUS_REPORT_SMART_ALERT:
  1942. skd_end_request(skdev, skreq, BLK_STS_OK);
  1943. break;
  1944. case SKD_CHECK_STATUS_BUSY_IMMINENT:
  1945. skd_log_skreq(skdev, skreq, "retry(busy)");
  1946. blk_requeue_request(skdev->queue, skreq->req);
  1947. pr_info("(%s) drive BUSY imminent\n", skd_name(skdev));
  1948. skdev->state = SKD_DRVR_STATE_BUSY_IMMINENT;
  1949. skdev->timer_countdown = SKD_TIMER_MINUTES(20);
  1950. skd_quiesce_dev(skdev);
  1951. break;
  1952. case SKD_CHECK_STATUS_REQUEUE_REQUEST:
  1953. if ((unsigned long) ++skreq->req->special < SKD_MAX_RETRIES) {
  1954. skd_log_skreq(skdev, skreq, "retry");
  1955. blk_requeue_request(skdev->queue, skreq->req);
  1956. break;
  1957. }
  1958. /* fall through to report error */
  1959. case SKD_CHECK_STATUS_REPORT_ERROR:
  1960. default:
  1961. skd_end_request(skdev, skreq, BLK_STS_IOERR);
  1962. break;
  1963. }
  1964. }
  1965. /* assume spinlock is already held */
  1966. static void skd_release_skreq(struct skd_device *skdev,
  1967. struct skd_request_context *skreq)
  1968. {
  1969. u32 msg_slot;
  1970. struct skd_fitmsg_context *skmsg;
  1971. u32 timo_slot;
  1972. /*
  1973. * Reclaim the FIT msg buffer if this is
  1974. * the first of the requests it carried to
  1975. * be completed. The FIT msg buffer used to
  1976. * send this request cannot be reused until
  1977. * we are sure the s1120 card has copied
  1978. * it to its memory. The FIT msg might have
  1979. * contained several requests. As soon as
  1980. * any of them are completed we know that
  1981. * the entire FIT msg was transferred.
  1982. * Only the first completed request will
  1983. * match the FIT msg buffer id. The FIT
  1984. * msg buffer id is immediately updated.
  1985. * When subsequent requests complete the FIT
  1986. * msg buffer id won't match, so we know
  1987. * quite cheaply that it is already done.
  1988. */
  1989. msg_slot = skreq->fitmsg_id & SKD_ID_SLOT_MASK;
  1990. SKD_ASSERT(msg_slot < skdev->num_fitmsg_context);
  1991. skmsg = &skdev->skmsg_table[msg_slot];
  1992. if (skmsg->id == skreq->fitmsg_id) {
  1993. SKD_ASSERT(skmsg->state == SKD_MSG_STATE_BUSY);
  1994. SKD_ASSERT(skmsg->outstanding > 0);
  1995. skmsg->outstanding--;
  1996. if (skmsg->outstanding == 0) {
  1997. skmsg->state = SKD_MSG_STATE_IDLE;
  1998. skmsg->id += SKD_ID_INCR;
  1999. skmsg->next = skdev->skmsg_free_list;
  2000. skdev->skmsg_free_list = skmsg;
  2001. }
  2002. }
  2003. /*
  2004. * Decrease the number of active requests.
  2005. * Also decrements the count in the timeout slot.
  2006. */
  2007. SKD_ASSERT(skdev->in_flight > 0);
  2008. skdev->in_flight -= 1;
  2009. timo_slot = skreq->timeout_stamp & SKD_TIMEOUT_SLOT_MASK;
  2010. SKD_ASSERT(skdev->timeout_slot[timo_slot] > 0);
  2011. skdev->timeout_slot[timo_slot] -= 1;
  2012. /*
  2013. * Reset backpointer
  2014. */
  2015. skreq->req = NULL;
  2016. /*
  2017. * Reclaim the skd_request_context
  2018. */
  2019. skreq->state = SKD_REQ_STATE_IDLE;
  2020. skreq->id += SKD_ID_INCR;
  2021. skreq->next = skdev->skreq_free_list;
  2022. skdev->skreq_free_list = skreq;
  2023. }
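/*
 * The driver advertises its own INQUIRY EVPD page (page code 0xDA) so
 * that management tools can read PCIe link information and the driver
 * version through the normal SCSI INQUIRY path.  When an INQUIRY
 * completes, skd_do_driver_inq() either splices page 0xDA into the
 * "supported pages" list (page 0x00) returned by the device, or, if
 * page 0xDA itself was requested, replaces the device's response with
 * a struct driver_inquiry_data built by skd_do_inq_page_da().
 */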
  2024. #define DRIVER_INQ_EVPD_PAGE_CODE 0xDA
  2025. static void skd_do_inq_page_00(struct skd_device *skdev,
  2026. volatile struct fit_completion_entry_v1 *skcomp,
  2027. volatile struct fit_comp_error_info *skerr,
  2028. uint8_t *cdb, uint8_t *buf)
  2029. {
  2030. uint16_t insert_pt, max_bytes, drive_pages, drive_bytes, new_size;
  2031. /* Caller requested "supported pages". The driver needs to insert
  2032. * its page.
  2033. */
  2034. pr_debug("%s:%s:%d skd_do_driver_inquiry: modify supported pages.\n",
  2035. skdev->name, __func__, __LINE__);
  2036. /* If the device rejected the request because the CDB was
  2037. * improperly formed, then just leave.
  2038. */
  2039. if (skcomp->status == SAM_STAT_CHECK_CONDITION &&
  2040. skerr->key == ILLEGAL_REQUEST && skerr->code == 0x24)
  2041. return;
  2042. /* Get the amount of space the caller allocated */
  2043. max_bytes = (cdb[3] << 8) | cdb[4];
  2044. /* Get the number of pages actually returned by the device */
  2045. drive_pages = (buf[2] << 8) | buf[3];
  2046. drive_bytes = drive_pages + 4;
  2047. new_size = drive_pages + 1;
  2048. /* Supported pages must be in numerical order, so find where
  2049. * the driver page needs to be inserted into the list of
  2050. * pages returned by the device.
  2051. */
  2052. for (insert_pt = 4; insert_pt < drive_bytes; insert_pt++) {
  2053. if (buf[insert_pt] == DRIVER_INQ_EVPD_PAGE_CODE)
2054. return; /* Device is already using this page code; abort. */
  2055. else if (buf[insert_pt] > DRIVER_INQ_EVPD_PAGE_CODE)
  2056. break;
  2057. }
  2058. if (insert_pt < max_bytes) {
  2059. uint16_t u;
  2060. /* Shift everything up one byte to make room. */
  2061. for (u = new_size + 3; u > insert_pt; u--)
  2062. buf[u] = buf[u - 1];
  2063. buf[insert_pt] = DRIVER_INQ_EVPD_PAGE_CODE;
2064. /* Increment num_returned_bytes by 1, keeping it in SCSI (big-endian) byte order. */
  2065. skcomp->num_returned_bytes =
  2066. be32_to_cpu(skcomp->num_returned_bytes) + 1;
  2067. skcomp->num_returned_bytes =
  2068. be32_to_cpu(skcomp->num_returned_bytes);
  2069. }
  2070. /* update page length field to reflect the driver's page too */
  2071. buf[2] = (uint8_t)((new_size >> 8) & 0xFF);
  2072. buf[3] = (uint8_t)((new_size >> 0) & 0xFF);
  2073. }
  2074. static void skd_get_link_info(struct pci_dev *pdev, u8 *speed, u8 *width)
  2075. {
  2076. int pcie_reg;
  2077. u16 pci_bus_speed;
  2078. u8 pci_lanes;
  2079. pcie_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
  2080. if (pcie_reg) {
  2081. u16 linksta;
  2082. pci_read_config_word(pdev, pcie_reg + PCI_EXP_LNKSTA, &linksta);
  2083. pci_bus_speed = linksta & 0xF;
  2084. pci_lanes = (linksta & 0x3F0) >> 4;
  2085. } else {
  2086. *speed = STEC_LINK_UNKNOWN;
  2087. *width = 0xFF;
  2088. return;
  2089. }
  2090. switch (pci_bus_speed) {
  2091. case 1:
  2092. *speed = STEC_LINK_2_5GTS;
  2093. break;
  2094. case 2:
  2095. *speed = STEC_LINK_5GTS;
  2096. break;
  2097. case 3:
  2098. *speed = STEC_LINK_8GTS;
  2099. break;
  2100. default:
  2101. *speed = STEC_LINK_UNKNOWN;
  2102. break;
  2103. }
  2104. if (pci_lanes <= 0x20)
  2105. *width = pci_lanes;
  2106. else
  2107. *width = 0xFF;
  2108. }
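/*
 * skd_get_link_info() above decodes the PCIe Link Status register:
 * bits 3:0 give the link speed (1 = 2.5 GT/s, 2 = 5 GT/s, 3 = 8 GT/s)
 * and bits 9:4 the negotiated lane count.  Anything unrecognized is
 * reported as STEC_LINK_UNKNOWN with a width of 0xFF.
 */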
  2109. static void skd_do_inq_page_da(struct skd_device *skdev,
  2110. volatile struct fit_completion_entry_v1 *skcomp,
  2111. volatile struct fit_comp_error_info *skerr,
  2112. uint8_t *cdb, uint8_t *buf)
  2113. {
  2114. struct pci_dev *pdev = skdev->pdev;
  2115. unsigned max_bytes;
  2116. struct driver_inquiry_data inq;
  2117. u16 val;
  2118. pr_debug("%s:%s:%d skd_do_driver_inquiry: return driver page\n",
  2119. skdev->name, __func__, __LINE__);
  2120. memset(&inq, 0, sizeof(inq));
  2121. inq.page_code = DRIVER_INQ_EVPD_PAGE_CODE;
  2122. skd_get_link_info(pdev, &inq.pcie_link_speed, &inq.pcie_link_lanes);
  2123. inq.pcie_bus_number = cpu_to_be16(pdev->bus->number);
  2124. inq.pcie_device_number = PCI_SLOT(pdev->devfn);
  2125. inq.pcie_function_number = PCI_FUNC(pdev->devfn);
  2126. pci_read_config_word(pdev, PCI_VENDOR_ID, &val);
  2127. inq.pcie_vendor_id = cpu_to_be16(val);
  2128. pci_read_config_word(pdev, PCI_DEVICE_ID, &val);
  2129. inq.pcie_device_id = cpu_to_be16(val);
  2130. pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &val);
  2131. inq.pcie_subsystem_vendor_id = cpu_to_be16(val);
  2132. pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &val);
  2133. inq.pcie_subsystem_device_id = cpu_to_be16(val);
2134. /* Driver version, fixed length, padded with spaces on the right */
  2135. inq.driver_version_length = sizeof(inq.driver_version);
  2136. memset(&inq.driver_version, ' ', sizeof(inq.driver_version));
  2137. memcpy(inq.driver_version, DRV_VER_COMPL,
  2138. min(sizeof(inq.driver_version), strlen(DRV_VER_COMPL)));
  2139. inq.page_length = cpu_to_be16((sizeof(inq) - 4));
  2140. /* Clear the error set by the device */
  2141. skcomp->status = SAM_STAT_GOOD;
  2142. memset((void *)skerr, 0, sizeof(*skerr));
  2143. /* copy response into output buffer */
  2144. max_bytes = (cdb[3] << 8) | cdb[4];
  2145. memcpy(buf, &inq, min_t(unsigned, max_bytes, sizeof(inq)));
  2146. skcomp->num_returned_bytes =
  2147. be32_to_cpu(min_t(uint16_t, max_bytes, sizeof(inq)));
  2148. }
  2149. static void skd_do_driver_inq(struct skd_device *skdev,
  2150. volatile struct fit_completion_entry_v1 *skcomp,
  2151. volatile struct fit_comp_error_info *skerr,
  2152. uint8_t *cdb, uint8_t *buf)
  2153. {
  2154. if (!buf)
  2155. return;
  2156. else if (cdb[0] != INQUIRY)
  2157. return; /* Not an INQUIRY */
  2158. else if ((cdb[1] & 1) == 0)
  2159. return; /* EVPD not set */
  2160. else if (cdb[2] == 0)
  2161. /* Need to add driver's page to supported pages list */
  2162. skd_do_inq_page_00(skdev, skcomp, skerr, cdb, buf);
  2163. else if (cdb[2] == DRIVER_INQ_EVPD_PAGE_CODE)
  2164. /* Caller requested driver's page */
  2165. skd_do_inq_page_da(skdev, skcomp, skerr, cdb, buf);
  2166. }
  2167. static unsigned char *skd_sg_1st_page_ptr(struct scatterlist *sg)
  2168. {
  2169. if (!sg)
  2170. return NULL;
  2171. if (!sg_page(sg))
  2172. return NULL;
  2173. return sg_virt(sg);
  2174. }
  2175. static void skd_process_scsi_inq(struct skd_device *skdev,
  2176. volatile struct fit_completion_entry_v1
  2177. *skcomp,
  2178. volatile struct fit_comp_error_info *skerr,
  2179. struct skd_special_context *skspcl)
  2180. {
  2181. uint8_t *buf;
  2182. struct fit_msg_hdr *fmh = (struct fit_msg_hdr *)skspcl->msg_buf;
  2183. struct skd_scsi_request *scsi_req = (struct skd_scsi_request *)&fmh[1];
  2184. dma_sync_sg_for_cpu(skdev->class_dev, skspcl->req.sg, skspcl->req.n_sg,
  2185. skspcl->req.sg_data_dir);
  2186. buf = skd_sg_1st_page_ptr(skspcl->req.sg);
  2187. if (buf)
  2188. skd_do_driver_inq(skdev, skcomp, skerr, scsi_req->cdb, buf);
  2189. }
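/*
 * skd_isr_completion_posted() drains the completion queue.  An entry is
 * valid only while its cycle value matches skdev->skcomp_cycle; the
 * queue index and cycle wrap together.  The low-order bits of the
 * completion tag select a slot: slots below num_req_context are r/w
 * requests handled here, everything else is routed to
 * skd_complete_other().  A non-zero limit caps how many entries are
 * processed per call and makes the routine return 1 so the caller can
 * defer the remainder to the completion worker.
 */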
  2190. static int skd_isr_completion_posted(struct skd_device *skdev,
  2191. int limit, int *enqueued)
  2192. {
  2193. volatile struct fit_completion_entry_v1 *skcmp = NULL;
  2194. volatile struct fit_comp_error_info *skerr;
  2195. u16 req_id;
  2196. u32 req_slot;
  2197. struct skd_request_context *skreq;
  2198. u16 cmp_cntxt = 0;
  2199. u8 cmp_status = 0;
  2200. u8 cmp_cycle = 0;
  2201. u32 cmp_bytes = 0;
  2202. int rc = 0;
  2203. int processed = 0;
  2204. for (;; ) {
  2205. SKD_ASSERT(skdev->skcomp_ix < SKD_N_COMPLETION_ENTRY);
  2206. skcmp = &skdev->skcomp_table[skdev->skcomp_ix];
  2207. cmp_cycle = skcmp->cycle;
  2208. cmp_cntxt = skcmp->tag;
  2209. cmp_status = skcmp->status;
  2210. cmp_bytes = be32_to_cpu(skcmp->num_returned_bytes);
  2211. skerr = &skdev->skerr_table[skdev->skcomp_ix];
  2212. pr_debug("%s:%s:%d "
  2213. "cycle=%d ix=%d got cycle=%d cmdctxt=0x%x stat=%d "
  2214. "busy=%d rbytes=0x%x proto=%d\n",
  2215. skdev->name, __func__, __LINE__, skdev->skcomp_cycle,
  2216. skdev->skcomp_ix, cmp_cycle, cmp_cntxt, cmp_status,
  2217. skdev->in_flight, cmp_bytes, skdev->proto_ver);
  2218. if (cmp_cycle != skdev->skcomp_cycle) {
  2219. pr_debug("%s:%s:%d end of completions\n",
  2220. skdev->name, __func__, __LINE__);
  2221. break;
  2222. }
  2223. /*
  2224. * Update the completion queue head index and possibly
  2225. * the completion cycle count. 8-bit wrap-around.
  2226. */
  2227. skdev->skcomp_ix++;
  2228. if (skdev->skcomp_ix >= SKD_N_COMPLETION_ENTRY) {
  2229. skdev->skcomp_ix = 0;
  2230. skdev->skcomp_cycle++;
  2231. }
  2232. /*
  2233. * The command context is a unique 32-bit ID. The low order
  2234. * bits help locate the request. The request is usually a
  2235. * r/w request (see skd_start() above) or a special request.
  2236. */
  2237. req_id = cmp_cntxt;
  2238. req_slot = req_id & SKD_ID_SLOT_AND_TABLE_MASK;
  2239. /* Is this other than a r/w request? */
  2240. if (req_slot >= skdev->num_req_context) {
  2241. /*
  2242. * This is not a completion for a r/w request.
  2243. */
  2244. skd_complete_other(skdev, skcmp, skerr);
  2245. continue;
  2246. }
  2247. skreq = &skdev->skreq_table[req_slot];
  2248. /*
  2249. * Make sure the request ID for the slot matches.
  2250. */
  2251. if (skreq->id != req_id) {
  2252. pr_debug("%s:%s:%d mismatch comp_id=0x%x req_id=0x%x\n",
  2253. skdev->name, __func__, __LINE__,
  2254. req_id, skreq->id);
  2255. {
  2256. u16 new_id = cmp_cntxt;
  2257. pr_err("(%s): Completion mismatch "
  2258. "comp_id=0x%04x skreq=0x%04x new=0x%04x\n",
  2259. skd_name(skdev), req_id,
  2260. skreq->id, new_id);
  2261. continue;
  2262. }
  2263. }
  2264. SKD_ASSERT(skreq->state == SKD_REQ_STATE_BUSY);
  2265. if (skreq->state == SKD_REQ_STATE_ABORTED) {
  2266. pr_debug("%s:%s:%d reclaim req %p id=%04x\n",
  2267. skdev->name, __func__, __LINE__,
  2268. skreq, skreq->id);
  2269. /* a previously timed out command can
  2270. * now be cleaned up */
  2271. skd_release_skreq(skdev, skreq);
  2272. continue;
  2273. }
  2274. skreq->completion = *skcmp;
  2275. if (unlikely(cmp_status == SAM_STAT_CHECK_CONDITION)) {
  2276. skreq->err_info = *skerr;
  2277. skd_log_check_status(skdev, cmp_status, skerr->key,
  2278. skerr->code, skerr->qual,
  2279. skerr->fruc);
  2280. }
  2281. /* Release DMA resources for the request. */
  2282. if (skreq->n_sg > 0)
  2283. skd_postop_sg_list(skdev, skreq);
  2284. if (!skreq->req) {
  2285. pr_debug("%s:%s:%d NULL backptr skdreq %p, "
  2286. "req=0x%x req_id=0x%x\n",
  2287. skdev->name, __func__, __LINE__,
  2288. skreq, skreq->id, req_id);
  2289. } else {
  2290. /*
  2291. * Capture the outcome and post it back to the
  2292. * native request.
  2293. */
  2294. if (likely(cmp_status == SAM_STAT_GOOD))
  2295. skd_end_request(skdev, skreq, BLK_STS_OK);
  2296. else
  2297. skd_resolve_req_exception(skdev, skreq);
  2298. }
  2299. /*
  2300. * Release the skreq, its FIT msg (if one), timeout slot,
  2301. * and queue depth.
  2302. */
  2303. skd_release_skreq(skdev, skreq);
2304. /* skd_isr_comp_limit equal to zero means no limit */
  2305. if (limit) {
  2306. if (++processed >= limit) {
  2307. rc = 1;
  2308. break;
  2309. }
  2310. }
  2311. }
  2312. if ((skdev->state == SKD_DRVR_STATE_PAUSING)
  2313. && (skdev->in_flight) == 0) {
  2314. skdev->state = SKD_DRVR_STATE_PAUSED;
  2315. wake_up_interruptible(&skdev->waitq);
  2316. }
  2317. return rc;
  2318. }
  2319. static void skd_complete_other(struct skd_device *skdev,
  2320. volatile struct fit_completion_entry_v1 *skcomp,
  2321. volatile struct fit_comp_error_info *skerr)
  2322. {
  2323. u32 req_id = 0;
  2324. u32 req_table;
  2325. u32 req_slot;
  2326. struct skd_special_context *skspcl;
  2327. req_id = skcomp->tag;
  2328. req_table = req_id & SKD_ID_TABLE_MASK;
  2329. req_slot = req_id & SKD_ID_SLOT_MASK;
  2330. pr_debug("%s:%s:%d table=0x%x id=0x%x slot=%d\n",
  2331. skdev->name, __func__, __LINE__,
  2332. req_table, req_id, req_slot);
  2333. /*
  2334. * Based on the request id, determine how to dispatch this completion.
2335. * This switch/case finds the good cases and forwards the
  2336. * completion entry. Errors are reported below the switch.
  2337. */
  2338. switch (req_table) {
  2339. case SKD_ID_RW_REQUEST:
  2340. /*
2341. * The caller, skd_isr_completion_posted() above,
  2342. * handles r/w requests. The only way we get here
  2343. * is if the req_slot is out of bounds.
  2344. */
  2345. break;
  2346. case SKD_ID_SPECIAL_REQUEST:
  2347. /*
  2348. * Make sure the req_slot is in bounds and that the id
  2349. * matches.
  2350. */
  2351. if (req_slot < skdev->n_special) {
  2352. skspcl = &skdev->skspcl_table[req_slot];
  2353. if (skspcl->req.id == req_id &&
  2354. skspcl->req.state == SKD_REQ_STATE_BUSY) {
  2355. skd_complete_special(skdev,
  2356. skcomp, skerr, skspcl);
  2357. return;
  2358. }
  2359. }
  2360. break;
  2361. case SKD_ID_INTERNAL:
  2362. if (req_slot == 0) {
  2363. skspcl = &skdev->internal_skspcl;
  2364. if (skspcl->req.id == req_id &&
  2365. skspcl->req.state == SKD_REQ_STATE_BUSY) {
  2366. skd_complete_internal(skdev,
  2367. skcomp, skerr, skspcl);
  2368. return;
  2369. }
  2370. }
  2371. break;
  2372. case SKD_ID_FIT_MSG:
  2373. /*
2374. * These ids should never appear in a completion record.
  2375. */
  2376. break;
  2377. default:
  2378. /*
2379. * These ids should never appear anywhere.
  2380. */
  2381. break;
  2382. }
  2383. /*
  2384. * If we get here it is a bad or stale id.
  2385. */
  2386. }
  2387. static void skd_complete_special(struct skd_device *skdev,
  2388. volatile struct fit_completion_entry_v1
  2389. *skcomp,
  2390. volatile struct fit_comp_error_info *skerr,
  2391. struct skd_special_context *skspcl)
  2392. {
  2393. pr_debug("%s:%s:%d completing special request %p\n",
  2394. skdev->name, __func__, __LINE__, skspcl);
  2395. if (skspcl->orphaned) {
  2396. /* Discard orphaned request */
  2397. /* ?: Can this release directly or does it need
  2398. * to use a worker? */
  2399. pr_debug("%s:%s:%d release orphaned %p\n",
  2400. skdev->name, __func__, __LINE__, skspcl);
  2401. skd_release_special(skdev, skspcl);
  2402. return;
  2403. }
  2404. skd_process_scsi_inq(skdev, skcomp, skerr, skspcl);
  2405. skspcl->req.state = SKD_REQ_STATE_COMPLETED;
  2406. skspcl->req.completion = *skcomp;
  2407. skspcl->req.err_info = *skerr;
  2408. skd_log_check_status(skdev, skspcl->req.completion.status, skerr->key,
  2409. skerr->code, skerr->qual, skerr->fruc);
  2410. wake_up_interruptible(&skdev->waitq);
  2411. }
  2412. /* assume spinlock is already held */
  2413. static void skd_release_special(struct skd_device *skdev,
  2414. struct skd_special_context *skspcl)
  2415. {
  2416. int i, was_depleted;
  2417. for (i = 0; i < skspcl->req.n_sg; i++) {
  2418. struct page *page = sg_page(&skspcl->req.sg[i]);
  2419. __free_page(page);
  2420. }
  2421. was_depleted = (skdev->skspcl_free_list == NULL);
  2422. skspcl->req.state = SKD_REQ_STATE_IDLE;
  2423. skspcl->req.id += SKD_ID_INCR;
  2424. skspcl->req.next =
  2425. (struct skd_request_context *)skdev->skspcl_free_list;
  2426. skdev->skspcl_free_list = (struct skd_special_context *)skspcl;
  2427. if (was_depleted) {
  2428. pr_debug("%s:%s:%d skspcl was depleted\n",
  2429. skdev->name, __func__, __LINE__);
2430. /* Free list was depleted. There might be waiters. */
  2431. wake_up_interruptible(&skdev->waitq);
  2432. }
  2433. }
  2434. static void skd_reset_skcomp(struct skd_device *skdev)
  2435. {
  2436. u32 nbytes;
  2437. struct fit_completion_entry_v1 *skcomp;
  2438. nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
  2439. nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
  2440. memset(skdev->skcomp_table, 0, nbytes);
  2441. skdev->skcomp_ix = 0;
  2442. skdev->skcomp_cycle = 1;
  2443. }
  2444. /*
  2445. *****************************************************************************
  2446. * INTERRUPTS
  2447. *****************************************************************************
  2448. */
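/*
 * Interrupt handling is split in two: skd_isr() acknowledges the
 * hardware interrupt under skdev->lock and processes at most
 * skd_isr_comp_limit completions inline; anything beyond that is
 * deferred to skd_completion_worker(), which re-runs completion
 * processing with no limit and then restarts the request queue.
 */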
  2449. static void skd_completion_worker(struct work_struct *work)
  2450. {
  2451. struct skd_device *skdev =
  2452. container_of(work, struct skd_device, completion_worker);
  2453. unsigned long flags;
  2454. int flush_enqueued = 0;
  2455. spin_lock_irqsave(&skdev->lock, flags);
  2456. /*
  2457. * pass in limit=0, which means no limit..
  2458. * process everything in compq
  2459. */
  2460. skd_isr_completion_posted(skdev, 0, &flush_enqueued);
  2461. skd_request_fn(skdev->queue);
  2462. spin_unlock_irqrestore(&skdev->lock, flags);
  2463. }
  2464. static void skd_isr_msg_from_dev(struct skd_device *skdev);
  2465. static irqreturn_t
  2466. skd_isr(int irq, void *ptr)
  2467. {
  2468. struct skd_device *skdev;
  2469. u32 intstat;
  2470. u32 ack;
  2471. int rc = 0;
  2472. int deferred = 0;
  2473. int flush_enqueued = 0;
  2474. skdev = (struct skd_device *)ptr;
  2475. spin_lock(&skdev->lock);
  2476. for (;; ) {
  2477. intstat = SKD_READL(skdev, FIT_INT_STATUS_HOST);
  2478. ack = FIT_INT_DEF_MASK;
  2479. ack &= intstat;
  2480. pr_debug("%s:%s:%d intstat=0x%x ack=0x%x\n",
  2481. skdev->name, __func__, __LINE__, intstat, ack);
2482. /* As long as there is an interrupt pending on the device, keep
2483. * running the loop. When there is none, get out, but if we've never
2484. * done any processing, call the completion handler?
  2485. */
  2486. if (ack == 0) {
  2487. /* No interrupts on device, but run the completion
  2488. * processor anyway?
  2489. */
  2490. if (rc == 0)
  2491. if (likely (skdev->state
  2492. == SKD_DRVR_STATE_ONLINE))
  2493. deferred = 1;
  2494. break;
  2495. }
  2496. rc = IRQ_HANDLED;
  2497. SKD_WRITEL(skdev, ack, FIT_INT_STATUS_HOST);
  2498. if (likely((skdev->state != SKD_DRVR_STATE_LOAD) &&
  2499. (skdev->state != SKD_DRVR_STATE_STOPPING))) {
  2500. if (intstat & FIT_ISH_COMPLETION_POSTED) {
  2501. /*
  2502. * If we have already deferred completion
  2503. * processing, don't bother running it again
  2504. */
  2505. if (deferred == 0)
  2506. deferred =
  2507. skd_isr_completion_posted(skdev,
  2508. skd_isr_comp_limit, &flush_enqueued);
  2509. }
  2510. if (intstat & FIT_ISH_FW_STATE_CHANGE) {
  2511. skd_isr_fwstate(skdev);
  2512. if (skdev->state == SKD_DRVR_STATE_FAULT ||
  2513. skdev->state ==
  2514. SKD_DRVR_STATE_DISAPPEARED) {
  2515. spin_unlock(&skdev->lock);
  2516. return rc;
  2517. }
  2518. }
  2519. if (intstat & FIT_ISH_MSG_FROM_DEV)
  2520. skd_isr_msg_from_dev(skdev);
  2521. }
  2522. }
  2523. if (unlikely(flush_enqueued))
  2524. skd_request_fn(skdev->queue);
  2525. if (deferred)
  2526. schedule_work(&skdev->completion_worker);
  2527. else if (!flush_enqueued)
  2528. skd_request_fn(skdev->queue);
  2529. spin_unlock(&skdev->lock);
  2530. return rc;
  2531. }
  2532. static void skd_drive_fault(struct skd_device *skdev)
  2533. {
  2534. skdev->state = SKD_DRVR_STATE_FAULT;
  2535. pr_err("(%s): Drive FAULT\n", skd_name(skdev));
  2536. }
  2537. static void skd_drive_disappeared(struct skd_device *skdev)
  2538. {
  2539. skdev->state = SKD_DRVR_STATE_DISAPPEARED;
  2540. pr_err("(%s): Drive DISAPPEARED\n", skd_name(skdev));
  2541. }
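/*
 * Firmware state change handling: read the new drive state from FIT_STATUS
 * and advance the driver state machine to match (restart the init
 * handshake, quiesce, recover in-flight requests, fault, etc.).
 */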
  2542. static void skd_isr_fwstate(struct skd_device *skdev)
  2543. {
  2544. u32 sense;
  2545. u32 state;
  2546. u32 mtd;
  2547. int prev_driver_state = skdev->state;
  2548. sense = SKD_READL(skdev, FIT_STATUS);
  2549. state = sense & FIT_SR_DRIVE_STATE_MASK;
  2550. pr_err("(%s): s1120 state %s(%d)=>%s(%d)\n",
  2551. skd_name(skdev),
  2552. skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
  2553. skd_drive_state_to_str(state), state);
  2554. skdev->drive_state = state;
  2555. switch (skdev->drive_state) {
  2556. case FIT_SR_DRIVE_INIT:
  2557. if (skdev->state == SKD_DRVR_STATE_PROTOCOL_MISMATCH) {
  2558. skd_disable_interrupts(skdev);
  2559. break;
  2560. }
  2561. if (skdev->state == SKD_DRVR_STATE_RESTARTING)
  2562. skd_recover_requests(skdev, 0);
  2563. if (skdev->state == SKD_DRVR_STATE_WAIT_BOOT) {
  2564. skdev->timer_countdown = SKD_STARTING_TIMO;
  2565. skdev->state = SKD_DRVR_STATE_STARTING;
  2566. skd_soft_reset(skdev);
  2567. break;
  2568. }
  2569. mtd = FIT_MXD_CONS(FIT_MTD_FITFW_INIT, 0, 0);
  2570. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2571. skdev->last_mtd = mtd;
  2572. break;
  2573. case FIT_SR_DRIVE_ONLINE:
  2574. skdev->cur_max_queue_depth = skd_max_queue_depth;
  2575. if (skdev->cur_max_queue_depth > skdev->dev_max_queue_depth)
  2576. skdev->cur_max_queue_depth = skdev->dev_max_queue_depth;
  2577. skdev->queue_low_water_mark =
  2578. skdev->cur_max_queue_depth * 2 / 3 + 1;
  2579. if (skdev->queue_low_water_mark < 1)
  2580. skdev->queue_low_water_mark = 1;
  2581. pr_info(
  2582. "(%s): Queue depth limit=%d dev=%d lowat=%d\n",
  2583. skd_name(skdev),
  2584. skdev->cur_max_queue_depth,
  2585. skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
  2586. skd_refresh_device_data(skdev);
  2587. break;
  2588. case FIT_SR_DRIVE_BUSY:
  2589. skdev->state = SKD_DRVR_STATE_BUSY;
  2590. skdev->timer_countdown = SKD_BUSY_TIMO;
  2591. skd_quiesce_dev(skdev);
  2592. break;
  2593. case FIT_SR_DRIVE_BUSY_SANITIZE:
2594. /* set timer for 3 seconds; we'll abort any unfinished
2595. * commands after it expires
  2596. */
  2597. skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
  2598. skdev->timer_countdown = SKD_TIMER_SECONDS(3);
  2599. blk_start_queue(skdev->queue);
  2600. break;
  2601. case FIT_SR_DRIVE_BUSY_ERASE:
  2602. skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
  2603. skdev->timer_countdown = SKD_BUSY_TIMO;
  2604. break;
  2605. case FIT_SR_DRIVE_OFFLINE:
  2606. skdev->state = SKD_DRVR_STATE_IDLE;
  2607. break;
  2608. case FIT_SR_DRIVE_SOFT_RESET:
  2609. switch (skdev->state) {
  2610. case SKD_DRVR_STATE_STARTING:
  2611. case SKD_DRVR_STATE_RESTARTING:
  2612. /* Expected by a caller of skd_soft_reset() */
  2613. break;
  2614. default:
  2615. skdev->state = SKD_DRVR_STATE_RESTARTING;
  2616. break;
  2617. }
  2618. break;
  2619. case FIT_SR_DRIVE_FW_BOOTING:
  2620. pr_debug("%s:%s:%d ISR FIT_SR_DRIVE_FW_BOOTING %s\n",
  2621. skdev->name, __func__, __LINE__, skdev->name);
  2622. skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
  2623. skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
  2624. break;
  2625. case FIT_SR_DRIVE_DEGRADED:
  2626. case FIT_SR_PCIE_LINK_DOWN:
  2627. case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
  2628. break;
  2629. case FIT_SR_DRIVE_FAULT:
  2630. skd_drive_fault(skdev);
  2631. skd_recover_requests(skdev, 0);
  2632. blk_start_queue(skdev->queue);
  2633. break;
  2634. /* PCIe bus returned all Fs? */
  2635. case 0xFF:
  2636. pr_info("(%s): state=0x%x sense=0x%x\n",
  2637. skd_name(skdev), state, sense);
  2638. skd_drive_disappeared(skdev);
  2639. skd_recover_requests(skdev, 0);
  2640. blk_start_queue(skdev->queue);
  2641. break;
  2642. default:
  2643. /*
2644. * Unknown FW State. Wait for a state we recognize.
  2645. */
  2646. break;
  2647. }
  2648. pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
  2649. skd_name(skdev),
  2650. skd_skdev_state_to_str(prev_driver_state), prev_driver_state,
  2651. skd_skdev_state_to_str(skdev->state), skdev->state);
  2652. }
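/*
 * Recover everything that was in flight: release DMA mappings for busy
 * requests and either requeue them (up to SKD_MAX_RETRIES) or fail them
 * with an I/O error, salvage busy FIT messages, reclaim or abort special
 * requests, rebuild the free lists and clear the timeout accounting.
 */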
  2653. static void skd_recover_requests(struct skd_device *skdev, int requeue)
  2654. {
  2655. int i;
  2656. for (i = 0; i < skdev->num_req_context; i++) {
  2657. struct skd_request_context *skreq = &skdev->skreq_table[i];
  2658. if (skreq->state == SKD_REQ_STATE_BUSY) {
  2659. skd_log_skreq(skdev, skreq, "recover");
  2660. SKD_ASSERT((skreq->id & SKD_ID_INCR) != 0);
  2661. SKD_ASSERT(skreq->req != NULL);
  2662. /* Release DMA resources for the request. */
  2663. if (skreq->n_sg > 0)
  2664. skd_postop_sg_list(skdev, skreq);
  2665. if (requeue &&
  2666. (unsigned long) ++skreq->req->special <
  2667. SKD_MAX_RETRIES)
  2668. blk_requeue_request(skdev->queue, skreq->req);
  2669. else
  2670. skd_end_request(skdev, skreq, BLK_STS_IOERR);
  2671. skreq->req = NULL;
  2672. skreq->state = SKD_REQ_STATE_IDLE;
  2673. skreq->id += SKD_ID_INCR;
  2674. }
  2675. if (i > 0)
  2676. skreq[-1].next = skreq;
  2677. skreq->next = NULL;
  2678. }
  2679. skdev->skreq_free_list = skdev->skreq_table;
  2680. for (i = 0; i < skdev->num_fitmsg_context; i++) {
  2681. struct skd_fitmsg_context *skmsg = &skdev->skmsg_table[i];
  2682. if (skmsg->state == SKD_MSG_STATE_BUSY) {
  2683. skd_log_skmsg(skdev, skmsg, "salvaged");
  2684. SKD_ASSERT((skmsg->id & SKD_ID_INCR) != 0);
  2685. skmsg->state = SKD_MSG_STATE_IDLE;
  2686. skmsg->id += SKD_ID_INCR;
  2687. }
  2688. if (i > 0)
  2689. skmsg[-1].next = skmsg;
  2690. skmsg->next = NULL;
  2691. }
  2692. skdev->skmsg_free_list = skdev->skmsg_table;
  2693. for (i = 0; i < skdev->n_special; i++) {
  2694. struct skd_special_context *skspcl = &skdev->skspcl_table[i];
  2695. /* If orphaned, reclaim it because it has already been reported
  2696. * to the process as an error (it was just waiting for
2697. * a completion that didn't come, and now it will never come).
  2698. * If busy, change to a state that will cause it to error
  2699. * out in the wait routine and let it do the normal
2700. * reporting and reclaiming.
  2701. */
  2702. if (skspcl->req.state == SKD_REQ_STATE_BUSY) {
  2703. if (skspcl->orphaned) {
  2704. pr_debug("%s:%s:%d orphaned %p\n",
  2705. skdev->name, __func__, __LINE__,
  2706. skspcl);
  2707. skd_release_special(skdev, skspcl);
  2708. } else {
  2709. pr_debug("%s:%s:%d not orphaned %p\n",
  2710. skdev->name, __func__, __LINE__,
  2711. skspcl);
  2712. skspcl->req.state = SKD_REQ_STATE_ABORTED;
  2713. }
  2714. }
  2715. }
  2716. skdev->skspcl_free_list = skdev->skspcl_table;
  2717. for (i = 0; i < SKD_N_TIMEOUT_SLOT; i++)
  2718. skdev->timeout_slot[i] = 0;
  2719. skdev->in_flight = 0;
  2720. }
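/*
 * Handle a message from the device. These messages implement the bring-up
 * handshake: FITFW_INIT -> GET_CMDQ_DEPTH -> SET_COMPQ_DEPTH ->
 * SET_COMPQ_ADDR -> CMD_LOG_HOST_ID -> time-stamp exchange -> ARM_QUEUE.
 * Acks that do not match the last message we sent are ignored.
 */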
  2721. static void skd_isr_msg_from_dev(struct skd_device *skdev)
  2722. {
  2723. u32 mfd;
  2724. u32 mtd;
  2725. u32 data;
  2726. mfd = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
  2727. pr_debug("%s:%s:%d mfd=0x%x last_mtd=0x%x\n",
  2728. skdev->name, __func__, __LINE__, mfd, skdev->last_mtd);
  2729. /* ignore any mtd that is an ack for something we didn't send */
  2730. if (FIT_MXD_TYPE(mfd) != FIT_MXD_TYPE(skdev->last_mtd))
  2731. return;
  2732. switch (FIT_MXD_TYPE(mfd)) {
  2733. case FIT_MTD_FITFW_INIT:
  2734. skdev->proto_ver = FIT_PROTOCOL_MAJOR_VER(mfd);
  2735. if (skdev->proto_ver != FIT_PROTOCOL_VERSION_1) {
  2736. pr_err("(%s): protocol mismatch\n",
  2737. skdev->name);
  2738. pr_err("(%s): got=%d support=%d\n",
  2739. skdev->name, skdev->proto_ver,
  2740. FIT_PROTOCOL_VERSION_1);
  2741. pr_err("(%s): please upgrade driver\n",
  2742. skdev->name);
  2743. skdev->state = SKD_DRVR_STATE_PROTOCOL_MISMATCH;
  2744. skd_soft_reset(skdev);
  2745. break;
  2746. }
  2747. mtd = FIT_MXD_CONS(FIT_MTD_GET_CMDQ_DEPTH, 0, 0);
  2748. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2749. skdev->last_mtd = mtd;
  2750. break;
  2751. case FIT_MTD_GET_CMDQ_DEPTH:
  2752. skdev->dev_max_queue_depth = FIT_MXD_DATA(mfd);
  2753. mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_DEPTH, 0,
  2754. SKD_N_COMPLETION_ENTRY);
  2755. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2756. skdev->last_mtd = mtd;
  2757. break;
  2758. case FIT_MTD_SET_COMPQ_DEPTH:
  2759. SKD_WRITEQ(skdev, skdev->cq_dma_address, FIT_MSG_TO_DEVICE_ARG);
  2760. mtd = FIT_MXD_CONS(FIT_MTD_SET_COMPQ_ADDR, 0, 0);
  2761. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2762. skdev->last_mtd = mtd;
  2763. break;
  2764. case FIT_MTD_SET_COMPQ_ADDR:
  2765. skd_reset_skcomp(skdev);
  2766. mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_HOST_ID, 0, skdev->devno);
  2767. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2768. skdev->last_mtd = mtd;
  2769. break;
  2770. case FIT_MTD_CMD_LOG_HOST_ID:
  2771. skdev->connect_time_stamp = get_seconds();
  2772. data = skdev->connect_time_stamp & 0xFFFF;
  2773. mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_LO, 0, data);
  2774. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2775. skdev->last_mtd = mtd;
  2776. break;
  2777. case FIT_MTD_CMD_LOG_TIME_STAMP_LO:
  2778. skdev->drive_jiffies = FIT_MXD_DATA(mfd);
  2779. data = (skdev->connect_time_stamp >> 16) & 0xFFFF;
  2780. mtd = FIT_MXD_CONS(FIT_MTD_CMD_LOG_TIME_STAMP_HI, 0, data);
  2781. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2782. skdev->last_mtd = mtd;
  2783. break;
  2784. case FIT_MTD_CMD_LOG_TIME_STAMP_HI:
  2785. skdev->drive_jiffies |= (FIT_MXD_DATA(mfd) << 16);
  2786. mtd = FIT_MXD_CONS(FIT_MTD_ARM_QUEUE, 0, 0);
  2787. SKD_WRITEL(skdev, mtd, FIT_MSG_TO_DEVICE);
  2788. skdev->last_mtd = mtd;
  2789. pr_err("(%s): Time sync driver=0x%x device=0x%x\n",
  2790. skd_name(skdev),
  2791. skdev->connect_time_stamp, skdev->drive_jiffies);
  2792. break;
  2793. case FIT_MTD_ARM_QUEUE:
  2794. skdev->last_mtd = 0;
  2795. /*
  2796. * State should be, or soon will be, FIT_SR_DRIVE_ONLINE.
  2797. */
  2798. break;
  2799. default:
  2800. break;
  2801. }
  2802. }
  2803. static void skd_disable_interrupts(struct skd_device *skdev)
  2804. {
  2805. u32 sense;
  2806. sense = SKD_READL(skdev, FIT_CONTROL);
  2807. sense &= ~FIT_CR_ENABLE_INTERRUPTS;
  2808. SKD_WRITEL(skdev, sense, FIT_CONTROL);
  2809. pr_debug("%s:%s:%d sense 0x%x\n",
  2810. skdev->name, __func__, __LINE__, sense);
2811. /* Note that all 1s are written. A 1-bit means
2812. * disable, a 0-bit means enable.
  2813. */
  2814. SKD_WRITEL(skdev, ~0, FIT_INT_MASK_HOST);
  2815. }
  2816. static void skd_enable_interrupts(struct skd_device *skdev)
  2817. {
  2818. u32 val;
  2819. /* unmask interrupts first */
  2820. val = FIT_ISH_FW_STATE_CHANGE +
  2821. FIT_ISH_COMPLETION_POSTED + FIT_ISH_MSG_FROM_DEV;
2822. /* Note that the complement of the mask is written. A 1-bit means
2823. * disable, a 0-bit means enable. */
  2824. SKD_WRITEL(skdev, ~val, FIT_INT_MASK_HOST);
  2825. pr_debug("%s:%s:%d interrupt mask=0x%x\n",
  2826. skdev->name, __func__, __LINE__, ~val);
  2827. val = SKD_READL(skdev, FIT_CONTROL);
  2828. val |= FIT_CR_ENABLE_INTERRUPTS;
  2829. pr_debug("%s:%s:%d control=0x%x\n",
  2830. skdev->name, __func__, __LINE__, val);
  2831. SKD_WRITEL(skdev, val, FIT_CONTROL);
  2832. }
  2833. /*
  2834. *****************************************************************************
  2835. * START, STOP, RESTART, QUIESCE, UNQUIESCE
  2836. *****************************************************************************
  2837. */
  2838. static void skd_soft_reset(struct skd_device *skdev)
  2839. {
  2840. u32 val;
  2841. val = SKD_READL(skdev, FIT_CONTROL);
  2842. val |= (FIT_CR_SOFT_RESET);
  2843. pr_debug("%s:%s:%d control=0x%x\n",
  2844. skdev->name, __func__, __LINE__, val);
  2845. SKD_WRITEL(skdev, val, FIT_CONTROL);
  2846. }
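/*
 * Bring the device up: ack any stale interrupts, enable interrupts, then
 * act on the drive state read from FIT_STATUS (soft reset when INIT or
 * ONLINE, wait for firmware boot, or fault/disappear on bad states).
 */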
  2847. static void skd_start_device(struct skd_device *skdev)
  2848. {
  2849. unsigned long flags;
  2850. u32 sense;
  2851. u32 state;
  2852. spin_lock_irqsave(&skdev->lock, flags);
  2853. /* ack all ghost interrupts */
  2854. SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
  2855. sense = SKD_READL(skdev, FIT_STATUS);
  2856. pr_debug("%s:%s:%d initial status=0x%x\n",
  2857. skdev->name, __func__, __LINE__, sense);
  2858. state = sense & FIT_SR_DRIVE_STATE_MASK;
  2859. skdev->drive_state = state;
  2860. skdev->last_mtd = 0;
  2861. skdev->state = SKD_DRVR_STATE_STARTING;
  2862. skdev->timer_countdown = SKD_STARTING_TIMO;
  2863. skd_enable_interrupts(skdev);
  2864. switch (skdev->drive_state) {
  2865. case FIT_SR_DRIVE_OFFLINE:
  2866. pr_err("(%s): Drive offline...\n", skd_name(skdev));
  2867. break;
  2868. case FIT_SR_DRIVE_FW_BOOTING:
  2869. pr_debug("%s:%s:%d FIT_SR_DRIVE_FW_BOOTING %s\n",
  2870. skdev->name, __func__, __LINE__, skdev->name);
  2871. skdev->state = SKD_DRVR_STATE_WAIT_BOOT;
  2872. skdev->timer_countdown = SKD_WAIT_BOOT_TIMO;
  2873. break;
  2874. case FIT_SR_DRIVE_BUSY_SANITIZE:
  2875. pr_info("(%s): Start: BUSY_SANITIZE\n",
  2876. skd_name(skdev));
  2877. skdev->state = SKD_DRVR_STATE_BUSY_SANITIZE;
  2878. skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
  2879. break;
  2880. case FIT_SR_DRIVE_BUSY_ERASE:
  2881. pr_info("(%s): Start: BUSY_ERASE\n", skd_name(skdev));
  2882. skdev->state = SKD_DRVR_STATE_BUSY_ERASE;
  2883. skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
  2884. break;
  2885. case FIT_SR_DRIVE_INIT:
  2886. case FIT_SR_DRIVE_ONLINE:
  2887. skd_soft_reset(skdev);
  2888. break;
  2889. case FIT_SR_DRIVE_BUSY:
  2890. pr_err("(%s): Drive Busy...\n", skd_name(skdev));
  2891. skdev->state = SKD_DRVR_STATE_BUSY;
  2892. skdev->timer_countdown = SKD_STARTED_BUSY_TIMO;
  2893. break;
  2894. case FIT_SR_DRIVE_SOFT_RESET:
  2895. pr_err("(%s) drive soft reset in prog\n",
  2896. skd_name(skdev));
  2897. break;
  2898. case FIT_SR_DRIVE_FAULT:
2899. /* Fault state is bad: a soft reset won't clear it.
2900. * A hard reset might, but it's unclear whether the device supports one.
  2901. * For now, just fault so the system doesn't hang.
  2902. */
  2903. skd_drive_fault(skdev);
2904. /* start the queue so we can respond with errors to requests */
  2905. pr_debug("%s:%s:%d starting %s queue\n",
  2906. skdev->name, __func__, __LINE__, skdev->name);
  2907. blk_start_queue(skdev->queue);
  2908. skdev->gendisk_on = -1;
  2909. wake_up_interruptible(&skdev->waitq);
  2910. break;
  2911. case 0xFF:
  2912. /* Most likely the device isn't there or isn't responding
  2913. * to the BAR1 addresses. */
  2914. skd_drive_disappeared(skdev);
2915. /* start the queue so we can respond with errors to requests */
  2916. pr_debug("%s:%s:%d starting %s queue to error-out reqs\n",
  2917. skdev->name, __func__, __LINE__, skdev->name);
  2918. blk_start_queue(skdev->queue);
  2919. skdev->gendisk_on = -1;
  2920. wake_up_interruptible(&skdev->waitq);
  2921. break;
  2922. default:
  2923. pr_err("(%s) Start: unknown state %x\n",
  2924. skd_name(skdev), skdev->drive_state);
  2925. break;
  2926. }
  2927. state = SKD_READL(skdev, FIT_CONTROL);
  2928. pr_debug("%s:%s:%d FIT Control Status=0x%x\n",
  2929. skdev->name, __func__, __LINE__, state);
  2930. state = SKD_READL(skdev, FIT_INT_STATUS_HOST);
  2931. pr_debug("%s:%s:%d Intr Status=0x%x\n",
  2932. skdev->name, __func__, __LINE__, state);
  2933. state = SKD_READL(skdev, FIT_INT_MASK_HOST);
  2934. pr_debug("%s:%s:%d Intr Mask=0x%x\n",
  2935. skdev->name, __func__, __LINE__, state);
  2936. state = SKD_READL(skdev, FIT_MSG_FROM_DEVICE);
  2937. pr_debug("%s:%s:%d Msg from Dev=0x%x\n",
  2938. skdev->name, __func__, __LINE__, state);
  2939. state = SKD_READL(skdev, FIT_HW_VERSION);
  2940. pr_debug("%s:%s:%d HW version=0x%x\n",
  2941. skdev->name, __func__, __LINE__, state);
  2942. spin_unlock_irqrestore(&skdev->lock, flags);
  2943. }
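/*
 * Orderly shutdown: flush the drive's write cache with a SYNCHRONIZE_CACHE
 * through the internal special request, wait up to 10 seconds for it, then
 * disable interrupts, soft reset the device and poll for up to a second
 * for it to return to the INIT state.
 */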
  2944. static void skd_stop_device(struct skd_device *skdev)
  2945. {
  2946. unsigned long flags;
  2947. struct skd_special_context *skspcl = &skdev->internal_skspcl;
  2948. u32 dev_state;
  2949. int i;
  2950. spin_lock_irqsave(&skdev->lock, flags);
  2951. if (skdev->state != SKD_DRVR_STATE_ONLINE) {
  2952. pr_err("(%s): skd_stop_device not online no sync\n",
  2953. skd_name(skdev));
  2954. goto stop_out;
  2955. }
  2956. if (skspcl->req.state != SKD_REQ_STATE_IDLE) {
  2957. pr_err("(%s): skd_stop_device no special\n",
  2958. skd_name(skdev));
  2959. goto stop_out;
  2960. }
  2961. skdev->state = SKD_DRVR_STATE_SYNCING;
  2962. skdev->sync_done = 0;
  2963. skd_send_internal_skspcl(skdev, skspcl, SYNCHRONIZE_CACHE);
  2964. spin_unlock_irqrestore(&skdev->lock, flags);
  2965. wait_event_interruptible_timeout(skdev->waitq,
  2966. (skdev->sync_done), (10 * HZ));
  2967. spin_lock_irqsave(&skdev->lock, flags);
  2968. switch (skdev->sync_done) {
  2969. case 0:
  2970. pr_err("(%s): skd_stop_device no sync\n",
  2971. skd_name(skdev));
  2972. break;
  2973. case 1:
  2974. pr_err("(%s): skd_stop_device sync done\n",
  2975. skd_name(skdev));
  2976. break;
  2977. default:
  2978. pr_err("(%s): skd_stop_device sync error\n",
  2979. skd_name(skdev));
  2980. }
  2981. stop_out:
  2982. skdev->state = SKD_DRVR_STATE_STOPPING;
  2983. spin_unlock_irqrestore(&skdev->lock, flags);
  2984. skd_kill_timer(skdev);
  2985. spin_lock_irqsave(&skdev->lock, flags);
  2986. skd_disable_interrupts(skdev);
  2987. /* ensure all ints on device are cleared */
  2988. /* soft reset the device to unload with a clean slate */
  2989. SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
  2990. SKD_WRITEL(skdev, FIT_CR_SOFT_RESET, FIT_CONTROL);
  2991. spin_unlock_irqrestore(&skdev->lock, flags);
  2992. /* poll every 100ms, 1 second timeout */
  2993. for (i = 0; i < 10; i++) {
  2994. dev_state =
  2995. SKD_READL(skdev, FIT_STATUS) & FIT_SR_DRIVE_STATE_MASK;
  2996. if (dev_state == FIT_SR_DRIVE_INIT)
  2997. break;
  2998. set_current_state(TASK_INTERRUPTIBLE);
  2999. schedule_timeout(msecs_to_jiffies(100));
  3000. }
  3001. if (dev_state != FIT_SR_DRIVE_INIT)
  3002. pr_err("(%s): skd_stop_device state error 0x%02x\n",
  3003. skd_name(skdev), dev_state);
  3004. }
  3005. /* assume spinlock is held */
  3006. static void skd_restart_device(struct skd_device *skdev)
  3007. {
  3008. u32 state;
  3009. /* ack all ghost interrupts */
  3010. SKD_WRITEL(skdev, FIT_INT_DEF_MASK, FIT_INT_STATUS_HOST);
  3011. state = SKD_READL(skdev, FIT_STATUS);
  3012. pr_debug("%s:%s:%d drive status=0x%x\n",
  3013. skdev->name, __func__, __LINE__, state);
  3014. state &= FIT_SR_DRIVE_STATE_MASK;
  3015. skdev->drive_state = state;
  3016. skdev->last_mtd = 0;
  3017. skdev->state = SKD_DRVR_STATE_RESTARTING;
  3018. skdev->timer_countdown = SKD_RESTARTING_TIMO;
  3019. skd_soft_reset(skdev);
  3020. }
  3021. /* assume spinlock is held */
  3022. static int skd_quiesce_dev(struct skd_device *skdev)
  3023. {
  3024. int rc = 0;
  3025. switch (skdev->state) {
  3026. case SKD_DRVR_STATE_BUSY:
  3027. case SKD_DRVR_STATE_BUSY_IMMINENT:
  3028. pr_debug("%s:%s:%d stopping %s queue\n",
  3029. skdev->name, __func__, __LINE__, skdev->name);
  3030. blk_stop_queue(skdev->queue);
  3031. break;
  3032. case SKD_DRVR_STATE_ONLINE:
  3033. case SKD_DRVR_STATE_STOPPING:
  3034. case SKD_DRVR_STATE_SYNCING:
  3035. case SKD_DRVR_STATE_PAUSING:
  3036. case SKD_DRVR_STATE_PAUSED:
  3037. case SKD_DRVR_STATE_STARTING:
  3038. case SKD_DRVR_STATE_RESTARTING:
  3039. case SKD_DRVR_STATE_RESUMING:
  3040. default:
  3041. rc = -EINVAL;
  3042. pr_debug("%s:%s:%d state [%d] not implemented\n",
  3043. skdev->name, __func__, __LINE__, skdev->state);
  3044. }
  3045. return rc;
  3046. }
  3047. /* assume spinlock is held */
  3048. static int skd_unquiesce_dev(struct skd_device *skdev)
  3049. {
  3050. int prev_driver_state = skdev->state;
  3051. skd_log_skdev(skdev, "unquiesce");
  3052. if (skdev->state == SKD_DRVR_STATE_ONLINE) {
  3053. pr_debug("%s:%s:%d **** device already ONLINE\n",
  3054. skdev->name, __func__, __LINE__);
  3055. return 0;
  3056. }
  3057. if (skdev->drive_state != FIT_SR_DRIVE_ONLINE) {
  3058. /*
3059. * If there has been a state change to other than
3060. * ONLINE, we rely on a controller state change
3061. * to come back online and restart the queue.
3062. * The BUSY state means that the driver is ready to
3063. * continue normal processing but is waiting for the
3064. * controller to become available.
  3065. */
  3066. skdev->state = SKD_DRVR_STATE_BUSY;
  3067. pr_debug("%s:%s:%d drive BUSY state\n",
  3068. skdev->name, __func__, __LINE__);
  3069. return 0;
  3070. }
  3071. /*
3072. * The drive has just come online; the driver is either in startup,
3073. * paused performing a task, or busy waiting for hardware.
  3074. */
  3075. switch (skdev->state) {
  3076. case SKD_DRVR_STATE_PAUSED:
  3077. case SKD_DRVR_STATE_BUSY:
  3078. case SKD_DRVR_STATE_BUSY_IMMINENT:
  3079. case SKD_DRVR_STATE_BUSY_ERASE:
  3080. case SKD_DRVR_STATE_STARTING:
  3081. case SKD_DRVR_STATE_RESTARTING:
  3082. case SKD_DRVR_STATE_FAULT:
  3083. case SKD_DRVR_STATE_IDLE:
  3084. case SKD_DRVR_STATE_LOAD:
  3085. skdev->state = SKD_DRVR_STATE_ONLINE;
  3086. pr_err("(%s): Driver state %s(%d)=>%s(%d)\n",
  3087. skd_name(skdev),
  3088. skd_skdev_state_to_str(prev_driver_state),
  3089. prev_driver_state, skd_skdev_state_to_str(skdev->state),
  3090. skdev->state);
  3091. pr_debug("%s:%s:%d **** device ONLINE...starting block queue\n",
  3092. skdev->name, __func__, __LINE__);
  3093. pr_debug("%s:%s:%d starting %s queue\n",
  3094. skdev->name, __func__, __LINE__, skdev->name);
  3095. pr_info("(%s): STEC s1120 ONLINE\n", skd_name(skdev));
  3096. blk_start_queue(skdev->queue);
  3097. skdev->gendisk_on = 1;
  3098. wake_up_interruptible(&skdev->waitq);
  3099. break;
  3100. case SKD_DRVR_STATE_DISAPPEARED:
  3101. default:
3102. pr_debug("%s:%s:%d **** driver state %d, not implemented\n",
  3103. skdev->name, __func__, __LINE__,
  3104. skdev->state);
  3105. return -EBUSY;
  3106. }
  3107. return 0;
  3108. }
  3109. /*
  3110. *****************************************************************************
  3111. * PCIe MSI/MSI-X INTERRUPT HANDLERS
  3112. *****************************************************************************
  3113. */
  3114. static irqreturn_t skd_reserved_isr(int irq, void *skd_host_data)
  3115. {
  3116. struct skd_device *skdev = skd_host_data;
  3117. unsigned long flags;
  3118. spin_lock_irqsave(&skdev->lock, flags);
  3119. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3120. skdev->name, __func__, __LINE__,
  3121. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3122. pr_err("(%s): MSIX reserved irq %d = 0x%x\n", skd_name(skdev),
  3123. irq, SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3124. SKD_WRITEL(skdev, FIT_INT_RESERVED_MASK, FIT_INT_STATUS_HOST);
  3125. spin_unlock_irqrestore(&skdev->lock, flags);
  3126. return IRQ_HANDLED;
  3127. }
  3128. static irqreturn_t skd_statec_isr(int irq, void *skd_host_data)
  3129. {
  3130. struct skd_device *skdev = skd_host_data;
  3131. unsigned long flags;
  3132. spin_lock_irqsave(&skdev->lock, flags);
  3133. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3134. skdev->name, __func__, __LINE__,
  3135. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3136. SKD_WRITEL(skdev, FIT_ISH_FW_STATE_CHANGE, FIT_INT_STATUS_HOST);
  3137. skd_isr_fwstate(skdev);
  3138. spin_unlock_irqrestore(&skdev->lock, flags);
  3139. return IRQ_HANDLED;
  3140. }
  3141. static irqreturn_t skd_comp_q(int irq, void *skd_host_data)
  3142. {
  3143. struct skd_device *skdev = skd_host_data;
  3144. unsigned long flags;
  3145. int flush_enqueued = 0;
  3146. int deferred;
  3147. spin_lock_irqsave(&skdev->lock, flags);
  3148. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3149. skdev->name, __func__, __LINE__,
  3150. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3151. SKD_WRITEL(skdev, FIT_ISH_COMPLETION_POSTED, FIT_INT_STATUS_HOST);
  3152. deferred = skd_isr_completion_posted(skdev, skd_isr_comp_limit,
  3153. &flush_enqueued);
  3154. if (flush_enqueued)
  3155. skd_request_fn(skdev->queue);
  3156. if (deferred)
  3157. schedule_work(&skdev->completion_worker);
  3158. else if (!flush_enqueued)
  3159. skd_request_fn(skdev->queue);
  3160. spin_unlock_irqrestore(&skdev->lock, flags);
  3161. return IRQ_HANDLED;
  3162. }
  3163. static irqreturn_t skd_msg_isr(int irq, void *skd_host_data)
  3164. {
  3165. struct skd_device *skdev = skd_host_data;
  3166. unsigned long flags;
  3167. spin_lock_irqsave(&skdev->lock, flags);
  3168. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3169. skdev->name, __func__, __LINE__,
  3170. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3171. SKD_WRITEL(skdev, FIT_ISH_MSG_FROM_DEV, FIT_INT_STATUS_HOST);
  3172. skd_isr_msg_from_dev(skdev);
  3173. spin_unlock_irqrestore(&skdev->lock, flags);
  3174. return IRQ_HANDLED;
  3175. }
  3176. static irqreturn_t skd_qfull_isr(int irq, void *skd_host_data)
  3177. {
  3178. struct skd_device *skdev = skd_host_data;
  3179. unsigned long flags;
  3180. spin_lock_irqsave(&skdev->lock, flags);
  3181. pr_debug("%s:%s:%d MSIX = 0x%x\n",
  3182. skdev->name, __func__, __LINE__,
  3183. SKD_READL(skdev, FIT_INT_STATUS_HOST));
  3184. SKD_WRITEL(skdev, FIT_INT_QUEUE_FULL, FIT_INT_STATUS_HOST);
  3185. spin_unlock_irqrestore(&skdev->lock, flags);
  3186. return IRQ_HANDLED;
  3187. }
  3188. /*
  3189. *****************************************************************************
  3190. * PCIe MSI/MSI-X SETUP
  3191. *****************************************************************************
  3192. */
  3193. struct skd_msix_entry {
  3194. char isr_name[30];
  3195. };
  3196. struct skd_init_msix_entry {
  3197. const char *name;
  3198. irq_handler_t handler;
  3199. };
  3200. #define SKD_MAX_MSIX_COUNT 13
  3201. #define SKD_MIN_MSIX_COUNT 7
  3202. #define SKD_BASE_MSIX_IRQ 4
  3203. static struct skd_init_msix_entry msix_entries[SKD_MAX_MSIX_COUNT] = {
  3204. { "(DMA 0)", skd_reserved_isr },
  3205. { "(DMA 1)", skd_reserved_isr },
  3206. { "(DMA 2)", skd_reserved_isr },
  3207. { "(DMA 3)", skd_reserved_isr },
  3208. { "(State Change)", skd_statec_isr },
  3209. { "(COMPL_Q)", skd_comp_q },
  3210. { "(MSG)", skd_msg_isr },
  3211. { "(Reserved)", skd_reserved_isr },
  3212. { "(Reserved)", skd_reserved_isr },
  3213. { "(Queue Full 0)", skd_qfull_isr },
  3214. { "(Queue Full 1)", skd_qfull_isr },
  3215. { "(Queue Full 2)", skd_qfull_isr },
  3216. { "(Queue Full 3)", skd_qfull_isr },
  3217. };
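/*
 * MSI-X setup: allocate all SKD_MAX_MSIX_COUNT vectors (all or nothing)
 * and register the per-vector handlers from msix_entries[]. On failure,
 * any vectors already requested are released again.
 */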
  3218. static int skd_acquire_msix(struct skd_device *skdev)
  3219. {
  3220. int i, rc;
  3221. struct pci_dev *pdev = skdev->pdev;
  3222. rc = pci_alloc_irq_vectors(pdev, SKD_MAX_MSIX_COUNT, SKD_MAX_MSIX_COUNT,
  3223. PCI_IRQ_MSIX);
  3224. if (rc < 0) {
  3225. pr_err("(%s): failed to enable MSI-X %d\n",
  3226. skd_name(skdev), rc);
  3227. goto out;
  3228. }
  3229. skdev->msix_entries = kcalloc(SKD_MAX_MSIX_COUNT,
  3230. sizeof(struct skd_msix_entry), GFP_KERNEL);
  3231. if (!skdev->msix_entries) {
  3232. rc = -ENOMEM;
  3233. pr_err("(%s): msix table allocation error\n",
  3234. skd_name(skdev));
  3235. goto out;
  3236. }
  3237. /* Enable MSI-X vectors for the base queue */
  3238. for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
  3239. struct skd_msix_entry *qentry = &skdev->msix_entries[i];
  3240. snprintf(qentry->isr_name, sizeof(qentry->isr_name),
  3241. "%s%d-msix %s", DRV_NAME, skdev->devno,
  3242. msix_entries[i].name);
  3243. rc = devm_request_irq(&skdev->pdev->dev,
  3244. pci_irq_vector(skdev->pdev, i),
  3245. msix_entries[i].handler, 0,
  3246. qentry->isr_name, skdev);
  3247. if (rc) {
  3248. pr_err("(%s): Unable to register(%d) MSI-X "
  3249. "handler %d: %s\n",
  3250. skd_name(skdev), rc, i, qentry->isr_name);
  3251. goto msix_out;
  3252. }
  3253. }
  3254. pr_debug("%s:%s:%d %s: <%s> msix %d irq(s) enabled\n",
  3255. skdev->name, __func__, __LINE__,
  3256. pci_name(pdev), skdev->name, SKD_MAX_MSIX_COUNT);
  3257. return 0;
  3258. msix_out:
  3259. while (--i >= 0)
  3260. devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i), skdev);
  3261. out:
  3262. kfree(skdev->msix_entries);
  3263. skdev->msix_entries = NULL;
  3264. return rc;
  3265. }
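/*
 * Acquire an interrupt according to skd_isr_type: try MSI-X first, fall
 * back to a single MSI vector, and finally to a shared legacy INTx line.
 */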
  3266. static int skd_acquire_irq(struct skd_device *skdev)
  3267. {
  3268. struct pci_dev *pdev = skdev->pdev;
  3269. unsigned int irq_flag = PCI_IRQ_LEGACY;
  3270. int rc;
  3271. if (skd_isr_type == SKD_IRQ_MSIX) {
  3272. rc = skd_acquire_msix(skdev);
  3273. if (!rc)
  3274. return 0;
  3275. pr_err("(%s): failed to enable MSI-X, re-trying with MSI %d\n",
  3276. skd_name(skdev), rc);
  3277. }
  3278. snprintf(skdev->isr_name, sizeof(skdev->isr_name), "%s%d", DRV_NAME,
  3279. skdev->devno);
  3280. if (skd_isr_type != SKD_IRQ_LEGACY)
  3281. irq_flag |= PCI_IRQ_MSI;
  3282. rc = pci_alloc_irq_vectors(pdev, 1, 1, irq_flag);
  3283. if (rc < 0) {
  3284. pr_err("(%s): failed to allocate the MSI interrupt %d\n",
  3285. skd_name(skdev), rc);
  3286. return rc;
  3287. }
  3288. rc = devm_request_irq(&pdev->dev, pdev->irq, skd_isr,
  3289. pdev->msi_enabled ? 0 : IRQF_SHARED,
  3290. skdev->isr_name, skdev);
  3291. if (rc) {
  3292. pci_free_irq_vectors(pdev);
  3293. pr_err("(%s): failed to allocate interrupt %d\n",
  3294. skd_name(skdev), rc);
  3295. return rc;
  3296. }
  3297. return 0;
  3298. }
  3299. static void skd_release_irq(struct skd_device *skdev)
  3300. {
  3301. struct pci_dev *pdev = skdev->pdev;
  3302. if (skdev->msix_entries) {
  3303. int i;
  3304. for (i = 0; i < SKD_MAX_MSIX_COUNT; i++) {
  3305. devm_free_irq(&pdev->dev, pci_irq_vector(pdev, i),
  3306. skdev);
  3307. }
  3308. kfree(skdev->msix_entries);
  3309. skdev->msix_entries = NULL;
  3310. } else {
  3311. devm_free_irq(&pdev->dev, pdev->irq, skdev);
  3312. }
  3313. pci_free_irq_vectors(pdev);
  3314. }
  3315. /*
  3316. *****************************************************************************
  3317. * CONSTRUCT
  3318. *****************************************************************************
  3319. */
  3320. static int skd_cons_skcomp(struct skd_device *skdev)
  3321. {
  3322. int rc = 0;
  3323. struct fit_completion_entry_v1 *skcomp;
  3324. u32 nbytes;
  3325. nbytes = sizeof(*skcomp) * SKD_N_COMPLETION_ENTRY;
  3326. nbytes += sizeof(struct fit_comp_error_info) * SKD_N_COMPLETION_ENTRY;
  3327. pr_debug("%s:%s:%d comp pci_alloc, total bytes %d entries %d\n",
  3328. skdev->name, __func__, __LINE__,
  3329. nbytes, SKD_N_COMPLETION_ENTRY);
  3330. skcomp = pci_zalloc_consistent(skdev->pdev, nbytes,
  3331. &skdev->cq_dma_address);
  3332. if (skcomp == NULL) {
  3333. rc = -ENOMEM;
  3334. goto err_out;
  3335. }
  3336. skdev->skcomp_table = skcomp;
  3337. skdev->skerr_table = (struct fit_comp_error_info *)((char *)skcomp +
  3338. sizeof(*skcomp) *
  3339. SKD_N_COMPLETION_ENTRY);
  3340. err_out:
  3341. return rc;
  3342. }
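/*
 * Allocate the FIT message contexts. Each message buffer is over-allocated
 * by 64 bytes so it can be rounded up to the FIT_QCMD base-address
 * boundary; the offset from the original allocation is remembered and
 * added back when the buffer is freed.
 */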
  3343. static int skd_cons_skmsg(struct skd_device *skdev)
  3344. {
  3345. int rc = 0;
  3346. u32 i;
  3347. pr_debug("%s:%s:%d skmsg_table kzalloc, struct %lu, count %u total %lu\n",
  3348. skdev->name, __func__, __LINE__,
  3349. sizeof(struct skd_fitmsg_context),
  3350. skdev->num_fitmsg_context,
  3351. sizeof(struct skd_fitmsg_context) * skdev->num_fitmsg_context);
  3352. skdev->skmsg_table = kzalloc(sizeof(struct skd_fitmsg_context)
  3353. *skdev->num_fitmsg_context, GFP_KERNEL);
  3354. if (skdev->skmsg_table == NULL) {
  3355. rc = -ENOMEM;
  3356. goto err_out;
  3357. }
  3358. for (i = 0; i < skdev->num_fitmsg_context; i++) {
  3359. struct skd_fitmsg_context *skmsg;
  3360. skmsg = &skdev->skmsg_table[i];
  3361. skmsg->id = i + SKD_ID_FIT_MSG;
  3362. skmsg->state = SKD_MSG_STATE_IDLE;
  3363. skmsg->msg_buf = pci_alloc_consistent(skdev->pdev,
  3364. SKD_N_FITMSG_BYTES + 64,
  3365. &skmsg->mb_dma_address);
  3366. if (skmsg->msg_buf == NULL) {
  3367. rc = -ENOMEM;
  3368. goto err_out;
  3369. }
  3370. skmsg->offset = (u32)((u64)skmsg->msg_buf &
  3371. (~FIT_QCMD_BASE_ADDRESS_MASK));
  3372. skmsg->msg_buf += ~FIT_QCMD_BASE_ADDRESS_MASK;
  3373. skmsg->msg_buf = (u8 *)((u64)skmsg->msg_buf &
  3374. FIT_QCMD_BASE_ADDRESS_MASK);
  3375. skmsg->mb_dma_address += ~FIT_QCMD_BASE_ADDRESS_MASK;
  3376. skmsg->mb_dma_address &= FIT_QCMD_BASE_ADDRESS_MASK;
  3377. memset(skmsg->msg_buf, 0, SKD_N_FITMSG_BYTES);
  3378. skmsg->next = &skmsg[1];
  3379. }
  3380. /* Free list is in order starting with the 0th entry. */
  3381. skdev->skmsg_table[i - 1].next = NULL;
  3382. skdev->skmsg_free_list = skdev->skmsg_table;
  3383. err_out:
  3384. return rc;
  3385. }
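/*
 * Allocate a DMA-coherent array of n_sg FIT SG descriptors and pre-link
 * them through next_desc_ptr so the device can walk the chain; the last
 * descriptor terminates the chain with a null pointer.
 */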
  3386. static struct fit_sg_descriptor *skd_cons_sg_list(struct skd_device *skdev,
  3387. u32 n_sg,
  3388. dma_addr_t *ret_dma_addr)
  3389. {
  3390. struct fit_sg_descriptor *sg_list;
  3391. u32 nbytes;
  3392. nbytes = sizeof(*sg_list) * n_sg;
  3393. sg_list = pci_alloc_consistent(skdev->pdev, nbytes, ret_dma_addr);
  3394. if (sg_list != NULL) {
  3395. uint64_t dma_address = *ret_dma_addr;
  3396. u32 i;
  3397. memset(sg_list, 0, nbytes);
  3398. for (i = 0; i < n_sg - 1; i++) {
  3399. uint64_t ndp_off;
  3400. ndp_off = (i + 1) * sizeof(struct fit_sg_descriptor);
  3401. sg_list[i].next_desc_ptr = dma_address + ndp_off;
  3402. }
  3403. sg_list[i].next_desc_ptr = 0LL;
  3404. }
  3405. return sg_list;
  3406. }
  3407. static int skd_cons_skreq(struct skd_device *skdev)
  3408. {
  3409. int rc = 0;
  3410. u32 i;
  3411. pr_debug("%s:%s:%d skreq_table kzalloc, struct %lu, count %u total %lu\n",
  3412. skdev->name, __func__, __LINE__,
  3413. sizeof(struct skd_request_context),
  3414. skdev->num_req_context,
  3415. sizeof(struct skd_request_context) * skdev->num_req_context);
  3416. skdev->skreq_table = kzalloc(sizeof(struct skd_request_context)
  3417. * skdev->num_req_context, GFP_KERNEL);
  3418. if (skdev->skreq_table == NULL) {
  3419. rc = -ENOMEM;
  3420. goto err_out;
  3421. }
  3422. pr_debug("%s:%s:%d alloc sg_table sg_per_req %u scatlist %lu total %lu\n",
  3423. skdev->name, __func__, __LINE__,
  3424. skdev->sgs_per_request, sizeof(struct scatterlist),
  3425. skdev->sgs_per_request * sizeof(struct scatterlist));
  3426. for (i = 0; i < skdev->num_req_context; i++) {
  3427. struct skd_request_context *skreq;
  3428. skreq = &skdev->skreq_table[i];
  3429. skreq->id = i + SKD_ID_RW_REQUEST;
  3430. skreq->state = SKD_REQ_STATE_IDLE;
  3431. skreq->sg = kzalloc(sizeof(struct scatterlist) *
  3432. skdev->sgs_per_request, GFP_KERNEL);
  3433. if (skreq->sg == NULL) {
  3434. rc = -ENOMEM;
  3435. goto err_out;
  3436. }
  3437. sg_init_table(skreq->sg, skdev->sgs_per_request);
  3438. skreq->sksg_list = skd_cons_sg_list(skdev,
  3439. skdev->sgs_per_request,
  3440. &skreq->sksg_dma_address);
  3441. if (skreq->sksg_list == NULL) {
  3442. rc = -ENOMEM;
  3443. goto err_out;
  3444. }
  3445. skreq->next = &skreq[1];
  3446. }
  3447. /* Free list is in order starting with the 0th entry. */
  3448. skdev->skreq_table[i - 1].next = NULL;
  3449. skdev->skreq_free_list = skdev->skreq_table;
  3450. err_out:
  3451. return rc;
  3452. }
  3453. static int skd_cons_skspcl(struct skd_device *skdev)
  3454. {
  3455. int rc = 0;
  3456. u32 i, nbytes;
  3457. pr_debug("%s:%s:%d skspcl_table kzalloc, struct %lu, count %u total %lu\n",
  3458. skdev->name, __func__, __LINE__,
  3459. sizeof(struct skd_special_context),
  3460. skdev->n_special,
  3461. sizeof(struct skd_special_context) * skdev->n_special);
  3462. skdev->skspcl_table = kzalloc(sizeof(struct skd_special_context)
  3463. * skdev->n_special, GFP_KERNEL);
  3464. if (skdev->skspcl_table == NULL) {
  3465. rc = -ENOMEM;
  3466. goto err_out;
  3467. }
  3468. for (i = 0; i < skdev->n_special; i++) {
  3469. struct skd_special_context *skspcl;
  3470. skspcl = &skdev->skspcl_table[i];
  3471. skspcl->req.id = i + SKD_ID_SPECIAL_REQUEST;
  3472. skspcl->req.state = SKD_REQ_STATE_IDLE;
  3473. skspcl->req.next = &skspcl[1].req;
  3474. nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
  3475. skspcl->msg_buf =
  3476. pci_zalloc_consistent(skdev->pdev, nbytes,
  3477. &skspcl->mb_dma_address);
  3478. if (skspcl->msg_buf == NULL) {
  3479. rc = -ENOMEM;
  3480. goto err_out;
  3481. }
  3482. skspcl->req.sg = kzalloc(sizeof(struct scatterlist) *
  3483. SKD_N_SG_PER_SPECIAL, GFP_KERNEL);
  3484. if (skspcl->req.sg == NULL) {
  3485. rc = -ENOMEM;
  3486. goto err_out;
  3487. }
  3488. skspcl->req.sksg_list = skd_cons_sg_list(skdev,
  3489. SKD_N_SG_PER_SPECIAL,
  3490. &skspcl->req.
  3491. sksg_dma_address);
  3492. if (skspcl->req.sksg_list == NULL) {
  3493. rc = -ENOMEM;
  3494. goto err_out;
  3495. }
  3496. }
  3497. /* Free list is in order starting with the 0th entry. */
  3498. skdev->skspcl_table[i - 1].req.next = NULL;
  3499. skdev->skspcl_free_list = skdev->skspcl_table;
  3500. return rc;
  3501. err_out:
  3502. return rc;
  3503. }
  3504. static int skd_cons_sksb(struct skd_device *skdev)
  3505. {
  3506. int rc = 0;
  3507. struct skd_special_context *skspcl;
  3508. u32 nbytes;
  3509. skspcl = &skdev->internal_skspcl;
  3510. skspcl->req.id = 0 + SKD_ID_INTERNAL;
  3511. skspcl->req.state = SKD_REQ_STATE_IDLE;
  3512. nbytes = SKD_N_INTERNAL_BYTES;
  3513. skspcl->data_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
  3514. &skspcl->db_dma_address);
  3515. if (skspcl->data_buf == NULL) {
  3516. rc = -ENOMEM;
  3517. goto err_out;
  3518. }
  3519. nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
  3520. skspcl->msg_buf = pci_zalloc_consistent(skdev->pdev, nbytes,
  3521. &skspcl->mb_dma_address);
  3522. if (skspcl->msg_buf == NULL) {
  3523. rc = -ENOMEM;
  3524. goto err_out;
  3525. }
  3526. skspcl->req.sksg_list = skd_cons_sg_list(skdev, 1,
  3527. &skspcl->req.sksg_dma_address);
  3528. if (skspcl->req.sksg_list == NULL) {
  3529. rc = -ENOMEM;
  3530. goto err_out;
  3531. }
  3532. if (!skd_format_internal_skspcl(skdev)) {
  3533. rc = -EINVAL;
  3534. goto err_out;
  3535. }
  3536. err_out:
  3537. return rc;
  3538. }
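/*
 * Allocate the gendisk and its request queue, apply the queue limits
 * (segment count, max sectors, 8K optimal I/O size, non-rotational) and
 * leave the queue stopped until the device reports it is online.
 */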
  3539. static int skd_cons_disk(struct skd_device *skdev)
  3540. {
  3541. int rc = 0;
  3542. struct gendisk *disk;
  3543. struct request_queue *q;
  3544. unsigned long flags;
  3545. disk = alloc_disk(SKD_MINORS_PER_DEVICE);
  3546. if (!disk) {
  3547. rc = -ENOMEM;
  3548. goto err_out;
  3549. }
  3550. skdev->disk = disk;
  3551. sprintf(disk->disk_name, DRV_NAME "%u", skdev->devno);
  3552. disk->major = skdev->major;
  3553. disk->first_minor = skdev->devno * SKD_MINORS_PER_DEVICE;
  3554. disk->fops = &skd_blockdev_ops;
  3555. disk->private_data = skdev;
  3556. q = blk_init_queue(skd_request_fn, &skdev->lock);
  3557. if (!q) {
  3558. rc = -ENOMEM;
  3559. goto err_out;
  3560. }
  3561. skdev->queue = q;
  3562. disk->queue = q;
  3563. q->queuedata = skdev;
  3564. blk_queue_write_cache(q, true, true);
  3565. blk_queue_max_segments(q, skdev->sgs_per_request);
  3566. blk_queue_max_hw_sectors(q, SKD_N_MAX_SECTORS);
3567. /* set sysfs optimal_io_size to 8K */
  3568. blk_queue_io_opt(q, 8192);
  3569. queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
  3570. queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
  3571. spin_lock_irqsave(&skdev->lock, flags);
  3572. pr_debug("%s:%s:%d stopping %s queue\n",
  3573. skdev->name, __func__, __LINE__, skdev->name);
  3574. blk_stop_queue(skdev->queue);
  3575. spin_unlock_irqrestore(&skdev->lock, flags);
  3576. err_out:
  3577. return rc;
  3578. }
  3579. #define SKD_N_DEV_TABLE 16u
  3580. static u32 skd_next_devno;
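/*
 * Allocate a skd_device and construct all of its per-device resources:
 * completion queue, FIT message contexts, request contexts, special
 * requests, the internal special buffer and the gendisk/request queue.
 * On any failure, skd_destruct() tears down whatever was built.
 */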
  3581. static struct skd_device *skd_construct(struct pci_dev *pdev)
  3582. {
  3583. struct skd_device *skdev;
  3584. int blk_major = skd_major;
  3585. int rc;
  3586. skdev = kzalloc(sizeof(*skdev), GFP_KERNEL);
  3587. if (!skdev) {
  3588. pr_err(PFX "(%s): memory alloc failure\n",
  3589. pci_name(pdev));
  3590. return NULL;
  3591. }
  3592. skdev->state = SKD_DRVR_STATE_LOAD;
  3593. skdev->pdev = pdev;
  3594. skdev->devno = skd_next_devno++;
  3595. skdev->major = blk_major;
  3596. sprintf(skdev->name, DRV_NAME "%d", skdev->devno);
  3597. skdev->dev_max_queue_depth = 0;
  3598. skdev->num_req_context = skd_max_queue_depth;
  3599. skdev->num_fitmsg_context = skd_max_queue_depth;
  3600. skdev->n_special = skd_max_pass_thru;
  3601. skdev->cur_max_queue_depth = 1;
  3602. skdev->queue_low_water_mark = 1;
  3603. skdev->proto_ver = 99;
  3604. skdev->sgs_per_request = skd_sgs_per_request;
  3605. skdev->dbg_level = skd_dbg_level;
  3606. atomic_set(&skdev->device_count, 0);
  3607. spin_lock_init(&skdev->lock);
  3608. INIT_WORK(&skdev->completion_worker, skd_completion_worker);
  3609. pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
  3610. rc = skd_cons_skcomp(skdev);
  3611. if (rc < 0)
  3612. goto err_out;
  3613. pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
  3614. rc = skd_cons_skmsg(skdev);
  3615. if (rc < 0)
  3616. goto err_out;
  3617. pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
  3618. rc = skd_cons_skreq(skdev);
  3619. if (rc < 0)
  3620. goto err_out;
  3621. pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
  3622. rc = skd_cons_skspcl(skdev);
  3623. if (rc < 0)
  3624. goto err_out;
  3625. pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
  3626. rc = skd_cons_sksb(skdev);
  3627. if (rc < 0)
  3628. goto err_out;
  3629. pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
  3630. rc = skd_cons_disk(skdev);
  3631. if (rc < 0)
  3632. goto err_out;
  3633. pr_debug("%s:%s:%d VICTORY\n", skdev->name, __func__, __LINE__);
  3634. return skdev;
  3635. err_out:
  3636. pr_debug("%s:%s:%d construct failed\n",
  3637. skdev->name, __func__, __LINE__);
  3638. skd_destruct(skdev);
  3639. return NULL;
  3640. }
  3641. /*
  3642. *****************************************************************************
  3643. * DESTRUCT (FREE)
  3644. *****************************************************************************
  3645. */
  3646. static void skd_free_skcomp(struct skd_device *skdev)
  3647. {
  3648. if (skdev->skcomp_table != NULL) {
  3649. u32 nbytes;
  3650. nbytes = sizeof(skdev->skcomp_table[0]) *
  3651. SKD_N_COMPLETION_ENTRY;
  3652. pci_free_consistent(skdev->pdev, nbytes,
  3653. skdev->skcomp_table, skdev->cq_dma_address);
  3654. }
  3655. skdev->skcomp_table = NULL;
  3656. skdev->cq_dma_address = 0;
  3657. }
  3658. static void skd_free_skmsg(struct skd_device *skdev)
  3659. {
  3660. u32 i;
  3661. if (skdev->skmsg_table == NULL)
  3662. return;
  3663. for (i = 0; i < skdev->num_fitmsg_context; i++) {
  3664. struct skd_fitmsg_context *skmsg;
  3665. skmsg = &skdev->skmsg_table[i];
  3666. if (skmsg->msg_buf != NULL) {
  3667. skmsg->msg_buf += skmsg->offset;
  3668. skmsg->mb_dma_address += skmsg->offset;
  3669. pci_free_consistent(skdev->pdev, SKD_N_FITMSG_BYTES,
  3670. skmsg->msg_buf,
  3671. skmsg->mb_dma_address);
  3672. }
  3673. skmsg->msg_buf = NULL;
  3674. skmsg->mb_dma_address = 0;
  3675. }
  3676. kfree(skdev->skmsg_table);
  3677. skdev->skmsg_table = NULL;
  3678. }
  3679. static void skd_free_sg_list(struct skd_device *skdev,
  3680. struct fit_sg_descriptor *sg_list,
  3681. u32 n_sg, dma_addr_t dma_addr)
  3682. {
  3683. if (sg_list != NULL) {
  3684. u32 nbytes;
  3685. nbytes = sizeof(*sg_list) * n_sg;
  3686. pci_free_consistent(skdev->pdev, nbytes, sg_list, dma_addr);
  3687. }
  3688. }
  3689. static void skd_free_skreq(struct skd_device *skdev)
  3690. {
  3691. u32 i;
  3692. if (skdev->skreq_table == NULL)
  3693. return;
  3694. for (i = 0; i < skdev->num_req_context; i++) {
  3695. struct skd_request_context *skreq;
  3696. skreq = &skdev->skreq_table[i];
  3697. skd_free_sg_list(skdev, skreq->sksg_list,
  3698. skdev->sgs_per_request,
  3699. skreq->sksg_dma_address);
  3700. skreq->sksg_list = NULL;
  3701. skreq->sksg_dma_address = 0;
  3702. kfree(skreq->sg);
  3703. }
  3704. kfree(skdev->skreq_table);
  3705. skdev->skreq_table = NULL;
  3706. }
  3707. static void skd_free_skspcl(struct skd_device *skdev)
  3708. {
  3709. u32 i;
  3710. u32 nbytes;
  3711. if (skdev->skspcl_table == NULL)
  3712. return;
  3713. for (i = 0; i < skdev->n_special; i++) {
  3714. struct skd_special_context *skspcl;
  3715. skspcl = &skdev->skspcl_table[i];
  3716. if (skspcl->msg_buf != NULL) {
  3717. nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
  3718. pci_free_consistent(skdev->pdev, nbytes,
  3719. skspcl->msg_buf,
  3720. skspcl->mb_dma_address);
  3721. }
  3722. skspcl->msg_buf = NULL;
  3723. skspcl->mb_dma_address = 0;
  3724. skd_free_sg_list(skdev, skspcl->req.sksg_list,
  3725. SKD_N_SG_PER_SPECIAL,
  3726. skspcl->req.sksg_dma_address);
  3727. skspcl->req.sksg_list = NULL;
  3728. skspcl->req.sksg_dma_address = 0;
  3729. kfree(skspcl->req.sg);
  3730. }
  3731. kfree(skdev->skspcl_table);
  3732. skdev->skspcl_table = NULL;
  3733. }
  3734. static void skd_free_sksb(struct skd_device *skdev)
  3735. {
  3736. struct skd_special_context *skspcl;
  3737. u32 nbytes;
  3738. skspcl = &skdev->internal_skspcl;
  3739. if (skspcl->data_buf != NULL) {
  3740. nbytes = SKD_N_INTERNAL_BYTES;
  3741. pci_free_consistent(skdev->pdev, nbytes,
  3742. skspcl->data_buf, skspcl->db_dma_address);
  3743. }
  3744. skspcl->data_buf = NULL;
  3745. skspcl->db_dma_address = 0;
  3746. if (skspcl->msg_buf != NULL) {
  3747. nbytes = SKD_N_SPECIAL_FITMSG_BYTES;
  3748. pci_free_consistent(skdev->pdev, nbytes,
  3749. skspcl->msg_buf, skspcl->mb_dma_address);
  3750. }
  3751. skspcl->msg_buf = NULL;
  3752. skspcl->mb_dma_address = 0;
  3753. skd_free_sg_list(skdev, skspcl->req.sksg_list, 1,
  3754. skspcl->req.sksg_dma_address);
  3755. skspcl->req.sksg_list = NULL;
  3756. skspcl->req.sksg_dma_address = 0;
  3757. }
  3758. static void skd_free_disk(struct skd_device *skdev)
  3759. {
  3760. struct gendisk *disk = skdev->disk;
  3761. if (disk != NULL) {
  3762. struct request_queue *q = disk->queue;
  3763. if (disk->flags & GENHD_FL_UP)
  3764. del_gendisk(disk);
  3765. if (q)
  3766. blk_cleanup_queue(q);
  3767. put_disk(disk);
  3768. }
  3769. skdev->disk = NULL;
  3770. }
  3771. static void skd_destruct(struct skd_device *skdev)
  3772. {
  3773. if (skdev == NULL)
  3774. return;
  3775. pr_debug("%s:%s:%d disk\n", skdev->name, __func__, __LINE__);
  3776. skd_free_disk(skdev);
  3777. pr_debug("%s:%s:%d sksb\n", skdev->name, __func__, __LINE__);
  3778. skd_free_sksb(skdev);
  3779. pr_debug("%s:%s:%d skspcl\n", skdev->name, __func__, __LINE__);
  3780. skd_free_skspcl(skdev);
  3781. pr_debug("%s:%s:%d skreq\n", skdev->name, __func__, __LINE__);
  3782. skd_free_skreq(skdev);
  3783. pr_debug("%s:%s:%d skmsg\n", skdev->name, __func__, __LINE__);
  3784. skd_free_skmsg(skdev);
  3785. pr_debug("%s:%s:%d skcomp\n", skdev->name, __func__, __LINE__);
  3786. skd_free_skcomp(skdev);
  3787. pr_debug("%s:%s:%d skdev\n", skdev->name, __func__, __LINE__);
  3788. kfree(skdev);
  3789. }
  3790. /*
  3791. *****************************************************************************
  3792. * BLOCK DEVICE (BDEV) GLUE
  3793. *****************************************************************************
  3794. */
  3795. static int skd_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo)
  3796. {
  3797. struct skd_device *skdev;
  3798. u64 capacity;
  3799. skdev = bdev->bd_disk->private_data;
  3800. pr_debug("%s:%s:%d %s: CMD[%s] getgeo device\n",
  3801. skdev->name, __func__, __LINE__,
  3802. bdev->bd_disk->disk_name, current->comm);
  3803. if (skdev->read_cap_is_valid) {
  3804. capacity = get_capacity(skdev->disk);
  3805. geo->heads = 64;
  3806. geo->sectors = 255;
  3807. geo->cylinders = (capacity) / (255 * 64);
  3808. return 0;
  3809. }
  3810. return -EIO;
  3811. }
  3812. static int skd_bdev_attach(struct device *parent, struct skd_device *skdev)
  3813. {
  3814. pr_debug("%s:%s:%d add_disk\n", skdev->name, __func__, __LINE__);
  3815. device_add_disk(parent, skdev->disk);
  3816. return 0;
  3817. }
  3818. static const struct block_device_operations skd_blockdev_ops = {
  3819. .owner = THIS_MODULE,
  3820. .ioctl = skd_bdev_ioctl,
  3821. .getgeo = skd_bdev_getgeo,
  3822. };
  3823. /*
  3824. *****************************************************************************
  3825. * PCIe DRIVER GLUE
  3826. *****************************************************************************
  3827. */
  3828. static const struct pci_device_id skd_pci_tbl[] = {
  3829. { PCI_VENDOR_ID_STEC, PCI_DEVICE_ID_S1120,
  3830. PCI_ANY_ID, PCI_ANY_ID, 0, 0, },
  3831. { 0 } /* terminate list */
  3832. };
  3833. MODULE_DEVICE_TABLE(pci, skd_pci_tbl);
  3834. static char *skd_pci_info(struct skd_device *skdev, char *str)
  3835. {
  3836. int pcie_reg;
  3837. strcpy(str, "PCIe (");
  3838. pcie_reg = pci_find_capability(skdev->pdev, PCI_CAP_ID_EXP);
  3839. if (pcie_reg) {
  3840. char lwstr[6];
  3841. uint16_t pcie_lstat, lspeed, lwidth;
  3842. pcie_reg += 0x12;
  3843. pci_read_config_word(skdev->pdev, pcie_reg, &pcie_lstat);
  3844. lspeed = pcie_lstat & (0xF);
  3845. lwidth = (pcie_lstat & 0x3F0) >> 4;
  3846. if (lspeed == 1)
  3847. strcat(str, "2.5GT/s ");
  3848. else if (lspeed == 2)
  3849. strcat(str, "5.0GT/s ");
  3850. else
  3851. strcat(str, "<unknown> ");
  3852. snprintf(lwstr, sizeof(lwstr), "%dX)", lwidth);
  3853. strcat(str, lwstr);
  3854. }
  3855. return str;
  3856. }
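/*
 * PCI probe: enable the device, set the DMA masks (64-bit with a 32-bit
 * fallback), map the BARs, acquire interrupts, start the state timer,
 * then start the device and wait up to SKD_START_WAIT_SECONDS for it to
 * come online before attaching the gendisk.
 */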
  3857. static int skd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
  3858. {
  3859. int i;
  3860. int rc = 0;
  3861. char pci_str[32];
  3862. struct skd_device *skdev;
  3863. pr_info("STEC s1120 Driver(%s) version %s-b%s\n",
  3864. DRV_NAME, DRV_VERSION, DRV_BUILD_ID);
  3865. pr_info("(skd?:??:[%s]): vendor=%04X device=%04x\n",
  3866. pci_name(pdev), pdev->vendor, pdev->device);
  3867. rc = pci_enable_device(pdev);
  3868. if (rc)
  3869. return rc;
  3870. rc = pci_request_regions(pdev, DRV_NAME);
  3871. if (rc)
  3872. goto err_out;
  3873. rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
  3874. if (!rc) {
  3875. if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
  3876. pr_err("(%s): consistent DMA mask error %d\n",
  3877. pci_name(pdev), rc);
  3878. }
  3879. } else {
3880. rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
  3881. if (rc) {
  3882. pr_err("(%s): DMA mask error %d\n",
  3883. pci_name(pdev), rc);
  3884. goto err_out_regions;
  3885. }
  3886. }
  3887. if (!skd_major) {
  3888. rc = register_blkdev(0, DRV_NAME);
  3889. if (rc < 0)
  3890. goto err_out_regions;
  3891. BUG_ON(!rc);
  3892. skd_major = rc;
  3893. }
  3894. skdev = skd_construct(pdev);
  3895. if (skdev == NULL) {
  3896. rc = -ENOMEM;
  3897. goto err_out_regions;
  3898. }
  3899. skd_pci_info(skdev, pci_str);
  3900. pr_info("(%s): %s 64bit\n", skd_name(skdev), pci_str);
  3901. pci_set_master(pdev);
  3902. rc = pci_enable_pcie_error_reporting(pdev);
  3903. if (rc) {
  3904. pr_err(
  3905. "(%s): bad enable of PCIe error reporting rc=%d\n",
  3906. skd_name(skdev), rc);
  3907. skdev->pcie_error_reporting_is_enabled = 0;
  3908. } else
  3909. skdev->pcie_error_reporting_is_enabled = 1;
  3910. pci_set_drvdata(pdev, skdev);
  3911. for (i = 0; i < SKD_MAX_BARS; i++) {
  3912. skdev->mem_phys[i] = pci_resource_start(pdev, i);
  3913. skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
  3914. skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
  3915. skdev->mem_size[i]);
  3916. if (!skdev->mem_map[i]) {
  3917. pr_err("(%s): Unable to map adapter memory!\n",
  3918. skd_name(skdev));
  3919. rc = -ENODEV;
  3920. goto err_out_iounmap;
  3921. }
  3922. pr_debug("%s:%s:%d mem_map=%p, phyd=%016llx, size=%d\n",
  3923. skdev->name, __func__, __LINE__,
  3924. skdev->mem_map[i],
  3925. (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
  3926. }
  3927. rc = skd_acquire_irq(skdev);
  3928. if (rc) {
  3929. pr_err("(%s): interrupt resource error %d\n",
  3930. skd_name(skdev), rc);
  3931. goto err_out_iounmap;
  3932. }
  3933. rc = skd_start_timer(skdev);
  3934. if (rc)
  3935. goto err_out_timer;
  3936. init_waitqueue_head(&skdev->waitq);
  3937. skd_start_device(skdev);
  3938. rc = wait_event_interruptible_timeout(skdev->waitq,
  3939. (skdev->gendisk_on),
  3940. (SKD_START_WAIT_SECONDS * HZ));
  3941. if (skdev->gendisk_on > 0) {
  3942. /* device came on-line after reset */
  3943. skd_bdev_attach(&pdev->dev, skdev);
  3944. rc = 0;
  3945. } else {
3946. /* we timed out; something is wrong with the device,
3947. so don't add the disk structure */
  3948. pr_err(
  3949. "(%s): error: waiting for s1120 timed out %d!\n",
  3950. skd_name(skdev), rc);
3951. /* if there was no other error, time out with -ENXIO */
  3952. if (!rc)
  3953. rc = -ENXIO;
  3954. goto err_out_timer;
  3955. }
  3956. #ifdef SKD_VMK_POLL_HANDLER
  3957. if (skdev->irq_type == SKD_IRQ_MSIX) {
  3958. /* MSIX completion handler is being used for coredump */
  3959. vmklnx_scsi_register_poll_handler(skdev->scsi_host,
  3960. skdev->msix_entries[5].vector,
  3961. skd_comp_q, skdev);
  3962. } else {
  3963. vmklnx_scsi_register_poll_handler(skdev->scsi_host,
  3964. skdev->pdev->irq, skd_isr,
  3965. skdev);
  3966. }
  3967. #endif /* SKD_VMK_POLL_HANDLER */
  3968. return rc;
  3969. err_out_timer:
  3970. skd_stop_device(skdev);
  3971. skd_release_irq(skdev);
  3972. err_out_iounmap:
  3973. for (i = 0; i < SKD_MAX_BARS; i++)
  3974. if (skdev->mem_map[i])
  3975. iounmap(skdev->mem_map[i]);
  3976. if (skdev->pcie_error_reporting_is_enabled)
  3977. pci_disable_pcie_error_reporting(pdev);
  3978. skd_destruct(skdev);
  3979. err_out_regions:
  3980. pci_release_regions(pdev);
  3981. err_out:
  3982. pci_disable_device(pdev);
  3983. pci_set_drvdata(pdev, NULL);
  3984. return rc;
  3985. }
  3986. static void skd_pci_remove(struct pci_dev *pdev)
  3987. {
  3988. int i;
  3989. struct skd_device *skdev;
  3990. skdev = pci_get_drvdata(pdev);
  3991. if (!skdev) {
  3992. pr_err("%s: no device data for PCI\n", pci_name(pdev));
  3993. return;
  3994. }
  3995. skd_stop_device(skdev);
  3996. skd_release_irq(skdev);
  3997. for (i = 0; i < SKD_MAX_BARS; i++)
  3998. if (skdev->mem_map[i])
  3999. iounmap((u32 *)skdev->mem_map[i]);
  4000. if (skdev->pcie_error_reporting_is_enabled)
  4001. pci_disable_pcie_error_reporting(pdev);
  4002. skd_destruct(skdev);
  4003. pci_release_regions(pdev);
  4004. pci_disable_device(pdev);
  4005. pci_set_drvdata(pdev, NULL);
  4006. return;
  4007. }
  4008. static int skd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
  4009. {
  4010. int i;
  4011. struct skd_device *skdev;
  4012. skdev = pci_get_drvdata(pdev);
  4013. if (!skdev) {
  4014. pr_err("%s: no device data for PCI\n", pci_name(pdev));
  4015. return -EIO;
  4016. }
  4017. skd_stop_device(skdev);
  4018. skd_release_irq(skdev);
  4019. for (i = 0; i < SKD_MAX_BARS; i++)
  4020. if (skdev->mem_map[i])
  4021. iounmap((u32 *)skdev->mem_map[i]);
  4022. if (skdev->pcie_error_reporting_is_enabled)
  4023. pci_disable_pcie_error_reporting(pdev);
  4024. pci_release_regions(pdev);
  4025. pci_save_state(pdev);
  4026. pci_disable_device(pdev);
  4027. pci_set_power_state(pdev, pci_choose_state(pdev, state));
  4028. return 0;
  4029. }
  4030. static int skd_pci_resume(struct pci_dev *pdev)
  4031. {
  4032. int i;
  4033. int rc = 0;
  4034. struct skd_device *skdev;
  4035. skdev = pci_get_drvdata(pdev);
  4036. if (!skdev) {
  4037. pr_err("%s: no device data for PCI\n", pci_name(pdev));
  4038. return -1;
  4039. }
  4040. pci_set_power_state(pdev, PCI_D0);
  4041. pci_enable_wake(pdev, PCI_D0, 0);
  4042. pci_restore_state(pdev);
  4043. rc = pci_enable_device(pdev);
  4044. if (rc)
  4045. return rc;
  4046. rc = pci_request_regions(pdev, DRV_NAME);
  4047. if (rc)
  4048. goto err_out;
  4049. rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
  4050. if (!rc) {
  4051. if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
  4052. pr_err("(%s): consistent DMA mask error %d\n",
  4053. pci_name(pdev), rc);
  4054. }
        } else {
                rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (rc) {
                        pr_err("(%s): DMA mask error %d\n",
                               pci_name(pdev), rc);
                        goto err_out_regions;
                }
        }

        pci_set_master(pdev);
        rc = pci_enable_pcie_error_reporting(pdev);
        if (rc) {
                pr_err("(%s): bad enable of PCIe error reporting rc=%d\n",
                       skdev->name, rc);
                skdev->pcie_error_reporting_is_enabled = 0;
        } else
                skdev->pcie_error_reporting_is_enabled = 1;

        for (i = 0; i < SKD_MAX_BARS; i++) {
                skdev->mem_phys[i] = pci_resource_start(pdev, i);
                skdev->mem_size[i] = (u32)pci_resource_len(pdev, i);
                skdev->mem_map[i] = ioremap(skdev->mem_phys[i],
                                            skdev->mem_size[i]);
                if (!skdev->mem_map[i]) {
                        pr_err("(%s): Unable to map adapter memory!\n",
                               skd_name(skdev));
                        rc = -ENODEV;
                        goto err_out_iounmap;
                }
                pr_debug("%s:%s:%d mem_map=%p, phys=%016llx, size=%d\n",
                         skdev->name, __func__, __LINE__,
                         skdev->mem_map[i],
                         (uint64_t)skdev->mem_phys[i], skdev->mem_size[i]);
        }

        rc = skd_acquire_irq(skdev);
        if (rc) {
                pr_err("(%s): interrupt resource error %d\n",
                       pci_name(pdev), rc);
                goto err_out_iounmap;
        }

        rc = skd_start_timer(skdev);
        if (rc)
                goto err_out_timer;

        init_waitqueue_head(&skdev->waitq);

        skd_start_device(skdev);

        return rc;

err_out_timer:
        skd_stop_device(skdev);
        skd_release_irq(skdev);

err_out_iounmap:
        for (i = 0; i < SKD_MAX_BARS; i++)
                if (skdev->mem_map[i])
                        iounmap(skdev->mem_map[i]);

        if (skdev->pcie_error_reporting_is_enabled)
                pci_disable_pcie_error_reporting(pdev);

err_out_regions:
        pci_release_regions(pdev);

err_out:
        pci_disable_device(pdev);
        return rc;
}

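/*
 * PCI shutdown: called at reboot/power-off. The device is stopped but no
 * driver resources are released, since the system is going down anyway.
 */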
static void skd_pci_shutdown(struct pci_dev *pdev)
{
        struct skd_device *skdev;

        pr_err("skd_pci_shutdown called\n");

        skdev = pci_get_drvdata(pdev);
        if (!skdev) {
                pr_err("%s: no device data for PCI\n", pci_name(pdev));
                return;
        }

        pr_err("%s: calling stop\n", skd_name(skdev));
        skd_stop_device(skdev);
}

static struct pci_driver skd_driver = {
        .name           = DRV_NAME,
        .id_table       = skd_pci_tbl,
        .probe          = skd_pci_probe,
        .remove         = skd_pci_remove,
        .suspend        = skd_pci_suspend,
        .resume         = skd_pci_resume,
        .shutdown       = skd_pci_shutdown,
};

/*
 *****************************************************************************
 * LOGGING SUPPORT
 *****************************************************************************
 */

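/*
 * skd_name() formats "<name>:<serial>:[<pci address>]" into skdev->id_str for
 * use in log messages. Until the INQUIRY data is valid, the serial number is
 * shown as "??".
 */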
static const char *skd_name(struct skd_device *skdev)
{
        memset(skdev->id_str, 0, sizeof(skdev->id_str));

        if (skdev->inquiry_is_valid)
                snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:%s:[%s]",
                         skdev->name, skdev->inq_serial_num,
                         pci_name(skdev->pdev));
        else
                snprintf(skdev->id_str, sizeof(skdev->id_str), "%s:??:[%s]",
                         skdev->name, pci_name(skdev->pdev));

        return skdev->id_str;
}

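/*
 * State-to-string helpers: translate the FIT drive state, driver state, FIT
 * message state and request state codes into readable names for the debug
 * output below. Unknown values map to "???".
 */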
const char *skd_drive_state_to_str(int state)
{
        switch (state) {
        case FIT_SR_DRIVE_OFFLINE:
                return "OFFLINE";
        case FIT_SR_DRIVE_INIT:
                return "INIT";
        case FIT_SR_DRIVE_ONLINE:
                return "ONLINE";
        case FIT_SR_DRIVE_BUSY:
                return "BUSY";
        case FIT_SR_DRIVE_FAULT:
                return "FAULT";
        case FIT_SR_DRIVE_DEGRADED:
                return "DEGRADED";
        case FIT_SR_PCIE_LINK_DOWN:
                return "LINK_DOWN";
        case FIT_SR_DRIVE_SOFT_RESET:
                return "SOFT_RESET";
        case FIT_SR_DRIVE_NEED_FW_DOWNLOAD:
                return "NEED_FW";
        case FIT_SR_DRIVE_INIT_FAULT:
                return "INIT_FAULT";
        case FIT_SR_DRIVE_BUSY_SANITIZE:
                return "BUSY_SANITIZE";
        case FIT_SR_DRIVE_BUSY_ERASE:
                return "BUSY_ERASE";
        case FIT_SR_DRIVE_FW_BOOTING:
                return "FW_BOOTING";
        default:
                return "???";
        }
}

const char *skd_skdev_state_to_str(enum skd_drvr_state state)
{
        switch (state) {
        case SKD_DRVR_STATE_LOAD:
                return "LOAD";
        case SKD_DRVR_STATE_IDLE:
                return "IDLE";
        case SKD_DRVR_STATE_BUSY:
                return "BUSY";
        case SKD_DRVR_STATE_STARTING:
                return "STARTING";
        case SKD_DRVR_STATE_ONLINE:
                return "ONLINE";
        case SKD_DRVR_STATE_PAUSING:
                return "PAUSING";
        case SKD_DRVR_STATE_PAUSED:
                return "PAUSED";
        case SKD_DRVR_STATE_DRAINING_TIMEOUT:
                return "DRAINING_TIMEOUT";
        case SKD_DRVR_STATE_RESTARTING:
                return "RESTARTING";
        case SKD_DRVR_STATE_RESUMING:
                return "RESUMING";
        case SKD_DRVR_STATE_STOPPING:
                return "STOPPING";
        case SKD_DRVR_STATE_SYNCING:
                return "SYNCING";
        case SKD_DRVR_STATE_FAULT:
                return "FAULT";
        case SKD_DRVR_STATE_DISAPPEARED:
                return "DISAPPEARED";
        case SKD_DRVR_STATE_BUSY_ERASE:
                return "BUSY_ERASE";
        case SKD_DRVR_STATE_BUSY_SANITIZE:
                return "BUSY_SANITIZE";
        case SKD_DRVR_STATE_BUSY_IMMINENT:
                return "BUSY_IMMINENT";
        case SKD_DRVR_STATE_WAIT_BOOT:
                return "WAIT_BOOT";
        default:
                return "???";
        }
}

static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
{
        switch (state) {
        case SKD_MSG_STATE_IDLE:
                return "IDLE";
        case SKD_MSG_STATE_BUSY:
                return "BUSY";
        default:
                return "???";
        }
}

static const char *skd_skreq_state_to_str(enum skd_req_state state)
{
        switch (state) {
        case SKD_REQ_STATE_IDLE:
                return "IDLE";
        case SKD_REQ_STATE_SETUP:
                return "SETUP";
        case SKD_REQ_STATE_BUSY:
                return "BUSY";
        case SKD_REQ_STATE_COMPLETED:
                return "COMPLETED";
        case SKD_REQ_STATE_TIMEOUT:
                return "TIMEOUT";
        case SKD_REQ_STATE_ABORTED:
                return "ABORTED";
        default:
                return "???";
        }
}

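/*
 * skd_log_* helpers: dump the device, FIT message and request contexts at
 * pr_debug level, tagged with the caller-supplied event string.
 */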
static void skd_log_skdev(struct skd_device *skdev, const char *event)
{
        pr_debug("%s:%s:%d (%s) skdev=%p event='%s'\n",
                 skdev->name, __func__, __LINE__, skdev->name, skdev, event);
        pr_debug("%s:%s:%d drive_state=%s(%d) driver_state=%s(%d)\n",
                 skdev->name, __func__, __LINE__,
                 skd_drive_state_to_str(skdev->drive_state), skdev->drive_state,
                 skd_skdev_state_to_str(skdev->state), skdev->state);
        pr_debug("%s:%s:%d busy=%d limit=%d dev=%d lowat=%d\n",
                 skdev->name, __func__, __LINE__,
                 skdev->in_flight, skdev->cur_max_queue_depth,
                 skdev->dev_max_queue_depth, skdev->queue_low_water_mark);
        pr_debug("%s:%s:%d timestamp=0x%x cycle=%d cycle_ix=%d\n",
                 skdev->name, __func__, __LINE__,
                 skdev->timeout_stamp, skdev->skcomp_cycle, skdev->skcomp_ix);
}

static void skd_log_skmsg(struct skd_device *skdev,
                          struct skd_fitmsg_context *skmsg, const char *event)
{
        pr_debug("%s:%s:%d (%s) skmsg=%p event='%s'\n",
                 skdev->name, __func__, __LINE__, skdev->name, skmsg, event);
        pr_debug("%s:%s:%d state=%s(%d) id=0x%04x length=%d\n",
                 skdev->name, __func__, __LINE__,
                 skd_skmsg_state_to_str(skmsg->state), skmsg->state,
                 skmsg->id, skmsg->length);
}

static void skd_log_skreq(struct skd_device *skdev,
                          struct skd_request_context *skreq, const char *event)
{
        pr_debug("%s:%s:%d (%s) skreq=%p event='%s'\n",
                 skdev->name, __func__, __LINE__, skdev->name, skreq, event);
        pr_debug("%s:%s:%d state=%s(%d) id=0x%04x fitmsg=0x%04x\n",
                 skdev->name, __func__, __LINE__,
                 skd_skreq_state_to_str(skreq->state), skreq->state,
                 skreq->id, skreq->fitmsg_id);
        pr_debug("%s:%s:%d timo=0x%x sg_dir=%d n_sg=%d\n",
                 skdev->name, __func__, __LINE__,
                 skreq->timeout_stamp, skreq->sg_data_dir, skreq->n_sg);

        if (skreq->req != NULL) {
                struct request *req = skreq->req;
                u32 lba = (u32)blk_rq_pos(req);
                u32 count = blk_rq_sectors(req);

                pr_debug("%s:%s:%d "
                         "req=%p lba=%u(0x%x) count=%u(0x%x) dir=%d\n",
                         skdev->name, __func__, __LINE__,
                         req, lba, lba, count, count,
                         (int)rq_data_dir(req));
        } else
                pr_debug("%s:%s:%d req=NULL\n",
                         skdev->name, __func__, __LINE__);
}

/*
 *****************************************************************************
 * MODULE GLUE
 *****************************************************************************
 */

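/*
 * skd_init() sanity-checks the module parameters, resetting any out-of-range
 * value to its default, and then registers the PCI driver.
 */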
static int __init skd_init(void)
{
        pr_info(PFX " v%s-b%s loaded\n", DRV_VERSION, DRV_BUILD_ID);

        switch (skd_isr_type) {
        case SKD_IRQ_LEGACY:
        case SKD_IRQ_MSI:
        case SKD_IRQ_MSIX:
                break;
        default:
                pr_err(PFX "skd_isr_type %d invalid, re-set to %d\n",
                       skd_isr_type, SKD_IRQ_DEFAULT);
                skd_isr_type = SKD_IRQ_DEFAULT;
        }

        if (skd_max_queue_depth < 1 ||
            skd_max_queue_depth > SKD_MAX_QUEUE_DEPTH) {
                pr_err(PFX "skd_max_queue_depth %d invalid, re-set to %d\n",
                       skd_max_queue_depth, SKD_MAX_QUEUE_DEPTH_DEFAULT);
                skd_max_queue_depth = SKD_MAX_QUEUE_DEPTH_DEFAULT;
        }

        if (skd_max_req_per_msg < 1 || skd_max_req_per_msg > 14) {
                pr_err(PFX "skd_max_req_per_msg %d invalid, re-set to %d\n",
                       skd_max_req_per_msg, SKD_MAX_REQ_PER_MSG_DEFAULT);
                skd_max_req_per_msg = SKD_MAX_REQ_PER_MSG_DEFAULT;
        }

        if (skd_sgs_per_request < 1 || skd_sgs_per_request > 4096) {
                pr_err(PFX "skd_sgs_per_request %d invalid, re-set to %d\n",
                       skd_sgs_per_request, SKD_N_SG_PER_REQ_DEFAULT);
                skd_sgs_per_request = SKD_N_SG_PER_REQ_DEFAULT;
        }

        if (skd_dbg_level < 0 || skd_dbg_level > 2) {
                pr_err(PFX "skd_dbg_level %d invalid, re-set to %d\n",
                       skd_dbg_level, 0);
                skd_dbg_level = 0;
        }

        if (skd_isr_comp_limit < 0) {
                pr_err(PFX "skd_isr_comp_limit %d invalid, set to %d\n",
                       skd_isr_comp_limit, 0);
                skd_isr_comp_limit = 0;
        }

        if (skd_max_pass_thru < 1 || skd_max_pass_thru > 50) {
                pr_err(PFX "skd_max_pass_thru %d invalid, re-set to %d\n",
                       skd_max_pass_thru, SKD_N_SPECIAL_CONTEXT);
                skd_max_pass_thru = SKD_N_SPECIAL_CONTEXT;
        }

        return pci_register_driver(&skd_driver);
}

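/*
 * skd_exit() unregisters the PCI driver and, if a block major number was
 * obtained at load time, releases it.
 */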
static void __exit skd_exit(void)
{
        pr_info(PFX " v%s-b%s unloading\n", DRV_VERSION, DRV_BUILD_ID);

        pci_unregister_driver(&skd_driver);

        if (skd_major)
                unregister_blkdev(skd_major, DRV_NAME);
}

module_init(skd_init);
module_exit(skd_exit);