  1. /*******************************************************************************
  2. * IBM Virtual SCSI Target Driver
  3. * Copyright (C) 2003-2005 Dave Boutcher (boutcher@us.ibm.com) IBM Corp.
  4. * Santiago Leon (santil@us.ibm.com) IBM Corp.
  5. * Linda Xie (lxie@us.ibm.com) IBM Corp.
  6. *
  7. * Copyright (C) 2005-2011 FUJITA Tomonori <tomof@acm.org>
  8. * Copyright (C) 2010 Nicholas A. Bellinger <nab@kernel.org>
  9. *
  10. * Authors: Bryant G. Ly <bryantly@linux.vnet.ibm.com>
  11. * Authors: Michael Cyr <mikecyr@linux.vnet.ibm.com>
  12. *
  13. * This program is free software; you can redistribute it and/or modify
  14. * it under the terms of the GNU General Public License as published by
  15. * the Free Software Foundation; either version 2 of the License, or
  16. * (at your option) any later version.
  17. *
  18. * This program is distributed in the hope that it will be useful,
  19. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  20. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  21. * GNU General Public License for more details.
  22. *
  23. ****************************************************************************/
  24. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  25. #include <linux/module.h>
  26. #include <linux/kernel.h>
  27. #include <linux/slab.h>
  28. #include <linux/types.h>
  29. #include <linux/list.h>
  30. #include <linux/string.h>
  31. #include <linux/delay.h>
  32. #include <target/target_core_base.h>
  33. #include <target/target_core_fabric.h>
  34. #include <asm/hvcall.h>
  35. #include <asm/vio.h>
  36. #include <scsi/viosrp.h>
  37. #include "ibmvscsi_tgt.h"
  38. #define IBMVSCSIS_VERSION "v0.2"
  39. #define INITIAL_SRP_LIMIT 800
  40. #define DEFAULT_MAX_SECTORS 256
  41. #define MAX_TXU 1024 * 1024
  42. static uint max_vdma_size = MAX_H_COPY_RDMA;
  43. static char system_id[SYS_ID_NAME_LEN] = "";
  44. static char partition_name[PARTITION_NAMELEN] = "UNKNOWN";
  45. static uint partition_number = -1;
  46. /* Adapter list and lock to control it */
  47. static DEFINE_SPINLOCK(ibmvscsis_dev_lock);
  48. static LIST_HEAD(ibmvscsis_dev_list);
  49. static long ibmvscsis_parse_command(struct scsi_info *vscsi,
  50. struct viosrp_crq *crq);
  51. static void ibmvscsis_adapter_idle(struct scsi_info *vscsi);
  52. static void ibmvscsis_determine_resid(struct se_cmd *se_cmd,
  53. struct srp_rsp *rsp)
  54. {
  55. u32 residual_count = se_cmd->residual_count;
  56. if (!residual_count)
  57. return;
  58. if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
  59. if (se_cmd->data_direction == DMA_TO_DEVICE) {
  60. /* residual data from an underflow write */
  61. rsp->flags = SRP_RSP_FLAG_DOUNDER;
  62. rsp->data_out_res_cnt = cpu_to_be32(residual_count);
  63. } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
  64. /* residual data from an underflow read */
  65. rsp->flags = SRP_RSP_FLAG_DIUNDER;
  66. rsp->data_in_res_cnt = cpu_to_be32(residual_count);
  67. }
  68. } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
  69. if (se_cmd->data_direction == DMA_TO_DEVICE) {
  70. /* residual data from an overflow write */
  71. rsp->flags = SRP_RSP_FLAG_DOOVER;
  72. rsp->data_out_res_cnt = cpu_to_be32(residual_count);
  73. } else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
  74. /* residual data from an overflow read */
  75. rsp->flags = SRP_RSP_FLAG_DIOVER;
  76. rsp->data_in_res_cnt = cpu_to_be32(residual_count);
  77. }
  78. }
  79. }
  80. /**
  81. * connection_broken() - Determine if the connection to the client is good
  82. * @vscsi: Pointer to our adapter structure
  83. *
  84. * This function attempts to send a ping MAD to the client. If the call to
  85. * queue the request returns H_CLOSED then the connection has been broken
  86. * and the function returns TRUE.
  87. *
  88. * EXECUTION ENVIRONMENT:
  89. * Interrupt or Process environment
  90. */
  91. static bool connection_broken(struct scsi_info *vscsi)
  92. {
  93. struct viosrp_crq *crq;
  94. u64 buffer[2] = { 0, 0 };
  95. long h_return_code;
  96. bool rc = false;
  97. /* create a PING crq */
  98. crq = (struct viosrp_crq *)&buffer;
  99. crq->valid = VALID_CMD_RESP_EL;
  100. crq->format = MESSAGE_IN_CRQ;
  101. crq->status = PING;
  102. h_return_code = h_send_crq(vscsi->dds.unit_id,
  103. cpu_to_be64(buffer[MSG_HI]),
  104. cpu_to_be64(buffer[MSG_LOW]));
  105. dev_dbg(&vscsi->dev, "Connection_broken: rc %ld\n", h_return_code);
  106. if (h_return_code == H_CLOSED)
  107. rc = true;
  108. return rc;
  109. }
  110. /**
  111. * ibmvscsis_unregister_command_q() - Helper Function-Unregister Command Queue
  112. * @vscsi: Pointer to our adapter structure
  113. *
  114. * This function calls h_free_q then frees the interrupt bit etc.
  115. * It must release the lock before doing so because of the time it can take
  116. * for h_free_crq in PHYP
  117. * NOTE: the caller must make sure that state and or flags will prevent
  118. * interrupt handler from scheduling work.
  119. * NOTE: anyone calling this function may need to set the CRQ_CLOSED flag
  120. * we can't do it here, because we don't have the lock
  121. *
  122. * EXECUTION ENVIRONMENT:
  123. * Process level
  124. */
  125. static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi)
  126. {
  127. long qrc;
  128. long rc = ADAPT_SUCCESS;
  129. int ticks = 0;
  130. do {
  131. qrc = h_free_crq(vscsi->dds.unit_id);
  132. switch (qrc) {
  133. case H_SUCCESS:
  134. spin_lock_bh(&vscsi->intr_lock);
  135. vscsi->flags &= ~PREP_FOR_SUSPEND_FLAGS;
  136. spin_unlock_bh(&vscsi->intr_lock);
  137. break;
  138. case H_HARDWARE:
  139. case H_PARAMETER:
  140. dev_err(&vscsi->dev, "unregister_command_q: error from h_free_crq %ld\n",
  141. qrc);
  142. rc = ERROR;
  143. break;
  144. case H_BUSY:
  145. case H_LONG_BUSY_ORDER_1_MSEC:
  146. /* msleep not good for small values */
  147. usleep_range(1000, 2000);
  148. ticks += 1;
  149. break;
  150. case H_LONG_BUSY_ORDER_10_MSEC:
  151. usleep_range(10000, 20000);
  152. ticks += 10;
  153. break;
  154. case H_LONG_BUSY_ORDER_100_MSEC:
  155. msleep(100);
  156. ticks += 100;
  157. break;
  158. case H_LONG_BUSY_ORDER_1_SEC:
  159. ssleep(1);
  160. ticks += 1000;
  161. break;
  162. case H_LONG_BUSY_ORDER_10_SEC:
  163. ssleep(10);
  164. ticks += 10000;
  165. break;
  166. case H_LONG_BUSY_ORDER_100_SEC:
  167. ssleep(100);
  168. ticks += 100000;
  169. break;
  170. default:
  171. dev_err(&vscsi->dev, "unregister_command_q: unknown error %ld from h_free_crq\n",
  172. qrc);
  173. rc = ERROR;
  174. break;
  175. }
  176. /*
  177. * dont wait more then 300 seconds
  178. * ticks are in milliseconds more or less
  179. */
  180. if (ticks > 300000 && qrc != H_SUCCESS) {
  181. rc = ERROR;
  182. dev_err(&vscsi->dev, "Excessive wait for h_free_crq\n");
  183. }
  184. } while (qrc != H_SUCCESS && rc == ADAPT_SUCCESS);
  185. dev_dbg(&vscsi->dev, "Freeing CRQ: phyp rc %ld, rc %ld\n", qrc, rc);
  186. return rc;
  187. }
  188. /**
  189. * ibmvscsis_delete_client_info() - Helper function to Delete Client Info
  190. * @vscsi: Pointer to our adapter structure
  191. * @client_closed: True if client closed its queue
  192. *
  193. * Deletes information specific to the client when the client goes away
  194. *
  195. * EXECUTION ENVIRONMENT:
  196. * Interrupt or Process
  197. */
  198. static void ibmvscsis_delete_client_info(struct scsi_info *vscsi,
  199. bool client_closed)
  200. {
  201. vscsi->client_cap = 0;
  202. /*
  203. * Some things we don't want to clear if we're closing the queue,
  204. * because some clients don't resend the host handshake when they
  205. * get a transport event.
  206. */
  207. if (client_closed)
  208. vscsi->client_data.os_type = 0;
  209. }
  210. /**
  211. * ibmvscsis_free_command_q() - Free Command Queue
  212. * @vscsi: Pointer to our adapter structure
  213. *
  214. * This function calls unregister_command_q, then clears interrupts and
  215. * any pending interrupt acknowledgments associated with the command q.
  216. * It also clears memory if there is no error.
  217. *
  218. * PHYP did not meet the PAPR architecture so that we must give up the
  219. * lock. This causes a timing hole regarding state change. To close the
  220. * hole this routine does accounting on any change that occurred during
  221. * the time the lock is not held.
  222. * NOTE: must give up and then acquire the interrupt lock, the caller must
  223. * make sure that state and or flags will prevent interrupt handler from
  224. * scheduling work.
  225. *
  226. * EXECUTION ENVIRONMENT:
  227. * Process level, interrupt lock is held
  228. */
  229. static long ibmvscsis_free_command_q(struct scsi_info *vscsi)
  230. {
  231. int bytes;
  232. u32 flags_under_lock;
  233. u16 state_under_lock;
  234. long rc = ADAPT_SUCCESS;
  235. if (!(vscsi->flags & CRQ_CLOSED)) {
  236. vio_disable_interrupts(vscsi->dma_dev);
  237. state_under_lock = vscsi->new_state;
  238. flags_under_lock = vscsi->flags;
  239. vscsi->phyp_acr_state = 0;
  240. vscsi->phyp_acr_flags = 0;
  241. spin_unlock_bh(&vscsi->intr_lock);
  242. rc = ibmvscsis_unregister_command_q(vscsi);
  243. spin_lock_bh(&vscsi->intr_lock);
  244. if (state_under_lock != vscsi->new_state)
  245. vscsi->phyp_acr_state = vscsi->new_state;
  246. vscsi->phyp_acr_flags = ((~flags_under_lock) & vscsi->flags);
  247. if (rc == ADAPT_SUCCESS) {
  248. bytes = vscsi->cmd_q.size * PAGE_SIZE;
  249. memset(vscsi->cmd_q.base_addr, 0, bytes);
  250. vscsi->cmd_q.index = 0;
  251. vscsi->flags |= CRQ_CLOSED;
  252. ibmvscsis_delete_client_info(vscsi, false);
  253. }
  254. dev_dbg(&vscsi->dev, "free_command_q: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
  255. vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
  256. vscsi->phyp_acr_state);
  257. }
  258. return rc;
  259. }
  260. /**
  261. * ibmvscsis_cmd_q_dequeue() - Get valid Command element
  262. * @mask: Mask to use in case index wraps
  263. * @current_index: Current index into command queue
  264. * @base_addr: Pointer to start of command queue
  265. *
  266. * Returns a pointer to a valid command element or NULL, if the command
  267. * queue is empty
  268. *
  269. * EXECUTION ENVIRONMENT:
  270. * Interrupt environment, interrupt lock held
  271. */
  272. static struct viosrp_crq *ibmvscsis_cmd_q_dequeue(uint mask,
  273. uint *current_index,
  274. struct viosrp_crq *base_addr)
  275. {
  276. struct viosrp_crq *ptr;
  277. ptr = base_addr + *current_index;
  278. if (ptr->valid) {
  279. *current_index = (*current_index + 1) & mask;
  280. dma_rmb();
  281. } else {
  282. ptr = NULL;
  283. }
  284. return ptr;
  285. }
  286. /**
  287. * ibmvscsis_send_init_message() - send initialize message to the client
  288. * @vscsi: Pointer to our adapter structure
  289. * @format: Which Init Message format to send
  290. *
  291. * EXECUTION ENVIRONMENT:
  292. * Interrupt environment interrupt lock held
  293. */
  294. static long ibmvscsis_send_init_message(struct scsi_info *vscsi, u8 format)
  295. {
  296. struct viosrp_crq *crq;
  297. u64 buffer[2] = { 0, 0 };
  298. long rc;
  299. crq = (struct viosrp_crq *)&buffer;
  300. crq->valid = VALID_INIT_MSG;
  301. crq->format = format;
  302. rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
  303. cpu_to_be64(buffer[MSG_LOW]));
  304. return rc;
  305. }
  306. /**
  307. * ibmvscsis_check_init_msg() - Check init message valid
  308. * @vscsi: Pointer to our adapter structure
  309. * @format: Pointer to return format of Init Message, if any.
  310. * Set to UNUSED_FORMAT if no Init Message in queue.
  311. *
  312. * Checks if an initialize message was queued by the initiator
  313. * after the queue was created and before the interrupt was enabled.
  314. *
  315. * EXECUTION ENVIRONMENT:
  316. * Process level only, interrupt lock held
  317. */
  318. static long ibmvscsis_check_init_msg(struct scsi_info *vscsi, uint *format)
  319. {
  320. struct viosrp_crq *crq;
  321. long rc = ADAPT_SUCCESS;
  322. crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask, &vscsi->cmd_q.index,
  323. vscsi->cmd_q.base_addr);
  324. if (!crq) {
  325. *format = (uint)UNUSED_FORMAT;
  326. } else if (crq->valid == VALID_INIT_MSG && crq->format == INIT_MSG) {
  327. *format = (uint)INIT_MSG;
  328. crq->valid = INVALIDATE_CMD_RESP_EL;
  329. dma_rmb();
  330. /*
  331. * the caller has ensured no initialize message was
  332. * sent after the queue was
  333. * created so there should be no other message on the queue.
  334. */
  335. crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask,
  336. &vscsi->cmd_q.index,
  337. vscsi->cmd_q.base_addr);
  338. if (crq) {
  339. *format = (uint)(crq->format);
  340. rc = ERROR;
  341. crq->valid = INVALIDATE_CMD_RESP_EL;
  342. dma_rmb();
  343. }
  344. } else {
  345. *format = (uint)(crq->format);
  346. rc = ERROR;
  347. crq->valid = INVALIDATE_CMD_RESP_EL;
  348. dma_rmb();
  349. }
  350. return rc;
  351. }
  352. /**
  353. * ibmvscsis_disconnect() - Helper function to disconnect
  354. * @work: Pointer to work_struct, gives access to our adapter structure
  355. *
  356. * An error has occurred or the driver received a Transport event,
  357. * and the driver is requesting that the command queue be de-registered
  358. * in a safe manner. If there is no outstanding I/O then we can stop the
  359. * queue. If we are restarting the queue it will be reflected in the
  360. * the state of the adapter.
  361. *
  362. * EXECUTION ENVIRONMENT:
  363. * Process environment
  364. */
  365. static void ibmvscsis_disconnect(struct work_struct *work)
  366. {
  367. struct scsi_info *vscsi = container_of(work, struct scsi_info,
  368. proc_work);
  369. u16 new_state;
  370. bool wait_idle = false;
  371. spin_lock_bh(&vscsi->intr_lock);
  372. new_state = vscsi->new_state;
  373. vscsi->new_state = 0;
  374. vscsi->flags |= DISCONNECT_SCHEDULED;
  375. vscsi->flags &= ~SCHEDULE_DISCONNECT;
  376. dev_dbg(&vscsi->dev, "disconnect: flags 0x%x, state 0x%hx\n",
  377. vscsi->flags, vscsi->state);
  378. /*
  379. * check which state we are in and see if we
  380. * should transitition to the new state
  381. */
  382. switch (vscsi->state) {
  383. /* Should never be called while in this state. */
  384. case NO_QUEUE:
  385. /*
  386. * Can never transition from this state;
  387. * igonore errors and logout.
  388. */
  389. case UNCONFIGURING:
  390. break;
  391. /* can transition from this state to UNCONFIGURING */
  392. case ERR_DISCONNECT:
  393. if (new_state == UNCONFIGURING)
  394. vscsi->state = new_state;
  395. break;
  396. /*
  397. * Can transition from this state to to unconfiguring
  398. * or err disconnect.
  399. */
  400. case ERR_DISCONNECT_RECONNECT:
  401. switch (new_state) {
  402. case UNCONFIGURING:
  403. case ERR_DISCONNECT:
  404. vscsi->state = new_state;
  405. break;
  406. case WAIT_IDLE:
  407. break;
  408. default:
  409. break;
  410. }
  411. break;
  412. /* can transition from this state to UNCONFIGURING */
  413. case ERR_DISCONNECTED:
  414. if (new_state == UNCONFIGURING)
  415. vscsi->state = new_state;
  416. break;
  417. case WAIT_ENABLED:
  418. switch (new_state) {
  419. case UNCONFIGURING:
  420. vscsi->state = new_state;
  421. vscsi->flags |= RESPONSE_Q_DOWN;
  422. vscsi->flags &= ~(SCHEDULE_DISCONNECT |
  423. DISCONNECT_SCHEDULED);
  424. dma_rmb();
  425. if (vscsi->flags & CFG_SLEEPING) {
  426. vscsi->flags &= ~CFG_SLEEPING;
  427. complete(&vscsi->unconfig);
  428. }
  429. break;
  430. /* should never happen */
  431. case ERR_DISCONNECT:
  432. case ERR_DISCONNECT_RECONNECT:
  433. case WAIT_IDLE:
  434. dev_err(&vscsi->dev, "disconnect: invalid state %d for WAIT_IDLE\n",
  435. vscsi->state);
  436. break;
  437. }
  438. break;
  439. case WAIT_IDLE:
  440. switch (new_state) {
  441. case UNCONFIGURING:
  442. vscsi->flags |= RESPONSE_Q_DOWN;
  443. vscsi->state = new_state;
  444. vscsi->flags &= ~(SCHEDULE_DISCONNECT |
  445. DISCONNECT_SCHEDULED);
  446. ibmvscsis_free_command_q(vscsi);
  447. break;
  448. case ERR_DISCONNECT:
  449. case ERR_DISCONNECT_RECONNECT:
  450. vscsi->state = new_state;
  451. break;
  452. }
  453. break;
  454. /*
  455. * Initiator has not done a successful srp login
  456. * or has done a successful srp logout ( adapter was not
  457. * busy). In the first case there can be responses queued
  458. * waiting for space on the initiators response queue (MAD)
  459. * The second case the adapter is idle. Assume the worse case,
  460. * i.e. the second case.
  461. */
  462. case WAIT_CONNECTION:
  463. case CONNECTED:
  464. case SRP_PROCESSING:
  465. wait_idle = true;
  466. vscsi->state = new_state;
  467. break;
  468. /* can transition from this state to UNCONFIGURING */
  469. case UNDEFINED:
  470. if (new_state == UNCONFIGURING)
  471. vscsi->state = new_state;
  472. break;
  473. default:
  474. break;
  475. }
  476. if (wait_idle) {
  477. dev_dbg(&vscsi->dev, "disconnect start wait, active %d, sched %d\n",
  478. (int)list_empty(&vscsi->active_q),
  479. (int)list_empty(&vscsi->schedule_q));
  480. if (!list_empty(&vscsi->active_q) ||
  481. !list_empty(&vscsi->schedule_q)) {
  482. vscsi->flags |= WAIT_FOR_IDLE;
  483. dev_dbg(&vscsi->dev, "disconnect flags 0x%x\n",
  484. vscsi->flags);
  485. /*
  486. * This routine is can not be called with the interrupt
  487. * lock held.
  488. */
  489. spin_unlock_bh(&vscsi->intr_lock);
  490. wait_for_completion(&vscsi->wait_idle);
  491. spin_lock_bh(&vscsi->intr_lock);
  492. }
  493. dev_dbg(&vscsi->dev, "disconnect stop wait\n");
  494. ibmvscsis_adapter_idle(vscsi);
  495. }
  496. spin_unlock_bh(&vscsi->intr_lock);
  497. }
  498. /**
  499. * ibmvscsis_post_disconnect() - Schedule the disconnect
  500. * @vscsi: Pointer to our adapter structure
  501. * @new_state: State to move to after disconnecting
  502. * @flag_bits: Flags to turn on in adapter structure
  503. *
  504. * If it's already been scheduled, then see if we need to "upgrade"
  505. * the new state (if the one passed in is more "severe" than the
  506. * previous one).
  507. *
  508. * PRECONDITION:
  509. * interrupt lock is held
  510. */
  511. static void ibmvscsis_post_disconnect(struct scsi_info *vscsi, uint new_state,
  512. uint flag_bits)
  513. {
  514. uint state;
  515. /* check the validity of the new state */
  516. switch (new_state) {
  517. case UNCONFIGURING:
  518. case ERR_DISCONNECT:
  519. case ERR_DISCONNECT_RECONNECT:
  520. case WAIT_IDLE:
  521. break;
  522. default:
  523. dev_err(&vscsi->dev, "post_disconnect: Invalid new state %d\n",
  524. new_state);
  525. return;
  526. }
  527. vscsi->flags |= flag_bits;
  528. dev_dbg(&vscsi->dev, "post_disconnect: new_state 0x%x, flag_bits 0x%x, vscsi->flags 0x%x, state %hx\n",
  529. new_state, flag_bits, vscsi->flags, vscsi->state);
  530. if (!(vscsi->flags & (DISCONNECT_SCHEDULED | SCHEDULE_DISCONNECT))) {
  531. vscsi->flags |= SCHEDULE_DISCONNECT;
  532. vscsi->new_state = new_state;
  533. INIT_WORK(&vscsi->proc_work, ibmvscsis_disconnect);
  534. (void)queue_work(vscsi->work_q, &vscsi->proc_work);
  535. } else {
  536. if (vscsi->new_state)
  537. state = vscsi->new_state;
  538. else
  539. state = vscsi->state;
  540. switch (state) {
  541. case NO_QUEUE:
  542. case UNCONFIGURING:
  543. break;
  544. case ERR_DISCONNECTED:
  545. case ERR_DISCONNECT:
  546. case UNDEFINED:
  547. if (new_state == UNCONFIGURING)
  548. vscsi->new_state = new_state;
  549. break;
  550. case ERR_DISCONNECT_RECONNECT:
  551. switch (new_state) {
  552. case UNCONFIGURING:
  553. case ERR_DISCONNECT:
  554. vscsi->new_state = new_state;
  555. break;
  556. default:
  557. break;
  558. }
  559. break;
  560. case WAIT_ENABLED:
  561. case WAIT_IDLE:
  562. case WAIT_CONNECTION:
  563. case CONNECTED:
  564. case SRP_PROCESSING:
  565. vscsi->new_state = new_state;
  566. break;
  567. default:
  568. break;
  569. }
  570. }
  571. dev_dbg(&vscsi->dev, "Leaving post_disconnect: flags 0x%x, new_state 0x%x\n",
  572. vscsi->flags, vscsi->new_state);
  573. }
  574. /**
  575. * ibmvscsis_handle_init_compl_msg() - Respond to an Init Complete Message
  576. * @vscsi: Pointer to our adapter structure
  577. *
  578. * Must be called with interrupt lock held.
  579. */
  580. static long ibmvscsis_handle_init_compl_msg(struct scsi_info *vscsi)
  581. {
  582. long rc = ADAPT_SUCCESS;
  583. switch (vscsi->state) {
  584. case NO_QUEUE:
  585. case ERR_DISCONNECT:
  586. case ERR_DISCONNECT_RECONNECT:
  587. case ERR_DISCONNECTED:
  588. case UNCONFIGURING:
  589. case UNDEFINED:
  590. rc = ERROR;
  591. break;
  592. case WAIT_CONNECTION:
  593. vscsi->state = CONNECTED;
  594. break;
  595. case WAIT_IDLE:
  596. case SRP_PROCESSING:
  597. case CONNECTED:
  598. case WAIT_ENABLED:
  599. default:
  600. rc = ERROR;
  601. dev_err(&vscsi->dev, "init_msg: invalid state %d to get init compl msg\n",
  602. vscsi->state);
  603. ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
  604. break;
  605. }
  606. return rc;
  607. }
  608. /**
  609. * ibmvscsis_handle_init_msg() - Respond to an Init Message
  610. * @vscsi: Pointer to our adapter structure
  611. *
  612. * Must be called with interrupt lock held.
  613. */
  614. static long ibmvscsis_handle_init_msg(struct scsi_info *vscsi)
  615. {
  616. long rc = ADAPT_SUCCESS;
  617. switch (vscsi->state) {
  618. case WAIT_CONNECTION:
  619. rc = ibmvscsis_send_init_message(vscsi, INIT_COMPLETE_MSG);
  620. switch (rc) {
  621. case H_SUCCESS:
  622. vscsi->state = CONNECTED;
  623. break;
  624. case H_PARAMETER:
  625. dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
  626. rc);
  627. ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
  628. break;
  629. case H_DROPPED:
  630. dev_err(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
  631. rc);
  632. rc = ERROR;
  633. ibmvscsis_post_disconnect(vscsi,
  634. ERR_DISCONNECT_RECONNECT, 0);
  635. break;
  636. case H_CLOSED:
  637. dev_warn(&vscsi->dev, "init_msg: failed to send, rc %ld\n",
  638. rc);
  639. rc = 0;
  640. break;
  641. }
  642. break;
  643. case UNDEFINED:
  644. rc = ERROR;
  645. break;
  646. case UNCONFIGURING:
  647. break;
  648. case WAIT_ENABLED:
  649. case CONNECTED:
  650. case SRP_PROCESSING:
  651. case WAIT_IDLE:
  652. case NO_QUEUE:
  653. case ERR_DISCONNECT:
  654. case ERR_DISCONNECT_RECONNECT:
  655. case ERR_DISCONNECTED:
  656. default:
  657. rc = ERROR;
  658. dev_err(&vscsi->dev, "init_msg: invalid state %d to get init msg\n",
  659. vscsi->state);
  660. ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
  661. break;
  662. }
  663. return rc;
  664. }
  665. /**
  666. * ibmvscsis_init_msg() - Respond to an init message
  667. * @vscsi: Pointer to our adapter structure
  668. * @crq: Pointer to CRQ element containing the Init Message
  669. *
  670. * EXECUTION ENVIRONMENT:
  671. * Interrupt, interrupt lock held
  672. */
  673. static long ibmvscsis_init_msg(struct scsi_info *vscsi, struct viosrp_crq *crq)
  674. {
  675. long rc = ADAPT_SUCCESS;
  676. dev_dbg(&vscsi->dev, "init_msg: state 0x%hx\n", vscsi->state);
  677. rc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
  678. (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
  679. 0);
  680. if (rc == H_SUCCESS) {
  681. vscsi->client_data.partition_number =
  682. be64_to_cpu(*(u64 *)vscsi->map_buf);
  683. dev_dbg(&vscsi->dev, "init_msg, part num %d\n",
  684. vscsi->client_data.partition_number);
  685. } else {
  686. dev_dbg(&vscsi->dev, "init_msg h_vioctl rc %ld\n", rc);
  687. rc = ADAPT_SUCCESS;
  688. }
  689. if (crq->format == INIT_MSG) {
  690. rc = ibmvscsis_handle_init_msg(vscsi);
  691. } else if (crq->format == INIT_COMPLETE_MSG) {
  692. rc = ibmvscsis_handle_init_compl_msg(vscsi);
  693. } else {
  694. rc = ERROR;
  695. dev_err(&vscsi->dev, "init_msg: invalid format %d\n",
  696. (uint)crq->format);
  697. ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
  698. }
  699. return rc;
  700. }
/**
 * ibmvscsis_establish_new_q() - Establish new CRQ queue
 * @vscsi:	Pointer to our adapter structure
 *
 * Re-arms the freshly registered CRQ: enables the Prepare-for-Suspend
 * notification, resets per-connection bookkeeping, re-enables VIO
 * interrupts and (re)starts the init-message handshake.
 *
 * Must be called with interrupt lock held.
 */
static long ibmvscsis_establish_new_q(struct scsi_info *vscsi)
{
	long rc = ADAPT_SUCCESS;
	uint format;

	/*
	 * Ask phyp to tell us before a partition suspend (timeout arg
	 * 30000).  H_NOT_FOUND just means the facility is unavailable
	 * on this system, so it is not reported as an error.
	 */
	rc = h_vioctl(vscsi->dds.unit_id, H_ENABLE_PREPARE_FOR_SUSPEND, 30000,
		      0, 0, 0, 0);
	if (rc == H_SUCCESS)
		vscsi->flags |= PREP_FOR_SUSPEND_ENABLED;
	else if (rc != H_NOT_FOUND)
		dev_err(&vscsi->dev, "Error from Enable Prepare for Suspend: %ld\n",
			rc);

	/* Reset per-connection state; keep only the long-lived flag bits. */
	vscsi->flags &= PRESERVE_FLAG_FIELDS;
	vscsi->rsp_q_timer.timer_pops = 0;
	vscsi->debit = 0;
	vscsi->credit = 0;

	rc = vio_enable_interrupts(vscsi->dma_dev);
	if (rc) {
		dev_warn(&vscsi->dev, "establish_new_q: failed to enable interrupts, rc %ld\n",
			 rc);
		return rc;
	}

	/* See if the client already queued an init message for us. */
	rc = ibmvscsis_check_init_msg(vscsi, &format);
	if (rc) {
		dev_err(&vscsi->dev, "establish_new_q: check_init_msg failed, rc %ld\n",
			rc);
		return rc;
	}

	if (format == UNUSED_FORMAT) {
		/* Queue was empty: initiate the handshake ourselves. */
		rc = ibmvscsis_send_init_message(vscsi, INIT_MSG);
		switch (rc) {
		case H_SUCCESS:
		case H_DROPPED:
		case H_CLOSED:
			/* sent, or the client is not there yet - both OK */
			rc = ADAPT_SUCCESS;
			break;

		case H_PARAMETER:
		case H_HARDWARE:
			break;

		default:
			/* unexpected hcall result: give up on this queue */
			vscsi->state = UNDEFINED;
			rc = H_HARDWARE;
			break;
		}
	} else if (format == INIT_MSG) {
		/* The client beat us to it; answer its INIT message. */
		rc = ibmvscsis_handle_init_msg(vscsi);
	}

	return rc;
}
/**
 * ibmvscsis_reset_queue() - Reset CRQ Queue
 * @vscsi:	Pointer to our adapter structure
 *
 * This function calls h_free_q and then calls h_reg_q and does all
 * of the bookkeeping to get us back to where we can communicate.
 *
 * Actually, we don't always call h_free_crq.  A problem was discovered
 * where one partition would close and reopen his queue, which would
 * cause his partner to get a transport event, which would cause him to
 * close and reopen his queue, which would cause the original partition
 * to get a transport event, etc., etc.  To prevent this, we don't
 * actually close our queue if the client initiated the reset, (i.e.
 * either we got a transport event or we have detected that the client's
 * queue is gone)
 *
 * EXECUTION ENVIRONMENT:
 *	Process environment, called with interrupt lock held
 */
static void ibmvscsis_reset_queue(struct scsi_info *vscsi)
{
	int bytes;
	long rc = ADAPT_SUCCESS;

	dev_dbg(&vscsi->dev, "reset_queue: flags 0x%x\n", vscsi->flags);

	/* don't reset, the client did it for us */
	if (vscsi->flags & (CLIENT_FAILED | TRANS_EVENT)) {
		/* Just clear bookkeeping and wait for a new connection. */
		vscsi->flags &= PRESERVE_FLAG_FIELDS;
		vscsi->rsp_q_timer.timer_pops = 0;
		vscsi->debit = 0;
		vscsi->credit = 0;
		vscsi->state = WAIT_CONNECTION;
		vio_enable_interrupts(vscsi->dma_dev);
	} else {
		/*
		 * We initiated the reset: free the queue and re-register it
		 * with the hypervisor.  NOTE(review): per the comment in
		 * adapter_idle, free_command_q can drop and reacquire the
		 * interrupt lock.
		 */
		rc = ibmvscsis_free_command_q(vscsi);
		if (rc == ADAPT_SUCCESS) {
			vscsi->state = WAIT_CONNECTION;

			bytes = vscsi->cmd_q.size * PAGE_SIZE;
			rc = h_reg_crq(vscsi->dds.unit_id,
				       vscsi->cmd_q.crq_token, bytes);
			/* H_CLOSED: registered, client side not up yet */
			if (rc == H_CLOSED || rc == H_SUCCESS) {
				rc = ibmvscsis_establish_new_q(vscsi);
			}

			if (rc != ADAPT_SUCCESS) {
				dev_dbg(&vscsi->dev, "reset_queue: reg_crq rc %ld\n",
					rc);

				vscsi->state = ERR_DISCONNECTED;
				vscsi->flags |= RESPONSE_Q_DOWN;
				ibmvscsis_free_command_q(vscsi);
			}
		} else {
			/* could not free the queue; mark the adapter down */
			vscsi->state = ERR_DISCONNECTED;
			vscsi->flags |= RESPONSE_Q_DOWN;
		}
	}
}
  810. /**
  811. * ibmvscsis_free_cmd_resources() - Free command resources
  812. * @vscsi: Pointer to our adapter structure
  813. * @cmd: Command which is not longer in use
  814. *
  815. * Must be called with interrupt lock held.
  816. */
  817. static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi,
  818. struct ibmvscsis_cmd *cmd)
  819. {
  820. struct iu_entry *iue = cmd->iue;
  821. switch (cmd->type) {
  822. case TASK_MANAGEMENT:
  823. case SCSI_CDB:
  824. /*
  825. * When the queue goes down this value is cleared, so it
  826. * cannot be cleared in this general purpose function.
  827. */
  828. if (vscsi->debit)
  829. vscsi->debit -= 1;
  830. break;
  831. case ADAPTER_MAD:
  832. vscsi->flags &= ~PROCESSING_MAD;
  833. break;
  834. case UNSET_TYPE:
  835. break;
  836. default:
  837. dev_err(&vscsi->dev, "free_cmd_resources unknown type %d\n",
  838. cmd->type);
  839. break;
  840. }
  841. cmd->iue = NULL;
  842. list_add_tail(&cmd->list, &vscsi->free_cmd);
  843. srp_iu_put(iue);
  844. if (list_empty(&vscsi->active_q) && list_empty(&vscsi->schedule_q) &&
  845. list_empty(&vscsi->waiting_rsp) && (vscsi->flags & WAIT_FOR_IDLE)) {
  846. vscsi->flags &= ~WAIT_FOR_IDLE;
  847. complete(&vscsi->wait_idle);
  848. }
  849. }
  850. /**
  851. * ibmvscsis_ready_for_suspend() - Helper function to call VIOCTL
  852. * @vscsi: Pointer to our adapter structure
  853. * @idle: Indicates whether we were called from adapter_idle. This
  854. * is important to know if we need to do a disconnect, since if
  855. * we're called from adapter_idle, we're still processing the
  856. * current disconnect, so we can't just call post_disconnect.
  857. *
  858. * This function is called when the adapter is idle when phyp has sent
  859. * us a Prepare for Suspend Transport Event.
  860. *
  861. * EXECUTION ENVIRONMENT:
  862. * Process or interrupt environment called with interrupt lock held
  863. */
  864. static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle)
  865. {
  866. long rc = 0;
  867. struct viosrp_crq *crq;
  868. /* See if there is a Resume event in the queue */
  869. crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
  870. dev_dbg(&vscsi->dev, "ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n",
  871. vscsi->flags, vscsi->state, (int)crq->valid);
  872. if (!(vscsi->flags & PREP_FOR_SUSPEND_ABORTED) && !(crq->valid)) {
  873. rc = h_vioctl(vscsi->dds.unit_id, H_READY_FOR_SUSPEND, 0, 0, 0,
  874. 0, 0);
  875. if (rc) {
  876. dev_err(&vscsi->dev, "Ready for Suspend Vioctl failed: %ld\n",
  877. rc);
  878. rc = 0;
  879. }
  880. } else if (((vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE) &&
  881. (vscsi->flags & PREP_FOR_SUSPEND_ABORTED)) ||
  882. ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
  883. (crq->format != RESUME_FROM_SUSP)))) {
  884. if (idle) {
  885. vscsi->state = ERR_DISCONNECT_RECONNECT;
  886. ibmvscsis_reset_queue(vscsi);
  887. rc = -1;
  888. } else if (vscsi->state == CONNECTED) {
  889. ibmvscsis_post_disconnect(vscsi,
  890. ERR_DISCONNECT_RECONNECT, 0);
  891. }
  892. vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;
  893. if ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) ||
  894. (crq->format != RESUME_FROM_SUSP)))
  895. dev_err(&vscsi->dev, "Invalid element in CRQ after Prepare for Suspend");
  896. }
  897. vscsi->flags &= ~(PREP_FOR_SUSPEND_PENDING | PREP_FOR_SUSPEND_ABORTED);
  898. return rc;
  899. }
/**
 * ibmvscsis_trans_event() - Handle a Transport Event
 * @vscsi:	Pointer to our adapter structure
 * @crq:	Pointer to CRQ entry containing the Transport Event
 *
 * Do the logic to close the I_T nexus.  This function may not
 * behave to specification.
 *
 * Return: non-zero when a disconnect has been scheduled.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_trans_event(struct scsi_info *vscsi,
				  struct viosrp_crq *crq)
{
	long rc = ADAPT_SUCCESS;

	dev_dbg(&vscsi->dev, "trans_event: format %d, flags 0x%x, state 0x%hx\n",
		(int)crq->format, vscsi->flags, vscsi->state);

	switch (crq->format) {
	case MIGRATED:
	case PARTNER_FAILED:
	case PARTNER_DEREGISTER:
		/* The client side is gone; drop any cached client info. */
		ibmvscsis_delete_client_info(vscsi, true);
		if (crq->format == MIGRATED)
			vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;
		switch (vscsi->state) {
		case NO_QUEUE:
		case ERR_DISCONNECTED:
		case UNDEFINED:
			/* already disconnected; nothing to do */
			break;

		case UNCONFIGURING:
			vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
			break;

		case WAIT_ENABLED:
			break;

		case WAIT_CONNECTION:
			break;

		case CONNECTED:
			/* connected but idle: just drain to WAIT_IDLE */
			ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
						  (RESPONSE_Q_DOWN |
						   TRANS_EVENT));
			break;

		case SRP_PROCESSING:
			/* warn if the connection died with work in flight */
			if ((vscsi->debit > 0) ||
			    !list_empty(&vscsi->schedule_q) ||
			    !list_empty(&vscsi->waiting_rsp) ||
			    !list_empty(&vscsi->active_q)) {
				dev_dbg(&vscsi->dev, "debit %d, sched %d, wait %d, active %d\n",
					vscsi->debit,
					(int)list_empty(&vscsi->schedule_q),
					(int)list_empty(&vscsi->waiting_rsp),
					(int)list_empty(&vscsi->active_q));
				dev_warn(&vscsi->dev, "connection lost with outstanding work\n");
			} else {
				dev_dbg(&vscsi->dev, "trans_event: SRP Processing, but no outstanding work\n");
			}

			ibmvscsis_post_disconnect(vscsi, WAIT_IDLE,
						  (RESPONSE_Q_DOWN |
						   TRANS_EVENT));
			break;

		case ERR_DISCONNECT:
		case ERR_DISCONNECT_RECONNECT:
		case WAIT_IDLE:
			/* disconnect already in flight; just mark the flags */
			vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT);
			break;
		}
		break;

	case PREPARE_FOR_SUSPEND:
		dev_dbg(&vscsi->dev, "Prep for Suspend, crq status = 0x%x\n",
			(int)crq->status);
		switch (vscsi->state) {
		case ERR_DISCONNECTED:
		case WAIT_CONNECTION:
		case CONNECTED:
			/* nothing in flight; tell phyp we are ready now */
			ibmvscsis_ready_for_suspend(vscsi, false);
			break;

		case SRP_PROCESSING:
			/* remember where to resume after draining to idle */
			vscsi->resume_state = vscsi->state;
			vscsi->flags |= PREP_FOR_SUSPEND_PENDING;
			if (crq->status == CRQ_ENTRY_OVERWRITTEN)
				vscsi->flags |= PREP_FOR_SUSPEND_OVERWRITE;
			ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);
			break;

		case NO_QUEUE:
		case UNDEFINED:
		case UNCONFIGURING:
		case WAIT_ENABLED:
		case ERR_DISCONNECT:
		case ERR_DISCONNECT_RECONNECT:
		case WAIT_IDLE:
			dev_err(&vscsi->dev, "Invalid state for Prepare for Suspend Trans Event: 0x%x\n",
				vscsi->state);
			break;
		}
		break;

	case RESUME_FROM_SUSP:
		dev_dbg(&vscsi->dev, "Resume from Suspend, crq status = 0x%x\n",
			(int)crq->status);
		if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
			/* still draining; note that the suspend was aborted */
			vscsi->flags |= PREP_FOR_SUSPEND_ABORTED;
		} else {
			if ((crq->status == CRQ_ENTRY_OVERWRITTEN) ||
			    (vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE)) {
				ibmvscsis_post_disconnect(vscsi,
							  ERR_DISCONNECT_RECONNECT,
							  0);
				vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE;
			}
		}
		break;

	default:
		rc = ERROR;
		dev_err(&vscsi->dev, "trans_event: invalid format %d\n",
			(uint)crq->format);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT,
					  RESPONSE_Q_DOWN);
		break;
	}

	/* tell the caller whether a disconnect is now scheduled */
	rc = vscsi->flags & SCHEDULE_DISCONNECT;

	dev_dbg(&vscsi->dev, "Leaving trans_event: flags 0x%x, state 0x%hx, rc %ld\n",
		vscsi->flags, vscsi->state, rc);

	return rc;
}
/**
 * ibmvscsis_poll_cmd_q() - Poll Command Queue
 * @vscsi:	Pointer to our adapter structure
 *
 * Called to handle command elements that may have arrived while
 * interrupts were disabled.
 *
 * EXECUTION ENVIRONMENT:
 *	intr_lock must be held
 */
static void ibmvscsis_poll_cmd_q(struct scsi_info *vscsi)
{
	struct viosrp_crq *crq;
	long rc;
	bool ack = true;
	volatile u8 valid;

	dev_dbg(&vscsi->dev, "poll_cmd_q: flags 0x%x, state 0x%hx, q index %ud\n",
		vscsi->flags, vscsi->state, vscsi->cmd_q.index);

	/* non-zero when a disconnect is pending; skip normal parsing then */
	rc = vscsi->flags & SCHEDULE_DISCONNECT;
	crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
	valid = crq->valid;
	dma_rmb();	/* order the valid-byte read before the element body */

	while (valid) {
		/*
		 * The label sits inside the loop so the post-interrupt
		 * re-check at the bottom can jump straight back in.
		 */
poll_work:
		/* advance the index, wrapping with the queue mask */
		vscsi->cmd_q.index =
			(vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;

		if (!rc) {
			rc = ibmvscsis_parse_command(vscsi, crq);
		} else {
			if ((uint)crq->valid == VALID_TRANS_EVENT) {
				/*
				 * must service the transport layer events even
				 * in an error state, dont break out until all
				 * the consecutive transport events have been
				 * processed
				 */
				rc = ibmvscsis_trans_event(vscsi, crq);
			} else if (vscsi->flags & TRANS_EVENT) {
				/*
				 * if a tranport event has occurred leave
				 * everything but transport events on the queue
				 */
				dev_dbg(&vscsi->dev, "poll_cmd_q, ignoring\n");

				/*
				 * need to decrement the queue index so we can
				 * look at the elment again
				 */
				if (vscsi->cmd_q.index)
					vscsi->cmd_q.index -= 1;
				else
					/*
					 * index is at 0 it just wrapped.
					 * have it index last element in q
					 */
					vscsi->cmd_q.index = vscsi->cmd_q.mask;
				break;
			}
		}

		/* mark this element consumed, then look at the next one */
		crq->valid = INVALIDATE_CMD_RESP_EL;

		crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
		valid = crq->valid;
		dma_rmb();
	}

	if (!rc) {
		if (ack) {
			vio_enable_interrupts(vscsi->dma_dev);
			ack = false;
			dev_dbg(&vscsi->dev, "poll_cmd_q, reenabling interrupts\n");
		}

		/*
		 * An element may have arrived in the window before
		 * interrupts were re-enabled; if so, process it too.
		 */
		valid = crq->valid;
		dma_rmb();
		if (valid)
			goto poll_work;
	}

	dev_dbg(&vscsi->dev, "Leaving poll_cmd_q: rc %ld\n", rc);
}
  1098. /**
  1099. * ibmvscsis_free_cmd_qs() - Free elements in queue
  1100. * @vscsi: Pointer to our adapter structure
  1101. *
  1102. * Free all of the elements on all queues that are waiting for
  1103. * whatever reason.
  1104. *
  1105. * PRECONDITION:
  1106. * Called with interrupt lock held
  1107. */
  1108. static void ibmvscsis_free_cmd_qs(struct scsi_info *vscsi)
  1109. {
  1110. struct ibmvscsis_cmd *cmd, *nxt;
  1111. dev_dbg(&vscsi->dev, "free_cmd_qs: waiting_rsp empty %d, timer starter %d\n",
  1112. (int)list_empty(&vscsi->waiting_rsp),
  1113. vscsi->rsp_q_timer.started);
  1114. list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
  1115. list_del(&cmd->list);
  1116. ibmvscsis_free_cmd_resources(vscsi, cmd);
  1117. }
  1118. }
  1119. /**
  1120. * ibmvscsis_get_free_cmd() - Get free command from list
  1121. * @vscsi: Pointer to our adapter structure
  1122. *
  1123. * Must be called with interrupt lock held.
  1124. */
  1125. static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi)
  1126. {
  1127. struct ibmvscsis_cmd *cmd = NULL;
  1128. struct iu_entry *iue;
  1129. iue = srp_iu_get(&vscsi->target);
  1130. if (iue) {
  1131. cmd = list_first_entry_or_null(&vscsi->free_cmd,
  1132. struct ibmvscsis_cmd, list);
  1133. if (cmd) {
  1134. if (cmd->abort_cmd)
  1135. cmd->abort_cmd = NULL;
  1136. cmd->flags &= ~(DELAY_SEND);
  1137. list_del(&cmd->list);
  1138. cmd->iue = iue;
  1139. cmd->type = UNSET_TYPE;
  1140. memset(&cmd->se_cmd, 0, sizeof(cmd->se_cmd));
  1141. } else {
  1142. srp_iu_put(iue);
  1143. }
  1144. }
  1145. return cmd;
  1146. }
  1147. /**
  1148. * ibmvscsis_adapter_idle() - Helper function to handle idle adapter
  1149. * @vscsi: Pointer to our adapter structure
  1150. *
  1151. * This function is called when the adapter is idle when the driver
  1152. * is attempting to clear an error condition.
  1153. * The adapter is considered busy if any of its cmd queues
  1154. * are non-empty. This function can be invoked
  1155. * from the off level disconnect function.
  1156. *
  1157. * EXECUTION ENVIRONMENT:
  1158. * Process environment called with interrupt lock held
  1159. */
  1160. static void ibmvscsis_adapter_idle(struct scsi_info *vscsi)
  1161. {
  1162. int free_qs = false;
  1163. long rc = 0;
  1164. dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx\n",
  1165. vscsi->flags, vscsi->state);
  1166. /* Only need to free qs if we're disconnecting from client */
  1167. if (vscsi->state != WAIT_CONNECTION || vscsi->flags & TRANS_EVENT)
  1168. free_qs = true;
  1169. switch (vscsi->state) {
  1170. case UNCONFIGURING:
  1171. ibmvscsis_free_command_q(vscsi);
  1172. dma_rmb();
  1173. isync();
  1174. if (vscsi->flags & CFG_SLEEPING) {
  1175. vscsi->flags &= ~CFG_SLEEPING;
  1176. complete(&vscsi->unconfig);
  1177. }
  1178. break;
  1179. case ERR_DISCONNECT_RECONNECT:
  1180. ibmvscsis_reset_queue(vscsi);
  1181. dev_dbg(&vscsi->dev, "adapter_idle, disc_rec: flags 0x%x\n",
  1182. vscsi->flags);
  1183. break;
  1184. case ERR_DISCONNECT:
  1185. ibmvscsis_free_command_q(vscsi);
  1186. vscsi->flags &= ~(SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED);
  1187. vscsi->flags |= RESPONSE_Q_DOWN;
  1188. if (vscsi->tport.enabled)
  1189. vscsi->state = ERR_DISCONNECTED;
  1190. else
  1191. vscsi->state = WAIT_ENABLED;
  1192. dev_dbg(&vscsi->dev, "adapter_idle, disc: flags 0x%x, state 0x%hx\n",
  1193. vscsi->flags, vscsi->state);
  1194. break;
  1195. case WAIT_IDLE:
  1196. vscsi->rsp_q_timer.timer_pops = 0;
  1197. vscsi->debit = 0;
  1198. vscsi->credit = 0;
  1199. if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) {
  1200. vscsi->state = vscsi->resume_state;
  1201. vscsi->resume_state = 0;
  1202. rc = ibmvscsis_ready_for_suspend(vscsi, true);
  1203. vscsi->flags &= ~DISCONNECT_SCHEDULED;
  1204. if (rc)
  1205. break;
  1206. } else if (vscsi->flags & TRANS_EVENT) {
  1207. vscsi->state = WAIT_CONNECTION;
  1208. vscsi->flags &= PRESERVE_FLAG_FIELDS;
  1209. } else {
  1210. vscsi->state = CONNECTED;
  1211. vscsi->flags &= ~DISCONNECT_SCHEDULED;
  1212. }
  1213. dev_dbg(&vscsi->dev, "adapter_idle, wait: flags 0x%x, state 0x%hx\n",
  1214. vscsi->flags, vscsi->state);
  1215. ibmvscsis_poll_cmd_q(vscsi);
  1216. break;
  1217. case ERR_DISCONNECTED:
  1218. vscsi->flags &= ~DISCONNECT_SCHEDULED;
  1219. dev_dbg(&vscsi->dev, "adapter_idle, disconnected: flags 0x%x, state 0x%hx\n",
  1220. vscsi->flags, vscsi->state);
  1221. break;
  1222. default:
  1223. dev_err(&vscsi->dev, "adapter_idle: in invalid state %d\n",
  1224. vscsi->state);
  1225. break;
  1226. }
  1227. if (free_qs)
  1228. ibmvscsis_free_cmd_qs(vscsi);
  1229. /*
  1230. * There is a timing window where we could lose a disconnect request.
  1231. * The known path to this window occurs during the DISCONNECT_RECONNECT
  1232. * case above: reset_queue calls free_command_q, which will release the
  1233. * interrupt lock. During that time, a new post_disconnect call can be
  1234. * made with a "more severe" state (DISCONNECT or UNCONFIGURING).
  1235. * Because the DISCONNECT_SCHEDULED flag is already set, post_disconnect
  1236. * will only set the new_state. Now free_command_q reacquires the intr
  1237. * lock and clears the DISCONNECT_SCHEDULED flag (using PRESERVE_FLAG_
  1238. * FIELDS), and the disconnect is lost. This is particularly bad when
  1239. * the new disconnect was for UNCONFIGURING, since the unconfigure hangs
  1240. * forever.
  1241. * Fix is that free command queue sets acr state and acr flags if there
  1242. * is a change under the lock
  1243. * note free command queue writes to this state it clears it
  1244. * before releasing the lock, different drivers call the free command
  1245. * queue different times so dont initialize above
  1246. */
  1247. if (vscsi->phyp_acr_state != 0) {
  1248. /*
  1249. * set any bits in flags that may have been cleared by
  1250. * a call to free command queue in switch statement
  1251. * or reset queue
  1252. */
  1253. vscsi->flags |= vscsi->phyp_acr_flags;
  1254. ibmvscsis_post_disconnect(vscsi, vscsi->phyp_acr_state, 0);
  1255. vscsi->phyp_acr_state = 0;
  1256. vscsi->phyp_acr_flags = 0;
  1257. dev_dbg(&vscsi->dev, "adapter_idle: flags 0x%x, state 0x%hx, acr_flags 0x%x, acr_state 0x%hx\n",
  1258. vscsi->flags, vscsi->state, vscsi->phyp_acr_flags,
  1259. vscsi->phyp_acr_state);
  1260. }
  1261. dev_dbg(&vscsi->dev, "Leaving adapter_idle: flags 0x%x, state 0x%hx, new_state 0x%x\n",
  1262. vscsi->flags, vscsi->state, vscsi->new_state);
  1263. }
  1264. /**
  1265. * ibmvscsis_copy_crq_packet() - Copy CRQ Packet
  1266. * @vscsi: Pointer to our adapter structure
  1267. * @cmd: Pointer to command element to use to process the request
  1268. * @crq: Pointer to CRQ entry containing the request
  1269. *
  1270. * Copy the srp information unit from the hosted
  1271. * partition using remote dma
  1272. *
  1273. * EXECUTION ENVIRONMENT:
  1274. * Interrupt, interrupt lock held
  1275. */
  1276. static long ibmvscsis_copy_crq_packet(struct scsi_info *vscsi,
  1277. struct ibmvscsis_cmd *cmd,
  1278. struct viosrp_crq *crq)
  1279. {
  1280. struct iu_entry *iue = cmd->iue;
  1281. long rc = 0;
  1282. u16 len;
  1283. len = be16_to_cpu(crq->IU_length);
  1284. if ((len > SRP_MAX_IU_LEN) || (len == 0)) {
  1285. dev_err(&vscsi->dev, "copy_crq: Invalid len %d passed", len);
  1286. ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
  1287. return SRP_VIOLATION;
  1288. }
  1289. rc = h_copy_rdma(len, vscsi->dds.window[REMOTE].liobn,
  1290. be64_to_cpu(crq->IU_data_ptr),
  1291. vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma);
  1292. switch (rc) {
  1293. case H_SUCCESS:
  1294. cmd->init_time = mftb();
  1295. iue->remote_token = crq->IU_data_ptr;
  1296. iue->iu_len = len;
  1297. dev_dbg(&vscsi->dev, "copy_crq: ioba 0x%llx, init_time 0x%llx\n",
  1298. be64_to_cpu(crq->IU_data_ptr), cmd->init_time);
  1299. break;
  1300. case H_PERMISSION:
  1301. if (connection_broken(vscsi))
  1302. ibmvscsis_post_disconnect(vscsi,
  1303. ERR_DISCONNECT_RECONNECT,
  1304. (RESPONSE_Q_DOWN |
  1305. CLIENT_FAILED));
  1306. else
  1307. ibmvscsis_post_disconnect(vscsi,
  1308. ERR_DISCONNECT_RECONNECT, 0);
  1309. dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
  1310. rc);
  1311. break;
  1312. case H_DEST_PARM:
  1313. case H_SOURCE_PARM:
  1314. default:
  1315. dev_err(&vscsi->dev, "copy_crq: h_copy_rdma failed, rc %ld\n",
  1316. rc);
  1317. ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
  1318. break;
  1319. }
  1320. return rc;
  1321. }
/**
 * ibmvscsis_adapter_info() - Service an Adapter Info MAnagement Data gram
 * @vscsi:	Pointer to our adapter structure
 * @iue:	Information Unit containing the Adapter Info MAD request
 *
 * Pulls the client's adapter-info buffer in with h_copy_rdma, records the
 * client's identity in vscsi->client_data, fills the same buffer with our
 * own identity, and copies it back to the client.  On copy failure the
 * adapter is posted for disconnect/reconnect.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, adapter lock is held
 */
static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
				   struct iu_entry *iue)
{
	struct viosrp_adapter_info *mad = &vio_iu(iue)->mad.adapter_info;
	struct mad_adapter_info_data *info;
	uint flag_bits = 0;
	dma_addr_t token;
	long rc;

	mad->common.status = cpu_to_be16(VIOSRP_MAD_SUCCESS);

	/* Client buffer must not be larger than the structure we exchange. */
	if (be16_to_cpu(mad->common.length) > sizeof(*info)) {
		mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
		return 0;
	}

	/* GFP_ATOMIC: we are in interrupt context with the lock held. */
	info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token,
				  GFP_ATOMIC);
	if (!info) {
		dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
			iue->target);
		mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
		return 0;
	}

	/* Get remote info */
	rc = h_copy_rdma(be16_to_cpu(mad->common.length),
			 vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(mad->buffer),
			 vscsi->dds.window[LOCAL].liobn, token);

	if (rc != H_SUCCESS) {
		if (rc == H_PERMISSION) {
			if (connection_broken(vscsi))
				flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
		}
		dev_warn(&vscsi->dev, "adapter_info: h_copy_rdma from client failed, rc %ld\n",
			 rc);
		dev_dbg(&vscsi->dev, "adapter_info: ioba 0x%llx, flags 0x%x, flag_bits 0x%x\n",
			be64_to_cpu(mad->buffer), vscsi->flags, flag_bits);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
					  flag_bits);
		goto free_dma;
	}

	/*
	 * Copy client info, but ignore partition number, which we
	 * already got from phyp - unless we failed to get it from
	 * phyp (e.g. if we're running on a p5 system).
	 */
	if (vscsi->client_data.partition_number == 0)
		vscsi->client_data.partition_number =
			be32_to_cpu(info->partition_number);
	/*
	 * NOTE(review): strncpy may leave these unterminated when the
	 * source fills the field; the protocol fields appear fixed-size,
	 * but confirm consumers do not assume NUL termination.
	 */
	strncpy(vscsi->client_data.srp_version, info->srp_version,
		sizeof(vscsi->client_data.srp_version));
	strncpy(vscsi->client_data.partition_name, info->partition_name,
		sizeof(vscsi->client_data.partition_name));
	vscsi->client_data.mad_version = be32_to_cpu(info->mad_version);
	vscsi->client_data.os_type = be32_to_cpu(info->os_type);

	/* Copy our info */
	strncpy(info->srp_version, SRP_VERSION,
		sizeof(info->srp_version));
	strncpy(info->partition_name, vscsi->dds.partition_name,
		sizeof(info->partition_name));
	info->partition_number = cpu_to_be32(vscsi->dds.partition_num);
	info->mad_version = cpu_to_be32(MAD_VERSION_1);
	info->os_type = cpu_to_be32(LINUX);
	memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu));
	info->port_max_txu[0] = cpu_to_be32(MAX_TXU);

	/* Ensure the buffer is fully written before the RDMA copy-out. */
	dma_wmb();
	rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn,
			 token, vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(mad->buffer));
	switch (rc) {
	case H_SUCCESS:
		break;

	case H_SOURCE_PARM:
	case H_DEST_PARM:
	case H_PERMISSION:
		if (connection_broken(vscsi))
			flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
		/* Fall through */
	default:
		dev_err(&vscsi->dev, "adapter_info: h_copy_rdma to client failed, rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi,
					  ERR_DISCONNECT_RECONNECT,
					  flag_bits);
		break;
	}

free_dma:
	dma_free_coherent(&vscsi->dma_dev->dev, sizeof(*info), info, token);
	dev_dbg(&vscsi->dev, "Leaving adapter_info, rc %ld\n", rc);

	return rc;
}
/**
 * ibmvscsis_cap_mad() - Service a Capabilities MAnagement Data gram
 * @vscsi:	Pointer to our adapter structure
 * @iue:	Information Unit containing the Capabilities MAD request
 *
 * Pulls the client's capability list in via h_copy_rdma, walks the
 * variable-length entries (no capability types are currently supported,
 * so each entry is marked unsupported and CAP_LIST_SUPPORTED is cleared),
 * then copies the annotated list back to the client.
 *
 * NOTE: if you return an error from this routine you must be
 * disconnecting or you will cause a hang
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt called with adapter lock held
 */
static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
{
	struct viosrp_capabilities *mad = &vio_iu(iue)->mad.capabilities;
	struct capabilities *cap;
	struct mad_capability_common *common;
	dma_addr_t token;
	u16 olen, len, status, min_len, cap_len;
	u32 flag;
	uint flag_bits = 0;
	long rc = 0;

	olen = be16_to_cpu(mad->common.length);
	/*
	 * struct capabilities hardcodes a couple capabilities after the
	 * header, but the capabilities can actually be in any order.
	 */
	min_len = offsetof(struct capabilities, migration);
	if ((olen < min_len) || (olen > PAGE_SIZE)) {
		dev_warn(&vscsi->dev, "cap_mad: invalid len %d\n", olen);
		mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
		return 0;
	}

	cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token,
				 GFP_ATOMIC);
	if (!cap) {
		dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
			iue->target);
		mad->common.status = cpu_to_be16(VIOSRP_MAD_FAILED);
		return 0;
	}
	rc = h_copy_rdma(olen, vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(mad->buffer),
			 vscsi->dds.window[LOCAL].liobn, token);
	if (rc == H_SUCCESS) {
		strncpy(cap->name, dev_name(&vscsi->dma_dev->dev),
			SRP_MAX_LOC_LEN);

		len = olen - min_len;
		status = VIOSRP_MAD_SUCCESS;
		common = (struct mad_capability_common *)&cap->migration;

		/* Walk each capability entry; bail out on malformed lengths. */
		while ((len > 0) && (status == VIOSRP_MAD_SUCCESS) && !rc) {
			dev_dbg(&vscsi->dev, "cap_mad: len left %hd, cap type %d, cap len %hd\n",
				len, be32_to_cpu(common->cap_type),
				be16_to_cpu(common->length));

			cap_len = be16_to_cpu(common->length);
			if (cap_len > len) {
				dev_err(&vscsi->dev, "cap_mad: cap len mismatch with total len\n");
				status = VIOSRP_MAD_FAILED;
				break;
			}

			/* A zero-length entry would loop forever. */
			if (cap_len == 0) {
				dev_err(&vscsi->dev, "cap_mad: cap len is 0\n");
				status = VIOSRP_MAD_FAILED;
				break;
			}

			switch (common->cap_type) {
			default:
				/* No capability types are supported yet. */
				dev_dbg(&vscsi->dev, "cap_mad: unsupported capability\n");
				common->server_support = 0;
				flag = cpu_to_be32((u32)CAP_LIST_SUPPORTED);
				cap->flags &= ~flag;
				break;
			}

			/* Advance past this variable-length entry. */
			len = len - cap_len;
			common = (struct mad_capability_common *)
				((char *)common + cap_len);
		}

		mad->common.status = cpu_to_be16(status);

		/* Ensure buffer writes complete before the RDMA copy-out. */
		dma_wmb();
		rc = h_copy_rdma(olen, vscsi->dds.window[LOCAL].liobn, token,
				 vscsi->dds.window[REMOTE].liobn,
				 be64_to_cpu(mad->buffer));

		if (rc != H_SUCCESS) {
			dev_dbg(&vscsi->dev, "cap_mad: failed to copy to client, rc %ld\n",
				rc);

			if (rc == H_PERMISSION) {
				if (connection_broken(vscsi))
					flag_bits = (RESPONSE_Q_DOWN |
						     CLIENT_FAILED);
			}

			dev_warn(&vscsi->dev, "cap_mad: error copying data to client, rc %ld\n",
				 rc);
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT,
						  flag_bits);
		}
	}

	dma_free_coherent(&vscsi->dma_dev->dev, olen, cap, token);

	dev_dbg(&vscsi->dev, "Leaving cap_mad, rc %ld, client_cap 0x%x\n",
		rc, vscsi->client_cap);

	return rc;
}
  1519. /**
  1520. * ibmvscsis_process_mad() - Service a MAnagement Data gram
  1521. * @vscsi: Pointer to our adapter structure
  1522. * @iue: Information Unit containing the MAD request
  1523. *
  1524. * Must be called with interrupt lock held.
  1525. */
  1526. static long ibmvscsis_process_mad(struct scsi_info *vscsi, struct iu_entry *iue)
  1527. {
  1528. struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad;
  1529. struct viosrp_empty_iu *empty;
  1530. long rc = ADAPT_SUCCESS;
  1531. switch (be32_to_cpu(mad->type)) {
  1532. case VIOSRP_EMPTY_IU_TYPE:
  1533. empty = &vio_iu(iue)->mad.empty_iu;
  1534. vscsi->empty_iu_id = be64_to_cpu(empty->buffer);
  1535. vscsi->empty_iu_tag = be64_to_cpu(empty->common.tag);
  1536. mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
  1537. break;
  1538. case VIOSRP_ADAPTER_INFO_TYPE:
  1539. rc = ibmvscsis_adapter_info(vscsi, iue);
  1540. break;
  1541. case VIOSRP_CAPABILITIES_TYPE:
  1542. rc = ibmvscsis_cap_mad(vscsi, iue);
  1543. break;
  1544. case VIOSRP_ENABLE_FAST_FAIL:
  1545. if (vscsi->state == CONNECTED) {
  1546. vscsi->fast_fail = true;
  1547. mad->status = cpu_to_be16(VIOSRP_MAD_SUCCESS);
  1548. } else {
  1549. dev_warn(&vscsi->dev, "fast fail mad sent after login\n");
  1550. mad->status = cpu_to_be16(VIOSRP_MAD_FAILED);
  1551. }
  1552. break;
  1553. default:
  1554. mad->status = cpu_to_be16(VIOSRP_MAD_NOT_SUPPORTED);
  1555. break;
  1556. }
  1557. return rc;
  1558. }
/**
 * srp_snd_msg_failed() - Handle an error when sending a response
 * @vscsi:	Pointer to our adapter structure
 * @rc:		The return code from the h_send_crq command
 *
 * H_DROPPED means the client's response queue is full and is retried
 * later via the rsp_q_timer; any other failure marks the response queue
 * down, frees the queued commands, and posts a disconnect/reconnect.
 *
 * Must be called with interrupt lock held.
 */
static void srp_snd_msg_failed(struct scsi_info *vscsi, long rc)
{
	ktime_t kt;

	if (rc != H_DROPPED) {
		/* Hard failure: give up on all queued responses. */
		ibmvscsis_free_cmd_qs(vscsi);

		if (rc == H_CLOSED)
			vscsi->flags |= CLIENT_FAILED;

		/* don't flag the same problem multiple times */
		if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
			vscsi->flags |= RESPONSE_Q_DOWN;
			if (!(vscsi->state & (ERR_DISCONNECT |
					      ERR_DISCONNECT_RECONNECT |
					      ERR_DISCONNECTED | UNDEFINED))) {
				dev_err(&vscsi->dev, "snd_msg_failed: setting RESPONSE_Q_DOWN, state 0x%hx, flags 0x%x, rc %ld\n",
					vscsi->state, vscsi->flags, rc);
			}
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT, 0);
		}
		return;
	}

	/*
	 * The response queue is full.
	 * If the server is processing SRP requests, i.e.
	 * the client has successfully done an
	 * SRP_LOGIN, then it will wait forever for room in
	 * the queue.  However if the system admin
	 * is attempting to unconfigure the server then one
	 * or more children will be in a state where
	 * they are being removed. So if there is even one
	 * child being removed then the driver assumes
	 * the system admin is attempting to break the
	 * connection with the client and MAX_TIMER_POPS
	 * is honored.
	 */
	if ((vscsi->rsp_q_timer.timer_pops < MAX_TIMER_POPS) ||
	    (vscsi->state == SRP_PROCESSING)) {
		dev_dbg(&vscsi->dev, "snd_msg_failed: response queue full, flags 0x%x, timer started %d, pops %d\n",
			vscsi->flags, (int)vscsi->rsp_q_timer.started,
			vscsi->rsp_q_timer.timer_pops);

		/*
		 * Check if the timer is running; if it
		 * is not then start it up.
		 */
		if (!vscsi->rsp_q_timer.started) {
			if (vscsi->rsp_q_timer.timer_pops <
			    MAX_TIMER_POPS) {
				kt = WAIT_NANO_SECONDS;
			} else {
				/*
				 * slide the timeslice if the maximum
				 * timer pops have already happened
				 */
				kt = ktime_set(WAIT_SECONDS, 0);
			}

			vscsi->rsp_q_timer.started = true;
			hrtimer_start(&vscsi->rsp_q_timer.timer, kt,
				      HRTIMER_MODE_REL);
		}
	} else {
		/*
		 * TBD: Do we need to worry about this? Need to get
		 * remove working.
		 */
		/*
		 * waited a long time and it appears the system admin
		 * is bring this driver down
		 */
		vscsi->flags |= RESPONSE_Q_DOWN;
		ibmvscsis_free_cmd_qs(vscsi);
		/*
		 * if the driver is already attempting to disconnect
		 * from the client and has already logged an error
		 * trace this event but don't put it in the error log
		 */
		if (!(vscsi->state & (ERR_DISCONNECT |
				      ERR_DISCONNECT_RECONNECT |
				      ERR_DISCONNECTED | UNDEFINED))) {
			dev_err(&vscsi->dev, "client crq full too long\n");
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT,
						  0);
		}
	}
}
/**
 * ibmvscsis_send_messages() - Send a Response
 * @vscsi:	Pointer to our adapter structure
 *
 * Send a response, first checking the waiting queue. Responses are
 * sent in order they are received. If the response cannot be sent,
 * because the client queue is full, it stays on the waiting queue.
 *
 * Commands flagged DELAY_SEND are skipped until the abort command that
 * references them has gone out; clearing that flag triggers another
 * pass over the queue (the retry loop).
 *
 * PRECONDITION:
 *	Called with interrupt lock held
 */
static void ibmvscsis_send_messages(struct scsi_info *vscsi)
{
	u64 msg_hi = 0;
	/* note do not attempt to access the IU_data_ptr with this pointer
	 * it is not valid
	 */
	struct viosrp_crq *crq = (struct viosrp_crq *)&msg_hi;
	struct ibmvscsis_cmd *cmd, *nxt;
	struct iu_entry *iue;
	long rc = ADAPT_SUCCESS;
	bool retry = false;

	if (!(vscsi->flags & RESPONSE_Q_DOWN)) {
		do {
			retry = false;
			list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp,
						 list) {
				/*
				 * Check to make sure abort cmd gets processed
				 * prior to the abort tmr cmd
				 */
				if (cmd->flags & DELAY_SEND)
					continue;

				if (cmd->abort_cmd) {
					retry = true;
					cmd->abort_cmd->flags &= ~(DELAY_SEND);
					cmd->abort_cmd = NULL;
				}

				/*
				 * If CMD_T_ABORTED w/o CMD_T_TAS scenarios and
				 * the case where LIO issued a
				 * ABORT_TASK: Sending TMR_TASK_DOES_NOT_EXIST
				 * case then we dont send a response, since it
				 * was already done.
				 */
				if (cmd->se_cmd.transport_state & CMD_T_ABORTED &&
				    !(cmd->se_cmd.transport_state & CMD_T_TAS)) {
					list_del(&cmd->list);
					ibmvscsis_free_cmd_resources(vscsi,
								     cmd);
					/*
					 * With a successfully aborted op
					 * through LIO we want to increment the
					 * the vscsi credit so that when we dont
					 * send a rsp to the original scsi abort
					 * op (h_send_crq), but the tm rsp to
					 * the abort is sent, the credit is
					 * correctly sent with the abort tm rsp.
					 * We would need 1 for the abort tm rsp
					 * and 1 credit for the aborted scsi op.
					 * Thus we need to increment here.
					 * Also we want to increment the credit
					 * here because we want to make sure
					 * cmd is actually released first
					 * otherwise the client will think it
					 * it can send a new cmd, and we could
					 * find ourselves short of cmd elements.
					 */
					vscsi->credit += 1;
				} else {
					/* Build the CRQ element in msg_hi. */
					iue = cmd->iue;

					crq->valid = VALID_CMD_RESP_EL;
					crq->format = cmd->rsp.format;

					if (cmd->flags & CMD_FAST_FAIL)
						crq->status = VIOSRP_ADAPTER_FAIL;

					crq->IU_length = cpu_to_be16(cmd->rsp.len);

					rc = h_send_crq(vscsi->dma_dev->unit_address,
							be64_to_cpu(msg_hi),
							be64_to_cpu(cmd->rsp.tag));

					dev_dbg(&vscsi->dev, "send_messages: cmd %p, tag 0x%llx, rc %ld\n",
						cmd, be64_to_cpu(cmd->rsp.tag),
						rc);

					/* if all ok free up the command
					 * element resources
					 */
					if (rc == H_SUCCESS) {
						/* some movement has occurred */
						vscsi->rsp_q_timer.timer_pops = 0;
						list_del(&cmd->list);

						ibmvscsis_free_cmd_resources(vscsi,
									     cmd);
					} else {
						srp_snd_msg_failed(vscsi, rc);
						break;
					}
				}
			}
		} while (retry);

		if (!rc) {
			/*
			 * The timer could pop with the queue empty. If
			 * this happens, rc will always indicate a
			 * success; clear the pop count.
			 */
			vscsi->rsp_q_timer.timer_pops = 0;
		}
	} else {
		ibmvscsis_free_cmd_qs(vscsi);
	}
}
/*
 * ibmvscsis_send_mad_resp() - Copy a MAD response to the client and queue
 * the CRQ notification; on copy failure free the command and post a
 * disconnect/reconnect.
 *
 * Called with intr lock held.
 */
static void ibmvscsis_send_mad_resp(struct scsi_info *vscsi,
				    struct ibmvscsis_cmd *cmd,
				    struct viosrp_crq *crq)
{
	struct iu_entry *iue = cmd->iue;
	struct mad_common *mad = (struct mad_common *)&vio_iu(iue)->mad;
	uint flag_bits = 0;
	long rc;

	/* Ensure the MAD response is written before the RDMA copy-out. */
	dma_wmb();
	rc = h_copy_rdma(sizeof(struct mad_common),
			 vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
			 vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(crq->IU_data_ptr));
	if (!rc) {
		cmd->rsp.format = VIOSRP_MAD_FORMAT;
		cmd->rsp.len = sizeof(struct mad_common);
		cmd->rsp.tag = mad->tag;
		list_add_tail(&cmd->list, &vscsi->waiting_rsp);
		ibmvscsis_send_messages(vscsi);
	} else {
		dev_dbg(&vscsi->dev, "Error sending mad response, rc %ld\n",
			rc);
		if (rc == H_PERMISSION) {
			if (connection_broken(vscsi))
				flag_bits = (RESPONSE_Q_DOWN | CLIENT_FAILED);
		}
		dev_err(&vscsi->dev, "mad: failed to copy to client, rc %ld\n",
			rc);
		ibmvscsis_free_cmd_resources(vscsi, cmd);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
					  flag_bits);
	}
}
/**
 * ibmvscsis_mad() - Service a MAnagement Data gram.
 * @vscsi:	Pointer to our adapter structure
 * @crq:	Pointer to the CRQ entry containing the MAD request
 *
 * Validates the adapter state, pulls the MAD IU from the client,
 * processes it, and (on success) queues the response.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, called with adapter lock held
 */
static long ibmvscsis_mad(struct scsi_info *vscsi, struct viosrp_crq *crq)
{
	struct iu_entry *iue;
	struct ibmvscsis_cmd *cmd;
	struct mad_common *mad;
	long rc = ADAPT_SUCCESS;

	switch (vscsi->state) {
		/*
		 * We have not exchanged Init Msgs yet, so this MAD was sent
		 * before the last Transport Event; client will not be
		 * expecting a response.
		 */
	case WAIT_CONNECTION:
		dev_dbg(&vscsi->dev, "mad: in Wait Connection state, ignoring MAD, flags %d\n",
			vscsi->flags);
		return ADAPT_SUCCESS;

	case SRP_PROCESSING:
	case CONNECTED:
		break;

		/*
		 * We should never get here while we're in these states.
		 * Just log an error and get out.
		 */
	case UNCONFIGURING:
	case WAIT_IDLE:
	case ERR_DISCONNECT:
	case ERR_DISCONNECT_RECONNECT:
	default:
		dev_err(&vscsi->dev, "mad: invalid adapter state %d for mad\n",
			vscsi->state);
		return ADAPT_SUCCESS;
	}

	cmd = ibmvscsis_get_free_cmd(vscsi);
	if (!cmd) {
		dev_err(&vscsi->dev, "mad: failed to get cmd, debit %d\n",
			vscsi->debit);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		return ERROR;
	}
	iue = cmd->iue;
	cmd->type = ADAPTER_MAD;

	rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
	if (!rc) {
		mad = (struct mad_common *)&vio_iu(iue)->mad;

		dev_dbg(&vscsi->dev, "mad: type %d\n", be32_to_cpu(mad->type));

		rc = ibmvscsis_process_mad(vscsi, iue);

		dev_dbg(&vscsi->dev, "mad: status %hd, rc %ld\n",
			be16_to_cpu(mad->status), rc);

		if (!rc)
			ibmvscsis_send_mad_resp(vscsi, cmd, crq);
	} else {
		ibmvscsis_free_cmd_resources(vscsi, cmd);
	}

	dev_dbg(&vscsi->dev, "Leaving mad, rc %ld\n", rc);
	return rc;
}
/**
 * ibmvscsis_login_rsp() - Create/copy a login response notice to the client
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to the command for the SRP Login request
 *
 * Builds an SRP_LOGIN_RSP in the IU buffer and copies it to the client's
 * buffer via h_copy_rdma; on failure a disconnect/reconnect is posted.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_login_rsp(struct scsi_info *vscsi,
				struct ibmvscsis_cmd *cmd)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_login_rsp *rsp = &vio_iu(iue)->srp.login_rsp;
	struct format_code *fmt;
	uint flag_bits = 0;
	long rc = ADAPT_SUCCESS;

	memset(rsp, 0, sizeof(struct srp_login_rsp));

	rsp->opcode = SRP_LOGIN_RSP;
	rsp->req_lim_delta = cpu_to_be32(vscsi->request_limit);
	rsp->tag = cmd->rsp.tag;
	rsp->max_it_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
	rsp->max_ti_iu_len = cpu_to_be32(SRP_MAX_IU_LEN);
	fmt = (struct format_code *)&rsp->buf_fmt;
	fmt->buffers = SUPPORTED_FORMATS;
	/* Reset flow-control credit for the new login. */
	vscsi->credit = 0;

	cmd->rsp.len = sizeof(struct srp_login_rsp);

	/* Ensure the response is written before the RDMA copy-out. */
	dma_wmb();
	rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
			 iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(iue->remote_token));

	switch (rc) {
	case H_SUCCESS:
		break;

	case H_PERMISSION:
		if (connection_broken(vscsi))
			flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
		dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
					  flag_bits);
		break;
	case H_SOURCE_PARM:
	case H_DEST_PARM:
	default:
		dev_err(&vscsi->dev, "login_rsp: error copying to client, rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		break;
	}

	return rc;
}
  1910. /**
  1911. * ibmvscsis_srp_login_rej() - Create/copy a login rejection notice to client
  1912. * @vscsi: Pointer to our adapter structure
  1913. * @cmd: Pointer to the command for the SRP Login request
  1914. * @reason: The reason the SRP Login is being rejected, per SRP protocol
  1915. *
  1916. * EXECUTION ENVIRONMENT:
  1917. * Interrupt, interrupt lock held
  1918. */
  1919. static long ibmvscsis_srp_login_rej(struct scsi_info *vscsi,
  1920. struct ibmvscsis_cmd *cmd, u32 reason)
  1921. {
  1922. struct iu_entry *iue = cmd->iue;
  1923. struct srp_login_rej *rej = &vio_iu(iue)->srp.login_rej;
  1924. struct format_code *fmt;
  1925. uint flag_bits = 0;
  1926. long rc = ADAPT_SUCCESS;
  1927. memset(rej, 0, sizeof(*rej));
  1928. rej->opcode = SRP_LOGIN_REJ;
  1929. rej->reason = cpu_to_be32(reason);
  1930. rej->tag = cmd->rsp.tag;
  1931. fmt = (struct format_code *)&rej->buf_fmt;
  1932. fmt->buffers = SUPPORTED_FORMATS;
  1933. cmd->rsp.len = sizeof(*rej);
  1934. dma_wmb();
  1935. rc = h_copy_rdma(cmd->rsp.len, vscsi->dds.window[LOCAL].liobn,
  1936. iue->sbuf->dma, vscsi->dds.window[REMOTE].liobn,
  1937. be64_to_cpu(iue->remote_token));
  1938. switch (rc) {
  1939. case H_SUCCESS:
  1940. break;
  1941. case H_PERMISSION:
  1942. if (connection_broken(vscsi))
  1943. flag_bits = RESPONSE_Q_DOWN | CLIENT_FAILED;
  1944. dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
  1945. rc);
  1946. ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT,
  1947. flag_bits);
  1948. break;
  1949. case H_SOURCE_PARM:
  1950. case H_DEST_PARM:
  1951. default:
  1952. dev_err(&vscsi->dev, "login_rej: error copying to client, rc %ld\n",
  1953. rc);
  1954. ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
  1955. break;
  1956. }
  1957. return rc;
  1958. }
  1959. static int ibmvscsis_make_nexus(struct ibmvscsis_tport *tport)
  1960. {
  1961. char *name = tport->tport_name;
  1962. struct ibmvscsis_nexus *nexus;
  1963. struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
  1964. int rc;
  1965. if (tport->ibmv_nexus) {
  1966. dev_dbg(&vscsi->dev, "tport->ibmv_nexus already exists\n");
  1967. return 0;
  1968. }
  1969. nexus = kzalloc(sizeof(*nexus), GFP_KERNEL);
  1970. if (!nexus) {
  1971. dev_err(&vscsi->dev, "Unable to allocate struct ibmvscsis_nexus\n");
  1972. return -ENOMEM;
  1973. }
  1974. nexus->se_sess = target_setup_session(&tport->se_tpg, 0, 0,
  1975. TARGET_PROT_NORMAL, name, nexus,
  1976. NULL);
  1977. if (IS_ERR(nexus->se_sess)) {
  1978. rc = PTR_ERR(nexus->se_sess);
  1979. goto transport_init_fail;
  1980. }
  1981. tport->ibmv_nexus = nexus;
  1982. return 0;
  1983. transport_init_fail:
  1984. kfree(nexus);
  1985. return rc;
  1986. }
  1987. static int ibmvscsis_drop_nexus(struct ibmvscsis_tport *tport)
  1988. {
  1989. struct se_session *se_sess;
  1990. struct ibmvscsis_nexus *nexus;
  1991. nexus = tport->ibmv_nexus;
  1992. if (!nexus)
  1993. return -ENODEV;
  1994. se_sess = nexus->se_sess;
  1995. if (!se_sess)
  1996. return -ENODEV;
  1997. /*
  1998. * Release the SCSI I_T Nexus to the emulated ibmvscsis Target Port
  1999. */
  2000. target_wait_for_sess_cmds(se_sess);
  2001. target_remove_session(se_sess);
  2002. tport->ibmv_nexus = NULL;
  2003. kfree(nexus);
  2004. return 0;
  2005. }
/**
 * ibmvscsis_srp_login() - Process an SRP Login Request
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Command element to use to process the SRP Login request
 * @crq:	Pointer to CRQ entry containing the SRP Login request
 *
 * Validates the login request against SRP limits, establishes the I_T
 * nexus, and sends either a login response or a login rejection.  A
 * successful (non-rejected, delivered) login moves the adapter to
 * SRP_PROCESSING.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, called with interrupt lock held
 */
static long ibmvscsis_srp_login(struct scsi_info *vscsi,
				struct ibmvscsis_cmd *cmd,
				struct viosrp_crq *crq)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_login_req *req = &vio_iu(iue)->srp.login_req;
	/* Overlay for the 16-byte SRP port identifiers. */
	struct port_id {
		__be64 id_extension;
		__be64 io_guid;
	} *iport, *tport;
	struct format_code *fmt;
	u32 reason = 0x0;
	long rc = ADAPT_SUCCESS;

	iport = (struct port_id *)req->initiator_port_id;
	tport = (struct port_id *)req->target_port_id;
	fmt = (struct format_code *)&req->req_buf_fmt;
	/* Validate request; first failing check determines the reason. */
	if (be32_to_cpu(req->req_it_iu_len) > SRP_MAX_IU_LEN)
		reason = SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE;
	else if (be32_to_cpu(req->req_it_iu_len) < 64)
		reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL;
	else if ((be64_to_cpu(iport->id_extension) > (MAX_NUM_PORTS - 1)) ||
		 (be64_to_cpu(tport->id_extension) > (MAX_NUM_PORTS - 1)))
		reason = SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL;
	else if (req->req_flags & SRP_MULTICHAN_MULTI)
		reason = SRP_LOGIN_REJ_MULTI_CHANNEL_UNSUPPORTED;
	else if (fmt->buffers & (~SUPPORTED_FORMATS))
		reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;
	else if ((fmt->buffers & SUPPORTED_FORMATS) == 0)
		reason = SRP_LOGIN_REJ_UNSUPPORTED_DESCRIPTOR_FMT;

	/* Only one SRP channel at a time is supported. */
	if (vscsi->state == SRP_PROCESSING)
		reason = SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED;

	rc = ibmvscsis_make_nexus(&vscsi->tport);
	if (rc)
		reason = SRP_LOGIN_REJ_UNABLE_ESTABLISH_CHANNEL;

	cmd->rsp.format = VIOSRP_SRP_FORMAT;
	cmd->rsp.tag = req->tag;

	dev_dbg(&vscsi->dev, "srp_login: reason 0x%x\n", reason);

	if (reason)
		rc = ibmvscsis_srp_login_rej(vscsi, cmd, reason);
	else
		rc = ibmvscsis_login_rsp(vscsi, cmd);

	if (!rc) {
		if (!reason)
			vscsi->state = SRP_PROCESSING;

		list_add_tail(&cmd->list, &vscsi->waiting_rsp);
		ibmvscsis_send_messages(vscsi);
	} else {
		ibmvscsis_free_cmd_resources(vscsi, cmd);
	}

	dev_dbg(&vscsi->dev, "Leaving srp_login, rc %ld\n", rc);
	return rc;
}
  2067. /**
  2068. * ibmvscsis_srp_i_logout() - Helper Function to close I_T Nexus
  2069. * @vscsi: Pointer to our adapter structure
  2070. * @cmd: Command element to use to process the Implicit Logout request
  2071. * @crq: Pointer to CRQ entry containing the Implicit Logout request
  2072. *
  2073. * Do the logic to close the I_T nexus. This function may not
  2074. * behave to specification.
  2075. *
  2076. * EXECUTION ENVIRONMENT:
  2077. * Interrupt, interrupt lock held
  2078. */
  2079. static long ibmvscsis_srp_i_logout(struct scsi_info *vscsi,
  2080. struct ibmvscsis_cmd *cmd,
  2081. struct viosrp_crq *crq)
  2082. {
  2083. struct iu_entry *iue = cmd->iue;
  2084. struct srp_i_logout *log_out = &vio_iu(iue)->srp.i_logout;
  2085. long rc = ADAPT_SUCCESS;
  2086. if ((vscsi->debit > 0) || !list_empty(&vscsi->schedule_q) ||
  2087. !list_empty(&vscsi->waiting_rsp)) {
  2088. dev_err(&vscsi->dev, "i_logout: outstanding work\n");
  2089. ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
  2090. } else {
  2091. cmd->rsp.format = SRP_FORMAT;
  2092. cmd->rsp.tag = log_out->tag;
  2093. cmd->rsp.len = sizeof(struct mad_common);
  2094. list_add_tail(&cmd->list, &vscsi->waiting_rsp);
  2095. ibmvscsis_send_messages(vscsi);
  2096. ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0);
  2097. }
  2098. return rc;
  2099. }
/*
 * ibmvscsis_srp_cmd() - Dispatch an SRP-format CRQ element: login,
 * task-management, SCSI command, or implicit logout.  SCSI commands and
 * task management are queued to the work queue; debit tracks commands
 * outstanding against the client's request limit.
 *
 * Called with intr lock held.
 */
static void ibmvscsis_srp_cmd(struct scsi_info *vscsi, struct viosrp_crq *crq)
{
	struct ibmvscsis_cmd *cmd;
	struct iu_entry *iue;
	struct srp_cmd *srp;
	struct srp_tsk_mgmt *tsk;
	long rc;

	if (vscsi->request_limit - vscsi->debit <= 0) {
		/* Client has exceeded request limit */
		dev_err(&vscsi->dev, "Client exceeded the request limit (%d), debit %d\n",
			vscsi->request_limit, vscsi->debit);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		return;
	}

	cmd = ibmvscsis_get_free_cmd(vscsi);
	if (!cmd) {
		dev_err(&vscsi->dev, "srp_cmd failed to get cmd, debit %d\n",
			vscsi->debit);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		return;
	}
	iue = cmd->iue;
	srp = &vio_iu(iue)->srp.cmd;

	rc = ibmvscsis_copy_crq_packet(vscsi, cmd, crq);
	if (rc) {
		ibmvscsis_free_cmd_resources(vscsi, cmd);
		return;
	}

	if (vscsi->state == SRP_PROCESSING) {
		switch (srp->opcode) {
		case SRP_LOGIN_REQ:
			rc = ibmvscsis_srp_login(vscsi, cmd, crq);
			break;

		case SRP_TSK_MGMT:
			tsk = &vio_iu(iue)->srp.tsk_mgmt;
			dev_dbg(&vscsi->dev, "tsk_mgmt tag: %llu (0x%llx)\n",
				tsk->tag, tsk->tag);
			cmd->rsp.tag = tsk->tag;
			vscsi->debit += 1;
			cmd->type = TASK_MANAGEMENT;
			list_add_tail(&cmd->list, &vscsi->schedule_q);
			queue_work(vscsi->work_q, &cmd->work);
			break;

		case SRP_CMD:
			dev_dbg(&vscsi->dev, "srp_cmd tag: %llu (0x%llx)\n",
				srp->tag, srp->tag);
			cmd->rsp.tag = srp->tag;
			vscsi->debit += 1;
			cmd->type = SCSI_CDB;
			/*
			 * We want to keep track of work waiting for
			 * the workqueue.
			 */
			list_add_tail(&cmd->list, &vscsi->schedule_q);
			queue_work(vscsi->work_q, &cmd->work);
			break;

		case SRP_I_LOGOUT:
			rc = ibmvscsis_srp_i_logout(vscsi, cmd, crq);
			break;

		case SRP_CRED_RSP:
		case SRP_AER_RSP:
		default:
			ibmvscsis_free_cmd_resources(vscsi, cmd);
			dev_err(&vscsi->dev, "invalid srp cmd, opcode %d\n",
				(uint)srp->opcode);
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT, 0);
			break;
		}
	} else if (srp->opcode == SRP_LOGIN_REQ && vscsi->state == CONNECTED) {
		/* First login on a fresh connection. */
		rc = ibmvscsis_srp_login(vscsi, cmd, crq);
	} else {
		ibmvscsis_free_cmd_resources(vscsi, cmd);
		dev_err(&vscsi->dev, "Invalid state %d to handle srp cmd\n",
			vscsi->state);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
	}
}
/**
 * ibmvscsis_ping_response() - Respond to a ping request
 * @vscsi:	Pointer to our adapter structure
 *
 * Let the client know that the server is alive and waiting on
 * its native I/O stack.
 * If any type of error occurs from the call to queue a ping
 * response then the client is either not accepting or receiving
 * interrupts. Disconnect with an error.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, interrupt lock held
 */
static long ibmvscsis_ping_response(struct scsi_info *vscsi)
{
	struct viosrp_crq *crq;
	u64 buffer[2] = { 0, 0 };
	long rc;

	/* Build the ping-response CRQ element in the local buffer. */
	crq = (struct viosrp_crq *)&buffer;
	crq->valid = VALID_CMD_RESP_EL;
	crq->format = (u8)MESSAGE_IN_CRQ;
	crq->status = PING_RESPONSE;

	rc = h_send_crq(vscsi->dds.unit_id, cpu_to_be64(buffer[MSG_HI]),
			cpu_to_be64(buffer[MSG_LOW]));

	switch (rc) {
	case H_SUCCESS:
		break;
	case H_CLOSED:
		vscsi->flags |= CLIENT_FAILED;
		/* Fall through */
	case H_DROPPED:
		vscsi->flags |= RESPONSE_Q_DOWN;
		/* Fall through */
	case H_REMOTE_PARM:
		dev_err(&vscsi->dev, "ping_response: h_send_crq failed, rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		break;
	default:
		dev_err(&vscsi->dev, "ping_response: h_send_crq returned unknown rc %ld\n",
			rc);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
		break;
	}

	return rc;
}
/**
 * ibmvscsis_parse_command() - Parse an element taken from the cmd rsp queue.
 * @vscsi:	Pointer to our adapter structure
 * @crq:	Pointer to CRQ element containing the SRP request
 *
 * This function will return success if the command queue element is valid
 * and the srp iu or MAD request it pointed to was also valid.  That does
 * not mean that an error was not returned to the client.
 *
 * EXECUTION ENVIRONMENT:
 *	Interrupt, intr lock held
 */
static long ibmvscsis_parse_command(struct scsi_info *vscsi,
				    struct viosrp_crq *crq)
{
	long rc = ADAPT_SUCCESS;

	switch (crq->valid) {
	case VALID_CMD_RESP_EL:
		switch (crq->format) {
		case OS400_FORMAT:
		case AIX_FORMAT:
		case LINUX_FORMAT:
		case MAD_FORMAT:
			/* Only one MAD may be outstanding at a time; a
			 * second one while PROCESSING_MAD is still set is
			 * treated as a fatal protocol error.
			 */
			if (vscsi->flags & PROCESSING_MAD) {
				rc = ERROR;
				dev_err(&vscsi->dev, "parse_command: already processing mad\n");
				ibmvscsis_post_disconnect(vscsi,
							  ERR_DISCONNECT_RECONNECT,
							  0);
			} else {
				vscsi->flags |= PROCESSING_MAD;
				rc = ibmvscsis_mad(vscsi, crq);
			}
			break;

		case SRP_FORMAT:
			ibmvscsis_srp_cmd(vscsi, crq);
			break;

		case MESSAGE_IN_CRQ:
			/* Only PING messages get a response; any other
			 * status is silently ignored.
			 */
			if (crq->status == PING)
				ibmvscsis_ping_response(vscsi);
			break;

		default:
			dev_err(&vscsi->dev, "parse_command: invalid format %d\n",
				(uint)crq->format);
			ibmvscsis_post_disconnect(vscsi,
						  ERR_DISCONNECT_RECONNECT, 0);
			break;
		}
		break;

	case VALID_TRANS_EVENT:
		rc = ibmvscsis_trans_event(vscsi, crq);
		break;

	case VALID_INIT_MSG:
		rc = ibmvscsis_init_msg(vscsi, crq);
		break;

	default:
		dev_err(&vscsi->dev, "parse_command: invalid valid field %d\n",
			(uint)crq->valid);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		break;
	}

	/*
	 * Return only what the interrupt handler cares
	 * about. Most errors we keep right on trucking.
	 */
	rc = vscsi->flags & SCHEDULE_DISCONNECT;

	return rc;
}
  2291. static int read_dma_window(struct scsi_info *vscsi)
  2292. {
  2293. struct vio_dev *vdev = vscsi->dma_dev;
  2294. const __be32 *dma_window;
  2295. const __be32 *prop;
  2296. /* TODO Using of_parse_dma_window would be better, but it doesn't give
  2297. * a way to read multiple windows without already knowing the size of
  2298. * a window or the number of windows.
  2299. */
  2300. dma_window = (const __be32 *)vio_get_attribute(vdev,
  2301. "ibm,my-dma-window",
  2302. NULL);
  2303. if (!dma_window) {
  2304. dev_err(&vscsi->dev, "Couldn't find ibm,my-dma-window property\n");
  2305. return -1;
  2306. }
  2307. vscsi->dds.window[LOCAL].liobn = be32_to_cpu(*dma_window);
  2308. dma_window++;
  2309. prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-address-cells",
  2310. NULL);
  2311. if (!prop) {
  2312. dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-address-cells property\n");
  2313. dma_window++;
  2314. } else {
  2315. dma_window += be32_to_cpu(*prop);
  2316. }
  2317. prop = (const __be32 *)vio_get_attribute(vdev, "ibm,#dma-size-cells",
  2318. NULL);
  2319. if (!prop) {
  2320. dev_warn(&vscsi->dev, "Couldn't find ibm,#dma-size-cells property\n");
  2321. dma_window++;
  2322. } else {
  2323. dma_window += be32_to_cpu(*prop);
  2324. }
  2325. /* dma_window should point to the second window now */
  2326. vscsi->dds.window[REMOTE].liobn = be32_to_cpu(*dma_window);
  2327. return 0;
  2328. }
  2329. static struct ibmvscsis_tport *ibmvscsis_lookup_port(const char *name)
  2330. {
  2331. struct ibmvscsis_tport *tport = NULL;
  2332. struct vio_dev *vdev;
  2333. struct scsi_info *vscsi;
  2334. spin_lock_bh(&ibmvscsis_dev_lock);
  2335. list_for_each_entry(vscsi, &ibmvscsis_dev_list, list) {
  2336. vdev = vscsi->dma_dev;
  2337. if (!strcmp(dev_name(&vdev->dev), name)) {
  2338. tport = &vscsi->tport;
  2339. break;
  2340. }
  2341. }
  2342. spin_unlock_bh(&ibmvscsis_dev_lock);
  2343. return tport;
  2344. }
/**
 * ibmvscsis_parse_cmd() - Parse SRP Command
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to command element with SRP command
 *
 * Parse the srp command; if it is valid then submit it to tcm.
 * Note: The return code does not reflect the status of the SCSI CDB.
 *
 * EXECUTION ENVIRONMENT:
 *	Process level
 */
static void ibmvscsis_parse_cmd(struct scsi_info *vscsi,
				struct ibmvscsis_cmd *cmd)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
	struct ibmvscsis_nexus *nexus;
	u64 data_len = 0;
	enum dma_data_direction dir;
	int attr = 0;
	int rc = 0;

	nexus = vscsi->tport.ibmv_nexus;
	/*
	 * additional length in bytes.  Note that the SRP spec says that
	 * additional length is in 4-byte words, but technically the
	 * additional length field is only the upper 6 bits of the byte.
	 * The lower 2 bits are reserved.  If the lower 2 bits are 0 (as
	 * all reserved fields should be), then interpreting the byte as
	 * an int will yield the length in bytes.
	 */
	if (srp->add_cdb_len & 0x03) {
		dev_err(&vscsi->dev, "parse_cmd: reserved bits set in IU\n");
		spin_lock_bh(&vscsi->intr_lock);
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
		ibmvscsis_free_cmd_resources(vscsi, cmd);
		spin_unlock_bh(&vscsi->intr_lock);
		return;
	}

	if (srp_get_desc_table(srp, &dir, &data_len)) {
		dev_err(&vscsi->dev, "0x%llx: parsing SRP descriptor table failed.\n",
			srp->tag);
		goto fail;
	}

	cmd->rsp.sol_not = srp->sol_not;

	/* Translate the SRP task attribute to the TCM tag type. */
	switch (srp->task_attr) {
	case SRP_SIMPLE_TASK:
		attr = TCM_SIMPLE_TAG;
		break;
	case SRP_ORDERED_TASK:
		attr = TCM_ORDERED_TAG;
		break;
	case SRP_HEAD_TASK:
		attr = TCM_HEAD_TAG;
		break;
	case SRP_ACA_TASK:
		attr = TCM_ACA_TAG;
		break;
	default:
		dev_err(&vscsi->dev, "Invalid task attribute %d\n",
			srp->task_attr);
		goto fail;
	}

	cmd->se_cmd.tag = be64_to_cpu(srp->tag);

	/* Put the command on the active queue before submitting to TCM so
	 * a concurrent disconnect can account for it.
	 */
	spin_lock_bh(&vscsi->intr_lock);
	list_add_tail(&cmd->list, &vscsi->active_q);
	spin_unlock_bh(&vscsi->intr_lock);

	/* Mask the top two bits of the first LUN byte; presumably strips
	 * the SRP LUN address-method bits before scsilun_to_int() - TODO
	 * confirm against the SRP LUN format.
	 */
	srp->lun.scsi_lun[0] &= 0x3f;

	rc = target_submit_cmd(&cmd->se_cmd, nexus->se_sess, srp->cdb,
			       cmd->sense_buf, scsilun_to_int(&srp->lun),
			       data_len, attr, dir, 0);
	if (rc) {
		dev_err(&vscsi->dev, "target_submit_cmd failed, rc %d\n", rc);
		/* Undo the active_q bookkeeping and release the command
		 * before tearing the connection down.
		 */
		spin_lock_bh(&vscsi->intr_lock);
		list_del(&cmd->list);
		ibmvscsis_free_cmd_resources(vscsi, cmd);
		spin_unlock_bh(&vscsi->intr_lock);
		goto fail;
	}
	return;

fail:
	spin_lock_bh(&vscsi->intr_lock);
	ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT_RECONNECT, 0);
	spin_unlock_bh(&vscsi->intr_lock);
}
/**
 * ibmvscsis_parse_task() - Parse SRP Task Management Request
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to command element with SRP task management request
 *
 * Parse the srp task management request; if it is valid then submit it to tcm.
 * Note: The return code does not reflect the status of the task management
 * request.
 *
 * EXECUTION ENVIRONMENT:
 *	Processor level
 */
static void ibmvscsis_parse_task(struct scsi_info *vscsi,
				 struct ibmvscsis_cmd *cmd)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
	int tcm_type;
	u64 tag_to_abort = 0;
	int rc = 0;
	struct ibmvscsis_nexus *nexus;

	nexus = vscsi->tport.ibmv_nexus;

	cmd->rsp.sol_not = srp_tsk->sol_not;

	/* Map the SRP task-management function to the TCM TMR type.
	 * Only ABORT_TASK carries the tag of the command to abort.
	 */
	switch (srp_tsk->tsk_mgmt_func) {
	case SRP_TSK_ABORT_TASK:
		tcm_type = TMR_ABORT_TASK;
		tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
		break;
	case SRP_TSK_ABORT_TASK_SET:
		tcm_type = TMR_ABORT_TASK_SET;
		break;
	case SRP_TSK_CLEAR_TASK_SET:
		tcm_type = TMR_CLEAR_TASK_SET;
		break;
	case SRP_TSK_LUN_RESET:
		tcm_type = TMR_LUN_RESET;
		break;
	case SRP_TSK_CLEAR_ACA:
		tcm_type = TMR_CLEAR_ACA;
		break;
	default:
		dev_err(&vscsi->dev, "unknown task mgmt func %d\n",
			srp_tsk->tsk_mgmt_func);
		/* NOTE(review): se_tmr_req is dereferenced here before
		 * target_submit_tmr() has run - confirm it is already
		 * valid on this path.
		 */
		cmd->se_cmd.se_tmr_req->response =
			TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		rc = -1;
		break;
	}

	if (!rc) {
		cmd->se_cmd.tag = be64_to_cpu(srp_tsk->tag);

		/* Track the TMR on the active queue while TCM owns it. */
		spin_lock_bh(&vscsi->intr_lock);
		list_add_tail(&cmd->list, &vscsi->active_q);
		spin_unlock_bh(&vscsi->intr_lock);

		/* Mask the LUN address-method bits, as in parse_cmd. */
		srp_tsk->lun.scsi_lun[0] &= 0x3f;

		dev_dbg(&vscsi->dev, "calling submit_tmr, func %d\n",
			srp_tsk->tsk_mgmt_func);
		rc = target_submit_tmr(&cmd->se_cmd, nexus->se_sess, NULL,
				       scsilun_to_int(&srp_tsk->lun), srp_tsk,
				       tcm_type, GFP_KERNEL, tag_to_abort, 0);
		if (rc) {
			dev_err(&vscsi->dev, "target_submit_tmr failed, rc %d\n",
				rc);
			/* Take it back off the active queue and report the
			 * rejection to the client.
			 */
			spin_lock_bh(&vscsi->intr_lock);
			list_del(&cmd->list);
			spin_unlock_bh(&vscsi->intr_lock);
			cmd->se_cmd.se_tmr_req->response =
				TMR_FUNCTION_REJECTED;
		}
	}

	/* On any failure, push a response back through the fabric. */
	if (rc)
		transport_send_check_condition_and_sense(&cmd->se_cmd, 0, 0);
}
  2501. static void ibmvscsis_scheduler(struct work_struct *work)
  2502. {
  2503. struct ibmvscsis_cmd *cmd = container_of(work, struct ibmvscsis_cmd,
  2504. work);
  2505. struct scsi_info *vscsi = cmd->adapter;
  2506. spin_lock_bh(&vscsi->intr_lock);
  2507. /* Remove from schedule_q */
  2508. list_del(&cmd->list);
  2509. /* Don't submit cmd if we're disconnecting */
  2510. if (vscsi->flags & (SCHEDULE_DISCONNECT | DISCONNECT_SCHEDULED)) {
  2511. ibmvscsis_free_cmd_resources(vscsi, cmd);
  2512. /* ibmvscsis_disconnect might be waiting for us */
  2513. if (list_empty(&vscsi->active_q) &&
  2514. list_empty(&vscsi->schedule_q) &&
  2515. (vscsi->flags & WAIT_FOR_IDLE)) {
  2516. vscsi->flags &= ~WAIT_FOR_IDLE;
  2517. complete(&vscsi->wait_idle);
  2518. }
  2519. spin_unlock_bh(&vscsi->intr_lock);
  2520. return;
  2521. }
  2522. spin_unlock_bh(&vscsi->intr_lock);
  2523. switch (cmd->type) {
  2524. case SCSI_CDB:
  2525. ibmvscsis_parse_cmd(vscsi, cmd);
  2526. break;
  2527. case TASK_MANAGEMENT:
  2528. ibmvscsis_parse_task(vscsi, cmd);
  2529. break;
  2530. default:
  2531. dev_err(&vscsi->dev, "scheduler, invalid cmd type %d\n",
  2532. cmd->type);
  2533. spin_lock_bh(&vscsi->intr_lock);
  2534. ibmvscsis_free_cmd_resources(vscsi, cmd);
  2535. spin_unlock_bh(&vscsi->intr_lock);
  2536. break;
  2537. }
  2538. }
  2539. static int ibmvscsis_alloc_cmds(struct scsi_info *vscsi, int num)
  2540. {
  2541. struct ibmvscsis_cmd *cmd;
  2542. int i;
  2543. INIT_LIST_HEAD(&vscsi->free_cmd);
  2544. vscsi->cmd_pool = kcalloc(num, sizeof(struct ibmvscsis_cmd),
  2545. GFP_KERNEL);
  2546. if (!vscsi->cmd_pool)
  2547. return -ENOMEM;
  2548. for (i = 0, cmd = (struct ibmvscsis_cmd *)vscsi->cmd_pool; i < num;
  2549. i++, cmd++) {
  2550. cmd->abort_cmd = NULL;
  2551. cmd->adapter = vscsi;
  2552. INIT_WORK(&cmd->work, ibmvscsis_scheduler);
  2553. list_add_tail(&cmd->list, &vscsi->free_cmd);
  2554. }
  2555. return 0;
  2556. }
/*
 * Free the command pool allocated by ibmvscsis_alloc_cmds() and reset
 * the free list (the list entries lived inside the pool, so the list
 * must not be walked after the kfree).
 */
static void ibmvscsis_free_cmds(struct scsi_info *vscsi)
{
	kfree(vscsi->cmd_pool);
	vscsi->cmd_pool = NULL;
	INIT_LIST_HEAD(&vscsi->free_cmd);
}
  2563. /**
  2564. * ibmvscsis_service_wait_q() - Service Waiting Queue
  2565. * @timer: Pointer to timer which has expired
  2566. *
  2567. * This routine is called when the timer pops to service the waiting
  2568. * queue. Elements on the queue have completed, their responses have been
  2569. * copied to the client, but the client's response queue was full so
  2570. * the queue message could not be sent. The routine grabs the proper locks
  2571. * and calls send messages.
  2572. *
  2573. * EXECUTION ENVIRONMENT:
  2574. * called at interrupt level
  2575. */
  2576. static enum hrtimer_restart ibmvscsis_service_wait_q(struct hrtimer *timer)
  2577. {
  2578. struct timer_cb *p_timer = container_of(timer, struct timer_cb, timer);
  2579. struct scsi_info *vscsi = container_of(p_timer, struct scsi_info,
  2580. rsp_q_timer);
  2581. spin_lock_bh(&vscsi->intr_lock);
  2582. p_timer->timer_pops += 1;
  2583. p_timer->started = false;
  2584. ibmvscsis_send_messages(vscsi);
  2585. spin_unlock_bh(&vscsi->intr_lock);
  2586. return HRTIMER_NORESTART;
  2587. }
  2588. static long ibmvscsis_alloctimer(struct scsi_info *vscsi)
  2589. {
  2590. struct timer_cb *p_timer;
  2591. p_timer = &vscsi->rsp_q_timer;
  2592. hrtimer_init(&p_timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
  2593. p_timer->timer.function = ibmvscsis_service_wait_q;
  2594. p_timer->started = false;
  2595. p_timer->timer_pops = 0;
  2596. return ADAPT_SUCCESS;
  2597. }
  2598. static void ibmvscsis_freetimer(struct scsi_info *vscsi)
  2599. {
  2600. struct timer_cb *p_timer;
  2601. p_timer = &vscsi->rsp_q_timer;
  2602. (void)hrtimer_cancel(&p_timer->timer);
  2603. p_timer->started = false;
  2604. p_timer->timer_pops = 0;
  2605. }
/*
 * Hard IRQ handler: mask further VIO interrupts and defer all CRQ
 * processing to the tasklet (ibmvscsis_handle_crq), which re-enables
 * interrupts once the queue has been drained.
 */
static irqreturn_t ibmvscsis_interrupt(int dummy, void *data)
{
	struct scsi_info *vscsi = data;

	vio_disable_interrupts(vscsi->dma_dev);
	tasklet_schedule(&vscsi->work_task);

	return IRQ_HANDLED;
}
  2613. /**
  2614. * ibmvscsis_enable_change_state() - Set new state based on enabled status
  2615. * @vscsi: Pointer to our adapter structure
  2616. *
  2617. * This function determines our new state now that we are enabled. This
  2618. * may involve sending an Init Complete message to the client.
  2619. *
  2620. * Must be called with interrupt lock held.
  2621. */
  2622. static long ibmvscsis_enable_change_state(struct scsi_info *vscsi)
  2623. {
  2624. int bytes;
  2625. long rc = ADAPT_SUCCESS;
  2626. bytes = vscsi->cmd_q.size * PAGE_SIZE;
  2627. rc = h_reg_crq(vscsi->dds.unit_id, vscsi->cmd_q.crq_token, bytes);
  2628. if (rc == H_CLOSED || rc == H_SUCCESS) {
  2629. vscsi->state = WAIT_CONNECTION;
  2630. rc = ibmvscsis_establish_new_q(vscsi);
  2631. }
  2632. if (rc != ADAPT_SUCCESS) {
  2633. vscsi->state = ERR_DISCONNECTED;
  2634. vscsi->flags |= RESPONSE_Q_DOWN;
  2635. }
  2636. return rc;
  2637. }
  2638. /**
  2639. * ibmvscsis_create_command_q() - Create Command Queue
  2640. * @vscsi: Pointer to our adapter structure
  2641. * @num_cmds: Currently unused. In the future, may be used to determine
  2642. * the size of the CRQ.
  2643. *
  2644. * Allocates memory for command queue maps remote memory into an ioba
  2645. * initializes the command response queue
  2646. *
  2647. * EXECUTION ENVIRONMENT:
  2648. * Process level only
  2649. */
  2650. static long ibmvscsis_create_command_q(struct scsi_info *vscsi, int num_cmds)
  2651. {
  2652. int pages;
  2653. struct vio_dev *vdev = vscsi->dma_dev;
  2654. /* We might support multiple pages in the future, but just 1 for now */
  2655. pages = 1;
  2656. vscsi->cmd_q.size = pages;
  2657. vscsi->cmd_q.base_addr =
  2658. (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);
  2659. if (!vscsi->cmd_q.base_addr)
  2660. return -ENOMEM;
  2661. vscsi->cmd_q.mask = ((uint)pages * CRQ_PER_PAGE) - 1;
  2662. vscsi->cmd_q.crq_token = dma_map_single(&vdev->dev,
  2663. vscsi->cmd_q.base_addr,
  2664. PAGE_SIZE, DMA_BIDIRECTIONAL);
  2665. if (dma_mapping_error(&vdev->dev, vscsi->cmd_q.crq_token)) {
  2666. free_page((unsigned long)vscsi->cmd_q.base_addr);
  2667. return -ENOMEM;
  2668. }
  2669. return 0;
  2670. }
/**
 * ibmvscsis_destroy_command_q - Destroy Command Queue
 * @vscsi:	Pointer to our adapter structure
 *
 * Releases memory for command queue and unmaps mapped remote memory.
 * The DMA unmap must precede freeing the page.
 *
 * EXECUTION ENVIRONMENT:
 *	Process level only
 */
static void ibmvscsis_destroy_command_q(struct scsi_info *vscsi)
{
	dma_unmap_single(&vscsi->dma_dev->dev, vscsi->cmd_q.crq_token,
			 PAGE_SIZE, DMA_BIDIRECTIONAL);
	free_page((unsigned long)vscsi->cmd_q.base_addr);
	vscsi->cmd_q.base_addr = NULL;
	vscsi->state = NO_QUEUE;
}
  2688. static u8 ibmvscsis_fast_fail(struct scsi_info *vscsi,
  2689. struct ibmvscsis_cmd *cmd)
  2690. {
  2691. struct iu_entry *iue = cmd->iue;
  2692. struct se_cmd *se_cmd = &cmd->se_cmd;
  2693. struct srp_cmd *srp = (struct srp_cmd *)iue->sbuf->buf;
  2694. struct scsi_sense_hdr sshdr;
  2695. u8 rc = se_cmd->scsi_status;
  2696. if (vscsi->fast_fail && (READ_CMD(srp->cdb) || WRITE_CMD(srp->cdb)))
  2697. if (scsi_normalize_sense(se_cmd->sense_buffer,
  2698. se_cmd->scsi_sense_length, &sshdr))
  2699. if (sshdr.sense_key == HARDWARE_ERROR &&
  2700. (se_cmd->residual_count == 0 ||
  2701. se_cmd->residual_count == se_cmd->data_length)) {
  2702. rc = NO_SENSE;
  2703. cmd->flags |= CMD_FAST_FAIL;
  2704. }
  2705. return rc;
  2706. }
/**
 * srp_build_response() - Build an SRP response buffer
 * @vscsi:	Pointer to our adapter structure
 * @cmd:	Pointer to command for which to send the response
 * @len_p:	Where to return the length of the IU response sent.  This
 *		is needed to construct the CRQ response.
 *
 * Build the SRP response buffer and copy it to the client's memory space.
 */
static long srp_build_response(struct scsi_info *vscsi,
			       struct ibmvscsis_cmd *cmd, uint *len_p)
{
	struct iu_entry *iue = cmd->iue;
	struct se_cmd *se_cmd = &cmd->se_cmd;
	struct srp_rsp *rsp;
	uint len;
	u32 rsp_code;
	char *data;
	u32 *tsk_status;
	long rc = ADAPT_SUCCESS;

	spin_lock_bh(&vscsi->intr_lock);

	rsp = &vio_iu(iue)->srp.rsp;
	len = sizeof(*rsp);
	memset(rsp, 0, len);
	data = rsp->data;

	rsp->opcode = SRP_RSP;

	/* Return any accumulated request-limit credit plus one. */
	rsp->req_lim_delta = cpu_to_be32(1 + vscsi->credit);
	rsp->tag = cmd->rsp.tag;
	rsp->flags = 0;

	if (cmd->type == SCSI_CDB) {
		/* SCSI command: status may be downgraded by fast-fail. */
		rsp->status = ibmvscsis_fast_fail(vscsi, cmd);
		if (rsp->status) {
			/* Bad status: attach residual and sense data,
			 * and pick the unsuccessful-completion sol_not
			 * bits.
			 */
			dev_dbg(&vscsi->dev, "build_resp: cmd %p, scsi status %d\n",
				cmd, (int)rsp->status);
			ibmvscsis_determine_resid(se_cmd, rsp);
			if (se_cmd->scsi_sense_length && se_cmd->sense_buffer) {
				rsp->sense_data_len =
					cpu_to_be32(se_cmd->scsi_sense_length);
				rsp->flags |= SRP_RSP_FLAG_SNSVALID;
				len += se_cmd->scsi_sense_length;
				memcpy(data, se_cmd->sense_buffer,
				       se_cmd->scsi_sense_length);
			}
			rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
				UCSOLNT_RESP_SHIFT;
		} else if (cmd->flags & CMD_FAST_FAIL) {
			/* Fast-fail reported NO_SENSE but still counts as
			 * unsuccessful for solicited-notification purposes.
			 */
			dev_dbg(&vscsi->dev, "build_resp: cmd %p, fast fail\n",
				cmd);
			rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
				UCSOLNT_RESP_SHIFT;
		} else {
			rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
				SCSOLNT_RESP_SHIFT;
		}
	} else {
		/* this is task management */
		rsp->status = 0;
		rsp->resp_data_len = cpu_to_be32(4);
		rsp->flags |= SRP_RSP_FLAG_RSPVALID;

		/* Translate the TCM TMR response into the 4-byte SRP
		 * response code carried in the response data.
		 */
		switch (se_cmd->se_tmr_req->response) {
		case TMR_FUNCTION_COMPLETE:
		case TMR_TASK_DOES_NOT_EXIST:
			rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_COMPLETE;
			rsp->sol_not = (cmd->rsp.sol_not & SCSOLNT) >>
				SCSOLNT_RESP_SHIFT;
			break;
		case TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED:
		case TMR_LUN_DOES_NOT_EXIST:
			rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_NOT_SUPPORTED;
			rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
				UCSOLNT_RESP_SHIFT;
			break;
		case TMR_FUNCTION_FAILED:
		case TMR_FUNCTION_REJECTED:
		default:
			rsp_code = SRP_TASK_MANAGEMENT_FUNCTION_FAILED;
			rsp->sol_not = (cmd->rsp.sol_not & UCSOLNT) >>
				UCSOLNT_RESP_SHIFT;
			break;
		}

		tsk_status = (u32 *)data;
		*tsk_status = cpu_to_be32(rsp_code);
		data = (char *)(tsk_status + 1);
		len += 4;
	}

	/* Make the IU visible in memory before the hypervisor copy. */
	dma_wmb();
	rc = h_copy_rdma(len, vscsi->dds.window[LOCAL].liobn, iue->sbuf->dma,
			 vscsi->dds.window[REMOTE].liobn,
			 be64_to_cpu(iue->remote_token));

	switch (rc) {
	case H_SUCCESS:
		vscsi->credit = 0;
		*len_p = len;
		break;
	case H_PERMISSION:
		if (connection_broken(vscsi))
			vscsi->flags |= RESPONSE_Q_DOWN | CLIENT_FAILED;
		dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld, flags 0x%x, state 0x%hx\n",
			rc, vscsi->flags, vscsi->state);
		break;
	case H_SOURCE_PARM:
	case H_DEST_PARM:
	default:
		dev_err(&vscsi->dev, "build_response: error copying to client, rc %ld\n",
			rc);
		break;
	}

	spin_unlock_bh(&vscsi->intr_lock);

	return rc;
}
/**
 * ibmvscsis_rdma() - Transfer data between client memory and our SG list
 * @cmd:	Command whose data is being transferred
 * @sg:		Local scatter/gather list
 * @nsg:	Number of entries in @sg
 * @md:		Client-side (remote) memory descriptor array
 * @nmd:	Number of entries in @md
 * @dir:	DMA_TO_DEVICE reads from the client; otherwise writes to it
 * @bytes:	Total number of bytes to transfer
 *
 * Walks both the remote descriptor list and the local SG list in step,
 * issuing h_copy_rdma for each overlapping chunk (capped at
 * max_vdma_size).  Returns 0 on success or a negative/hcall error code.
 */
static int ibmvscsis_rdma(struct ibmvscsis_cmd *cmd, struct scatterlist *sg,
			  int nsg, struct srp_direct_buf *md, int nmd,
			  enum dma_data_direction dir, unsigned int bytes)
{
	struct iu_entry *iue = cmd->iue;
	struct srp_target *target = iue->target;
	struct scsi_info *vscsi = target->ldata;
	struct scatterlist *sgp;
	dma_addr_t client_ioba, server_ioba;
	ulong buf_len;
	ulong client_len, server_len;
	int md_idx;
	long tx_len;
	long rc = 0;

	if (bytes == 0)
		return 0;

	sgp = sg;
	client_len = 0;
	server_len = 0;
	md_idx = 0;
	tx_len = bytes;

	do {
		/* Refill the client-side cursor from the next descriptor
		 * when the current one is exhausted.
		 */
		if (client_len == 0) {
			if (md_idx >= nmd) {
				dev_err(&vscsi->dev, "rdma: ran out of client memory descriptors\n");
				rc = -EIO;
				break;
			}
			client_ioba = be64_to_cpu(md[md_idx].va);
			client_len = be32_to_cpu(md[md_idx].len);
		}

		/* Likewise refill the server-side cursor from the SG list. */
		if (server_len == 0) {
			if (!sgp) {
				dev_err(&vscsi->dev, "rdma: ran out of scatter/gather list\n");
				rc = -EIO;
				break;
			}
			server_ioba = sg_dma_address(sgp);
			server_len = sg_dma_len(sgp);
		}

		/* Chunk size: remaining total, bounded by both cursors and
		 * the per-call hypervisor limit.
		 */
		buf_len = tx_len;

		if (buf_len > client_len)
			buf_len = client_len;

		if (buf_len > server_len)
			buf_len = server_len;

		if (buf_len > max_vdma_size)
			buf_len = max_vdma_size;

		if (dir == DMA_TO_DEVICE) {
			/* read from client */
			rc = h_copy_rdma(buf_len,
					 vscsi->dds.window[REMOTE].liobn,
					 client_ioba,
					 vscsi->dds.window[LOCAL].liobn,
					 server_ioba);
		} else {
			/* The h_copy_rdma will cause phyp, running in another
			 * partition, to read memory, so we need to make sure
			 * the data has been written out, hence these syncs.
			 */
			/* ensure that everything is in memory */
			isync();
			/* ensure that memory has been made visible */
			dma_wmb();
			rc = h_copy_rdma(buf_len,
					 vscsi->dds.window[LOCAL].liobn,
					 server_ioba,
					 vscsi->dds.window[REMOTE].liobn,
					 client_ioba);
		}
		switch (rc) {
		case H_SUCCESS:
			break;
		case H_PERMISSION:
		case H_SOURCE_PARM:
		case H_DEST_PARM:
			if (connection_broken(vscsi)) {
				spin_lock_bh(&vscsi->intr_lock);
				vscsi->flags |=
					(RESPONSE_Q_DOWN | CLIENT_FAILED);
				spin_unlock_bh(&vscsi->intr_lock);
			}
			dev_err(&vscsi->dev, "rdma: h_copy_rdma failed, rc %ld\n",
				rc);
			break;
		default:
			dev_err(&vscsi->dev, "rdma: unknown error %ld from h_copy_rdma\n",
				rc);
			break;
		}

		/* Advance both cursors past the chunk just copied. */
		if (!rc) {
			tx_len -= buf_len;
			if (tx_len) {
				client_len -= buf_len;
				if (client_len == 0)
					md_idx++;
				else
					client_ioba += buf_len;

				server_len -= buf_len;
				if (server_len == 0)
					sgp = sg_next(sgp);
				else
					server_ioba += buf_len;
			} else {
				break;
			}
		}
	} while (!rc);

	return rc;
}
/**
 * ibmvscsis_handle_crq() - Handle CRQ
 * @data:	Pointer to our adapter structure
 *
 * Read the command elements from the command queue and copy the payloads
 * associated with the command elements to local memory and execute the
 * SRP requests.
 *
 * Note: this is an edge triggered interrupt.  It can not be shared.
 */
static void ibmvscsis_handle_crq(unsigned long data)
{
	struct scsi_info *vscsi = (struct scsi_info *)data;
	struct viosrp_crq *crq;
	long rc;
	bool ack = true;
	volatile u8 valid;	/* re-read from DMA'd queue memory each pass */

	spin_lock_bh(&vscsi->intr_lock);

	dev_dbg(&vscsi->dev, "got interrupt\n");

	/*
	 * if we are in a path where we are waiting for all pending commands
	 * to complete because we received a transport event and anything in
	 * the command queue is for a new connection, do nothing
	 */
	if (TARGET_STOP(vscsi)) {
		vio_enable_interrupts(vscsi->dma_dev);

		dev_dbg(&vscsi->dev, "handle_crq, don't process: flags 0x%x, state 0x%hx\n",
			vscsi->flags, vscsi->state);
		spin_unlock_bh(&vscsi->intr_lock);
		return;
	}

	/* Non-zero rc means a disconnect is already scheduled; then only
	 * transport events are serviced below.
	 */
	rc = vscsi->flags & SCHEDULE_DISCONNECT;
	crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
	valid = crq->valid;
	dma_rmb();

	while (valid) {
		/*
		 * These are edege triggered interrupts.  After dropping out of
		 * the while loop, the code must check for work since an
		 * interrupt could be lost, and an elment be left on the queue,
		 * hence the label.
		 */
cmd_work:
		vscsi->cmd_q.index =
			(vscsi->cmd_q.index + 1) & vscsi->cmd_q.mask;

		if (!rc) {
			rc = ibmvscsis_parse_command(vscsi, crq);
		} else {
			if ((uint)crq->valid == VALID_TRANS_EVENT) {
				/*
				 * must service the transport layer events even
				 * in an error state, dont break out until all
				 * the consecutive transport events have been
				 * processed
				 */
				rc = ibmvscsis_trans_event(vscsi, crq);
			} else if (vscsi->flags & TRANS_EVENT) {
				/*
				 * if a transport event has occurred leave
				 * everything but transport events on the queue
				 *
				 * need to decrement the queue index so we can
				 * look at the element again
				 */
				if (vscsi->cmd_q.index)
					vscsi->cmd_q.index -= 1;
				else
					/*
					 * index is at 0 it just wrapped.
					 * have it index last element in q
					 */
					vscsi->cmd_q.index = vscsi->cmd_q.mask;
				break;
			}
		}

		/* Mark the element consumed and advance to the next one. */
		crq->valid = INVALIDATE_CMD_RESP_EL;

		crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index;
		valid = crq->valid;
		dma_rmb();
	}

	if (!rc) {
		if (ack) {
			/* Re-enable interrupts exactly once, then re-check
			 * the queue for elements that raced in (lost edge).
			 */
			vio_enable_interrupts(vscsi->dma_dev);
			ack = false;
			dev_dbg(&vscsi->dev, "handle_crq, reenabling interrupts\n");
		}
		valid = crq->valid;
		dma_rmb();
		if (valid)
			goto cmd_work;
	} else {
		dev_dbg(&vscsi->dev, "handle_crq, error: flags 0x%x, state 0x%hx, crq index 0x%x\n",
			vscsi->flags, vscsi->state, vscsi->cmd_q.index);
	}

	dev_dbg(&vscsi->dev, "Leaving handle_crq: schedule_q empty %d, flags 0x%x, state 0x%hx\n",
		(int)list_empty(&vscsi->schedule_q), vscsi->flags,
		vscsi->state);

	spin_unlock_bh(&vscsi->intr_lock);
}
/*
 * Probe a vio device: allocate and initialize the adapter, read its DMA
 * windows, set up the SRP target, command pool, retry timer, CRQ page,
 * partner-info buffer, tasklet, work queue, and IRQ.  Unwinds in strict
 * reverse order via the goto-cleanup chain on any failure.
 */
static int ibmvscsis_probe(struct vio_dev *vdev,
			   const struct vio_device_id *id)
{
	struct scsi_info *vscsi;
	int rc = 0;
	long hrc = 0;
	char wq_name[24];

	vscsi = kzalloc(sizeof(*vscsi), GFP_KERNEL);
	if (!vscsi) {
		rc = -ENOMEM;
		dev_err(&vdev->dev, "probe: allocation of adapter failed\n");
		return rc;
	}

	vscsi->dma_dev = vdev;
	vscsi->dev = vdev->dev;
	INIT_LIST_HEAD(&vscsi->schedule_q);
	INIT_LIST_HEAD(&vscsi->waiting_rsp);
	INIT_LIST_HEAD(&vscsi->active_q);

	snprintf(vscsi->tport.tport_name, IBMVSCSIS_NAMELEN, "%s",
		 dev_name(&vdev->dev));

	dev_dbg(&vscsi->dev, "probe tport_name: %s\n", vscsi->tport.tport_name);

	rc = read_dma_window(vscsi);
	if (rc)
		goto free_adapter;
	dev_dbg(&vscsi->dev, "Probe: liobn 0x%x, riobn 0x%x\n",
		vscsi->dds.window[LOCAL].liobn,
		vscsi->dds.window[REMOTE].liobn);

	/* Eye-catcher for dump analysis. */
	strcpy(vscsi->eye, "VSCSI ");
	strncat(vscsi->eye, vdev->name, MAX_EYE);

	vscsi->dds.unit_id = vdev->unit_address;
	strncpy(vscsi->dds.partition_name, partition_name,
		sizeof(vscsi->dds.partition_name));
	vscsi->dds.partition_num = partition_number;

	spin_lock_bh(&ibmvscsis_dev_lock);
	list_add_tail(&vscsi->list, &ibmvscsis_dev_list);
	spin_unlock_bh(&ibmvscsis_dev_lock);

	/*
	 * TBD: How do we determine # of cmds to request?  Do we know how
	 * many "children" we have?
	 */
	vscsi->request_limit = INITIAL_SRP_LIMIT;
	rc = srp_target_alloc(&vscsi->target, &vdev->dev, vscsi->request_limit,
			      SRP_MAX_IU_LEN);
	if (rc)
		goto rem_list;

	vscsi->target.ldata = vscsi;

	rc = ibmvscsis_alloc_cmds(vscsi, vscsi->request_limit);
	if (rc) {
		dev_err(&vscsi->dev, "alloc_cmds failed, rc %d, num %d\n",
			rc, vscsi->request_limit);
		goto free_target;
	}

	/*
	 * Note: the lock is used in freeing timers, so must initialize
	 * first so that ordering in case of error is correct.
	 */
	spin_lock_init(&vscsi->intr_lock);

	rc = ibmvscsis_alloctimer(vscsi);
	if (rc) {
		dev_err(&vscsi->dev, "probe: alloctimer failed, rc %d\n", rc);
		goto free_cmds;
	}

	rc = ibmvscsis_create_command_q(vscsi, 256);
	if (rc) {
		dev_err(&vscsi->dev, "probe: create_command_q failed, rc %d\n",
			rc);
		goto free_timer;
	}

	/* Page-sized buffer used for H_GET_PARTNER_INFO below. */
	vscsi->map_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!vscsi->map_buf) {
		rc = -ENOMEM;
		dev_err(&vscsi->dev, "probe: allocating cmd buffer failed\n");
		goto destroy_queue;
	}

	vscsi->map_ioba = dma_map_single(&vdev->dev, vscsi->map_buf, PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&vdev->dev, vscsi->map_ioba)) {
		rc = -ENOMEM;
		dev_err(&vscsi->dev, "probe: error mapping command buffer\n");
		goto free_buf;
	}

	/* Ask the hypervisor for the partner partition number; the ioba
	 * and length are packed into a single u64 argument.
	 */
	hrc = h_vioctl(vscsi->dds.unit_id, H_GET_PARTNER_INFO,
		       (u64)vscsi->map_ioba | ((u64)PAGE_SIZE << 32), 0, 0, 0,
		       0);
	if (hrc == H_SUCCESS)
		vscsi->client_data.partition_number =
			be64_to_cpu(*(u64 *)vscsi->map_buf);
	/*
	 * We expect the VIOCTL to fail if we're configured as "any
	 * client can connect" and the client isn't activated yet.
	 * We'll make the call again when he sends an init msg.
	 */
	dev_dbg(&vscsi->dev, "probe hrc %ld, client partition num %d\n",
		hrc, vscsi->client_data.partition_number);

	tasklet_init(&vscsi->work_task, ibmvscsis_handle_crq,
		     (unsigned long)vscsi);

	init_completion(&vscsi->wait_idle);
	init_completion(&vscsi->unconfig);

	snprintf(wq_name, 24, "ibmvscsis%s", dev_name(&vdev->dev));
	vscsi->work_q = create_workqueue(wq_name);
	if (!vscsi->work_q) {
		rc = -ENOMEM;
		dev_err(&vscsi->dev, "create_workqueue failed\n");
		goto unmap_buf;
	}

	rc = request_irq(vdev->irq, ibmvscsis_interrupt, 0, "ibmvscsis", vscsi);
	if (rc) {
		rc = -EPERM;
		dev_err(&vscsi->dev, "probe: request_irq failed, rc %d\n", rc);
		goto destroy_WQ;
	}

	vscsi->state = WAIT_ENABLED;

	dev_set_drvdata(&vdev->dev, vscsi);

	return 0;

	/* Error unwind: strictly reverse order of acquisition. */
destroy_WQ:
	destroy_workqueue(vscsi->work_q);
unmap_buf:
	dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
free_buf:
	kfree(vscsi->map_buf);
destroy_queue:
	tasklet_kill(&vscsi->work_task);
	ibmvscsis_unregister_command_q(vscsi);
	ibmvscsis_destroy_command_q(vscsi);
free_timer:
	ibmvscsis_freetimer(vscsi);
free_cmds:
	ibmvscsis_free_cmds(vscsi);
free_target:
	srp_target_free(&vscsi->target);
rem_list:
	spin_lock_bh(&ibmvscsis_dev_lock);
	list_del(&vscsi->list);
	spin_unlock_bh(&ibmvscsis_dev_lock);
free_adapter:
	kfree(vscsi);

	return rc;
}
/*
 * ibmvscsis_remove() - vio bus removal callback
 *
 * Tears down everything the probe path set up: post an UNCONFIGURING
 * disconnect and sleep until the adapter signals the unconfig
 * completion, then release the irq, workqueue, DMA mapping, map
 * buffer, tasklet, command queue, timer, command pool and SRP target,
 * unlink the adapter from the global list, and free it.
 */
static int ibmvscsis_remove(struct vio_dev *vdev)
{
	struct scsi_info *vscsi = dev_get_drvdata(&vdev->dev);

	dev_dbg(&vscsi->dev, "remove (%s)\n", dev_name(&vscsi->dma_dev->dev));

	/*
	 * CFG_SLEEPING tells the disconnect path that someone is waiting
	 * on the unconfig completion; intr_lock orders this with the
	 * adapter state machine.
	 */
	spin_lock_bh(&vscsi->intr_lock);
	ibmvscsis_post_disconnect(vscsi, UNCONFIGURING, 0);
	vscsi->flags |= CFG_SLEEPING;
	spin_unlock_bh(&vscsi->intr_lock);
	wait_for_completion(&vscsi->unconfig);

	vio_disable_interrupts(vdev);
	free_irq(vdev->irq, vscsi);
	destroy_workqueue(vscsi->work_q);
	dma_unmap_single(&vdev->dev, vscsi->map_ioba, PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	kfree(vscsi->map_buf);
	tasklet_kill(&vscsi->work_task);
	ibmvscsis_destroy_command_q(vscsi);
	ibmvscsis_freetimer(vscsi);
	ibmvscsis_free_cmds(vscsi);
	srp_target_free(&vscsi->target);
	spin_lock_bh(&ibmvscsis_dev_lock);
	list_del(&vscsi->list);
	spin_unlock_bh(&ibmvscsis_dev_lock);
	kfree(vscsi);

	return 0;
}
  3190. static ssize_t system_id_show(struct device *dev,
  3191. struct device_attribute *attr, char *buf)
  3192. {
  3193. return snprintf(buf, PAGE_SIZE, "%s\n", system_id);
  3194. }
  3195. static ssize_t partition_number_show(struct device *dev,
  3196. struct device_attribute *attr, char *buf)
  3197. {
  3198. return snprintf(buf, PAGE_SIZE, "%x\n", partition_number);
  3199. }
  3200. static ssize_t unit_address_show(struct device *dev,
  3201. struct device_attribute *attr, char *buf)
  3202. {
  3203. struct scsi_info *vscsi = container_of(dev, struct scsi_info, dev);
  3204. return snprintf(buf, PAGE_SIZE, "%x\n", vscsi->dma_dev->unit_address);
  3205. }
  3206. static int ibmvscsis_get_system_info(void)
  3207. {
  3208. struct device_node *rootdn, *vdevdn;
  3209. const char *id, *model, *name;
  3210. const uint *num;
  3211. rootdn = of_find_node_by_path("/");
  3212. if (!rootdn)
  3213. return -ENOENT;
  3214. model = of_get_property(rootdn, "model", NULL);
  3215. id = of_get_property(rootdn, "system-id", NULL);
  3216. if (model && id)
  3217. snprintf(system_id, sizeof(system_id), "%s-%s", model, id);
  3218. name = of_get_property(rootdn, "ibm,partition-name", NULL);
  3219. if (name)
  3220. strncpy(partition_name, name, sizeof(partition_name));
  3221. num = of_get_property(rootdn, "ibm,partition-no", NULL);
  3222. if (num)
  3223. partition_number = of_read_number(num, 1);
  3224. of_node_put(rootdn);
  3225. vdevdn = of_find_node_by_path("/vdevice");
  3226. if (vdevdn) {
  3227. const uint *mvds;
  3228. mvds = of_get_property(vdevdn, "ibm,max-virtual-dma-size",
  3229. NULL);
  3230. if (mvds)
  3231. max_vdma_size = *mvds;
  3232. of_node_put(vdevdn);
  3233. }
  3234. return 0;
  3235. }
  3236. static char *ibmvscsis_get_fabric_name(void)
  3237. {
  3238. return "ibmvscsis";
  3239. }
  3240. static char *ibmvscsis_get_fabric_wwn(struct se_portal_group *se_tpg)
  3241. {
  3242. struct ibmvscsis_tport *tport =
  3243. container_of(se_tpg, struct ibmvscsis_tport, se_tpg);
  3244. return tport->tport_name;
  3245. }
  3246. static u16 ibmvscsis_get_tag(struct se_portal_group *se_tpg)
  3247. {
  3248. struct ibmvscsis_tport *tport =
  3249. container_of(se_tpg, struct ibmvscsis_tport, se_tpg);
  3250. return tport->tport_tpgt;
  3251. }
  3252. static u32 ibmvscsis_get_default_depth(struct se_portal_group *se_tpg)
  3253. {
  3254. return 1;
  3255. }
  3256. static int ibmvscsis_check_true(struct se_portal_group *se_tpg)
  3257. {
  3258. return 1;
  3259. }
  3260. static int ibmvscsis_check_false(struct se_portal_group *se_tpg)
  3261. {
  3262. return 0;
  3263. }
  3264. static u32 ibmvscsis_tpg_get_inst_index(struct se_portal_group *se_tpg)
  3265. {
  3266. return 1;
  3267. }
  3268. static int ibmvscsis_check_stop_free(struct se_cmd *se_cmd)
  3269. {
  3270. return target_put_sess_cmd(se_cmd);
  3271. }
/*
 * ibmvscsis_release_cmd() - target-core release callback
 *
 * Moves the command from the active queue to the waiting-for-response
 * queue and kicks ibmvscsis_send_messages() to transmit any queued
 * responses to the client.  intr_lock protects both lists.
 */
static void ibmvscsis_release_cmd(struct se_cmd *se_cmd)
{
	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
						 se_cmd);
	struct scsi_info *vscsi = cmd->adapter;

	spin_lock_bh(&vscsi->intr_lock);
	/* Remove from active_q */
	list_move_tail(&cmd->list, &vscsi->waiting_rsp);
	ibmvscsis_send_messages(vscsi);
	spin_unlock_bh(&vscsi->intr_lock);
}
  3283. static u32 ibmvscsis_sess_get_index(struct se_session *se_sess)
  3284. {
  3285. return 0;
  3286. }
/*
 * ibmvscsis_write_pending() - target-core callback for a WRITE command
 *
 * Pulls the data-out payload from the client via RDMA, then hands the
 * CDB to the target core for execution.
 *
 * Returns 0 on success, or -EIO when the client connection is unusable
 * or the data transfer fails.
 */
static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
{
	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
						 se_cmd);
	struct scsi_info *vscsi = cmd->adapter;
	struct iu_entry *iue = cmd->iue;
	int rc;

	/*
	 * If CLIENT_FAILED OR RESPONSE_Q_DOWN, fail the command with
	 * -EIO: there is no point attempting an srp_transfer_data() to a
	 * dead client, and the error lets LIO finish the command.
	 */
	if ((vscsi->flags & (CLIENT_FAILED | RESPONSE_Q_DOWN))) {
		dev_err(&vscsi->dev, "write_pending failed since: %d\n",
			vscsi->flags);
		return -EIO;
	}

	rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma,
			       1, 1);
	if (rc) {
		dev_err(&vscsi->dev, "srp_transfer_data() failed: %d\n", rc);
		return -EIO;
	}
	/*
	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
	 * object execution queue.
	 */
	target_execute_cmd(se_cmd);

	return 0;
}
  3317. static int ibmvscsis_write_pending_status(struct se_cmd *se_cmd)
  3318. {
  3319. return 0;
  3320. }
/* Intentionally empty: no per-node ACL attributes to adjust. */
static void ibmvscsis_set_default_node_attrs(struct se_node_acl *nacl)
{
}
  3324. static int ibmvscsis_get_cmd_state(struct se_cmd *se_cmd)
  3325. {
  3326. return 0;
  3327. }
  3328. static int ibmvscsis_queue_data_in(struct se_cmd *se_cmd)
  3329. {
  3330. struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
  3331. se_cmd);
  3332. struct iu_entry *iue = cmd->iue;
  3333. struct scsi_info *vscsi = cmd->adapter;
  3334. char *sd;
  3335. uint len = 0;
  3336. int rc;
  3337. rc = srp_transfer_data(cmd, &vio_iu(iue)->srp.cmd, ibmvscsis_rdma, 1,
  3338. 1);
  3339. if (rc) {
  3340. dev_err(&vscsi->dev, "srp_transfer_data failed: %d\n", rc);
  3341. sd = se_cmd->sense_buffer;
  3342. se_cmd->scsi_sense_length = 18;
  3343. memset(se_cmd->sense_buffer, 0, se_cmd->scsi_sense_length);
  3344. /* Logical Unit Communication Time-out asc/ascq = 0x0801 */
  3345. scsi_build_sense_buffer(0, se_cmd->sense_buffer, MEDIUM_ERROR,
  3346. 0x08, 0x01);
  3347. }
  3348. srp_build_response(vscsi, cmd, &len);
  3349. cmd->rsp.format = SRP_FORMAT;
  3350. cmd->rsp.len = len;
  3351. return 0;
  3352. }
/*
 * ibmvscsis_queue_status() - target-core callback to send SCSI status
 *
 * Stages an SRP response carrying the command's status/sense.  The
 * response is transmitted later via ibmvscsis_send_messages().
 */
static int ibmvscsis_queue_status(struct se_cmd *se_cmd)
{
	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
						 se_cmd);
	struct scsi_info *vscsi = cmd->adapter;
	uint len;

	dev_dbg(&vscsi->dev, "queue_status %p\n", se_cmd);

	srp_build_response(vscsi, cmd, &len);
	cmd->rsp.format = SRP_FORMAT;
	cmd->rsp.len = len;

	return 0;
}
  3365. static void ibmvscsis_queue_tm_rsp(struct se_cmd *se_cmd)
  3366. {
  3367. struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
  3368. se_cmd);
  3369. struct scsi_info *vscsi = cmd->adapter;
  3370. struct ibmvscsis_cmd *cmd_itr;
  3371. struct iu_entry *iue = iue = cmd->iue;
  3372. struct srp_tsk_mgmt *srp_tsk = &vio_iu(iue)->srp.tsk_mgmt;
  3373. u64 tag_to_abort = be64_to_cpu(srp_tsk->task_tag);
  3374. uint len;
  3375. dev_dbg(&vscsi->dev, "queue_tm_rsp %p, status %d\n",
  3376. se_cmd, (int)se_cmd->se_tmr_req->response);
  3377. if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK &&
  3378. cmd->se_cmd.se_tmr_req->response == TMR_TASK_DOES_NOT_EXIST) {
  3379. spin_lock_bh(&vscsi->intr_lock);
  3380. list_for_each_entry(cmd_itr, &vscsi->active_q, list) {
  3381. if (tag_to_abort == cmd_itr->se_cmd.tag) {
  3382. cmd_itr->abort_cmd = cmd;
  3383. cmd->flags |= DELAY_SEND;
  3384. break;
  3385. }
  3386. }
  3387. spin_unlock_bh(&vscsi->intr_lock);
  3388. }
  3389. srp_build_response(vscsi, cmd, &len);
  3390. cmd->rsp.format = SRP_FORMAT;
  3391. cmd->rsp.len = len;
  3392. }
/*
 * ibmvscsis_aborted_task() - target-core notification of an aborted
 * command.  Only logged here; response sequencing for aborts is
 * handled in ibmvscsis_queue_tm_rsp()/ibmvscsis_send_messages().
 */
static void ibmvscsis_aborted_task(struct se_cmd *se_cmd)
{
	struct ibmvscsis_cmd *cmd = container_of(se_cmd, struct ibmvscsis_cmd,
						 se_cmd);
	struct scsi_info *vscsi = cmd->adapter;

	dev_dbg(&vscsi->dev, "ibmvscsis_aborted_task %p task_tag: %llu\n",
		se_cmd, se_cmd->tag);
}
  3401. static struct se_wwn *ibmvscsis_make_tport(struct target_fabric_configfs *tf,
  3402. struct config_group *group,
  3403. const char *name)
  3404. {
  3405. struct ibmvscsis_tport *tport;
  3406. struct scsi_info *vscsi;
  3407. tport = ibmvscsis_lookup_port(name);
  3408. if (tport) {
  3409. vscsi = container_of(tport, struct scsi_info, tport);
  3410. tport->tport_proto_id = SCSI_PROTOCOL_SRP;
  3411. dev_dbg(&vscsi->dev, "make_tport(%s), pointer:%p, tport_id:%x\n",
  3412. name, tport, tport->tport_proto_id);
  3413. return &tport->tport_wwn;
  3414. }
  3415. return ERR_PTR(-EINVAL);
  3416. }
/*
 * configfs wwn removal: nothing to free here because the tport is
 * embedded in the adapter's scsi_info (owned by probe/remove); only
 * log the drop.
 */
static void ibmvscsis_drop_tport(struct se_wwn *wwn)
{
	struct ibmvscsis_tport *tport = container_of(wwn,
						     struct ibmvscsis_tport,
						     tport_wwn);
	struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);

	dev_dbg(&vscsi->dev, "drop_tport(%s)\n",
		config_item_name(&tport->tport_wwn.wwn_group.cg_item));
}
/*
 * configfs tpg creation: parse the "tpgt_<N>" directory name and
 * register the portal group with the target core.
 *
 * Returns the new se_tpg, or an ERR_PTR on a malformed name or
 * registration failure.
 */
static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn,
						  const char *name)
{
	struct ibmvscsis_tport *tport =
		container_of(wwn, struct ibmvscsis_tport, tport_wwn);
	u16 tpgt;
	int rc;

	/* name must begin with the literal prefix "tpgt_" */
	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	rc = kstrtou16(name + 5, 0, &tpgt);
	if (rc)
		return ERR_PTR(rc);
	tport->tport_tpgt = tpgt;

	tport->releasing = false;

	rc = core_tpg_register(&tport->tport_wwn, &tport->se_tpg,
			       tport->tport_proto_id);
	if (rc)
		return ERR_PTR(rc);

	return &tport->se_tpg;
}
/*
 * configfs tpg removal: mark the tport disabled/releasing, drop the
 * I_T nexus, and deregister the portal group from the target core.
 */
static void ibmvscsis_drop_tpg(struct se_portal_group *se_tpg)
{
	struct ibmvscsis_tport *tport = container_of(se_tpg,
						     struct ibmvscsis_tport,
						     se_tpg);

	tport->releasing = true;
	tport->enabled = false;

	/*
	 * Release the virtual I_T Nexus for this ibmvscsis TPG
	 */
	ibmvscsis_drop_nexus(tport);
	/*
	 * Deregister the se_tpg from TCM..
	 */
	core_tpg_deregister(se_tpg);
}
  3462. static ssize_t ibmvscsis_wwn_version_show(struct config_item *item,
  3463. char *page)
  3464. {
  3465. return scnprintf(page, PAGE_SIZE, "%s\n", IBMVSCSIS_VERSION);
  3466. }
CONFIGFS_ATTR_RO(ibmvscsis_wwn_, version);

/* Attributes exposed under the fabric's wwn configfs directory. */
static struct configfs_attribute *ibmvscsis_wwn_attrs[] = {
	&ibmvscsis_wwn_attr_version,
	NULL,
};
  3472. static ssize_t ibmvscsis_tpg_enable_show(struct config_item *item,
  3473. char *page)
  3474. {
  3475. struct se_portal_group *se_tpg = to_tpg(item);
  3476. struct ibmvscsis_tport *tport = container_of(se_tpg,
  3477. struct ibmvscsis_tport,
  3478. se_tpg);
  3479. return snprintf(page, PAGE_SIZE, "%d\n", (tport->enabled) ? 1 : 0);
  3480. }
/*
 * ibmvscsis_tpg_enable_store() - configfs "enable" write handler
 *
 * Accepts "0" or "1".  Enabling marks the tport enabled and lets the
 * adapter state machine advance toward a client connection; disabling
 * simulates the server going away by posting an ERR_DISCONNECT.
 * intr_lock orders the enabled flag with the state machine.
 */
static ssize_t ibmvscsis_tpg_enable_store(struct config_item *item,
					  const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct ibmvscsis_tport *tport = container_of(se_tpg,
						     struct ibmvscsis_tport,
						     se_tpg);
	struct scsi_info *vscsi = container_of(tport, struct scsi_info, tport);
	unsigned long tmp;
	int rc;
	long lrc;

	rc = kstrtoul(page, 0, &tmp);
	if (rc < 0) {
		dev_err(&vscsi->dev, "Unable to extract srpt_tpg_store_enable\n");
		return -EINVAL;
	}

	if ((tmp != 0) && (tmp != 1)) {
		dev_err(&vscsi->dev, "Illegal value for srpt_tpg_store_enable\n");
		return -EINVAL;
	}

	if (tmp) {
		spin_lock_bh(&vscsi->intr_lock);
		tport->enabled = true;
		lrc = ibmvscsis_enable_change_state(vscsi);
		if (lrc)
			dev_err(&vscsi->dev, "enable_change_state failed, rc %ld state %d\n",
				lrc, vscsi->state);
		spin_unlock_bh(&vscsi->intr_lock);
	} else {
		spin_lock_bh(&vscsi->intr_lock);
		tport->enabled = false;
		/* This simulates the server going down */
		ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, 0);
		spin_unlock_bh(&vscsi->intr_lock);
	}

	dev_dbg(&vscsi->dev, "tpg_enable_store, tmp %ld, state %d\n", tmp,
		vscsi->state);

	return count;
}
CONFIGFS_ATTR(ibmvscsis_tpg_, enable);

/* Attributes exposed under each tpg's configfs directory. */
static struct configfs_attribute *ibmvscsis_tpg_attrs[] = {
	&ibmvscsis_tpg_attr_enable,
	NULL,
};
/* Fabric operations registered with the LIO target core. */
static const struct target_core_fabric_ops ibmvscsis_ops = {
	.module				= THIS_MODULE,
	.name				= "ibmvscsis",
	.max_data_sg_nents		= MAX_TXU / PAGE_SIZE,
	.get_fabric_name		= ibmvscsis_get_fabric_name,
	.tpg_get_wwn			= ibmvscsis_get_fabric_wwn,
	.tpg_get_tag			= ibmvscsis_get_tag,
	.tpg_get_default_depth		= ibmvscsis_get_default_depth,
	.tpg_check_demo_mode		= ibmvscsis_check_true,
	.tpg_check_demo_mode_cache	= ibmvscsis_check_true,
	.tpg_check_demo_mode_write_protect = ibmvscsis_check_false,
	.tpg_check_prod_mode_write_protect = ibmvscsis_check_false,
	.tpg_get_inst_index		= ibmvscsis_tpg_get_inst_index,
	.check_stop_free		= ibmvscsis_check_stop_free,
	.release_cmd			= ibmvscsis_release_cmd,
	.sess_get_index			= ibmvscsis_sess_get_index,
	.write_pending			= ibmvscsis_write_pending,
	.write_pending_status		= ibmvscsis_write_pending_status,
	.set_default_node_attributes	= ibmvscsis_set_default_node_attrs,
	.get_cmd_state			= ibmvscsis_get_cmd_state,
	.queue_data_in			= ibmvscsis_queue_data_in,
	.queue_status			= ibmvscsis_queue_status,
	.queue_tm_rsp			= ibmvscsis_queue_tm_rsp,
	.aborted_task			= ibmvscsis_aborted_task,
	/*
	 * Setup function pointers for logic in target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= ibmvscsis_make_tport,
	.fabric_drop_wwn		= ibmvscsis_drop_tport,
	.fabric_make_tpg		= ibmvscsis_make_tpg,
	.fabric_drop_tpg		= ibmvscsis_drop_tpg,
	.tfc_wwn_attrs			= ibmvscsis_wwn_attrs,
	.tfc_tpg_base_attrs		= ibmvscsis_tpg_attrs,
};
  3559. static void ibmvscsis_dev_release(struct device *dev) {};
/* Read-only sysfs attributes exposed on each adapter's class device. */
static struct device_attribute dev_attr_system_id =
	__ATTR(system_id, S_IRUGO, system_id_show, NULL);

static struct device_attribute dev_attr_partition_number =
	__ATTR(partition_number, S_IRUGO, partition_number_show, NULL);

static struct device_attribute dev_attr_unit_address =
	__ATTR(unit_address, S_IRUGO, unit_address_show, NULL);
  3566. static struct attribute *ibmvscsis_dev_attrs[] = {
  3567. &dev_attr_system_id.attr,
  3568. &dev_attr_partition_number.attr,
  3569. &dev_attr_unit_address.attr,
  3570. };
  3571. ATTRIBUTE_GROUPS(ibmvscsis_dev);
/* Device class under which each adapter's sysfs device is created. */
static struct class ibmvscsis_class = {
	.name		= "ibmvscsis",
	.dev_release	= ibmvscsis_dev_release,
	.dev_groups	= ibmvscsis_dev_groups,
};
/* vio devices this driver binds to; table is empty-entry terminated. */
static const struct vio_device_id ibmvscsis_device_table[] = {
	{ "v-scsi-host", "IBM,v-scsi-host" },
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmvscsis_device_table);
/* vio bus driver; probe/remove manage the per-adapter scsi_info. */
static struct vio_driver ibmvscsis_driver = {
	.name = "ibmvscsis",
	.id_table = ibmvscsis_device_table,
	.probe = ibmvscsis_probe,
	.remove = ibmvscsis_remove,
};
/*
 * ibmvscsis_init() - Kernel Module initialization
 *
 * Note: vio_register_driver() registers callback functions, and at least one
 * of those callback functions calls TCM - Linux IO Target Subsystem, thus
 * the SCSI Target template must be registered before vio_register_driver()
 * is called.
 *
 * Return: 0 on success, negative errno on failure; partially completed
 * registrations are unwound in reverse order via the goto chain.
 */
static int __init ibmvscsis_init(void)
{
	int rc = 0;

	rc = ibmvscsis_get_system_info();
	if (rc) {
		pr_err("rc %d from get_system_info\n", rc);
		goto out;
	}

	rc = class_register(&ibmvscsis_class);
	if (rc) {
		pr_err("failed class register\n");
		goto out;
	}

	rc = target_register_template(&ibmvscsis_ops);
	if (rc) {
		pr_err("rc %d from target_register_template\n", rc);
		goto unregister_class;
	}

	rc = vio_register_driver(&ibmvscsis_driver);
	if (rc) {
		pr_err("rc %d from vio_register_driver\n", rc);
		goto unregister_target;
	}

	return 0;

unregister_target:
	target_unregister_template(&ibmvscsis_ops);
unregister_class:
	class_unregister(&ibmvscsis_class);
out:
	return rc;
}
/*
 * ibmvscsis_exit() - module teardown; unregister in the reverse order
 * of ibmvscsis_init() (driver first, so no new callbacks arrive while
 * the target template and class go away).
 */
static void __exit ibmvscsis_exit(void)
{
	pr_info("Unregister IBM virtual SCSI host driver\n");
	vio_unregister_driver(&ibmvscsis_driver);
	target_unregister_template(&ibmvscsis_ops);
	class_unregister(&ibmvscsis_class);
}
/* Module metadata and entry/exit points. */
MODULE_DESCRIPTION("IBMVSCSIS fabric driver");
MODULE_AUTHOR("Bryant G. Ly and Michael Cyr");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVSCSIS_VERSION);
module_init(ibmvscsis_init);
module_exit(ibmvscsis_exit);