/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>

#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


/* *************************** Data Structures/Defines ****************** */


enum nvme_fc_queue_flags {
	NVME_FC_Q_CONNECTED = (1 << 0),
};

#define NVMEFC_QUEUE_DELAY	3		/* ms units */

#define NVME_FC_DEFAULT_DEV_LOSS_TMO	60	/* seconds */

struct nvme_fc_queue {
	struct nvme_fc_ctrl	*ctrl;
	struct device		*dev;
	struct blk_mq_hw_ctx	*hctx;
	void			*lldd_handle;
	size_t			cmnd_capsule_len;
	u32			qnum;
	u32			rqcnt;
	u32			seqno;

	u64			connection_id;
	atomic_t		csn;

	unsigned long		flags;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

enum nvme_fcop_flags {
	FCOP_FLAGS_TERMIO	= (1 << 0),
	FCOP_FLAGS_RELEASED	= (1 << 1),
	FCOP_FLAGS_COMPLETE	= (1 << 2),
	FCOP_FLAGS_AEN		= (1 << 3),
};

struct nvmefc_ls_req_op {
	struct nvmefc_ls_req	ls_req;

	struct nvme_fc_rport	*rport;
	struct nvme_fc_queue	*queue;
	struct request		*rq;
	u32			flags;

	int			ls_error;
	struct completion	ls_done;
	struct list_head	lsreq_list;	/* rport->ls_req_list */
	bool			req_queued;
};

enum nvme_fcpop_state {
	FCPOP_STATE_UNINIT	= 0,
	FCPOP_STATE_IDLE	= 1,
	FCPOP_STATE_ACTIVE	= 2,
	FCPOP_STATE_ABORTED	= 3,
	FCPOP_STATE_COMPLETE	= 4,
};

struct nvme_fc_fcp_op {
	/*
	 * nvme/host/core.c requires nreq to be the 1st element in the
	 * private structure associated with the request.
	 */
	struct nvme_request	nreq;

	struct nvmefc_fcp_req	fcp_req;

	struct nvme_fc_ctrl	*ctrl;
	struct nvme_fc_queue	*queue;
	struct request		*rq;

	atomic_t		state;
	u32			flags;
	u32			rqno;
	u32			nents;

	struct nvme_fc_cmd_iu	cmd_iu;
	struct nvme_fc_ersp_iu	rsp_iu;
};

struct nvme_fc_lport {
	struct nvme_fc_local_port	localport;

	struct ida			endp_cnt;
	struct list_head		port_list;	/* nvme_fc_lport_list */
	struct list_head		endp_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_port_template	*ops;
	struct kref			ref;
	atomic_t			act_rport_cnt;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

struct nvme_fc_rport {
	struct nvme_fc_remote_port	remoteport;

	struct list_head		endp_list;	/* for lport->endp_list */
	struct list_head		ctrl_list;
	struct list_head		ls_req_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_lport		*lport;
	spinlock_t			lock;
	struct kref			ref;
	atomic_t			act_ctrl_cnt;
	unsigned long			dev_loss_end;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

enum nvme_fcctrl_flags {
	FCCTRL_TERMIO		= (1 << 0),
};

struct nvme_fc_ctrl {
	spinlock_t		lock;
	struct nvme_fc_queue	*queues;
	struct device		*dev;
	struct nvme_fc_lport	*lport;
	struct nvme_fc_rport	*rport;
	u32			cnum;

	bool			assoc_active;
	u64			association_id;

	struct list_head	ctrl_list;	/* rport->ctrl_list */

	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	tag_set;

	struct delayed_work	connect_work;

	struct kref		ref;
	u32			flags;
	u32			iocnt;
	wait_queue_head_t	ioabort_wait;

	struct nvme_fc_fcp_op	aen_ops[NVME_NR_AEN_COMMANDS];

	struct nvme_ctrl	ctrl;
};

static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{
	return container_of(portptr, struct nvme_fc_lport, localport);
}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{
	return container_of(portptr, struct nvme_fc_rport, remoteport);
}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{
	return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{
	return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
}


/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);

/*
 * These items are short-term. They will eventually be moved into
 * a generic FC class. See comments in module init.
 */
static struct class *fc_class;
static struct device *fc_udev_device;


/* *********************** FC-NVME Port Management ************************ */

static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
			struct nvme_fc_queue *, unsigned int);
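
/*
 * kref release callback for a local port: unlink it from the transport
 * list, release its port number and endpoint IDAs, drop the device
 * reference, and free the structure. Reached only via
 * nvme_fc_lport_put() once the last reference is gone.
 */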
static void
nvme_fc_free_lport(struct kref *ref)
{
	struct nvme_fc_lport *lport =
		container_of(ref, struct nvme_fc_lport, ref);
	unsigned long flags;

	WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&lport->endp_list));

	/* remove from transport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&lport->port_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
	ida_destroy(&lport->endp_cnt);

	put_device(lport->dev);

	kfree(lport);
}

static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
	kref_put(&lport->ref, nvme_fc_free_lport);
}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
	return kref_get_unless_zero(&lport->ref);
}
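
/*
 * Scan the transport's lport list for a previously deleted localport
 * with the same WWNN/WWPN. If one is found whose references have not
 * yet fully expired, take a reference and resume it in place rather
 * than allocating a new structure. Returns the resumed lport, NULL if
 * no usable match exists, or an ERR_PTR if a match is in a bad state.
 */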
static struct nvme_fc_lport *
nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *ops,
			struct device *dev)
{
	struct nvme_fc_lport *lport;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != pinfo->node_name ||
		    lport->localport.port_name != pinfo->port_name)
			continue;

		if (lport->dev != dev) {
			lport = ERR_PTR(-EXDEV);
			goto out_done;
		}

		if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
			lport = ERR_PTR(-EEXIST);
			goto out_done;
		}

		if (!nvme_fc_lport_get(lport)) {
			/*
			 * fails if ref cnt already 0. If so,
			 * act as if lport already deleted
			 */
			lport = NULL;
			goto out_done;
		}

		/* resume the lport */

		lport->ops = ops;
		lport->localport.port_role = pinfo->port_role;
		lport->localport.port_id = pinfo->port_id;
		lport->localport.port_state = FC_OBJSTATE_ONLINE;

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		return lport;
	}

	lport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return lport;
}

/**
 * nvme_fc_register_localport - transport entry point called by an
 *                              LLDD to register the existence of an
 *                              NVME host FC port.
 * @pinfo:    pointer to information about the port to be registered
 * @template: LLDD entrypoints and operational parameters for the port
 * @dev:      physical hardware device node port corresponds to. Will be
 *            used for DMA mappings
 * @portptr:  pointer to a local port pointer. Upon success, the routine
 *            will allocate a nvme_fc_local_port structure and place its
 *            address in the local port pointer. Upon failure, local port
 *            pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *template,
			struct device *dev,
			struct nvme_fc_local_port **portptr)
{
	struct nvme_fc_lport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->localport_delete || !template->remoteport_delete ||
	    !template->ls_req || !template->fcp_io ||
	    !template->ls_abort || !template->fcp_abort ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a localport that had been
	 * deregistered and in the process of waiting for all the
	 * references to fully be removed. If the references haven't
	 * expired, we can simply re-enable the localport. Remoteports
	 * and controller reconnections should resume naturally.
	 */
	newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);

	/* found an lport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_reghost_failed;

	/* found existing lport, which was resumed */
	} else if (newrec) {
		*portptr = &newrec->localport;
		return 0;
	}

	/* nothing found - allocate a new localport struct */

	newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_reghost_failed;
	}

	idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	INIT_LIST_HEAD(&newrec->port_list);
	INIT_LIST_HEAD(&newrec->endp_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_rport_cnt, 0);
	newrec->ops = template;
	newrec->dev = dev;
	ida_init(&newrec->endp_cnt);
	newrec->localport.private = &newrec[1];
	newrec->localport.node_name = pinfo->node_name;
	newrec->localport.port_name = pinfo->port_name;
	newrec->localport.port_role = pinfo->port_role;
	newrec->localport.port_id = pinfo->port_id;
	newrec->localport.port_state = FC_OBJSTATE_ONLINE;
	newrec->localport.port_num = idx;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (dev)
		dma_set_seg_boundary(dev, template->dma_boundary);

	*portptr = &newrec->localport;
	return 0;

out_ida_put:
	ida_simple_remove(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_reghost_failed:
	*portptr = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
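
/*
 * Hypothetical LLDD usage sketch (the names below are illustrative
 * only and are not defined in this file): an LLDD would typically
 * register its FC port with the transport at probe time and
 * unregister it on remove.
 *
 *	struct nvme_fc_port_info pinfo = {
 *		.node_name = my_wwnn,
 *		.port_name = my_wwpn,
 *		.port_role = FC_PORT_ROLE_NVME_INITIATOR,
 *	};
 *	struct nvme_fc_local_port *localport;
 *	int ret;
 *
 *	ret = nvme_fc_register_localport(&pinfo, &my_lldd_template,
 *					 &pdev->dev, &localport);
 *	if (ret)
 *		return ret;
 *	...
 *	nvme_fc_unregister_localport(localport);
 */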

/**
 * nvme_fc_unregister_localport - transport entry point called by an
 *                                LLDD to deregister/remove a previously
 *                                registered NVME host FC port.
 * @portptr: pointer to the (registered) local port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(portptr);
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&nvme_fc_lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (atomic_read(&lport->act_rport_cnt) == 0)
		lport->ops->localport_delete(&lport->localport);

	nvme_fc_lport_put(lport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);

/*
 * TRADDR strings, per FC-NVME, are fixed format:
 *     "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
 * The udev event will only differ by the prefix of what field is
 * being specified:
 *     "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
 * 19 + 43 + null_fudge = 64 characters
 */
#define FCNVME_TRADDR_LENGTH		64
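
/*
 * Raise an FC_EVENT=nvmediscovery udev event on the fc_udev_device,
 * carrying the host and target traddr strings, so that userspace
 * (e.g. an autoconnect udev rule) can initiate discovery. Only
 * emitted for remote ports with the NVME_DISCOVERY role.
 */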
static void
nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
		struct nvme_fc_rport *rport)
{
	char hostaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_HOST_TRADDR=...*/
	char tgtaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_TRADDR=...*/
	char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };

	if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
		return;

	snprintf(hostaddr, sizeof(hostaddr),
		"NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
		lport->localport.node_name, lport->localport.port_name);
	snprintf(tgtaddr, sizeof(tgtaddr),
		"NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
		rport->remoteport.node_name, rport->remoteport.port_name);
	kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
}

static void
nvme_fc_free_rport(struct kref *ref)
{
	struct nvme_fc_rport *rport =
		container_of(ref, struct nvme_fc_rport, ref);
	struct nvme_fc_lport *lport =
		localport_to_lport(rport->remoteport.localport);
	unsigned long flags;

	WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&rport->ctrl_list));

	/* remove from lport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&rport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);

	kfree(rport);

	nvme_fc_lport_put(lport);
}

static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{
	kref_put(&rport->ref, nvme_fc_free_rport);
}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{
	return kref_get_unless_zero(&rport->ref);
}
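
/*
 * Called (under the rport lock) when connectivity to a remote port is
 * re-established: controllers that were waiting to reconnect get their
 * connect work scheduled immediately; controllers already resetting
 * will reconnect on their own once the reset completes.
 */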
static void
nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{
	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_RECONNECTING:
		/*
		 * As all reconnects were suppressed, schedule a
		 * connect.
		 */
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: connectivity re-established. "
			"Attempting reconnect\n", ctrl->cnum);

		queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will naturally occur after the reset completes.
		 */
		break;

	default:
		/* no action to take - let it delete */
		break;
	}
}
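
/*
 * Like nvme_fc_attach_to_unreg_lport(), but for remote ports: look for
 * a deleted rport with matching WWNN/WWPN, bring it back ONLINE, and
 * kick off reconnect attempts on all of its controllers.
 */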
static struct nvme_fc_rport *
nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
				struct nvme_fc_port_info *pinfo)
{
	struct nvme_fc_rport *rport;
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(rport, &lport->endp_list, endp_list) {
		if (rport->remoteport.node_name != pinfo->node_name ||
		    rport->remoteport.port_name != pinfo->port_name)
			continue;

		if (!nvme_fc_rport_get(rport)) {
			rport = ERR_PTR(-ENOLCK);
			goto out_done;
		}

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		spin_lock_irqsave(&rport->lock, flags);

		/* has it been unregistered? */
		if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
			/* means lldd called us twice */
			spin_unlock_irqrestore(&rport->lock, flags);
			nvme_fc_rport_put(rport);
			return ERR_PTR(-ESTALE);
		}

		rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
		rport->dev_loss_end = 0;

		/*
		 * kick off a reconnect attempt on all associations to the
		 * remote port. A successful reconnect will resume i/o.
		 */
		list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
			nvme_fc_resume_controller(ctrl);

		spin_unlock_irqrestore(&rport->lock, flags);

		return rport;
	}

	rport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return rport;
}

static inline void
__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
			struct nvme_fc_port_info *pinfo)
{
	if (pinfo->dev_loss_tmo)
		rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
	else
		rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
}

/**
 * nvme_fc_register_remoteport - transport entry point called by an
 *                               LLDD to register the existence of an
 *                               NVME subsystem FC port on its fabric.
 * @localport: pointer to the (registered) local port that the remote
 *             subsystem port is connected to.
 * @pinfo:     pointer to information about the port to be registered
 * @portptr:   pointer to a remote port pointer. Upon success, the routine
 *             will allocate a nvme_fc_remote_port structure and place its
 *             address in the remote port pointer. Upon failure, remote port
 *             pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
				struct nvme_fc_port_info *pinfo,
				struct nvme_fc_remote_port **portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(localport);
	struct nvme_fc_rport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!nvme_fc_lport_get(lport)) {
		ret = -ESHUTDOWN;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a remoteport that is waiting
	 * for a reconnect (within dev_loss_tmo) with the same WWN's.
	 * If so, transition to it and reconnect.
	 */
	newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);

	/* found an rport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_lport_put;

	/* found existing rport, which was resumed */
	} else if (newrec) {
		nvme_fc_lport_put(lport);
		__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
		nvme_fc_signal_discovery_scan(lport, newrec);
		*portptr = &newrec->remoteport;
		return 0;
	}

	/* nothing found - allocate a new remoteport struct */

	newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_lport_put;
	}

	idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_kfree_rport;
	}

	INIT_LIST_HEAD(&newrec->endp_list);
	INIT_LIST_HEAD(&newrec->ctrl_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_ctrl_cnt, 0);
	spin_lock_init(&newrec->lock);
	newrec->remoteport.localport = &lport->localport;
	newrec->dev = lport->dev;
	newrec->lport = lport;
	newrec->remoteport.private = &newrec[1];
	newrec->remoteport.port_role = pinfo->port_role;
	newrec->remoteport.node_name = pinfo->node_name;
	newrec->remoteport.port_name = pinfo->port_name;
	newrec->remoteport.port_id = pinfo->port_id;
	newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
	newrec->remoteport.port_num = idx;
	__nvme_fc_set_dev_loss_tmo(newrec, pinfo);

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->endp_list, &lport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	nvme_fc_signal_discovery_scan(lport, newrec);

	*portptr = &newrec->remoteport;
	return 0;

out_kfree_rport:
	kfree(newrec);
out_lport_put:
	nvme_fc_lport_put(lport);
out_reghost_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
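
/*
 * Walk the rport's pending LS request list and ask the LLDD to abort
 * each request that hasn't already been marked for termination. The
 * list is rescanned from the top after every ls_abort() call, since
 * the rport lock must be dropped around the LLDD callout.
 */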
static int
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
	struct nvmefc_ls_req_op *lsop;
	unsigned long flags;

restart:
	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
		if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
			lsop->flags |= FCOP_FLAGS_TERMIO;
			spin_unlock_irqrestore(&rport->lock, flags);
			rport->lport->ops->ls_abort(&rport->lport->localport,
						&rport->remoteport,
						&lsop->ls_req);
			goto restart;
		}
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}

static void
nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
{
	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: controller connectivity lost. Awaiting "
		"Reconnect\n", ctrl->cnum);

	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
		/*
		 * Schedule a controller reset. The reset will terminate the
		 * association and schedule the reconnect timer. Reconnects
		 * will be attempted until either the ctlr_loss_tmo
		 * (max_retries * connect_delay) expires or the remoteport's
		 * dev_loss_tmo expires.
		 */
		if (nvme_reset_ctrl(&ctrl->ctrl)) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Couldn't schedule reset. "
				"Deleting controller.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		}
		break;

	case NVME_CTRL_RECONNECTING:
		/*
		 * The association has already been terminated and the
		 * controller is attempting reconnects. No need to do anything
		 * further. Reconnects will be attempted until either the
		 * ctlr_loss_tmo (max_retries * connect_delay) expires or the
		 * remoteport's dev_loss_tmo expires.
		 */
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will kick in naturally after the association is
		 * terminated.
		 */
		break;

	case NVME_CTRL_DELETING:
	default:
		/* no action to take - let it delete */
		break;
	}
}

/**
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                                 LLDD to deregister/remove a previously
 *                                 registered NVME subsystem FC port.
 * @portptr: pointer to the (registered) remote port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		/* if dev_loss_tmo==0, dev loss is immediate */
		if (!portptr->dev_loss_tmo) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: controller connectivity lost. "
				"Deleting controller.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		} else
			nvme_fc_ctrl_connectivity_loss(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	nvme_fc_abort_lsops(rport);

	if (atomic_read(&rport->act_ctrl_cnt) == 0)
		rport->lport->ops->remoteport_delete(portptr);

	/*
	 * Release the reference. Once all the controllers go away
	 * (which should only occur after dev_loss_tmo expires), the
	 * rport can be torn down.
	 */
	nvme_fc_rport_put(rport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);

/**
 * nvme_fc_rescan_remoteport - transport entry point called by an
 *                             LLDD to request a nvme device rescan.
 * @remoteport: pointer to the (registered) remote port that is to be
 *              rescanned.
 *
 * Returns: N/A
 */
void
nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);

	nvme_fc_signal_discovery_scan(rport->lport, rport);
}
EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);

int
nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
			u32 dev_loss_tmo)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}

	/* a dev_loss_tmo of 0 (immediate) is allowed to be set */
	rport->remoteport.dev_loss_tmo = dev_loss_tmo;

	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);

/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLD's will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (those that return just a dma address), we'll
 * noop them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}

/* *********************** FC-NVME LS Handling **************************** */

static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);
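
/*
 * Common completion path for an LS request: unlink it from the rport's
 * pending list (if still queued), unmap the request/response DMA
 * buffer, and drop the rport reference taken at send time.
 */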
static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);

	nvme_fc_rport_put(rport);
}
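
/*
 * Map the request/response buffer, queue the LS op on the rport, and
 * hand it to the LLDD's ls_req() entry point. On any failure the op
 * is unlinked/unmapped and the rport reference is dropped; the done()
 * callback is invoked only for requests the LLDD accepted.
 */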
static int
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ECONNREFUSED;

	if (!nvme_fc_rport_get(rport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->rport = rport;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);
	init_completion(&lsop->ls_done);

	lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_putrport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&rport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&rport->lock, flags);

	ret = rport->lport->ops->ls_req(&rport->lport->localport,
					&rport->remoteport, lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&rport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&rport->lock, flags);
	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
out_putrport:
	nvme_fc_rport_put(rport);

	return ret;
}

static void
nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	lsop->ls_error = status;
	complete(&lsop->ls_done);
}
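
/*
 * Synchronous LS issue: send the request and wait (uninterruptibly)
 * for the LLDD's completion callback, then map an LS_RJT response to
 * -ENXIO. Used by the create association/connection exchanges below.
 */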
static int
nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
	int ret;

	ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);

	if (!ret) {
		/*
		 * No timeout/not interruptible as we need the struct
		 * to exist until the lldd calls us back. Thus mandate
		 * waiting until the lldd calls back; the lldd is
		 * responsible for any timeout action.
		 */
		wait_for_completion(&lsop->ls_done);

		__nvme_fc_finish_ls_req(lsop);

		ret = lsop->ls_error;
	}

	if (ret)
		return ret;

	/* ACC or RJT payload ? */
	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
		return -ENXIO;

	return 0;
}

static int
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	/* don't wait for completion */

	return __nvme_fc_send_ls_req(rport, lsop, done);
}

/* Validation Error indexes into the string table below */
enum {
	VERR_NO_ERROR		= 0,
	VERR_LSACC		= 1,
	VERR_LSDESC_RQST	= 2,
	VERR_LSDESC_RQST_LEN	= 3,
	VERR_ASSOC_ID		= 4,
	VERR_ASSOC_ID_LEN	= 5,
	VERR_CONN_ID		= 6,
	VERR_CONN_ID_LEN	= 7,
	VERR_CR_ASSOC		= 8,
	VERR_CR_ASSOC_ACC_LEN	= 9,
	VERR_CR_CONN		= 10,
	VERR_CR_CONN_ACC_LEN	= 11,
	VERR_DISCONN		= 12,
	VERR_DISCONN_ACC_LEN	= 13,
};

static char *validation_errors[] = {
	"OK",
	"Not LS_ACC",
	"Not LSDESC_RQST",
	"Bad LSDESC_RQST Length",
	"Not Association ID",
	"Bad Association ID Length",
	"Not Connection ID",
	"Bad Connection ID Length",
	"Not CR_ASSOC Rqst",
	"Bad CR_ASSOC ACC Length",
	"Not CR_CONN Rqst",
	"Bad CR_CONN ACC Length",
	"Not Disconnect Rqst",
	"Bad Disconnect ACC Length",
};
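
/*
 * Build and send the FC-NVME Create Association LS for the admin
 * queue, then validate every descriptor in the LS_ACC response against
 * the table above. On success, record the association and connection
 * IDs returned by the target.
 */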
static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
	struct fcnvme_ls_cr_assoc_acc *assoc_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
	if (!lsop) {
		ret = -ENOMEM;
		goto out_no_memory;
	}
	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];

	assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
	assoc_rqst->desc_list_len =
			cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
	assoc_rqst->assoc_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
	/* Linux supports only Dynamic controllers */
	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
	strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
		min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
	strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
		min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));

	lsop->queue = queue;
	lsreq->rqstaddr = assoc_rqst;
	lsreq->rqstlen = sizeof(*assoc_rqst);
	lsreq->rspaddr = assoc_acc;
	lsreq->rsplen = sizeof(*assoc_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (assoc_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)))
		fcret = VERR_CR_ASSOC_ACC_LEN;
	else if (assoc_acc->hdr.rqst.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (assoc_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
		fcret = VERR_CR_ASSOC;
	else if (assoc_acc->associd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		fcret = VERR_ASSOC_ID;
	else if (assoc_acc->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		fcret = VERR_ASSOC_ID_LEN;
	else if (assoc_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (assoc_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d connect failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		ctrl->association_id =
			be64_to_cpu(assoc_acc->associd.association_id);
		queue->connection_id =
			be64_to_cpu(assoc_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect admin queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}
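
/*
 * Build and send the FC-NVME Create Connection LS for an I/O queue on
 * an existing association; the response validation mirrors that of the
 * admin-queue connect above.
 */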
static int
nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
			u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_conn_rqst *conn_rqst;
	struct fcnvme_ls_cr_conn_acc *conn_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
	if (!lsop) {
		ret = -ENOMEM;
		goto out_no_memory;
	}
	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];

	conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
	conn_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));

	conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	conn_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
	conn_rqst->connect_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
	conn_rqst->connect_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize);

	lsop->queue = queue;
	lsreq->rqstaddr = conn_rqst;
	lsreq->rqstlen = sizeof(*conn_rqst);
	lsreq->rspaddr = conn_acc;
	lsreq->rsplen = sizeof(*conn_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (conn_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
		fcret = VERR_CR_CONN_ACC_LEN;
	else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (conn_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
		fcret = VERR_CR_CONN;
	else if (conn_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (conn_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d connect failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		queue->connection_id =
			be64_to_cpu(conn_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect command failed (%d).\n",
			queue->qnum, ret);
	return ret;
}

static void
nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	__nvme_fc_finish_ls_req(lsop);

	/* fc-nvme initiator doesn't care about success or failure of cmd */

	kfree(lsop);
}

/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association. Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. E.g. things are torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme initiator is such that its
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme target, so you may never get a
 * response even if you tried. As such, the action of this routine
 * is to asynchronously send the LS, ignore any results of the LS, and
 * continue on with terminating the association. If the fc-nvme target
 * is present and receives the LS, it too can tear down.
 */
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{
	struct fcnvme_ls_disconnect_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_acc *discon_acc;
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*discon_rqst) + sizeof(*discon_acc)),
			GFP_KERNEL);
	if (!lsop)
		/* couldn't send it... too bad */
		return;

	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];

	discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
	discon_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_disconn_cmd));

	discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	discon_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));

	discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);

	discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
						FCNVME_LSDESC_DISCONN_CMD);
	discon_rqst->discon_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_disconn_cmd));
	discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
	discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);

	lsreq->rqstaddr = discon_rqst;
	lsreq->rqstlen = sizeof(*discon_rqst);
	lsreq->rspaddr = discon_acc;
	lsreq->rsplen = sizeof(*discon_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
				nvme_fc_disconnect_assoc_done);
	if (ret)
		kfree(lsop);

	/* only meaningful part to terminating the association */
	ctrl->association_id = 0;
}
/* *********************** NVME Ctrl Routines **************************** */

static void __nvme_fc_final_op_cleanup(struct request *rq);
static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
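
/*
 * Re-initialize the FC-NVME CMD IU of a previously allocated request
 * so it can be reused after a controller reset: the fixed header
 * fields are re-stamped and the response IU is zeroed.
 */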
static int
nvme_fc_reinit_request(void *data, struct request *rq)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;

	memset(cmdiu, 0, sizeof(*cmdiu));
	cmdiu->scsi_id = NVME_CMD_SCSI_ID;
	cmdiu->fc_id = NVME_CMD_FC_ID;
	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
	memset(&op->rsp_iu, 0, sizeof(op->rsp_iu));

	return 0;
}
static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op)
{
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
				sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_UNINIT);
}

static void
nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);

	__nvme_fc_exit_request(set->driver_data, op);
}
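
/*
 * Attempt to abort an op that is outstanding with the LLDD. The op's
 * state is atomically swapped to ABORTED; if it wasn't ACTIVE, the
 * original state is restored and -ECANCELED is returned so the caller
 * knows the io is already completing on another path.
 */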
static int
__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
{
	int state;

	state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
	if (state != FCPOP_STATE_ACTIVE) {
		atomic_set(&op->state, state);
		return -ECANCELED;
	}

	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					op->queue->lldd_handle,
					&op->fcp_req);

	return 0;
}
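
/*
 * Abort any AEN ops still active with the LLDD. If the controller is
 * in TERMIO (teardown), each aborted AEN is added to the outstanding
 * io count so teardown can wait for its completion.
 */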
static void
nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
	unsigned long flags;
	int i, ret;

	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE)
			continue;

		spin_lock_irqsave(&ctrl->lock, flags);
		if (ctrl->flags & FCCTRL_TERMIO) {
			ctrl->iocnt++;
			aen_op->flags |= FCOP_FLAGS_TERMIO;
		}
		spin_unlock_irqrestore(&ctrl->lock, flags);

		ret = __nvme_fc_abort_op(ctrl, aen_op);
		if (ret) {
			/*
			 * if __nvme_fc_abort_op failed the io wasn't
			 * active. Thus this call path is running in
			 * parallel to the io complete. Treat as non-error.
			 */

			/* back out the flags/counters */
			spin_lock_irqsave(&ctrl->lock, flags);
			if (ctrl->flags & FCCTRL_TERMIO)
				ctrl->iocnt--;
			aen_op->flags &= ~FCOP_FLAGS_TERMIO;
			spin_unlock_irqrestore(&ctrl->lock, flags);
			return;
		}
	}
}
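
/*
 * Account for a completing op during teardown: drop the outstanding
 * io count (waking the teardown waiter at zero) and report whether
 * the block layer has already released the request, in which case
 * the caller must run the final cleanup itself.
 */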
static inline int
__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op)
{
	unsigned long flags;
	bool complete_rq = false;

	spin_lock_irqsave(&ctrl->lock, flags);
	if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
		if (ctrl->flags & FCCTRL_TERMIO) {
			if (!--ctrl->iocnt)
				wake_up(&ctrl->ioabort_wait);
		}
	}
	if (op->flags & FCOP_FLAGS_RELEASED)
		complete_rq = true;
	else
		op->flags |= FCOP_FLAGS_COMPLETE;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	return complete_rq;
}
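
/*
 * Completion handler called by the LLDD when an FCP operation has
 * finished on the wire. Validates the response, derives the nvme
 * status/result (fabricating a CQE where the wire format allows it),
 * and completes the request back to the block layer.
 */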
static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
	struct request *rq = op->rq;
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	struct nvme_fc_queue *queue = op->queue;
	struct nvme_completion *cqe = &op->rsp_iu.cqe;
	struct nvme_command *sqe = &op->cmd_iu.sqe;
	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
	union nvme_result result;
	bool terminate_assoc = true;

	/*
	 * WARNING:
	 * The current linux implementation of a nvme controller
	 * allocates a single tag set for all io queues and sizes
	 * the io queues to fully hold all possible tags. Thus, the
	 * implementation does not reference or care about the sqhd
	 * value as it never needs to use the sqhd/sqtail pointers
	 * for submission pacing.
	 *
	 * This affects the FC-NVME implementation in two ways:
	 * 1) As the value doesn't matter, we don't need to waste
	 *    cycles extracting it from ERSPs and stamping it in the
	 *    cases where the transport fabricates CQEs on successful
	 *    completions.
	 * 2) The FC-NVME implementation requires that delivery of
	 *    ERSP completions are to go back to the nvme layer in order
	 *    relative to the rsn, such that the sqhd value will always
	 *    be "in order" for the nvme layer. As the nvme layer in
	 *    linux doesn't care about sqhd, there's no need to return
	 *    them in order.
	 *
	 * Additionally:
	 * As the core nvme layer in linux currently does not look at
	 * every field in the cqe - in cases where the FC transport must
	 * fabricate a CQE, the following fields will not be set as they
	 * are not referenced:
	 *      cqe.sqid, cqe.sqhd, cqe.command_id
	 *
	 * Failure or error of an individual i/o, in a transport
	 * detected fashion unrelated to the nvme completion status,
	 * can potentially cause the initiator and target sides to get
	 * out of sync on SQ head/tail (aka outstanding io count allowed).
	 * Per FC-NVME spec, failure of an individual command requires
	 * the connection to be terminated, which in turn requires the
	 * association to be terminated.
	 */

	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);

	if (atomic_read(&op->state) == FCPOP_STATE_ABORTED ||
			op->flags & FCOP_FLAGS_TERMIO)
		status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
	else if (freq->status)
		status = cpu_to_le16(NVME_SC_INTERNAL << 1);

	/*
	 * For the linux implementation, if we have an unsuccessful
	 * status, the blk-mq layer can typically be called with the
	 * non-zero status and the content of the cqe isn't important.
	 */
	if (status)
		goto done;

	/*
	 * command completed successfully relative to the wire
	 * protocol. However, validate anything received and
	 * extract the status and result from the cqe (create it
	 * where necessary).
	 */

	switch (freq->rcv_rsplen) {

	case 0:
	case NVME_FC_SIZEOF_ZEROS_RSP:
		/*
		 * No response payload or 12 bytes of payload (which
		 * should all be zeros) are considered successful and
		 * the transport fabricates a CQE with no payload.
		 */
		if (freq->transferred_length !=
			be32_to_cpu(op->cmd_iu.data_len)) {
			status = cpu_to_le16(NVME_SC_INTERNAL << 1);
			goto done;
		}
		result.u64 = 0;
		break;

	case sizeof(struct nvme_fc_ersp_iu):
		/*
		 * The ERSP IU contains a full completion with CQE.
		 * Validate ERSP IU and look at cqe.
		 */
		if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
					(freq->rcv_rsplen / 4) ||
			     be32_to_cpu(op->rsp_iu.xfrd_len) !=
					freq->transferred_length ||
			     op->rsp_iu.status_code ||
			     sqe->common.command_id != cqe->command_id)) {
			status = cpu_to_le16(NVME_SC_INTERNAL << 1);
			goto done;
		}
		result = cqe->result;
		status = cqe->status;
		break;

	default:
		status = cpu_to_le16(NVME_SC_INTERNAL << 1);
		goto done;
	}

	terminate_assoc = false;

done:
	if (op->flags & FCOP_FLAGS_AEN) {
		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
		__nvme_fc_fcpop_chk_teardowns(ctrl, op);
		atomic_set(&op->state, FCPOP_STATE_IDLE);
		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
		nvme_fc_ctrl_put(ctrl);
		goto check_error;
	}

	/*
	 * Force failures of commands if we're killing the controller
	 * or have an error on a command used to create a new association
	 */
	if (status &&
	    (blk_queue_dying(rq->q) ||
	     ctrl->ctrl.state == NVME_CTRL_NEW ||
	     ctrl->ctrl.state == NVME_CTRL_RECONNECTING))
		status |= cpu_to_le16(NVME_SC_DNR << 1);

	if (__nvme_fc_fcpop_chk_teardowns(ctrl, op))
		__nvme_fc_final_op_cleanup(rq);
	else
		nvme_end_request(rq, status, result);

check_error:
	if (terminate_assoc)
		nvme_fc_error_recovery(ctrl, "transport detected io error");
}
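
/*
 * One-time setup of an FCP op: wire up the CMD/RSP IU addresses and
 * DMA-map them for the life of the request. Returns a negative errno
 * if either DMA mapping fails.
 */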
static int
__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
		struct request *rq, u32 rqno)
{
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	int ret = 0;

	memset(op, 0, sizeof(*op));
	op->fcp_req.cmdaddr = &op->cmd_iu;
	op->fcp_req.cmdlen = sizeof(op->cmd_iu);
	op->fcp_req.rspaddr = &op->rsp_iu;
	op->fcp_req.rsplen = sizeof(op->rsp_iu);
	op->fcp_req.done = nvme_fc_fcpio_done;
	op->fcp_req.first_sgl = (struct scatterlist *)&op[1];
	op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
	op->ctrl = ctrl;
	op->queue = queue;
	op->rq = rq;
	op->rqno = rqno;

	cmdiu->scsi_id = NVME_CMD_SCSI_ID;
	cmdiu->fc_id = NVME_CMD_FC_ID;
	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));

	op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
				&op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - cmdiu dma mapping failed.\n");
		ret = -EFAULT;
		goto out_on_error;
	}

	op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
				&op->rsp_iu, sizeof(op->rsp_iu),
				DMA_FROM_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - rspiu dma mapping failed.\n");
		ret = -EFAULT;
	}

	atomic_set(&op->state, FCPOP_STATE_IDLE);
out_on_error:
	return ret;
}
static int
nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_fc_ctrl *ctrl = set->driver_data;
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];

	return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
}
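
/*
 * Allocate and initialize the FCP ops used for AEN commands. These
 * ops have no struct request behind them, so they carry their own
 * LLDD private area and use command ids above the admin queue depth.
 */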
static int
nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	struct nvme_fc_cmd_iu *cmdiu;
	struct nvme_command *sqe;
	void *private;
	int i, ret;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
						GFP_KERNEL);
		if (!private)
			return -ENOMEM;

		cmdiu = &aen_op->cmd_iu;
		sqe = &cmdiu->sqe;
		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
				aen_op, (struct request *)NULL,
				(NVME_AQ_BLK_MQ_DEPTH + i));
		if (ret) {
			kfree(private);
			return ret;
		}

		aen_op->flags = FCOP_FLAGS_AEN;
		aen_op->fcp_req.first_sgl = NULL; /* no sg list */
		aen_op->fcp_req.private = private;

		memset(sqe, 0, sizeof(*sqe));
		sqe->common.opcode = nvme_admin_async_event;
		/* Note: core layer may overwrite the sqe.command_id value */
		sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
	}
	return 0;
}
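
/*
 * Tear down the AEN ops: unmap their DMA buffers and free the LLDD
 * private areas allocated in nvme_fc_init_aen_ops().
 */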
static void
nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	int i;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		if (!aen_op->fcp_req.private)
			continue;

		__nvme_fc_exit_request(ctrl, aen_op);

		kfree(aen_op->fcp_req.private);
		aen_op->fcp_req.private = NULL;
	}
}
static inline void
__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
		unsigned int qidx)
{
	struct nvme_fc_queue *queue = &ctrl->queues[qidx];

	hctx->driver_data = queue;
	queue->hctx = hctx;
}

static int
nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);

	return 0;
}

static int
nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx);

	return 0;
}
static void
nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
{
	struct nvme_fc_queue *queue;

	queue = &ctrl->queues[idx];
	memset(queue, 0, sizeof(*queue));
	queue->ctrl = ctrl;
	queue->qnum = idx;
	atomic_set(&queue->csn, 1);
	queue->dev = ctrl->dev;

	if (idx > 0)
		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command);

	/*
	 * Considered whether we should allocate buffers for all SQEs
	 * and CQEs and dma map them - mapping their respective entries
	 * into the request structures (kernel vm addr and dma address)
	 * thus the driver could use the buffers/mappings directly.
	 * It only makes sense if the LLDD would use them for its
	 * messaging API. It's very unlikely most adapter APIs would use
	 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload
	 * structures were used instead.
	 */
}
/*
 * This routine terminates a queue at the transport level.
 * The transport has already ensured that all outstanding ios on
 * the queue have been terminated.
 * Terminating the queue's connection would nominally be done with a
 * Disconnect LS, and terminating the admin queue's connection would
 * also terminate the association at the target; see the note below
 * on why no Disconnect(queue) LS is actually sent.
 */
static void
nvme_fc_free_queue(struct nvme_fc_queue *queue)
{
	if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
		return;

	/*
	 * Current implementation never disconnects a single queue.
	 * It always terminates a whole association. So there is never
	 * a disconnect(queue) LS sent to the target.
	 */

	queue->connection_id = 0;
}
static void
__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx)
{
	if (ctrl->lport->ops->delete_queue)
		ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
				queue->lldd_handle);
	queue->lldd_handle = NULL;
}

static void
nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_fc_free_queue(&ctrl->queues[i]);
}
static int
__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
{
	int ret = 0;

	queue->lldd_handle = NULL;
	if (ctrl->lport->ops->create_queue)
		ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
				qidx, qsize, &queue->lldd_handle);

	return ret;
}

static void
nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
	int i;

	for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
		__nvme_fc_delete_hw_queue(ctrl, queue, i);
}

static int
nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	struct nvme_fc_queue *queue = &ctrl->queues[1];
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
		ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
		if (ret)
			goto delete_queues;
	}

	return 0;

delete_queues:
	/* only unwind the io queues; queue 0 is the admin queue */
	for (; i > 0; i--)
		__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
	return ret;
}
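
/*
 * Connect each io queue: first at the FC level via a
 * CreateIOConnection LS, then at the NVMe fabrics level with a
 * Connect command on the new queue.
 */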
static int
nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	int i, ret = 0;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
					(qsize / 5));
		if (ret)
			break;
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			break;
	}

	return ret;
}

static void
nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_fc_init_queue(ctrl, i);
}
static void
nvme_fc_ctrl_free(struct kref *ref)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(ref, struct nvme_fc_ctrl, ref);
	unsigned long flags;

	if (ctrl->ctrl.tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}

	/* remove from rport list */
	spin_lock_irqsave(&ctrl->rport->lock, flags);
	list_del(&ctrl->ctrl_list);
	spin_unlock_irqrestore(&ctrl->rport->lock, flags);

	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);

	kfree(ctrl->queues);

	put_device(ctrl->dev);
	nvme_fc_rport_put(ctrl->rport);

	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
	if (ctrl->ctrl.opts)
		nvmf_free_options(ctrl->ctrl.opts);
	kfree(ctrl);
}

static void
nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvme_fc_ctrl_free);
}

static int
nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
{
	return kref_get_unless_zero(&ctrl->ref);
}
/*
 * All accesses from nvme core layer done - can now free the
 * controller. Called after last nvme_put_ctrl() call
 */
static void
nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

	WARN_ON(nctrl != &ctrl->ctrl);

	nvme_fc_ctrl_put(ctrl);
}

static void
nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
{
	/* only proceed if in LIVE state - e.g. on first error */
	if (ctrl->ctrl.state != NVME_CTRL_LIVE)
		return;

	dev_warn(ctrl->ctrl.device,
		"NVME-FC{%d}: transport association error detected: %s\n",
		ctrl->cnum, errmsg);
	dev_warn(ctrl->ctrl.device,
		"NVME-FC{%d}: resetting controller\n", ctrl->cnum);

	nvme_reset_ctrl(&ctrl->ctrl);
}
static enum blk_eh_timer_return
nvme_fc_timeout(struct request *rq, bool reserved)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	int ret;

	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
			atomic_read(&op->state) == FCPOP_STATE_ABORTED)
		return BLK_EH_RESET_TIMER;

	ret = __nvme_fc_abort_op(ctrl, op);
	if (ret)
		/* io wasn't active to abort */
		return BLK_EH_NOT_HANDLED;

	/*
	 * we can't individually ABTS an io without affecting the queue,
	 * thus killing the queue, and thus the association.
	 * So resolve by performing a controller reset, which will stop
	 * the host/io stack, terminate the association on the link,
	 * and recreate an association on the link.
	 */
	nvme_fc_error_recovery(ctrl, "io timeout error");

	/*
	 * the io abort has been initiated. Have the reset timer
	 * restarted and the abort completion will complete the io
	 * shortly. Avoids a synchronous wait while the abort finishes.
	 */
	return BLK_EH_RESET_TIMER;
}
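
/*
 * Build the scatter/gather list for a request's payload and DMA-map
 * it in the direction implied by the request. Returns 0 for requests
 * with no payload.
 */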
static int
nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	enum dma_data_direction dir;
	int ret;

	freq->sg_cnt = 0;

	if (!blk_rq_payload_bytes(rq))
		return 0;

	freq->sg_table.sgl = freq->first_sgl;
	ret = sg_alloc_table_chained(&freq->sg_table,
			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
	if (ret)
		return -ENOMEM;

	op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
	WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
	dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
				op->nents, dir);
	if (unlikely(freq->sg_cnt <= 0)) {
		sg_free_table_chained(&freq->sg_table, true);
		freq->sg_cnt = 0;
		return -EFAULT;
	}

	/*
	 * TODO: blk_integrity_rq(rq) for DIF
	 */
	return 0;
}
static void
nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{
	struct nvmefc_fcp_req *freq = &op->fcp_req;

	if (!freq->sg_cnt)
		return;

	fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
				((rq_data_dir(rq) == WRITE) ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE));

	nvme_cleanup_cmd(rq);

	sg_free_table_chained(&freq->sg_table, true);

	freq->sg_cnt = 0;
}
/*
 * In FC, the queue is a logical thing. At transport connect, the target
 * creates its "queue" and returns a handle that is to be given to the
 * target whenever it posts something to the corresponding SQ. When an
 * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
 * command contained within the SQE, an io, and assigns a FC exchange
 * to it. The SQE and the associated SQ handle are sent in the initial
 * CMD IU sent on the exchange. All transfers relative to the io occur
 * as part of the exchange. The CQE is the last thing for the io,
 * which is transferred (explicitly or implicitly) with the RSP IU
 * sent on the exchange. After the CQE is received, the FC exchange is
 * terminated and the exchange may be used on a different io.
 *
 * The transport to LLDD api has the transport making a request for a
 * new fcp io request to the LLDD. The LLDD then allocates a FC exchange
 * resource and transfers the command. The LLDD will then process all
 * steps to complete the io. Upon completion, the transport done routine
 * is called.
 *
 * So - while the operation is outstanding to the LLDD, there is a link
 * level FC exchange resource that is also outstanding. This must be
 * considered in all cleanup operations.
 */
static blk_status_t
nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
	struct nvme_fc_fcp_op *op, u32 data_len,
	enum nvmefc_fcp_datadir io_dir)
{
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	struct nvme_command *sqe = &cmdiu->sqe;
	u32 csn;
	int ret;

	/*
	 * before attempting to send the io, check to see if we believe
	 * the target device is present
	 */
	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		goto busy;

	if (!nvme_fc_ctrl_get(ctrl))
		return BLK_STS_IOERR;

	/* format the FC-NVME CMD IU and fcp_req */
	cmdiu->connection_id = cpu_to_be64(queue->connection_id);
	csn = atomic_inc_return(&queue->csn);
	cmdiu->csn = cpu_to_be32(csn);
	cmdiu->data_len = cpu_to_be32(data_len);
	switch (io_dir) {
	case NVMEFC_FCP_WRITE:
		cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
		break;
	case NVMEFC_FCP_READ:
		cmdiu->flags = FCNVME_CMD_FLAGS_READ;
		break;
	case NVMEFC_FCP_NODATA:
		cmdiu->flags = 0;
		break;
	}
	op->fcp_req.payload_length = data_len;
	op->fcp_req.io_dir = io_dir;
	op->fcp_req.transferred_length = 0;
	op->fcp_req.rcv_rsplen = 0;
	op->fcp_req.status = NVME_SC_SUCCESS;
	op->fcp_req.sqid = cpu_to_le16(queue->qnum);

	/*
	 * validate per fabric rules, set fields mandated by fabric spec
	 * as well as those by FC-NVME spec.
	 */
	WARN_ON_ONCE(sqe->common.metadata);
	sqe->common.flags |= NVME_CMD_SGL_METABUF;

	/*
	 * format SQE DPTR field per FC-NVME rules:
	 *    type=0x5     Transport SGL Data Block Descriptor
	 *    subtype=0xA  Transport-specific value
	 *    address=0
	 *    length=length of the data series
	 */
	sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
					NVME_SGL_FMT_TRANSPORT_A;
	sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
	sqe->rw.dptr.sgl.addr = 0;

	if (!(op->flags & FCOP_FLAGS_AEN)) {
		ret = nvme_fc_map_data(ctrl, op->rq, op);
		if (ret < 0) {
			nvme_cleanup_cmd(op->rq);
			nvme_fc_ctrl_put(ctrl);
			if (ret == -ENOMEM || ret == -EAGAIN)
				return BLK_STS_RESOURCE;
			return BLK_STS_IOERR;
		}
	}

	fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
				  sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_ACTIVE);

	if (!(op->flags & FCOP_FLAGS_AEN))
		blk_mq_start_request(op->rq);

	ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					queue->lldd_handle, &op->fcp_req);

	if (ret) {
		if (!(op->flags & FCOP_FLAGS_AEN))
			nvme_fc_unmap_data(ctrl, op->rq, op);

		nvme_fc_ctrl_put(ctrl);

		if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
				ret != -EBUSY)
			return BLK_STS_IOERR;

		goto busy;
	}

	return BLK_STS_OK;

busy:
	if (!(op->flags & FCOP_FLAGS_AEN) && queue->hctx)
		blk_mq_delay_run_hw_queue(queue->hctx, NVMEFC_QUEUE_DELAY);

	return BLK_STS_RESOURCE;
}
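
/*
 * blk-mq .queue_rq entry point: translate the request into an NVMe
 * SQE, derive the data direction and length, and kick off the FCP
 * operation.
 */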
static blk_status_t
nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_fc_queue *queue = hctx->driver_data;
	struct nvme_fc_ctrl *ctrl = queue->ctrl;
	struct request *rq = bd->rq;
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	struct nvme_command *sqe = &cmdiu->sqe;
	enum nvmefc_fcp_datadir io_dir;
	u32 data_len;
	blk_status_t ret;

	ret = nvme_setup_cmd(ns, rq, sqe);
	if (ret)
		return ret;

	data_len = blk_rq_payload_bytes(rq);
	if (data_len)
		io_dir = ((rq_data_dir(rq) == WRITE) ?
					NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
	else
		io_dir = NVMEFC_FCP_NODATA;

	return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
}
static struct blk_mq_tags *
nvme_fc_tagset(struct nvme_fc_queue *queue)
{
	if (queue->qnum == 0)
		return queue->ctrl->admin_tag_set.tags[queue->qnum];

	return queue->ctrl->tag_set.tags[queue->qnum - 1];
}

static int
nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
	struct nvme_fc_queue *queue = hctx->driver_data;
	struct nvme_fc_ctrl *ctrl = queue->ctrl;
	struct request *req;
	struct nvme_fc_fcp_op *op;

	req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag);
	if (!req)
		return 0;

	op = blk_mq_rq_to_pdu(req);

	if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) &&
		 (ctrl->lport->ops->poll_queue))
		ctrl->lport->ops->poll_queue(&ctrl->lport->localport,
						 queue->lldd_handle);

	return ((atomic_read(&op->state) != FCPOP_STATE_ACTIVE));
}
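
/*
 * Submit an AEN command unless the controller is being torn down
 * (FCCTRL_TERMIO), in which case new AENs must not be started.
 */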
static void
nvme_fc_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
	struct nvme_fc_fcp_op *aen_op;
	unsigned long flags;
	bool terminating = false;
	blk_status_t ret;

	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->flags & FCCTRL_TERMIO)
		terminating = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (terminating)
		return;

	aen_op = &ctrl->aen_ops[0];

	ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
					NVMEFC_FCP_NODATA);
	if (ret)
		dev_err(ctrl->ctrl.device,
			"failed async event work\n");
}
static void
__nvme_fc_final_op_cleanup(struct request *rq)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;

	atomic_set(&op->state, FCPOP_STATE_IDLE);
	op->flags &= ~(FCOP_FLAGS_TERMIO | FCOP_FLAGS_RELEASED |
			FCOP_FLAGS_COMPLETE);

	nvme_fc_unmap_data(ctrl, rq, op);
	nvme_complete_rq(rq);
	nvme_fc_ctrl_put(ctrl);
}

static void
nvme_fc_complete_rq(struct request *rq)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	unsigned long flags;
	bool completed = false;

	/*
	 * the core layer, on controller resets after calling
	 * nvme_shutdown_ctrl(), calls complete_rq without our
	 * calling blk_mq_complete_request(), thus there may still
	 * be live i/o outstanding with the LLDD. Means transport has
	 * to track complete calls vs fcpio_done calls to know what
	 * path to take on completes and dones.
	 */
	spin_lock_irqsave(&ctrl->lock, flags);
	if (op->flags & FCOP_FLAGS_COMPLETE)
		completed = true;
	else
		op->flags |= FCOP_FLAGS_RELEASED;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (completed)
		__nvme_fc_final_op_cleanup(rq);
}
/*
 * This routine is used by the transport when it needs to find active
 * io on a queue that is to be terminated. The transport uses
 * blk_mq_tagset_busy_iter() to find the busy requests, which then invoke
 * this routine to kill them one by one.
 *
 * As FC allocates FC exchange for each io, the transport must contact
 * the LLDD to terminate the exchange, thus releasing the FC exchange.
 * After terminating the exchange the LLDD will call the transport's
 * normal io done path for the request, but it will have an aborted
 * status. The done path will return the io request back to the block
 * layer with an error status.
 */
static void
nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
{
	struct nvme_ctrl *nctrl = data;
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	int status;

	if (!blk_mq_request_started(req))
		return;

	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->flags & FCCTRL_TERMIO) {
		ctrl->iocnt++;
		op->flags |= FCOP_FLAGS_TERMIO;
	}
	spin_unlock_irqrestore(&ctrl->lock, flags);

	status = __nvme_fc_abort_op(ctrl, op);
	if (status) {
		/*
		 * if __nvme_fc_abort_op failed the io wasn't
		 * active. Thus this call path is running in
		 * parallel to the io complete. Treat as non-error.
		 */

		/* back out the flags/counters */
		spin_lock_irqsave(&ctrl->lock, flags);
		if (ctrl->flags & FCCTRL_TERMIO)
			ctrl->iocnt--;
		op->flags &= ~FCOP_FLAGS_TERMIO;
		spin_unlock_irqrestore(&ctrl->lock, flags);
		return;
	}
}
static const struct blk_mq_ops nvme_fc_mq_ops = {
	.queue_rq	= nvme_fc_queue_rq,
	.complete	= nvme_fc_complete_rq,
	.init_request	= nvme_fc_init_request,
	.exit_request	= nvme_fc_exit_request,
	.init_hctx	= nvme_fc_init_hctx,
	.poll		= nvme_fc_poll,
	.timeout	= nvme_fc_timeout,
};
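
/*
 * First-time io queue setup: size the queue count against the
 * connect options, the online cpus, and the LLDD limit; allocate
 * the io tag set; then create and connect the hw queues.
 */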
static int
nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret;

	nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
				ctrl->lport->ops->max_hw_queues);
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"set_queue_count failed: %d\n", ret);
		return ret;
	}

	ctrl->ctrl.queue_count = nr_io_queues + 1;
	if (!nr_io_queues)
		return 0;

	nvme_fc_init_io_queues(ctrl);

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_fc_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
					(SG_CHUNK_SIZE *
						sizeof(struct scatterlist)) +
					ctrl->lport->ops->fcprqst_priv_sz;
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		return ret;

	ctrl->ctrl.tagset = &ctrl->tag_set;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tag_set;
	}

	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
	if (ret)
		goto out_cleanup_blk_queue;

	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
	if (ret)
		goto out_delete_hw_queues;

	return 0;

out_delete_hw_queues:
	nvme_fc_delete_hw_io_queues(ctrl);
out_cleanup_blk_queue:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
	blk_mq_free_tag_set(&ctrl->tag_set);
	nvme_fc_free_io_queues(ctrl);

	/* force put free routine to ignore io queues */
	ctrl->ctrl.tagset = NULL;

	return ret;
}
static int
nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret;

	nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
				ctrl->lport->ops->max_hw_queues);
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"set_queue_count failed: %d\n", ret);
		return ret;
	}

	ctrl->ctrl.queue_count = nr_io_queues + 1;
	/* check for io queues existing */
	if (ctrl->ctrl.queue_count == 1)
		return 0;

	nvme_fc_init_io_queues(ctrl);

	ret = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
	if (ret)
		goto out_free_io_queues;

	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
	if (ret)
		goto out_free_io_queues;

	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
	if (ret)
		goto out_delete_hw_queues;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);

	return 0;

out_delete_hw_queues:
	nvme_fc_delete_hw_io_queues(ctrl);
out_free_io_queues:
	nvme_fc_free_io_queues(ctrl);
	return ret;
}
static void
nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
{
	struct nvme_fc_lport *lport = rport->lport;

	atomic_inc(&lport->act_rport_cnt);
}

static void
nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
{
	struct nvme_fc_lport *lport = rport->lport;
	u32 cnt;

	cnt = atomic_dec_return(&lport->act_rport_cnt);
	if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
		lport->ops->localport_delete(&lport->localport);
}
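
/*
 * Track association activity up the chain: the first active
 * controller on an rport marks the rport active on its lport.
 * Returns 1 if an association was already active on this controller.
 */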
static int
nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	u32 cnt;

	if (ctrl->assoc_active)
		return 1;

	ctrl->assoc_active = true;
	cnt = atomic_inc_return(&rport->act_ctrl_cnt);
	if (cnt == 1)
		nvme_fc_rport_active_on_lport(rport);

	return 0;
}

static int
nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	struct nvme_fc_lport *lport = rport->lport;
	u32 cnt;

	/* ctrl->assoc_active=false will be set independently */

	cnt = atomic_dec_return(&rport->act_ctrl_cnt);
	if (cnt == 0) {
		if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
			lport->ops->remoteport_delete(&rport->remoteport);
		nvme_fc_rport_inactive_on_lport(rport);
	}

	return 0;
}
/*
 * This routine restarts the controller on the host side, and
 * on the link side, recreates the controller association.
 */
static int
nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	int ret;
	bool changed;

	++ctrl->ctrl.nr_reconnects;

	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ENODEV;

	if (nvme_fc_ctlr_active_on_rport(ctrl))
		return -ENOTUNIQ;

	/*
	 * Create the admin queue
	 */

	nvme_fc_init_queue(ctrl, 0);

	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
				NVME_AQ_BLK_MQ_DEPTH);
	if (ret)
		goto out_free_queue;

	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
				NVME_AQ_BLK_MQ_DEPTH,
				(NVME_AQ_BLK_MQ_DEPTH / 4));
	if (ret)
		goto out_delete_hw_queue;

	if (ctrl->ctrl.state != NVME_CTRL_NEW)
		blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

	ret = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (ret)
		goto out_disconnect_admin_queue;

	/*
	 * Check controller capabilities
	 *
	 * todo:- add code to check if ctrl attributes changed from
	 * prior connection values
	 */

	ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
	if (ret) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_disconnect_admin_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap) + 1, ctrl->ctrl.sqsize);

	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
	if (ret)
		goto out_disconnect_admin_queue;

	ctrl->ctrl.max_hw_sectors =
		(ctrl->lport->ops->max_sgl_segments - 1) << (PAGE_SHIFT - 9);

	ret = nvme_init_identify(&ctrl->ctrl);
	if (ret)
		goto out_disconnect_admin_queue;

	/* sanity checks */

	/* FC-NVME does not have other data in the capsule */
	if (ctrl->ctrl.icdoff) {
		dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
				ctrl->ctrl.icdoff);
		goto out_disconnect_admin_queue;
	}

	/* FC-NVME supports normal SGL Data Block Descriptors */

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, reducing "
			"to queue_size\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	ret = nvme_fc_init_aen_ops(ctrl);
	if (ret)
		goto out_term_aen_ops;

	/*
	 * Create the io queues
	 */

	if (ctrl->ctrl.queue_count > 1) {
		if (ctrl->ctrl.state == NVME_CTRL_NEW)
			ret = nvme_fc_create_io_queues(ctrl);
		else
			ret = nvme_fc_reinit_io_queues(ctrl);
		if (ret)
			goto out_term_aen_ops;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);

	ctrl->ctrl.nr_reconnects = 0;

	if (changed)
		nvme_start_ctrl(&ctrl->ctrl);

	return 0;	/* Success */

out_term_aen_ops:
	nvme_fc_term_aen_ops(ctrl);
out_disconnect_admin_queue:
	/* send a Disconnect(association) LS to fc-nvme target */
	nvme_fc_xmt_disconnect_assoc(ctrl);
out_delete_hw_queue:
	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
out_free_queue:
	nvme_fc_free_queue(&ctrl->queues[0]);
	ctrl->assoc_active = false;
	nvme_fc_ctlr_inactive_on_rport(ctrl);

	return ret;
}
/*
 * This routine stops operation of the controller on the host side.
 * On the host os stack side: Admin and IO queues are stopped,
 * outstanding ios on them terminated via FC ABTS.
 * On the link side: the association is terminated.
 */
static void
nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
{
	unsigned long flags;

	if (!ctrl->assoc_active)
		return;
	ctrl->assoc_active = false;

	spin_lock_irqsave(&ctrl->lock, flags);
	ctrl->flags |= FCCTRL_TERMIO;
	ctrl->iocnt = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	/*
	 * If io queues are present, stop them and terminate all outstanding
	 * ios on them. As FC allocates FC exchange for each io, the
	 * transport must contact the LLDD to terminate the exchange,
	 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
	 * to tell us which ios are busy and invoke a transport routine
	 * to kill them with the LLDD. After terminating the exchange
	 * the LLDD will call the transport's normal io done path, but it
	 * will have an aborted status. The done path will return the
	 * io requests back to the block layer as part of normal completions
	 * (but with error status).
	 */
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);
	}

	/*
	 * Other transports, which don't have link-level contexts bound
	 * to sqe's, would try to gracefully shutdown the controller by
	 * writing the registers for shutdown and polling (call
	 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
	 * just aborted and we will wait on those contexts, and given
	 * there was no indication of how live the controller is on the
	 * link, don't send more io to create more contexts for the
	 * shutdown. Let the controller fail via keepalive failure if
	 * it's still present.
	 */

	/*
	 * clean up the admin queue. Same thing as above.
	 * use blk_mq_tagset_busy_iter() and the transport routine to
	 * terminate the exchanges.
	 */
	if (ctrl->ctrl.state != NVME_CTRL_NEW)
		blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);

	/* kill the aens as they are a separate path */
	nvme_fc_abort_aen_ops(ctrl);

	/* wait for all io that had to be aborted */
	spin_lock_irq(&ctrl->lock);
	wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
	ctrl->flags &= ~FCCTRL_TERMIO;
	spin_unlock_irq(&ctrl->lock);

	nvme_fc_term_aen_ops(ctrl);

	/*
	 * send a Disconnect(association) LS to fc-nvme target
	 * Note: could have been sent at top of process, but
	 * cleaner on link traffic if after the aborts complete.
	 * Note: if association doesn't exist, association_id will be 0
	 */
	if (ctrl->association_id)
		nvme_fc_xmt_disconnect_assoc(ctrl);

	if (ctrl->ctrl.tagset) {
		nvme_fc_delete_hw_io_queues(ctrl);
		nvme_fc_free_io_queues(ctrl);
	}

	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
	nvme_fc_free_queue(&ctrl->queues[0]);

	nvme_fc_ctlr_inactive_on_rport(ctrl);
}
static void
nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

	cancel_delayed_work_sync(&ctrl->connect_work);
	/*
	 * kill the association on the link side. this will block
	 * waiting for io to terminate
	 */
	nvme_fc_delete_association(ctrl);
}
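
/*
 * Called when an association create/reconnect attempt fails: either
 * schedule another reconnect attempt (while connectivity or
 * dev_loss_tmo allows it) or give up and delete the controller.
 */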
static void
nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	struct nvme_fc_remote_port *portptr = &rport->remoteport;
	unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
	bool recon = true;

	if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING)
		return;

	if (portptr->port_state == FC_OBJSTATE_ONLINE)
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
			ctrl->cnum, status);
	else if (time_after_eq(jiffies, rport->dev_loss_end))
		recon = false;

	if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
		if (portptr->port_state == FC_OBJSTATE_ONLINE)
			dev_info(ctrl->ctrl.device,
				"NVME-FC{%d}: Reconnect attempt in %ld "
				"seconds\n",
				ctrl->cnum, recon_delay / HZ);
		else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
			recon_delay = rport->dev_loss_end - jiffies;

		queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
	} else {
		if (portptr->port_state == FC_OBJSTATE_ONLINE)
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Max reconnect attempts (%d) "
				"reached. Removing controller\n",
				ctrl->cnum, ctrl->ctrl.nr_reconnects);
		else
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: dev_loss_tmo (%d) expired "
				"while waiting for remoteport connectivity. "
				"Removing controller\n", ctrl->cnum,
				portptr->dev_loss_tmo);
		WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
	}
}
static void
nvme_fc_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);

	/* will block while waiting for io to terminate */
	nvme_fc_delete_association(ctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: error_recovery: Couldn't change state "
			"to RECONNECTING\n", ctrl->cnum);
		return;
	}

	if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
		ret = nvme_fc_create_association(ctrl);
	else
		ret = -ENOTCONN;

	if (ret)
		nvme_fc_reconnect_or_delete(ctrl, ret);
	else
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: controller reset complete\n",
			ctrl->cnum);
}
static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
	.name			= "fc",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_fc_nvme_ctrl_freed,
	.submit_async_event	= nvme_fc_submit_async_event,
	.delete_ctrl		= nvme_fc_delete_ctrl,
	.get_address		= nvmf_get_address,
	.reinit_request		= nvme_fc_reinit_request,
};

static void
nvme_fc_connect_ctrl_work(struct work_struct *work)
{
	int ret;

	struct nvme_fc_ctrl *ctrl =
			container_of(to_delayed_work(work),
				struct nvme_fc_ctrl, connect_work);

	ret = nvme_fc_create_association(ctrl);
	if (ret)
		nvme_fc_reconnect_or_delete(ctrl, ret);
	else
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: controller reconnect complete\n",
			ctrl->cnum);
}

static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
	.queue_rq	= nvme_fc_queue_rq,
	.complete	= nvme_fc_complete_rq,
	.init_request	= nvme_fc_init_request,
	.exit_request	= nvme_fc_exit_request,
	.init_hctx	= nvme_fc_init_admin_hctx,
	.timeout	= nvme_fc_timeout,
};
/*
 * Fails a controller request if it matches an existing controller
 * (association) with the same tuple:
 * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN>
 *
 * The ports don't need to be compared as they are intrinsically
 * already matched by the port pointers supplied.
 */
static bool
nvme_fc_existing_controller(struct nvme_fc_rport *rport,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&rport->lock, flags);
	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return found;
}
  2545. static struct nvme_ctrl *
  2546. nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
  2547. struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
  2548. {
  2549. struct nvme_fc_ctrl *ctrl;
  2550. unsigned long flags;
  2551. int ret, idx, retry;
  2552. if (!(rport->remoteport.port_role &
  2553. (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
  2554. ret = -EBADR;
  2555. goto out_fail;
  2556. }
  2557. if (!opts->duplicate_connect &&
  2558. nvme_fc_existing_controller(rport, opts)) {
  2559. ret = -EALREADY;
  2560. goto out_fail;
  2561. }
  2562. ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
  2563. if (!ctrl) {
  2564. ret = -ENOMEM;
  2565. goto out_fail;
  2566. }
  2567. idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
  2568. if (idx < 0) {
  2569. ret = -ENOSPC;
  2570. goto out_free_ctrl;
  2571. }
  2572. ctrl->ctrl.opts = opts;
  2573. INIT_LIST_HEAD(&ctrl->ctrl_list);
  2574. ctrl->lport = lport;
  2575. ctrl->rport = rport;
  2576. ctrl->dev = lport->dev;
  2577. ctrl->cnum = idx;
  2578. ctrl->assoc_active = false;
  2579. init_waitqueue_head(&ctrl->ioabort_wait);
  2580. get_device(ctrl->dev);
  2581. kref_init(&ctrl->ref);
  2582. INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
  2583. INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
  2584. spin_lock_init(&ctrl->lock);
  2585. /* io queue count */
  2586. ctrl->ctrl.queue_count = min_t(unsigned int,
  2587. opts->nr_io_queues,
  2588. lport->ops->max_hw_queues);
  2589. ctrl->ctrl.queue_count++; /* +1 for admin queue */
  2590. ctrl->ctrl.sqsize = opts->queue_size - 1;
  2591. ctrl->ctrl.kato = opts->kato;
  2592. ret = -ENOMEM;
  2593. ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
  2594. sizeof(struct nvme_fc_queue), GFP_KERNEL);
  2595. if (!ctrl->queues)
  2596. goto out_free_ida;
  2597. memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
  2598. ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
  2599. ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
  2600. ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
  2601. ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
  2602. ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
  2603. (SG_CHUNK_SIZE *
  2604. sizeof(struct scatterlist)) +
  2605. ctrl->lport->ops->fcprqst_priv_sz;
  2606. ctrl->admin_tag_set.driver_data = ctrl;
  2607. ctrl->admin_tag_set.nr_hw_queues = 1;
  2608. ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
  2609. ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;
  2610. ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
  2611. if (ret)
  2612. goto out_free_queues;
  2613. ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;
  2614. ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
  2615. if (IS_ERR(ctrl->ctrl.admin_q)) {
  2616. ret = PTR_ERR(ctrl->ctrl.admin_q);
  2617. goto out_free_admin_tag_set;
  2618. }
  2619. /*
  2620. * Would have been nice to init io queues tag set as well.
  2621. * However, we require interaction from the controller
  2622. * for max io queue count before we can do so.
  2623. * Defer this to the connect path.
  2624. */
  2625. ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
  2626. if (ret)
  2627. goto out_cleanup_admin_q;
  2628. /* at this point, teardown path changes to ref counting on nvme ctrl */
  2629. spin_lock_irqsave(&rport->lock, flags);
  2630. list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
  2631. spin_unlock_irqrestore(&rport->lock, flags);

        /*
         * It's possible that transactions used to create the association
         * may fail. Examples: CreateAssociation LS or CreateIOConnection
         * LS gets dropped/corrupted/fails; or a frame gets dropped or a
         * command times out for one of the actions to init the controller
         * (Connect, Get/Set_Property, Set_Features, etc). Many of these
         * transport errors (frame drop, LS failure) inherently must kill
         * the association. The transport is coded so that any command used
         * to create the association (prior to a LIVE state transition
         * while NEW or RECONNECTING) will fail if it completes in error or
         * times out.
         *
         * As such: as the connect request was most likely due to a
         * udev event that discovered the remote port, meaning there is
         * not an admin or script there to restart if the connect
         * request fails, retry the initial connection creation up to
         * three times before giving up and declaring failure.
         */
        for (retry = 0; retry < 3; retry++) {
                ret = nvme_fc_create_association(ctrl);
                if (!ret)
                        break;
        }
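
        /*
         * All retries exhausted. nvme_init_ctrl() has already succeeded,
         * so unwind through the controller refcount path below instead
         * of the goto-label cleanup used for earlier failures.
         */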
        if (ret) {
                /* couldn't schedule retry - fail out */
                dev_err(ctrl->ctrl.device,
                        "NVME-FC{%d}: Connect retry failed\n", ctrl->cnum);

                ctrl->ctrl.opts = NULL;

                /* initiate nvme ctrl ref counting teardown */
                nvme_uninit_ctrl(&ctrl->ctrl);
                nvme_put_ctrl(&ctrl->ctrl);

                /* Remove core ctrl ref. */
                nvme_put_ctrl(&ctrl->ctrl);

                /* as we're past the point where we transition to the ref
                 * counting teardown path, if we return a bad pointer here,
                 * the calling routine, thinking it's prior to the
                 * transition, will do an rport put. Since the teardown
                 * path also does a rport put, we do an extra get here so
                 * proper order/teardown happens.
                 */
                nvme_fc_rport_get(rport);

                if (ret > 0)
                        ret = -EIO;
                return ERR_PTR(ret);
        }

        nvme_get_ctrl(&ctrl->ctrl);

        dev_info(ctrl->ctrl.device,
                "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
                ctrl->cnum, ctrl->ctrl.opts->subsysnqn);

        return &ctrl->ctrl;

out_cleanup_admin_q:
        blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_admin_tag_set:
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_queues:
        kfree(ctrl->queues);
out_free_ida:
        put_device(ctrl->dev);
        ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
out_free_ctrl:
        kfree(ctrl);
out_fail:
        /* exit via here doesn't follow ctrl ref points */
        return ERR_PTR(ret);
}
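
/*
 * A parsed FC transport address: nn is the 64-bit worldwide node name
 * (WWNN), pn the 64-bit worldwide port name (WWPN).
 */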
struct nvmet_fc_traddr {
        u64     nn;
        u64     pn;
};

static int
__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
        u64 token64;

        if (match_u64(sstr, &token64))
                return -EINVAL;
        *val = token64;

        return 0;
}
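
/*
 * Accepted traddr layouts (values illustrative), with or without the
 * "0x" prefixes on the two 16-hex-digit names:
 *      nn-0x20000090fac7893f:pn-0x10000090fac7893f
 *      nn-20000090fac7893f:pn-10000090fac7893f
 */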
/*
 * This routine validates and extracts the WWN's from the TRADDR string.
 * As kernel parsers need the 0x to determine number base, universally
 * build string to parse with 0x prefix before parsing name strings.
 */
static int
nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
{
        char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
        substring_t wwn = { name, &name[sizeof(name)-1] };
        int nnoffset, pnoffset;

        /* validate that the string is in one of the 2 allowed formats */
        if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
                        !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
                        !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
                                "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
                nnoffset = NVME_FC_TRADDR_OXNNLEN;
                pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
                                                NVME_FC_TRADDR_OXNNLEN;
        } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
                        !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
                        !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
                                "pn-", NVME_FC_TRADDR_NNLEN))) {
                nnoffset = NVME_FC_TRADDR_NNLEN;
                pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
        } else
                goto out_einval;

        name[0] = '0';
        name[1] = 'x';
        name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;

        memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
        if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
                goto out_einval;

        memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
        if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
                goto out_einval;

        return 0;

out_einval:
        pr_warn("%s: bad traddr string\n", __func__);
        return -EINVAL;
}
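
/*
 * Parse the remote (traddr) and local (host_traddr) addresses, then
 * scan the registered localport/remoteport pairs for a match. On a
 * match, take a remoteport reference and hand off to
 * nvme_fc_init_ctrl(); the reference is dropped again if init fails.
 */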
static struct nvme_ctrl *
nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
{
        struct nvme_fc_lport *lport;
        struct nvme_fc_rport *rport;
        struct nvme_ctrl *ctrl;
        struct nvmet_fc_traddr laddr = { 0L, 0L };
        struct nvmet_fc_traddr raddr = { 0L, 0L };
        unsigned long flags;
        int ret;

        ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
        if (ret || !raddr.nn || !raddr.pn)
                return ERR_PTR(-EINVAL);

        ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
        if (ret || !laddr.nn || !laddr.pn)
                return ERR_PTR(-EINVAL);

        /* find the host and remote ports to connect together */
        spin_lock_irqsave(&nvme_fc_lock, flags);
        list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
                if (lport->localport.node_name != laddr.nn ||
                    lport->localport.port_name != laddr.pn)
                        continue;

                list_for_each_entry(rport, &lport->endp_list, endp_list) {
                        if (rport->remoteport.node_name != raddr.nn ||
                            rport->remoteport.port_name != raddr.pn)
                                continue;

                        /* if we fail to get a reference, fall through;
                         * the routine will return an error.
                         */
                        if (!nvme_fc_rport_get(rport))
                                break;

                        spin_unlock_irqrestore(&nvme_fc_lock, flags);

                        ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
                        if (IS_ERR(ctrl))
                                nvme_fc_rport_put(rport);
                        return ctrl;
                }
        }
        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        return ERR_PTR(-ENOENT);
}
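
/*
 * FC requires both a remote port address (traddr) and a local port
 * address (host_traddr); reconnect behavior may additionally be tuned
 * via the reconnect-delay and controller-loss-timeout options.
 */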
static struct nvmf_transport_ops nvme_fc_transport = {
        .name           = "fc",
        .required_opts  = NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
        .allowed_opts   = NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
        .create_ctrl    = nvme_fc_create_ctrl,
};

static int __init nvme_fc_init_module(void)
{
        int ret;

        /*
         * NOTE:
         * It is expected that in the future the kernel will combine
         * the FC-isms that are currently under scsi and now being
         * added to by NVME into a new standalone FC class. The SCSI
         * and NVME protocols and their devices would be under this
         * new FC class.
         *
         * As we need something to post FC-specific udev events to,
         * specifically for nvme probe events, start by creating the
         * new device class. When the new standalone FC class is
         * put in place, this code will move to a more generic
         * location for the class.
         */
        fc_class = class_create(THIS_MODULE, "fc");
        if (IS_ERR(fc_class)) {
                pr_err("couldn't register class fc\n");
                return PTR_ERR(fc_class);
        }

        /*
         * Create a device for the FC-centric udev events
         */
        fc_udev_device = device_create(fc_class, NULL, MKDEV(0, 0), NULL,
                                "fc_udev_device");
        if (IS_ERR(fc_udev_device)) {
                pr_err("couldn't create fc_udev device!\n");
                ret = PTR_ERR(fc_udev_device);
                goto out_destroy_class;
        }

        ret = nvmf_register_transport(&nvme_fc_transport);
        if (ret)
                goto out_destroy_device;

        return 0;

out_destroy_device:
        device_destroy(fc_class, MKDEV(0, 0));
out_destroy_class:
        class_destroy(fc_class);
        return ret;
}

static void __exit nvme_fc_exit_module(void)
{
        /* sanity check - all lports should be removed */
        if (!list_empty(&nvme_fc_lport_list))
                pr_warn("%s: localport list not empty\n", __func__);

        nvmf_unregister_transport(&nvme_fc_transport);

        ida_destroy(&nvme_fc_local_port_cnt);
        ida_destroy(&nvme_fc_ctrl_cnt);

        device_destroy(fc_class, MKDEV(0, 0));
        class_destroy(fc_class);
}

module_init(nvme_fc_init_module);
module_exit(nvme_fc_exit_module);

MODULE_LICENSE("GPL v2");