/*
 * Copyright (c) 2016 Avago Technologies. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>

#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


/* *************************** Data Structures/Defines ****************** */

enum nvme_fc_queue_flags {
	NVME_FC_Q_CONNECTED = 0,
	NVME_FC_Q_LIVE,
};

#define NVME_FC_DEFAULT_DEV_LOSS_TMO	60	/* seconds */

struct nvme_fc_queue {
	struct nvme_fc_ctrl	*ctrl;
	struct device		*dev;
	struct blk_mq_hw_ctx	*hctx;
	void			*lldd_handle;
	size_t			cmnd_capsule_len;
	u32			qnum;
	u32			rqcnt;
	u32			seqno;

	u64			connection_id;
	atomic_t		csn;

	unsigned long		flags;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

enum nvme_fcop_flags {
	FCOP_FLAGS_TERMIO	= (1 << 0),
	FCOP_FLAGS_AEN		= (1 << 1),
};

struct nvmefc_ls_req_op {
	struct nvmefc_ls_req	ls_req;

	struct nvme_fc_rport	*rport;
	struct nvme_fc_queue	*queue;
	struct request		*rq;
	u32			flags;

	int			ls_error;
	struct completion	ls_done;
	struct list_head	lsreq_list;	/* rport->ls_req_list */
	bool			req_queued;
};

enum nvme_fcpop_state {
	FCPOP_STATE_UNINIT	= 0,
	FCPOP_STATE_IDLE	= 1,
	FCPOP_STATE_ACTIVE	= 2,
	FCPOP_STATE_ABORTED	= 3,
	FCPOP_STATE_COMPLETE	= 4,
};

struct nvme_fc_fcp_op {
	struct nvme_request	nreq;		/*
						 * nvme/host/core.c
						 * requires this to be
						 * the 1st element in the
						 * private structure
						 * associated with the
						 * request.
						 */
	struct nvmefc_fcp_req	fcp_req;

	struct nvme_fc_ctrl	*ctrl;
	struct nvme_fc_queue	*queue;
	struct request		*rq;

	atomic_t		state;
	u32			flags;
	u32			rqno;
	u32			nents;

	struct nvme_fc_cmd_iu	cmd_iu;
	struct nvme_fc_ersp_iu	rsp_iu;
};

struct nvme_fc_lport {
	struct nvme_fc_local_port	localport;

	struct ida			endp_cnt;
	struct list_head		port_list;	/* nvme_fc_lport_list */
	struct list_head		endp_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_port_template	*ops;
	struct kref			ref;
	atomic_t			act_rport_cnt;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

struct nvme_fc_rport {
	struct nvme_fc_remote_port	remoteport;

	struct list_head		endp_list;	/* for lport->endp_list */
	struct list_head		ctrl_list;
	struct list_head		ls_req_list;
	struct list_head		disc_list;
	struct device			*dev;	/* physical device for dma */
	struct nvme_fc_lport		*lport;
	spinlock_t			lock;
	struct kref			ref;
	atomic_t			act_ctrl_cnt;
	unsigned long			dev_loss_end;
} __aligned(sizeof(u64));	/* alignment for other things alloc'd with */

enum nvme_fcctrl_flags {
	FCCTRL_TERMIO		= (1 << 0),
};

struct nvme_fc_ctrl {
	spinlock_t		lock;
	struct nvme_fc_queue	*queues;
	struct device		*dev;
	struct nvme_fc_lport	*lport;
	struct nvme_fc_rport	*rport;
	u32			cnum;

	bool			ioq_live;
	bool			assoc_active;
	u64			association_id;

	struct list_head	ctrl_list;	/* rport->ctrl_list */

	struct blk_mq_tag_set	admin_tag_set;
	struct blk_mq_tag_set	tag_set;

	struct delayed_work	connect_work;

	struct kref		ref;
	u32			flags;
	u32			iocnt;
	wait_queue_head_t	ioabort_wait;

	struct nvme_fc_fcp_op	aen_ops[NVME_NR_AEN_COMMANDS];

	struct nvme_ctrl	ctrl;
};

static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{
	return container_of(portptr, struct nvme_fc_lport, localport);
}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{
	return container_of(portptr, struct nvme_fc_rport, remoteport);
}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{
	return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{
	return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
}

/* *************************** Globals **************************** */

static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);

/*
 * These items are short-term. They will eventually be moved into
 * a generic FC class. See comments in module init.
 */
static struct device *fc_udev_device;

/* *********************** FC-NVME Port Management ************************ */

static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
			struct nvme_fc_queue *, unsigned int);

static void
nvme_fc_free_lport(struct kref *ref)
{
	struct nvme_fc_lport *lport =
		container_of(ref, struct nvme_fc_lport, ref);
	unsigned long flags;

	WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&lport->endp_list));

	/* remove from transport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&lport->port_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
	ida_destroy(&lport->endp_cnt);

	put_device(lport->dev);

	kfree(lport);
}

static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
	kref_put(&lport->ref, nvme_fc_free_lport);
}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
	return kref_get_unless_zero(&lport->ref);
}

static struct nvme_fc_lport *
nvme_fc_attach_to_unreg_lport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *ops,
			struct device *dev)
{
	struct nvme_fc_lport *lport;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != pinfo->node_name ||
		    lport->localport.port_name != pinfo->port_name)
			continue;

		if (lport->dev != dev) {
			lport = ERR_PTR(-EXDEV);
			goto out_done;
		}

		if (lport->localport.port_state != FC_OBJSTATE_DELETED) {
			lport = ERR_PTR(-EEXIST);
			goto out_done;
		}

		if (!nvme_fc_lport_get(lport)) {
			/*
			 * fails if ref cnt already 0. If so,
			 * act as if lport already deleted
			 */
			lport = NULL;
			goto out_done;
		}

		/* resume the lport */

		lport->ops = ops;
		lport->localport.port_role = pinfo->port_role;
		lport->localport.port_id = pinfo->port_id;
		lport->localport.port_state = FC_OBJSTATE_ONLINE;

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		return lport;
	}

	lport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return lport;
}

/**
 * nvme_fc_register_localport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              host FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvme_fc_local_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
			struct nvme_fc_port_template *template,
			struct device *dev,
			struct nvme_fc_local_port **portptr)
{
	struct nvme_fc_lport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->localport_delete || !template->remoteport_delete ||
	    !template->ls_req || !template->fcp_io ||
	    !template->ls_abort || !template->fcp_abort ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a localport that had been
	 * deregistered and in the process of waiting for all the
	 * references to fully be removed. If the references haven't
	 * expired, we can simply re-enable the localport. Remoteports
	 * and controller reconnections should resume naturally.
	 */
	newrec = nvme_fc_attach_to_unreg_lport(pinfo, template, dev);

	/* found an lport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_reghost_failed;

	/* found existing lport, which was resumed */
	} else if (newrec) {
		*portptr = &newrec->localport;
		return 0;
	}

	/* nothing found - allocate a new localport struct */

	newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_reghost_failed;
	}

	idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	INIT_LIST_HEAD(&newrec->port_list);
	INIT_LIST_HEAD(&newrec->endp_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_rport_cnt, 0);
	newrec->ops = template;
	newrec->dev = dev;
	ida_init(&newrec->endp_cnt);
	newrec->localport.private = &newrec[1];
	newrec->localport.node_name = pinfo->node_name;
	newrec->localport.port_name = pinfo->port_name;
	newrec->localport.port_role = pinfo->port_role;
	newrec->localport.port_id = pinfo->port_id;
	newrec->localport.port_state = FC_OBJSTATE_ONLINE;
	newrec->localport.port_num = idx;

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (dev)
		dma_set_seg_boundary(dev, template->dma_boundary);

	*portptr = &newrec->localport;
	return 0;

out_ida_put:
	ida_simple_remove(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_reghost_failed:
	*portptr = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);

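/*
 * Usage sketch (hypothetical, not part of this driver): roughly how an
 * LLDD might register a local port at probe time. All example_* names
 * and the sizing/WWN values are illustrative assumptions; the required
 * template entrypoints mirror the validation checks at the top of
 * nvme_fc_register_localport() above.
 *
 *	static struct nvme_fc_port_template example_fc_template = {
 *		.localport_delete	= example_localport_delete,
 *		.remoteport_delete	= example_remoteport_delete,
 *		.ls_req			= example_ls_req,
 *		.fcp_io			= example_fcp_io,
 *		.ls_abort		= example_ls_abort,
 *		.fcp_abort		= example_fcp_abort,
 *		.max_hw_queues		= 4,
 *		.max_sgl_segments	= 256,
 *		.max_dif_sgl_segments	= 256,
 *		.dma_boundary		= 0xFFFFFFFF,
 *		.local_priv_sz		= sizeof(struct example_lport_priv),
 *	};
 *
 *	static int example_nvme_probe(struct example_hba *hba)
 *	{
 *		struct nvme_fc_port_info pinfo = {
 *			.node_name	= hba->wwnn,
 *			.port_name	= hba->wwpn,
 *			.port_role	= FC_PORT_ROLE_NVME_INITIATOR,
 *			.port_id	= hba->port_id,
 *		};
 *
 *		return nvme_fc_register_localport(&pinfo,
 *				&example_fc_template, hba->dev,
 *				&hba->localport);
 *	}
 *
 * Teardown is symmetric: nvme_fc_unregister_localport(hba->localport);
 * the localport_delete() callback fires once the transport has dropped
 * its last reference.
 */
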
/**
 * nvme_fc_unregister_localport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVME host FC port.
 * @portptr: pointer to the (registered) local port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(portptr);
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&nvme_fc_lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	if (atomic_read(&lport->act_rport_cnt) == 0)
		lport->ops->localport_delete(&lport->localport);

	nvme_fc_lport_put(lport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);

/*
 * TRADDR strings, per FC-NVME, are fixed format:
 *     "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
 * udev event will only differ by prefix of what field is
 * being specified:
 *     "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
 * 19 + 43 + null_fudge = 64 characters
 */
#define FCNVME_TRADDR_LENGTH		64

static void
nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
		struct nvme_fc_rport *rport)
{
	char hostaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_HOST_TRADDR=...*/
	char tgtaddr[FCNVME_TRADDR_LENGTH];	/* NVMEFC_TRADDR=...*/
	char *envp[4] = { "FC_EVENT=nvmediscovery", hostaddr, tgtaddr, NULL };

	if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY))
		return;

	snprintf(hostaddr, sizeof(hostaddr),
		"NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
		lport->localport.node_name, lport->localport.port_name);
	snprintf(tgtaddr, sizeof(tgtaddr),
		"NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
		rport->remoteport.node_name, rport->remoteport.port_name);
	kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);
}

static void
nvme_fc_free_rport(struct kref *ref)
{
	struct nvme_fc_rport *rport =
		container_of(ref, struct nvme_fc_rport, ref);
	struct nvme_fc_lport *lport =
		localport_to_lport(rport->remoteport.localport);
	unsigned long flags;

	WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
	WARN_ON(!list_empty(&rport->ctrl_list));

	/* remove from lport list */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_del(&rport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	WARN_ON(!list_empty(&rport->disc_list));
	ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);

	kfree(rport);

	nvme_fc_lport_put(lport);
}

static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{
	kref_put(&rport->ref, nvme_fc_free_rport);
}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{
	return kref_get_unless_zero(&rport->ref);
}

static void
nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{
	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_CONNECTING:
		/*
		 * As all reconnects were suppressed, schedule a
		 * connect.
		 */
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: connectivity re-established. "
			"Attempting reconnect\n", ctrl->cnum);

		queue_delayed_work(nvme_wq, &ctrl->connect_work, 0);
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will naturally occur after the reset completes.
		 */
		break;

	default:
		/* no action to take - let it delete */
		break;
	}
}

static struct nvme_fc_rport *
nvme_fc_attach_to_suspended_rport(struct nvme_fc_lport *lport,
				struct nvme_fc_port_info *pinfo)
{
	struct nvme_fc_rport *rport;
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	spin_lock_irqsave(&nvme_fc_lock, flags);

	list_for_each_entry(rport, &lport->endp_list, endp_list) {
		if (rport->remoteport.node_name != pinfo->node_name ||
		    rport->remoteport.port_name != pinfo->port_name)
			continue;

		if (!nvme_fc_rport_get(rport)) {
			rport = ERR_PTR(-ENOLCK);
			goto out_done;
		}

		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		spin_lock_irqsave(&rport->lock, flags);

		/* has it been unregistered? */
		if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) {
			/* means lldd called us twice */
			spin_unlock_irqrestore(&rport->lock, flags);
			nvme_fc_rport_put(rport);
			return ERR_PTR(-ESTALE);
		}

		rport->remoteport.port_role = pinfo->port_role;
		rport->remoteport.port_id = pinfo->port_id;
		rport->remoteport.port_state = FC_OBJSTATE_ONLINE;
		rport->dev_loss_end = 0;

		/*
		 * kick off a reconnect attempt on all associations to the
		 * remote port. A successful reconnect will resume i/o.
		 */
		list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
			nvme_fc_resume_controller(ctrl);

		spin_unlock_irqrestore(&rport->lock, flags);

		return rport;
	}

	rport = NULL;

out_done:
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return rport;
}

static inline void
__nvme_fc_set_dev_loss_tmo(struct nvme_fc_rport *rport,
			struct nvme_fc_port_info *pinfo)
{
	if (pinfo->dev_loss_tmo)
		rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo;
	else
		rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO;
}

/**
 * nvme_fc_register_remoteport - transport entry point called by an
 *                              LLDD to register the existence of a NVME
 *                              subsystem FC port on its fabric.
 * @localport: pointer to the (registered) local port that the remote
 *             subsystem port is connected to.
 * @pinfo:     pointer to information about the port to be registered
 * @portptr:   pointer to a remote port pointer. Upon success, the routine
 *             will allocate a nvme_fc_remote_port structure and place its
 *             address in the remote port pointer. Upon failure, remote port
 *             pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
				struct nvme_fc_port_info *pinfo,
				struct nvme_fc_remote_port **portptr)
{
	struct nvme_fc_lport *lport = localport_to_lport(localport);
	struct nvme_fc_rport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!nvme_fc_lport_get(lport)) {
		ret = -ESHUTDOWN;
		goto out_reghost_failed;
	}

	/*
	 * look to see if there is already a remoteport that is waiting
	 * for a reconnect (within dev_loss_tmo) with the same WWN's.
	 * If so, transition to it and reconnect.
	 */
	newrec = nvme_fc_attach_to_suspended_rport(lport, pinfo);

	/* found an rport, but something about its state is bad */
	if (IS_ERR(newrec)) {
		ret = PTR_ERR(newrec);
		goto out_lport_put;

	/* found existing rport, which was resumed */
	} else if (newrec) {
		nvme_fc_lport_put(lport);
		__nvme_fc_set_dev_loss_tmo(newrec, pinfo);
		nvme_fc_signal_discovery_scan(lport, newrec);
		*portptr = &newrec->remoteport;
		return 0;
	}

	/* nothing found - allocate a new remoteport struct */

	newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_lport_put;
	}

	idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_kfree_rport;
	}

	INIT_LIST_HEAD(&newrec->endp_list);
	INIT_LIST_HEAD(&newrec->ctrl_list);
	INIT_LIST_HEAD(&newrec->ls_req_list);
	INIT_LIST_HEAD(&newrec->disc_list);
	kref_init(&newrec->ref);
	atomic_set(&newrec->act_ctrl_cnt, 0);
	spin_lock_init(&newrec->lock);
	newrec->remoteport.localport = &lport->localport;
	newrec->dev = lport->dev;
	newrec->lport = lport;
	newrec->remoteport.private = &newrec[1];
	newrec->remoteport.port_role = pinfo->port_role;
	newrec->remoteport.node_name = pinfo->node_name;
	newrec->remoteport.port_name = pinfo->port_name;
	newrec->remoteport.port_id = pinfo->port_id;
	newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
	newrec->remoteport.port_num = idx;
	__nvme_fc_set_dev_loss_tmo(newrec, pinfo);

	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_add_tail(&newrec->endp_list, &lport->endp_list);
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	nvme_fc_signal_discovery_scan(lport, newrec);

	*portptr = &newrec->remoteport;
	return 0;

out_kfree_rport:
	kfree(newrec);
out_lport_put:
	nvme_fc_lport_put(lport);
out_reghost_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);

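/*
 * Usage sketch (hypothetical): how an LLDD might report a newly
 * discovered NVME-capable target port. The tgt_* and hba fields are
 * illustrative placeholders; a dev_loss_tmo of 0 in the port info
 * means "use the default" (NVME_FC_DEFAULT_DEV_LOSS_TMO), per
 * __nvme_fc_set_dev_loss_tmo() above.
 *
 *	struct nvme_fc_port_info rinfo = {
 *		.node_name	= tgt_wwnn,
 *		.port_name	= tgt_wwpn,
 *		.port_role	= FC_PORT_ROLE_NVME_TARGET |
 *				  FC_PORT_ROLE_NVME_DISCOVERY,
 *		.port_id	= tgt_port_id,
 *		.dev_loss_tmo	= 0,
 *	};
 *	struct nvme_fc_remote_port *rport;
 *	int ret;
 *
 *	ret = nvme_fc_register_remoteport(hba->localport, &rinfo, &rport);
 *
 * On fabric loss the LLDD calls nvme_fc_unregister_remoteport(rport);
 * if the same WWNs reappear within dev_loss_tmo, re-registering
 * resumes the suspended rport and its controller associations.
 */
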
static int
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
	struct nvmefc_ls_req_op *lsop;
	unsigned long flags;

restart:
	spin_lock_irqsave(&rport->lock, flags);

	list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
		if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
			lsop->flags |= FCOP_FLAGS_TERMIO;
			spin_unlock_irqrestore(&rport->lock, flags);
			rport->lport->ops->ls_abort(&rport->lport->localport,
						&rport->remoteport,
						&lsop->ls_req);
			goto restart;
		}
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}

static void
nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
{
	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: controller connectivity lost. Awaiting "
		"Reconnect", ctrl->cnum);

	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
	case NVME_CTRL_LIVE:
		/*
		 * Schedule a controller reset. The reset will terminate the
		 * association and schedule the reconnect timer. Reconnects
		 * will be attempted until either the ctlr_loss_tmo
		 * (max_retries * connect_delay) expires or the remoteport's
		 * dev_loss_tmo expires.
		 */
		if (nvme_reset_ctrl(&ctrl->ctrl)) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Couldn't schedule reset.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		}
		break;

	case NVME_CTRL_CONNECTING:
		/*
		 * The association has already been terminated and the
		 * controller is attempting reconnects. No need to do anything
		 * further. Reconnects will be attempted until either the
		 * ctlr_loss_tmo (max_retries * connect_delay) expires or the
		 * remoteport's dev_loss_tmo expires.
		 */
		break;

	case NVME_CTRL_RESETTING:
		/*
		 * Controller is already in the process of terminating the
		 * association. No need to do anything further. The reconnect
		 * step will kick in naturally after the association is
		 * terminated.
		 */
		break;

	case NVME_CTRL_DELETING:
	default:
		/* no action to take - let it delete */
		break;
	}
}

/**
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVME subsystem FC port.
 * @portptr: pointer to the (registered) remote port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;

	if (!portptr)
		return -EINVAL;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}
	portptr->port_state = FC_OBJSTATE_DELETED;

	rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ);

	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		/* if dev_loss_tmo==0, dev loss is immediate */
		if (!portptr->dev_loss_tmo) {
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: controller connectivity lost.\n",
				ctrl->cnum);
			nvme_delete_ctrl(&ctrl->ctrl);
		} else
			nvme_fc_ctrl_connectivity_loss(ctrl);
	}

	spin_unlock_irqrestore(&rport->lock, flags);

	nvme_fc_abort_lsops(rport);

	if (atomic_read(&rport->act_ctrl_cnt) == 0)
		rport->lport->ops->remoteport_delete(portptr);

	/*
	 * release the reference. The rport is torn down once all
	 * controllers go away, which should only occur after
	 * dev_loss_tmo expires.
	 */
	nvme_fc_rport_put(rport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);

/**
 * nvme_fc_rescan_remoteport - transport entry point called by an
 *                              LLDD to request a nvme device rescan.
 * @remoteport: pointer to the (registered) remote port that is to be
 *              rescanned.
 *
 * Returns: N/A
 */
void
nvme_fc_rescan_remoteport(struct nvme_fc_remote_port *remoteport)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(remoteport);

	nvme_fc_signal_discovery_scan(rport->lport, rport);
}
EXPORT_SYMBOL_GPL(nvme_fc_rescan_remoteport);

int
nvme_fc_set_remoteport_devloss(struct nvme_fc_remote_port *portptr,
			u32 dev_loss_tmo)
{
	struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (portptr->port_state != FC_OBJSTATE_ONLINE) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return -EINVAL;
	}

	/* a dev_loss_tmo of 0 (immediate) is allowed to be set */
	rport->remoteport.dev_loss_tmo = dev_loss_tmo;

	spin_unlock_irqrestore(&rport->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_set_remoteport_devloss);

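/*
 * Usage sketch (hypothetical): an LLDD propagating a user-requested
 * dev_loss_tmo change, e.g. from its fc_host sysfs handler. The hba
 * names are placeholders. Only an ONLINE remoteport may be updated,
 * and 0 requests immediate device loss on unregistration.
 *
 *	if (nvme_fc_set_remoteport_devloss(rport, new_tmo))
 *		dev_warn(hba->dev,
 *			"rport not online; dev_loss_tmo unchanged\n");
 */
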
/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLDDs will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * For simple mappings (those that return just a dma address), we'll
 * noop them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
	enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}

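/*
 * Usage sketch for the wrappers above (illustrative only): callers pass
 * the port's dma device; when that device is NULL (fcloop), the map
 * returns 0 and fc_dma_mapping_error() reports success, so the same
 * call sequence works for both real and loopback transports.
 *
 *	dma_addr_t dma;
 *
 *	dma = fc_dma_map_single(rport->dev, buf, len, DMA_TO_DEVICE);
 *	if (fc_dma_mapping_error(rport->dev, dma))
 *		return -EFAULT;
 *	...
 *	fc_dma_unmap_single(rport->dev, dma, len, DMA_TO_DEVICE);
 */
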
/* *********************** FC-NVME LS Handling **************************** */

static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);

static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
{
	struct nvme_fc_rport *rport = lsop->rport;
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;

	spin_lock_irqsave(&rport->lock, flags);

	if (!lsop->req_queued) {
		spin_unlock_irqrestore(&rport->lock, flags);
		return;
	}

	list_del(&lsop->lsreq_list);

	lsop->req_queued = false;

	spin_unlock_irqrestore(&rport->lock, flags);

	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);

	nvme_fc_rport_put(rport);
}

static int
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	unsigned long flags;
	int ret = 0;

	if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ECONNREFUSED;

	if (!nvme_fc_rport_get(rport))
		return -ESHUTDOWN;

	lsreq->done = done;
	lsop->rport = rport;
	lsop->req_queued = false;
	INIT_LIST_HEAD(&lsop->lsreq_list);
	init_completion(&lsop->ls_done);

	lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
				  lsreq->rqstlen + lsreq->rsplen,
				  DMA_BIDIRECTIONAL);
	if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
		ret = -EFAULT;
		goto out_putrport;
	}
	lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

	spin_lock_irqsave(&rport->lock, flags);

	list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);

	lsop->req_queued = true;

	spin_unlock_irqrestore(&rport->lock, flags);

	ret = rport->lport->ops->ls_req(&rport->lport->localport,
					&rport->remoteport, lsreq);
	if (ret)
		goto out_unlink;

	return 0;

out_unlink:
	lsop->ls_error = ret;
	spin_lock_irqsave(&rport->lock, flags);
	lsop->req_queued = false;
	list_del(&lsop->lsreq_list);
	spin_unlock_irqrestore(&rport->lock, flags);
	fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
				  (lsreq->rqstlen + lsreq->rsplen),
				  DMA_BIDIRECTIONAL);
out_putrport:
	nvme_fc_rport_put(rport);

	return ret;
}

static void
nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	lsop->ls_error = status;
	complete(&lsop->ls_done);
}

static int
nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
{
	struct nvmefc_ls_req *lsreq = &lsop->ls_req;
	struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
	int ret;

	ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);
	if (!ret) {
		/*
		 * No timeout/not interruptible as we need the struct
		 * to exist until the lldd calls us back. Thus mandate
		 * waiting until the lldd calls back; the lldd is
		 * responsible for the timeout action.
		 */
		wait_for_completion(&lsop->ls_done);

		__nvme_fc_finish_ls_req(lsop);

		ret = lsop->ls_error;
	}

	if (ret)
		return ret;

	/* ACC or RJT payload ? */
	if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
		return -ENXIO;

	return 0;
}

static int
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
		struct nvmefc_ls_req_op *lsop,
		void (*done)(struct nvmefc_ls_req *req, int status))
{
	/* don't wait for completion */

	return __nvme_fc_send_ls_req(rport, lsop, done);
}

/* Validation Error indexes into the string table below */
enum {
	VERR_NO_ERROR		= 0,
	VERR_LSACC		= 1,
	VERR_LSDESC_RQST	= 2,
	VERR_LSDESC_RQST_LEN	= 3,
	VERR_ASSOC_ID		= 4,
	VERR_ASSOC_ID_LEN	= 5,
	VERR_CONN_ID		= 6,
	VERR_CONN_ID_LEN	= 7,
	VERR_CR_ASSOC		= 8,
	VERR_CR_ASSOC_ACC_LEN	= 9,
	VERR_CR_CONN		= 10,
	VERR_CR_CONN_ACC_LEN	= 11,
	VERR_DISCONN		= 12,
	VERR_DISCONN_ACC_LEN	= 13,
};

static char *validation_errors[] = {
	"OK",
	"Not LS_ACC",
	"Not LSDESC_RQST",
	"Bad LSDESC_RQST Length",
	"Not Association ID",
	"Bad Association ID Length",
	"Not Connection ID",
	"Bad Connection ID Length",
	"Not CR_ASSOC Rqst",
	"Bad CR_ASSOC ACC Length",
	"Not CR_CONN Rqst",
	"Bad CR_CONN ACC Length",
	"Not Disconnect Rqst",
	"Bad Disconnect ACC Length",
};

static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
	struct fcnvme_ls_cr_assoc_acc *assoc_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
	if (!lsop) {
		ret = -ENOMEM;
		goto out_no_memory;
	}
	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];

	assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
	assoc_rqst->desc_list_len =
			cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
	assoc_rqst->assoc_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

	assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1);
	/* Linux supports only Dynamic controllers */
	assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
	uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id);
	strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
		min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
	strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
		min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));

	lsop->queue = queue;
	lsreq->rqstaddr = assoc_rqst;
	lsreq->rqstlen = sizeof(*assoc_rqst);
	lsreq->rspaddr = assoc_acc;
	lsreq->rsplen = sizeof(*assoc_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (assoc_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)))
		fcret = VERR_CR_ASSOC_ACC_LEN;
	else if (assoc_acc->hdr.rqst.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (assoc_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
		fcret = VERR_CR_ASSOC;
	else if (assoc_acc->associd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		fcret = VERR_ASSOC_ID;
	else if (assoc_acc->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		fcret = VERR_ASSOC_ID_LEN;
	else if (assoc_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (assoc_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d connect failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		ctrl->association_id =
			be64_to_cpu(assoc_acc->associd.association_id);
		queue->connection_id =
			be64_to_cpu(assoc_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect admin queue failed (%d).\n",
			queue->qnum, ret);
	return ret;
}

static int
nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
			u16 qsize, u16 ersp_ratio)
{
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	struct fcnvme_ls_cr_conn_rqst *conn_rqst;
	struct fcnvme_ls_cr_conn_acc *conn_acc;
	int ret, fcret = 0;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
	if (!lsop) {
		ret = -ENOMEM;
		goto out_no_memory;
	}
	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];

	conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
	conn_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));

	conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	conn_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
	conn_rqst->connect_cmd.desc_tag =
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
	conn_rqst->connect_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
	conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
	conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
	conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1);

	lsop->queue = queue;
	lsreq->rqstaddr = conn_rqst;
	lsreq->rqstlen = sizeof(*conn_rqst);
	lsreq->rspaddr = conn_acc;
	lsreq->rsplen = sizeof(*conn_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
	if (ret)
		goto out_free_buffer;

	/* process connect LS completion */

	/* validate the ACC response */
	if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
		fcret = VERR_LSACC;
	else if (conn_acc->hdr.desc_list_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
		fcret = VERR_CR_CONN_ACC_LEN;
	else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
		fcret = VERR_LSDESC_RQST;
	else if (conn_acc->hdr.rqst.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
		fcret = VERR_LSDESC_RQST_LEN;
	else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
		fcret = VERR_CR_CONN;
	else if (conn_acc->connectid.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CONN_ID))
		fcret = VERR_CONN_ID;
	else if (conn_acc->connectid.desc_len !=
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
		fcret = VERR_CONN_ID_LEN;

	if (fcret) {
		ret = -EBADF;
		dev_err(ctrl->dev,
			"q %d connect failed: %s\n",
			queue->qnum, validation_errors[fcret]);
	} else {
		queue->connection_id =
			be64_to_cpu(conn_acc->connectid.connection_id);
		set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
	}

out_free_buffer:
	kfree(lsop);
out_no_memory:
	if (ret)
		dev_err(ctrl->dev,
			"queue %d connect command failed (%d).\n",
			queue->qnum, ret);
	return ret;
}

static void
nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
	struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

	__nvme_fc_finish_ls_req(lsop);

	/* fc-nvme initiator doesn't care about success or failure of cmd */

	kfree(lsop);
}

/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association. Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. E.g. things are torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme initiator is such that its
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme target, so you may never get a
 * response even if you tried. As such, the action of this routine
 * is to asynchronously send the LS, ignore any results of the LS, and
 * continue on with terminating the association. If the fc-nvme target
 * is present and receives the LS, it too can tear down.
 */
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{
	struct fcnvme_ls_disconnect_rqst *discon_rqst;
	struct fcnvme_ls_disconnect_acc *discon_acc;
	struct nvmefc_ls_req_op *lsop;
	struct nvmefc_ls_req *lsreq;
	int ret;

	lsop = kzalloc((sizeof(*lsop) +
			 ctrl->lport->ops->lsrqst_priv_sz +
			 sizeof(*discon_rqst) + sizeof(*discon_acc)),
			GFP_KERNEL);
	if (!lsop)
		/* couldn't send it... too bad */
		return;

	lsreq = &lsop->ls_req;

	lsreq->private = (void *)&lsop[1];
	discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
			(lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
	discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];

	discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
	discon_rqst->desc_list_len = cpu_to_be32(
				sizeof(struct fcnvme_lsdesc_assoc_id) +
				sizeof(struct fcnvme_lsdesc_disconn_cmd));

	discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	discon_rqst->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));

	discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);

	discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
						FCNVME_LSDESC_DISCONN_CMD);
	discon_rqst->discon_cmd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_disconn_cmd));
	discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
	discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);

	lsreq->rqstaddr = discon_rqst;
	lsreq->rqstlen = sizeof(*discon_rqst);
	lsreq->rspaddr = discon_acc;
	lsreq->rsplen = sizeof(*discon_acc);
	lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

	ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
				nvme_fc_disconnect_assoc_done);
	if (ret)
		kfree(lsop);

	/* the only meaningful part of terminating the association */
	ctrl->association_id = 0;
}

/* *********************** NVME Ctrl Routines **************************** */

static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);

static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op)
{
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);
	fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
				sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_UNINIT);
}

static void
nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);

	__nvme_fc_exit_request(set->driver_data, op);
}

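/*
 * Atomically claim an op for abort.  Only an op still in ACTIVE state
 * is handed to the LLDD's fcp_abort; any other state is restored and
 * -ECANCELED returned.  While a TERMIO teardown is in progress, iocnt
 * counts the aborted ops so the teardown can wait for their completions.
 */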
static int
__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
{
	unsigned long flags;
	int opstate;

	spin_lock_irqsave(&ctrl->lock, flags);
	opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
	if (opstate != FCPOP_STATE_ACTIVE)
		atomic_set(&op->state, opstate);
	else if (ctrl->flags & FCCTRL_TERMIO)
		ctrl->iocnt++;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (opstate != FCPOP_STATE_ACTIVE)
		return -ECANCELED;

	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					op->queue->lldd_handle,
					&op->fcp_req);

	return 0;
}

static void
nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
	int i;

	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
		__nvme_fc_abort_op(ctrl, aen_op);
}

static inline void
__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_fcp_op *op, int opstate)
{
	unsigned long flags;

	if (opstate == FCPOP_STATE_ABORTED) {
		spin_lock_irqsave(&ctrl->lock, flags);
		if (ctrl->flags & FCCTRL_TERMIO) {
			if (!--ctrl->iocnt)
				wake_up(&ctrl->ioabort_wait);
		}
		spin_unlock_irqrestore(&ctrl->lock, flags);
	}
}

static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
	struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
	struct request *rq = op->rq;
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	struct nvme_fc_queue *queue = op->queue;
	struct nvme_completion *cqe = &op->rsp_iu.cqe;
	struct nvme_command *sqe = &op->cmd_iu.sqe;
	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
	union nvme_result result;
	bool terminate_assoc = true;
	int opstate;

	/*
	 * WARNING:
	 * The current linux implementation of a nvme controller
	 * allocates a single tag set for all io queues and sizes
	 * the io queues to fully hold all possible tags. Thus, the
	 * implementation does not reference or care about the sqhd
	 * value as it never needs to use the sqhd/sqtail pointers
	 * for submission pacing.
	 *
	 * This affects the FC-NVME implementation in two ways:
	 * 1) As the value doesn't matter, we don't need to waste
	 *    cycles extracting it from ERSPs and stamping it in the
	 *    cases where the transport fabricates CQEs on successful
	 *    completions.
	 * 2) The FC-NVME implementation requires that delivery of
	 *    ERSP completions are to go back to the nvme layer in order
	 *    relative to the rsn, such that the sqhd value will always
	 *    be "in order" for the nvme layer. As the nvme layer in
	 *    linux doesn't care about sqhd, there's no need to return
	 *    them in order.
	 *
	 * Additionally:
	 * As the core nvme layer in linux currently does not look at
	 * every field in the cqe - in cases where the FC transport must
	 * fabricate a CQE, the following fields will not be set as they
	 * are not referenced:
	 *      cqe.sqid,  cqe.sqhd,  cqe.command_id
	 *
	 * Failure or error of an individual i/o, in a transport
	 * detected fashion unrelated to the nvme completion status,
	 * can potentially cause the initiator and target sides to get out
	 * of sync on SQ head/tail (aka outstanding io count allowed).
	 * Per FC-NVME spec, failure of an individual command requires
	 * the connection to be terminated, which in turn requires the
	 * association to be terminated.
	 */

	opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);

	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
				sizeof(op->rsp_iu), DMA_FROM_DEVICE);

	if (opstate == FCPOP_STATE_ABORTED)
		status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
	else if (freq->status)
		status = cpu_to_le16(NVME_SC_INTERNAL << 1);

	/*
	 * For the linux implementation, if we have an unsuccessful
	 * status, the blk-mq layer can typically be called with the
	 * non-zero status and the content of the cqe isn't important.
	 */
	if (status)
		goto done;

	/*
	 * command completed successfully relative to the wire
	 * protocol. However, validate anything received and
	 * extract the status and result from the cqe (create it
	 * where necessary).
	 */

	switch (freq->rcv_rsplen) {

	case 0:
	case NVME_FC_SIZEOF_ZEROS_RSP:
		/*
		 * No response payload, or 12 bytes of payload (which
		 * should all be zeros), is considered successful; the
		 * transport fabricates a CQE with no payload.
		 */
		if (freq->transferred_length !=
			be32_to_cpu(op->cmd_iu.data_len)) {
			status = cpu_to_le16(NVME_SC_INTERNAL << 1);
			goto done;
		}
		result.u64 = 0;
		break;

	case sizeof(struct nvme_fc_ersp_iu):
		/*
		 * The ERSP IU contains a full completion with CQE.
		 * Validate ERSP IU and look at cqe.
		 */
		if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
					(freq->rcv_rsplen / 4) ||
			     be32_to_cpu(op->rsp_iu.xfrd_len) !=
					freq->transferred_length ||
			     op->rsp_iu.status_code ||
			     sqe->common.command_id != cqe->command_id)) {
			status = cpu_to_le16(NVME_SC_INTERNAL << 1);
			goto done;
		}
		result = cqe->result;
		status = cqe->status;
		break;

	default:
		status = cpu_to_le16(NVME_SC_INTERNAL << 1);
		goto done;
	}

	terminate_assoc = false;

done:
	if (op->flags & FCOP_FLAGS_AEN) {
		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
		atomic_set(&op->state, FCPOP_STATE_IDLE);
		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
		nvme_fc_ctrl_put(ctrl);
		goto check_error;
	}

	__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
	nvme_end_request(rq, status, result);

check_error:
	if (terminate_assoc)
		nvme_fc_error_recovery(ctrl, "transport detected io error");
}

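/*
 * An fcp op is carved out of the blk-mq request PDU: the op itself is
 * followed in memory by its inline scatterlist and then the LLDD's
 * per-request private area (see the cmd_size computation in the tag
 * set setup below).
 */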
static int
__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
		struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
		struct request *rq, u32 rqno)
{
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	int ret = 0;

	memset(op, 0, sizeof(*op));
	op->fcp_req.cmdaddr = &op->cmd_iu;
	op->fcp_req.cmdlen = sizeof(op->cmd_iu);
	op->fcp_req.rspaddr = &op->rsp_iu;
	op->fcp_req.rsplen = sizeof(op->rsp_iu);
	op->fcp_req.done = nvme_fc_fcpio_done;
	op->fcp_req.first_sgl = (struct scatterlist *)&op[1];
	op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
	op->ctrl = ctrl;
	op->queue = queue;
	op->rq = rq;
	op->rqno = rqno;

	cmdiu->scsi_id = NVME_CMD_SCSI_ID;
	cmdiu->fc_id = NVME_CMD_FC_ID;
	cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));

	op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
				&op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - cmdiu dma mapping failed.\n");
		ret = -EFAULT;
		goto out_on_error;
	}

	op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
				&op->rsp_iu, sizeof(op->rsp_iu),
				DMA_FROM_DEVICE);
	if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
		dev_err(ctrl->dev,
			"FCP Op failed - rspiu dma mapping failed.\n");
		ret = -EFAULT;
	}

	atomic_set(&op->state, FCPOP_STATE_IDLE);
out_on_error:
	return ret;
}

static int
nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_fc_ctrl *ctrl = set->driver_data;
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
	struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];

	nvme_req(rq)->ctrl = &ctrl->ctrl;
	return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
}

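/*
 * AEN ops are not backed by blk-mq requests: they are initialized with
 * a NULL request, use command ids past NVME_AQ_BLK_MQ_DEPTH so they
 * cannot collide with admin tags, and carry their own allocation of
 * the LLDD's per-request private area.
 */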
static int
nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	struct nvme_fc_cmd_iu *cmdiu;
	struct nvme_command *sqe;
	void *private;
	int i, ret;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
						GFP_KERNEL);
		if (!private)
			return -ENOMEM;

		cmdiu = &aen_op->cmd_iu;
		sqe = &cmdiu->sqe;
		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
				aen_op, (struct request *)NULL,
				(NVME_AQ_BLK_MQ_DEPTH + i));
		if (ret) {
			kfree(private);
			return ret;
		}

		aen_op->flags = FCOP_FLAGS_AEN;
		aen_op->fcp_req.first_sgl = NULL; /* no sg list */
		aen_op->fcp_req.private = private;

		memset(sqe, 0, sizeof(*sqe));
		sqe->common.opcode = nvme_admin_async_event;
		/* Note: core layer may overwrite the sqe.command_id value */
		sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i;
	}
	return 0;
}

static void
nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	int i;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
		if (!aen_op->fcp_req.private)
			continue;

		__nvme_fc_exit_request(ctrl, aen_op);

		kfree(aen_op->fcp_req.private);
		aen_op->fcp_req.private = NULL;
	}
}

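/*
 * blk-mq hardware context setup: hctx i of the io tag set maps to
 * ctrl->queues[i + 1], since ctrl->queues[0] is reserved for the
 * admin queue.
 */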
static inline void
__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
		unsigned int qidx)
{
	struct nvme_fc_queue *queue = &ctrl->queues[qidx];

	hctx->driver_data = queue;
	queue->hctx = hctx;
}

static int
nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);

	return 0;
}

static int
nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx);

	return 0;
}

static void
nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx)
{
	struct nvme_fc_queue *queue;

	queue = &ctrl->queues[idx];
	memset(queue, 0, sizeof(*queue));
	queue->ctrl = ctrl;
	queue->qnum = idx;
	atomic_set(&queue->csn, 1);
	queue->dev = ctrl->dev;

	if (idx > 0)
		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command);

	/*
	 * We considered whether to allocate buffers for all SQEs
	 * and CQEs and dma map them - mapping their respective entries
	 * into the request structures (kernel vm addr and dma address)
	 * so the driver could use the buffers/mappings directly.
	 * It only makes sense if the LLDD would use them for its
	 * messaging api. It's very unlikely most adapter APIs would use
	 * a native NVME sqe/cqe. It would be more reasonable if FC-NVME
	 * IU payload structures were used instead.
	 */
}

/*
 * This routine terminates a queue at the transport level.
 * The transport has already ensured that all outstanding ios on
 * the queue have been terminated.
 * In the FC-NVME model, a Disconnect LS request terminates the
 * queue's connection; termination of the admin queue also
 * terminates the association at the target. See the note below
 * on what the current implementation actually sends.
 */
static void
nvme_fc_free_queue(struct nvme_fc_queue *queue)
{
	if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
		return;

	clear_bit(NVME_FC_Q_LIVE, &queue->flags);
	/*
	 * Current implementation never disconnects a single queue.
	 * It always terminates a whole association. So there is never
	 * a disconnect(queue) LS sent to the target.
	 */

	queue->connection_id = 0;
	atomic_set(&queue->csn, 1);
}

static void
__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx)
{
	if (ctrl->lport->ops->delete_queue)
		ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
				queue->lldd_handle);
	queue->lldd_handle = NULL;
}

static void
nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_fc_free_queue(&ctrl->queues[i]);
}

static int
__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
{
	int ret = 0;

	queue->lldd_handle = NULL;
	if (ctrl->lport->ops->create_queue)
		ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
				qidx, qsize, &queue->lldd_handle);

	return ret;
}

static void
nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1];
	int i;

	for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--)
		__nvme_fc_delete_hw_queue(ctrl, queue, i);
}

static int
nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	struct nvme_fc_queue *queue = &ctrl->queues[1];
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) {
		ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
		if (ret)
			goto delete_queues;
	}

	return 0;

delete_queues:
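	/* unwind only the io hw queues created above; queue 0 is the admin hw queue */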
	for (; i > 0; i--)
		__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
	return ret;
}

static int
nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	int i, ret = 0;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
					(qsize / 5));
		if (ret)
			break;
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			break;

		set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
	}

	return ret;
}

static void
nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++)
		nvme_fc_init_queue(ctrl, i);
}

static void
nvme_fc_ctrl_free(struct kref *ref)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(ref, struct nvme_fc_ctrl, ref);
	unsigned long flags;

	if (ctrl->ctrl.tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}

	/* remove from rport list */
	spin_lock_irqsave(&ctrl->rport->lock, flags);
	list_del(&ctrl->ctrl_list);
	spin_unlock_irqrestore(&ctrl->rport->lock, flags);

	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);

	kfree(ctrl->queues);

	put_device(ctrl->dev);
	nvme_fc_rport_put(ctrl->rport);

	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
	if (ctrl->ctrl.opts)
		nvmf_free_options(ctrl->ctrl.opts);
	kfree(ctrl);
}

static void
nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvme_fc_ctrl_free);
}

static int
nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
{
	return kref_get_unless_zero(&ctrl->ref);
}

/*
 * All accesses from nvme core layer done - can now free the
 * controller. Called after last nvme_put_ctrl() call
 */
static void
nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

	WARN_ON(nctrl != &ctrl->ctrl);

	nvme_fc_ctrl_put(ctrl);
}

static void
nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
{
	/* only proceed if in LIVE state - e.g. on first error */
	if (ctrl->ctrl.state != NVME_CTRL_LIVE)
		return;

	dev_warn(ctrl->ctrl.device,
		"NVME-FC{%d}: transport association error detected: %s\n",
		ctrl->cnum, errmsg);
	dev_warn(ctrl->ctrl.device,
		"NVME-FC{%d}: resetting controller\n", ctrl->cnum);

	nvme_reset_ctrl(&ctrl->ctrl);
}

static enum blk_eh_timer_return
nvme_fc_timeout(struct request *rq, bool reserved)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;

	/*
	 * we can't individually ABTS an io without affecting the queue,
	 * thus killing the queue, and thus the association.
	 * So resolve by performing a controller reset, which will stop
	 * the host/io stack, terminate the association on the link,
	 * and recreate an association on the link.
	 */
	nvme_fc_error_recovery(ctrl, "io timeout error");

	/*
	 * the io abort has been initiated. Restart the request timer;
	 * the abort completion will complete the io shortly. This
	 * avoids a synchronous wait while the abort finishes.
	 */
	return BLK_EH_RESET_TIMER;
}

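/*
 * Build a (possibly chained) scatterlist for the request's data and
 * DMA-map it for the LLDD.  freq->sg_cnt == 0 denotes a command with
 * no data payload.
 */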
static int
nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	enum dma_data_direction dir;
	int ret;

	freq->sg_cnt = 0;

	if (!blk_rq_payload_bytes(rq))
		return 0;

	freq->sg_table.sgl = freq->first_sgl;
	ret = sg_alloc_table_chained(&freq->sg_table,
			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
	if (ret)
		return -ENOMEM;

	op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
	WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
	dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
				op->nents, dir);
	if (unlikely(freq->sg_cnt <= 0)) {
		sg_free_table_chained(&freq->sg_table, true);
		freq->sg_cnt = 0;
		return -EFAULT;
	}

	/*
	 * TODO: blk_integrity_rq(rq)  for DIF
	 */
	return 0;
}

static void
nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{
	struct nvmefc_fcp_req *freq = &op->fcp_req;

	if (!freq->sg_cnt)
		return;

	fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
				((rq_data_dir(rq) == WRITE) ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE));

	nvme_cleanup_cmd(rq);

	sg_free_table_chained(&freq->sg_table, true);

	freq->sg_cnt = 0;
}

/*
 * In FC, the queue is a logical thing. At transport connect, the target
 * creates its "queue" and returns a handle that is to be given to the
 * target whenever it posts something to the corresponding SQ.  When an
 * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
 * command contained within the SQE, an io, and assigns a FC exchange
 * to it. The SQE and the associated SQ handle are sent in the initial
 * CMD IU sent on the exchange. All transfers relative to the io occur
 * as part of the exchange.  The CQE is the last thing for the io,
 * which is transferred (explicitly or implicitly) with the RSP IU
 * sent on the exchange. After the CQE is received, the FC exchange is
 * terminated and the Exchange may be used on a different io.
 *
 * The transport to LLDD api has the transport making a request for a
 * new fcp io request to the LLDD. The LLDD then allocates a FC exchange
 * resource and transfers the command. The LLDD will then process all
 * steps to complete the io. Upon completion, the transport done routine
 * is called.
 *
 * So - while the operation is outstanding to the LLDD, there is a link
 * level FC exchange resource that is also outstanding. This must be
 * considered in all cleanup operations.
 */
static blk_status_t
nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
	struct nvme_fc_fcp_op *op, u32 data_len,
	enum nvmefc_fcp_datadir io_dir)
{
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	struct nvme_command *sqe = &cmdiu->sqe;
	u32 csn;
	int ret, opstate;

	/*
	 * before attempting to send the io, check to see if we believe
	 * the target device is present
	 */
	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return BLK_STS_RESOURCE;

	if (!nvme_fc_ctrl_get(ctrl))
		return BLK_STS_IOERR;

	/* format the FC-NVME CMD IU and fcp_req */
	cmdiu->connection_id = cpu_to_be64(queue->connection_id);
	csn = atomic_inc_return(&queue->csn);
	cmdiu->csn = cpu_to_be32(csn);
	cmdiu->data_len = cpu_to_be32(data_len);
	switch (io_dir) {
	case NVMEFC_FCP_WRITE:
		cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
		break;
	case NVMEFC_FCP_READ:
		cmdiu->flags = FCNVME_CMD_FLAGS_READ;
		break;
	case NVMEFC_FCP_NODATA:
		cmdiu->flags = 0;
		break;
	}
	op->fcp_req.payload_length = data_len;
	op->fcp_req.io_dir = io_dir;
	op->fcp_req.transferred_length = 0;
	op->fcp_req.rcv_rsplen = 0;
	op->fcp_req.status = NVME_SC_SUCCESS;
	op->fcp_req.sqid = cpu_to_le16(queue->qnum);

	/*
	 * validate per fabric rules, set fields mandated by fabric spec
	 * as well as those by FC-NVME spec.
	 */
	WARN_ON_ONCE(sqe->common.metadata);
	sqe->common.flags |= NVME_CMD_SGL_METABUF;

	/*
	 * format SQE DPTR field per FC-NVME rules:
	 *    type=0x5     Transport SGL Data Block Descriptor
	 *    subtype=0xA  Transport-specific value
	 *    address=0
	 *    length=length of the data series
	 */
	sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) |
					NVME_SGL_FMT_TRANSPORT_A;
	sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
	sqe->rw.dptr.sgl.addr = 0;

	if (!(op->flags & FCOP_FLAGS_AEN)) {
		ret = nvme_fc_map_data(ctrl, op->rq, op);
		if (ret < 0) {
			nvme_cleanup_cmd(op->rq);
			nvme_fc_ctrl_put(ctrl);
			if (ret == -ENOMEM || ret == -EAGAIN)
				return BLK_STS_RESOURCE;
			return BLK_STS_IOERR;
		}
	}

	fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
				  sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_ACTIVE);

	if (!(op->flags & FCOP_FLAGS_AEN))
		blk_mq_start_request(op->rq);

	ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					queue->lldd_handle, &op->fcp_req);

	if (ret) {
		opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);

		if (!(op->flags & FCOP_FLAGS_AEN))
			nvme_fc_unmap_data(ctrl, op->rq, op);

		nvme_fc_ctrl_put(ctrl);

		if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE &&
				ret != -EBUSY)
			return BLK_STS_IOERR;

		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}

static blk_status_t
nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_fc_queue *queue = hctx->driver_data;
	struct nvme_fc_ctrl *ctrl = queue->ctrl;
	struct request *rq = bd->rq;
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	struct nvme_command *sqe = &cmdiu->sqe;
	enum nvmefc_fcp_datadir io_dir;
	bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags);
	u32 data_len;
	blk_status_t ret;

	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
	    !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);

	ret = nvme_setup_cmd(ns, rq, sqe);
	if (ret)
		return ret;

	data_len = blk_rq_payload_bytes(rq);
	if (data_len)
		io_dir = ((rq_data_dir(rq) == WRITE) ?
					NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
	else
		io_dir = NVMEFC_FCP_NODATA;

	return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
}

static struct blk_mq_tags *
nvme_fc_tagset(struct nvme_fc_queue *queue)
{
	if (queue->qnum == 0)
		return queue->ctrl->admin_tag_set.tags[queue->qnum];

	return queue->ctrl->tag_set.tags[queue->qnum - 1];
}

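/*
 * blk-mq poll hook: if the op is still active, kick the LLDD's
 * optional poll_queue handler, then report whether the op has left
 * the ACTIVE state (i.e. has completed).
 */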
static int
nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
	struct nvme_fc_queue *queue = hctx->driver_data;
	struct nvme_fc_ctrl *ctrl = queue->ctrl;
	struct request *req;
	struct nvme_fc_fcp_op *op;

	req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag);
	if (!req)
		return 0;

	op = blk_mq_rq_to_pdu(req);

	if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) &&
		 (ctrl->lport->ops->poll_queue))
		ctrl->lport->ops->poll_queue(&ctrl->lport->localport,
						 queue->lldd_handle);

	return ((atomic_read(&op->state) != FCPOP_STATE_ACTIVE));
}

static void
nvme_fc_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
	struct nvme_fc_fcp_op *aen_op;
	unsigned long flags;
	bool terminating = false;
	blk_status_t ret;

	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->flags & FCCTRL_TERMIO)
		terminating = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (terminating)
		return;

	aen_op = &ctrl->aen_ops[0];

	ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
					NVMEFC_FCP_NODATA);
	if (ret)
		dev_err(ctrl->ctrl.device,
			"failed async event work\n");
}

static void
nvme_fc_complete_rq(struct request *rq)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;

	atomic_set(&op->state, FCPOP_STATE_IDLE);

	nvme_fc_unmap_data(ctrl, rq, op);
	nvme_complete_rq(rq);
	nvme_fc_ctrl_put(ctrl);
}

/*
 * This routine is used by the transport when it needs to find active
 * io on a queue that is to be terminated. The transport uses
 * blk_mq_tagset_busy_iter() to find the busy requests, which then invoke
 * this routine to kill them one by one.
 *
 * As FC allocates a FC exchange for each io, the transport must contact
 * the LLDD to terminate the exchange, thus releasing the FC exchange.
 * After terminating the exchange the LLDD will call the transport's
 * normal io done path for the request, but it will have an aborted
 * status. The done path will return the io request back to the block
 * layer with an error status.
 */
static void
nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
{
	struct nvme_ctrl *nctrl = data;
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);

	__nvme_fc_abort_op(ctrl, op);
}

static const struct blk_mq_ops nvme_fc_mq_ops = {
	.queue_rq	= nvme_fc_queue_rq,
	.complete	= nvme_fc_complete_rq,
	.init_request	= nvme_fc_init_request,
	.exit_request	= nvme_fc_exit_request,
	.init_hctx	= nvme_fc_init_hctx,
	.poll		= nvme_fc_poll,
	.timeout	= nvme_fc_timeout,
};

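/*
 * The tag set's cmd_size reserves, per request: the fcp op itself, an
 * inline scatterlist of SG_CHUNK_SIZE entries, and the LLDD's private
 * area - matching the first_sgl/private pointers set up in
 * __nvme_fc_init_request().
 */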
static int
nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret;

	nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
				ctrl->lport->ops->max_hw_queues);
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"set_queue_count failed: %d\n", ret);
		return ret;
	}

	ctrl->ctrl.queue_count = nr_io_queues + 1;
	if (!nr_io_queues)
		return 0;

	nvme_fc_init_io_queues(ctrl);

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_fc_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
					(SG_CHUNK_SIZE *
						sizeof(struct scatterlist)) +
					ctrl->lport->ops->fcprqst_priv_sz;
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		return ret;

	ctrl->ctrl.tagset = &ctrl->tag_set;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tag_set;
	}

	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_cleanup_blk_queue;

	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_delete_hw_queues;

	ctrl->ioq_live = true;

	return 0;

out_delete_hw_queues:
	nvme_fc_delete_hw_io_queues(ctrl);
out_cleanup_blk_queue:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
	blk_mq_free_tag_set(&ctrl->tag_set);
	nvme_fc_free_io_queues(ctrl);

	/* force put free routine to ignore io queues */
	ctrl->ctrl.tagset = NULL;

	return ret;
}

static int
nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret;

	nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
				ctrl->lport->ops->max_hw_queues);
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"set_queue_count failed: %d\n", ret);
		return ret;
	}

	ctrl->ctrl.queue_count = nr_io_queues + 1;
	/* check for io queues existing */
	if (ctrl->ctrl.queue_count == 1)
		return 0;

	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_free_io_queues;

	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
	if (ret)
		goto out_delete_hw_queues;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);

	return 0;

out_delete_hw_queues:
	nvme_fc_delete_hw_io_queues(ctrl);
out_free_io_queues:
	nvme_fc_free_io_queues(ctrl);
	return ret;
}

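/*
 * Activity accounting cascade: the first active association on an
 * rport marks the rport active on its lport; when the last active
 * association/rport goes away and the corresponding port object has
 * already been deleted, the LLDD's delete callback is finally invoked.
 */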
static void
nvme_fc_rport_active_on_lport(struct nvme_fc_rport *rport)
{
	struct nvme_fc_lport *lport = rport->lport;

	atomic_inc(&lport->act_rport_cnt);
}

static void
nvme_fc_rport_inactive_on_lport(struct nvme_fc_rport *rport)
{
	struct nvme_fc_lport *lport = rport->lport;
	u32 cnt;

	cnt = atomic_dec_return(&lport->act_rport_cnt);
	if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
		lport->ops->localport_delete(&lport->localport);
}

static int
nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	u32 cnt;

	if (ctrl->assoc_active)
		return 1;

	ctrl->assoc_active = true;
	cnt = atomic_inc_return(&rport->act_ctrl_cnt);
	if (cnt == 1)
		nvme_fc_rport_active_on_lport(rport);

	return 0;
}

static int
nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	struct nvme_fc_lport *lport = rport->lport;
	u32 cnt;

	/* ctrl->assoc_active=false will be set independently */

	cnt = atomic_dec_return(&rport->act_ctrl_cnt);
	if (cnt == 0) {
		if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
			lport->ops->remoteport_delete(&rport->remoteport);
		nvme_fc_rport_inactive_on_lport(rport);
	}

	return 0;
}

/*
 * This routine restarts the controller on the host side, and
 * on the link side, recreates the controller association.
 */
static int
nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	int ret;
	bool changed;

	++ctrl->ctrl.nr_reconnects;

	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return -ENODEV;

	if (nvme_fc_ctlr_active_on_rport(ctrl))
		return -ENOTUNIQ;

	/*
	 * Create the admin queue
	 */

	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
				NVME_AQ_DEPTH);
	if (ret)
		goto out_free_queue;

	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
				NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
	if (ret)
		goto out_delete_hw_queue;

	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

	ret = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (ret)
		goto out_disconnect_admin_queue;

	set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);

	/*
	 * Check controller capabilities
	 *
	 * todo:- add code to check if ctrl attributes changed from
	 * prior connection values
	 */

	ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
	if (ret) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_disconnect_admin_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize);

	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
	if (ret)
		goto out_disconnect_admin_queue;

	ctrl->ctrl.max_hw_sectors =
		(ctrl->lport->ops->max_sgl_segments - 1) << (PAGE_SHIFT - 9);

	ret = nvme_init_identify(&ctrl->ctrl);
	if (ret)
		goto out_disconnect_admin_queue;

	/* sanity checks */

	/* FC-NVME does not have other data in the capsule */
	if (ctrl->ctrl.icdoff) {
		dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
				ctrl->ctrl.icdoff);
		ret = -EINVAL;
		goto out_disconnect_admin_queue;
	}

	/* FC-NVME supports normal SGL Data Block Descriptors */

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, reducing "
			"to maxcmd\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
		/* warn if sqsize is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl sqsize %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.sqsize + 1);
		opts->queue_size = ctrl->ctrl.sqsize + 1;
	}

	ret = nvme_fc_init_aen_ops(ctrl);
	if (ret)
		goto out_term_aen_ops;

	/*
	 * Create the io queues
	 */

	if (ctrl->ctrl.queue_count > 1) {
		if (!ctrl->ioq_live)
			ret = nvme_fc_create_io_queues(ctrl);
		else
			ret = nvme_fc_recreate_io_queues(ctrl);
		if (ret)
			goto out_term_aen_ops;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);

	ctrl->ctrl.nr_reconnects = 0;

	if (changed)
		nvme_start_ctrl(&ctrl->ctrl);

	return 0;	/* Success */

out_term_aen_ops:
	nvme_fc_term_aen_ops(ctrl);
out_disconnect_admin_queue:
	/* send a Disconnect(association) LS to fc-nvme target */
	nvme_fc_xmt_disconnect_assoc(ctrl);
out_delete_hw_queue:
	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
out_free_queue:
	nvme_fc_free_queue(&ctrl->queues[0]);
	ctrl->assoc_active = false;
	nvme_fc_ctlr_inactive_on_rport(ctrl);

	return ret;
}

/*
 * This routine stops operation of the controller on the host side.
 * On the host os stack side: Admin and IO queues are stopped,
 *   outstanding ios on them terminated via FC ABTS.
 * On the link side: the association is terminated.
 */
static void
nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
{
	unsigned long flags;

	if (!ctrl->assoc_active)
		return;
	ctrl->assoc_active = false;

	spin_lock_irqsave(&ctrl->lock, flags);
	ctrl->flags |= FCCTRL_TERMIO;
	ctrl->iocnt = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	/*
	 * If io queues are present, stop them and terminate all outstanding
	 * ios on them. As FC allocates a FC exchange for each io, the
	 * transport must contact the LLDD to terminate the exchange,
	 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
	 * to tell us which ios are busy and invoke a transport routine
	 * to kill them with the LLDD. After terminating the exchange
	 * the LLDD will call the transport's normal io done path, but it
	 * will have an aborted status. The done path will return the
	 * io requests back to the block layer as part of normal completions
	 * (but with error status).
	 */
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);
	}

	/*
	 * Other transports, which don't have link-level contexts bound
	 * to sqe's, would try to gracefully shutdown the controller by
	 * writing the registers for shutdown and polling (call
	 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
	 * just aborted and we will wait on those contexts, and given
	 * there was no indication of how live the controller is on the
	 * link, don't send more io to create more contexts for the
	 * shutdown. Let the controller fail via keepalive failure if
	 * it's still present.
	 */

	/*
	 * clean up the admin queue. Same thing as above.
	 * use blk_mq_tagset_busy_iter() and the transport routine to
	 * terminate the exchanges.
	 */
	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);

	/* kill the aens as they are a separate path */
	nvme_fc_abort_aen_ops(ctrl);

	/* wait for all io that had to be aborted */
	spin_lock_irq(&ctrl->lock);
	wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
	ctrl->flags &= ~FCCTRL_TERMIO;
	spin_unlock_irq(&ctrl->lock);

	nvme_fc_term_aen_ops(ctrl);

	/*
	 * send a Disconnect(association) LS to fc-nvme target
	 * Note: could have been sent at top of process, but
	 * cleaner on link traffic if after the aborts complete.
	 * Note: if association doesn't exist, association_id will be 0
	 */
	if (ctrl->association_id)
		nvme_fc_xmt_disconnect_assoc(ctrl);

	if (ctrl->ctrl.tagset) {
		nvme_fc_delete_hw_io_queues(ctrl);
		nvme_fc_free_io_queues(ctrl);
	}

	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
	nvme_fc_free_queue(&ctrl->queues[0]);

	/* re-enable the admin_q so anything new can fast fail */
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

	/* resume the io queues so that things will fast fail */
	nvme_start_queues(&ctrl->ctrl);

	nvme_fc_ctlr_inactive_on_rport(ctrl);
}

static void
nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

	cancel_delayed_work_sync(&ctrl->connect_work);
	/*
	 * kill the association on the link side.  this will block
	 * waiting for io to terminate
	 */
	nvme_fc_delete_association(ctrl);
}

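/*
 * Decide whether to schedule another reconnect attempt or delete the
 * controller: keep retrying while the remote port is online (or until
 * dev_loss_tmo expires on an offline port), clamping the next attempt
 * so it does not overshoot the dev_loss deadline.
 */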
static void
nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
{
	struct nvme_fc_rport *rport = ctrl->rport;
	struct nvme_fc_remote_port *portptr = &rport->remoteport;
	unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
	bool recon = true;

	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
		return;

	if (portptr->port_state == FC_OBJSTATE_ONLINE)
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
			ctrl->cnum, status);
	else if (time_after_eq(jiffies, rport->dev_loss_end))
		recon = false;

	if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
		if (portptr->port_state == FC_OBJSTATE_ONLINE)
			dev_info(ctrl->ctrl.device,
				"NVME-FC{%d}: Reconnect attempt in %ld "
				"seconds\n",
				ctrl->cnum, recon_delay / HZ);
		else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
			recon_delay = rport->dev_loss_end - jiffies;

		queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
	} else {
		if (portptr->port_state == FC_OBJSTATE_ONLINE)
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Max reconnect attempts (%d) "
				"reached.\n",
				ctrl->cnum, ctrl->ctrl.nr_reconnects);
		else
			dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: dev_loss_tmo (%d) expired "
				"while waiting for remoteport connectivity.\n",
				ctrl->cnum, portptr->dev_loss_tmo);
		WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
	}
}

static void
nvme_fc_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);

	/* will block while waiting for io to terminate */
	nvme_fc_delete_association(ctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: error_recovery: Couldn't change state "
			"to CONNECTING\n", ctrl->cnum);
		return;
	}

	if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
		ret = nvme_fc_create_association(ctrl);
	else
		ret = -ENOTCONN;

	if (ret)
		nvme_fc_reconnect_or_delete(ctrl, ret);
	else
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: controller reset complete\n",
			ctrl->cnum);
}

static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
	.name			= "fc",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_fc_nvme_ctrl_freed,
	.submit_async_event	= nvme_fc_submit_async_event,
	.delete_ctrl		= nvme_fc_delete_ctrl,
	.get_address		= nvmf_get_address,
};

static void
nvme_fc_connect_ctrl_work(struct work_struct *work)
{
	int ret;

	struct nvme_fc_ctrl *ctrl =
			container_of(to_delayed_work(work),
				struct nvme_fc_ctrl, connect_work);

	ret = nvme_fc_create_association(ctrl);
	if (ret)
		nvme_fc_reconnect_or_delete(ctrl, ret);
	else
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: controller connect complete\n",
			ctrl->cnum);
}

static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
	.queue_rq	= nvme_fc_queue_rq,
	.complete	= nvme_fc_complete_rq,
	.init_request	= nvme_fc_init_request,
	.exit_request	= nvme_fc_exit_request,
	.init_hctx	= nvme_fc_init_admin_hctx,
	.timeout	= nvme_fc_timeout,
};

/*
 * Fails a controller request if it matches an existing controller
 * (association) with the same tuple:
 * <Host NQN, Host ID, local FC port, remote FC port, SUBSYS NQN>
 *
 * The ports don't need to be compared as they are intrinsically
 * already matched by the port pointers supplied.
 */
static bool
nvme_fc_existing_controller(struct nvme_fc_rport *rport,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&rport->lock, flags);
	list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
		found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
		if (found)
			break;
	}
	spin_unlock_irqrestore(&rport->lock, flags);

	return found;
}

static struct nvme_ctrl *
nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
	struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
{
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;
	int ret, idx;

	if (!(rport->remoteport.port_role &
	    (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
		ret = -EBADR;
		goto out_fail;
	}

	if (!opts->duplicate_connect &&
	    nvme_fc_existing_controller(rport, opts)) {
		ret = -EALREADY;
		goto out_fail;
	}

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl) {
		ret = -ENOMEM;
		goto out_fail;
	}

	idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_free_ctrl;
	}

	ctrl->ctrl.opts = opts;
	ctrl->ctrl.nr_reconnects = 0;
	INIT_LIST_HEAD(&ctrl->ctrl_list);
	ctrl->lport = lport;
	ctrl->rport = rport;
	ctrl->dev = lport->dev;
	ctrl->cnum = idx;
	ctrl->ioq_live = false;
	ctrl->assoc_active = false;
	init_waitqueue_head(&ctrl->ioabort_wait);

	get_device(ctrl->dev);
	kref_init(&ctrl->ref);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
	INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
	spin_lock_init(&ctrl->lock);

	/* io queue count */
	ctrl->ctrl.queue_count = min_t(unsigned int,
				opts->nr_io_queues,
				lport->ops->max_hw_queues);
	ctrl->ctrl.queue_count++;	/* +1 for admin queue */

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;
	ctrl->ctrl.cntlid = 0xffff;

	ret = -ENOMEM;
	ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
				sizeof(struct nvme_fc_queue), GFP_KERNEL);
	if (!ctrl->queues)
		goto out_free_ida;

	nvme_fc_init_queue(ctrl, 0);

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
					(SG_CHUNK_SIZE *
						sizeof(struct scatterlist)) +
					ctrl->lport->ops->fcprqst_priv_sz;
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (ret)
		goto out_free_queues;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		ret = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_admin_tag_set;
	}

	/*
	 * Would have been nice to init io queues tag set as well.
	 * However, we require interaction from the controller
	 * for max io queue count before we can do so.
	 * Defer this to the connect path.
	 */

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
	if (ret)
		goto out_cleanup_admin_q;

	/* at this point, teardown path changes to ref counting on nvme ctrl */

	spin_lock_irqsave(&rport->lock, flags);
	list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
	    !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
		goto fail_ctrl;
	}

	nvme_get_ctrl(&ctrl->ctrl);

	if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
		nvme_put_ctrl(&ctrl->ctrl);
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: failed to schedule initial connect\n",
			ctrl->cnum);
		goto fail_ctrl;
	}

	flush_delayed_work(&ctrl->connect_work);

	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
		ctrl->cnum, ctrl->ctrl.opts->subsysnqn);

	return &ctrl->ctrl;

fail_ctrl:
	nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
	cancel_work_sync(&ctrl->ctrl.reset_work);
	cancel_delayed_work_sync(&ctrl->connect_work);

	ctrl->ctrl.opts = NULL;

	/* initiate nvme ctrl ref counting teardown */
	nvme_uninit_ctrl(&ctrl->ctrl);

	/* Remove core ctrl ref. */
	nvme_put_ctrl(&ctrl->ctrl);

	/* as we're past the point where we transition to the ref
	 * counting teardown path, if we return a bad pointer here,
	 * the calling routine, thinking it's prior to the
	 * transition, will do an rport put. Since the teardown
	 * path also does a rport put, we do an extra get here
	 * so proper order/teardown happens.
	 */
	nvme_fc_rport_get(rport);

	return ERR_PTR(-EIO);

out_cleanup_admin_q:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_admin_tag_set:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_queues:
	kfree(ctrl->queues);
out_free_ida:
	put_device(ctrl->dev);
	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
out_free_ctrl:
	kfree(ctrl);
out_fail:
	/* exit via here doesn't follow ctlr ref points */
	return ERR_PTR(ret);
}

struct nvmet_fc_traddr {
	u64	nn;
	u64	pn;
};

static int
__nvme_fc_parse_u64(substring_t *sstr, u64 *val)
{
	u64 token64;

	if (match_u64(sstr, &token64))
		return -EINVAL;
	*val = token64;

	return 0;
}

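/*
 * Accepted traddr formats, for illustration (the example WWNs are
 * arbitrary):
 *   "nn-0x20000090fa942e15:pn-0x10000090fa942e15"   (with 0x prefixes)
 *   "nn-20000090fa942e15:pn-10000090fa942e15"       (bare hex)
 */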
  2604. /*
  2605. * This routine validates and extracts the WWN's from the TRADDR string.
  2606. * As kernel parsers need the 0x to determine number base, universally
  2607. * build string to parse with 0x prefix before parsing name strings.
  2608. */
static int
nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
{
	char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
	substring_t wwn = { name, &name[sizeof(name)-1] };
	int nnoffset, pnoffset;

	/* validate if string is one of the 2 allowed formats */
	if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
			!strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
				"pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
		nnoffset = NVME_FC_TRADDR_OXNNLEN;
		pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
						NVME_FC_TRADDR_OXNNLEN;
	} else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
			!strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
			!strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
				"pn-", NVME_FC_TRADDR_NNLEN))) {
		nnoffset = NVME_FC_TRADDR_NNLEN;
		pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
	} else
		goto out_einval;

	name[0] = '0';
	name[1] = 'x';
	name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;

	memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
		goto out_einval;

	memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
	if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
		goto out_einval;

	return 0;

out_einval:
	pr_warn("%s: bad traddr string\n", __func__);
	return -EINVAL;
}
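
/*
 * For illustration, the two accepted traddr layouts look like this; the
 * 16-digit WWN values are made-up examples, and the ':' separator between
 * the nn- and pn- names is inferred from the offset math above rather
 * than checked explicitly:
 *
 *	nn-0x20000090fa942779:pn-0x10000090fa942779	(0x-prefixed, 43 chars)
 *	nn-20000090fa942779:pn-10000090fa942779		(bare hex, 39 chars)
 *
 * Either way, the hex digits are copied behind the "0x" written into
 * name[0..1], so match_u64() always sees a base-16 number.
 */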

static struct nvme_ctrl *
nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
{
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;
	struct nvme_ctrl *ctrl;
	struct nvmet_fc_traddr laddr = { 0L, 0L };
	struct nvmet_fc_traddr raddr = { 0L, 0L };
	unsigned long flags;
	int ret;

	ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
	if (ret || !raddr.nn || !raddr.pn)
		return ERR_PTR(-EINVAL);

	ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
	if (ret || !laddr.nn || !laddr.pn)
		return ERR_PTR(-EINVAL);

	/* find the host and remote ports to connect together */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != laddr.nn ||
		    lport->localport.port_name != laddr.pn)
			continue;

		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			if (rport->remoteport.node_name != raddr.nn ||
			    rport->remoteport.port_name != raddr.pn)
				continue;

			/* if we fail to get a reference, fall through;
			 * the lookup will terminate and return an error.
			 */
			if (!nvme_fc_rport_get(rport))
				break;

			/* drop the lock: nvme_fc_init_ctrl() allocates
			 * and may sleep
			 */
			spin_unlock_irqrestore(&nvme_fc_lock, flags);

			ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
			if (IS_ERR(ctrl))
				nvme_fc_rport_put(rport);
			return ctrl;
		}
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	pr_warn("%s: %s - %s combination not found\n",
		__func__, opts->traddr, opts->host_traddr);
	return ERR_PTR(-ENOENT);
}

static struct nvmf_transport_ops nvme_fc_transport = {
	.name		= "fc",
	.module		= THIS_MODULE,
	.required_opts	= NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
	.allowed_opts	= NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
	.create_ctrl	= nvme_fc_create_ctrl,
};
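
/*
 * Sketch of how this transport is typically exercised from userspace; the
 * WWN values are hypothetical and the exact nvme-cli flags are assumptions,
 * not part of this file. transport=fc selects the ops above, and the two
 * required address strings are matched in nvme_fc_create_ctrl():
 *
 *	nvme connect --transport=fc \
 *		--host-traddr=nn-0x20000090fa942779:pn-0x10000090fa942779 \
 *		--traddr=nn-0x204600a098cbcac6:pn-0x204700a098cbcac6 \
 *		--nqn=<subsystem NQN>
 */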

/* Arbitrary cap on successive reference failures; with lots of
 * subsystems the legitimate count could be high.
 */
#define DISCOVERY_MAX_FAIL	20

static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long flags;
	LIST_HEAD(local_disc_list);
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;
	int failcnt = 0;

	spin_lock_irqsave(&nvme_fc_lock, flags);
restart:
	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			if (!nvme_fc_lport_get(lport))
				continue;
			if (!nvme_fc_rport_get(rport)) {
				/*
				 * This is a temporary condition. Upon restart
				 * this rport will be gone from the list.
				 *
				 * Revert the lport get and retry. Anything
				 * already added to the local list will be
				 * skipped (as it is no longer list_empty).
				 * The loops should resume at rports that
				 * were not yet seen.
				 */
				nvme_fc_lport_put(lport);

				if (failcnt++ < DISCOVERY_MAX_FAIL)
					goto restart;

				pr_err("nvme_discovery: too many reference failures\n");
				goto process_local_list;
			}
			if (list_empty(&rport->disc_list))
				list_add_tail(&rport->disc_list,
					      &local_disc_list);
		}
	}

process_local_list:
	while (!list_empty(&local_disc_list)) {
		rport = list_first_entry(&local_disc_list,
					 struct nvme_fc_rport, disc_list);
		list_del_init(&rport->disc_list);
		spin_unlock_irqrestore(&nvme_fc_lock, flags);

		lport = rport->lport;
		/* signal discovery. Won't hurt if it repeats */
		nvme_fc_signal_discovery_scan(lport, rport);
		nvme_fc_rport_put(rport);
		nvme_fc_lport_put(lport);

		spin_lock_irqsave(&nvme_fc_lock, flags);
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return count;
}

static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);

static struct attribute *nvme_fc_attrs[] = {
	&dev_attr_nvme_discovery.attr,
	NULL
};

static struct attribute_group nvme_fc_attr_group = {
	.attrs = nvme_fc_attrs,
};

static const struct attribute_group *nvme_fc_attr_groups[] = {
	&nvme_fc_attr_group,
	NULL
};

static struct class fc_class = {
	.name		= "fc",
	.dev_groups	= nvme_fc_attr_groups,
	.owner		= THIS_MODULE,
};
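
/*
 * Given the class name "fc" and the "fc_udev_device" created in module
 * init below, the write-only (0200) attribute above should surface as
 * /sys/class/fc/fc_udev_device/nvme_discovery; the exact path is inferred
 * from those names, not stated in this file. Writing any value re-signals
 * discovery on every known rport, e.g.:
 *
 *	echo 1 > /sys/class/fc/fc_udev_device/nvme_discovery
 */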

static int __init nvme_fc_init_module(void)
{
	int ret;

	/*
	 * NOTE:
	 * It is expected that in the future the kernel will combine
	 * the FC-isms that currently live under scsi, and are now
	 * being added to by NVME, into a new standalone FC class.
	 * The SCSI and NVME protocols and their devices would sit
	 * under this new FC class.
	 *
	 * As we need something to post FC-specific udev events to,
	 * specifically for nvme probe events, start by creating the
	 * new device class. When the new standalone FC class is
	 * put in place, this code will move to a more generic
	 * location for the class.
	 */
	ret = class_register(&fc_class);
	if (ret) {
		pr_err("couldn't register class fc\n");
		return ret;
	}

	/*
	 * Create a device for the FC-centric udev events
	 */
	fc_udev_device = device_create(&fc_class, NULL, MKDEV(0, 0), NULL,
				"fc_udev_device");
	if (IS_ERR(fc_udev_device)) {
		pr_err("couldn't create fc_udev device!\n");
		ret = PTR_ERR(fc_udev_device);
		goto out_destroy_class;
	}

	ret = nvmf_register_transport(&nvme_fc_transport);
	if (ret)
		goto out_destroy_device;

	return 0;

out_destroy_device:
	device_destroy(&fc_class, MKDEV(0, 0));
out_destroy_class:
	class_unregister(&fc_class);
	return ret;
}

static void __exit nvme_fc_exit_module(void)
{
	/* sanity check - all lports should be removed */
	if (!list_empty(&nvme_fc_lport_list))
		pr_warn("%s: localport list not empty\n", __func__);

	nvmf_unregister_transport(&nvme_fc_transport);

	ida_destroy(&nvme_fc_local_port_cnt);
	ida_destroy(&nvme_fc_ctrl_cnt);

	device_destroy(&fc_class, MKDEV(0, 0));
	class_unregister(&fc_class);
}

module_init(nvme_fc_init_module);
module_exit(nvme_fc_exit_module);

MODULE_LICENSE("GPL v2");