fc.c
/*
 * Copyright (c) 2016 Avago Technologies. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>
#include <linux/delay.h>

#include "nvme.h"
#include "fabrics.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


/* *************************** Data Structures/Defines ****************** */


/*
 * We handle AEN commands ourselves and don't even let the
 * block layer know about them.
 */
#define NVME_FC_NR_AEN_COMMANDS 1
#define NVME_FC_AQ_BLKMQ_DEPTH \
        (NVMF_AQ_DEPTH - NVME_FC_NR_AEN_COMMANDS)
#define AEN_CMDID_BASE (NVME_FC_AQ_BLKMQ_DEPTH + 1)
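
/*
 * Worked example (assuming NVMF_AQ_DEPTH is 32, its value in fabrics.h
 * at the time of writing): the admin blk-mq queue is sized to 31 tags,
 * and the single AEN command gets command id 32, just past the tag
 * range that blk-mq hands out.
 */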

enum nvme_fc_queue_flags {
        NVME_FC_Q_CONNECTED = (1 << 0),
};

#define NVMEFC_QUEUE_DELAY 3 /* ms units */

struct nvme_fc_queue {
        struct nvme_fc_ctrl *ctrl;
        struct device *dev;
        struct blk_mq_hw_ctx *hctx;
        void *lldd_handle;
        int queue_size;
        size_t cmnd_capsule_len;
        u32 qnum;
        u32 rqcnt;
        u32 seqno;

        u64 connection_id;
        atomic_t csn;

        unsigned long flags;
} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */

enum nvme_fcop_flags {
        FCOP_FLAGS_TERMIO = (1 << 0),
        FCOP_FLAGS_RELEASED = (1 << 1),
        FCOP_FLAGS_COMPLETE = (1 << 2),
        FCOP_FLAGS_AEN = (1 << 3),
};

struct nvmefc_ls_req_op {
        struct nvmefc_ls_req ls_req;

        struct nvme_fc_rport *rport;
        struct nvme_fc_queue *queue;
        struct request *rq;
        u32 flags;

        int ls_error;
        struct completion ls_done;
        struct list_head lsreq_list; /* rport->ls_req_list */
        bool req_queued;
};

enum nvme_fcpop_state {
        FCPOP_STATE_UNINIT = 0,
        FCPOP_STATE_IDLE = 1,
        FCPOP_STATE_ACTIVE = 2,
        FCPOP_STATE_ABORTED = 3,
        FCPOP_STATE_COMPLETE = 4,
};

struct nvme_fc_fcp_op {
        struct nvme_request nreq; /*
                                   * nvme/host/core.c
                                   * requires this to be
                                   * the 1st element in the
                                   * private structure
                                   * associated with the
                                   * request.
                                   */
        struct nvmefc_fcp_req fcp_req;

        struct nvme_fc_ctrl *ctrl;
        struct nvme_fc_queue *queue;
        struct request *rq;

        atomic_t state;
        u32 flags;
        u32 rqno;
        u32 nents;

        struct nvme_fc_cmd_iu cmd_iu;
        struct nvme_fc_ersp_iu rsp_iu;
};

struct nvme_fc_lport {
        struct nvme_fc_local_port localport;

        struct ida endp_cnt;
        struct list_head port_list; /* nvme_fc_port_list */
        struct list_head endp_list;
        struct device *dev; /* physical device for dma */
        struct nvme_fc_port_template *ops;
        struct kref ref;
} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */

struct nvme_fc_rport {
        struct nvme_fc_remote_port remoteport;

        struct list_head endp_list; /* for lport->endp_list */
        struct list_head ctrl_list;
        struct list_head ls_req_list;
        struct device *dev; /* physical device for dma */
        struct nvme_fc_lport *lport;
        spinlock_t lock;
        struct kref ref;
} __aligned(sizeof(u64)); /* alignment for other things alloc'd with */

enum nvme_fcctrl_flags {
        FCCTRL_TERMIO = (1 << 0),
};

struct nvme_fc_ctrl {
        spinlock_t lock;
        struct nvme_fc_queue *queues;
        struct device *dev;
        struct nvme_fc_lport *lport;
        struct nvme_fc_rport *rport;
        u32 queue_count;
        u32 cnum;

        u64 association_id;
        u64 cap;

        struct list_head ctrl_list; /* rport->ctrl_list */

        struct blk_mq_tag_set admin_tag_set;
        struct blk_mq_tag_set tag_set;

        struct work_struct delete_work;
        struct work_struct reset_work;
        struct delayed_work connect_work;

        struct kref ref;
        u32 flags;
        u32 iocnt;

        struct nvme_fc_fcp_op aen_ops[NVME_FC_NR_AEN_COMMANDS];

        struct nvme_ctrl ctrl;
};

static inline struct nvme_fc_ctrl *
to_fc_ctrl(struct nvme_ctrl *ctrl)
{
        return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

static inline struct nvme_fc_lport *
localport_to_lport(struct nvme_fc_local_port *portptr)
{
        return container_of(portptr, struct nvme_fc_lport, localport);
}

static inline struct nvme_fc_rport *
remoteport_to_rport(struct nvme_fc_remote_port *portptr)
{
        return container_of(portptr, struct nvme_fc_rport, remoteport);
}

static inline struct nvmefc_ls_req_op *
ls_req_to_lsop(struct nvmefc_ls_req *lsreq)
{
        return container_of(lsreq, struct nvmefc_ls_req_op, ls_req);
}

static inline struct nvme_fc_fcp_op *
fcp_req_to_fcp_op(struct nvmefc_fcp_req *fcpreq)
{
        return container_of(fcpreq, struct nvme_fc_fcp_op, fcp_req);
}
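
/*
 * A note on the container_of() helpers above (illustrative, not driver
 * logic): each transport object embeds the generic structure handed to
 * the LLDD, so the private wrapper can be recovered from the pointer
 * the LLDD passes back. E.g.:
 *
 *        struct nvme_fc_lport *lport = localport_to_lport(portptr);
 *
 * works because "localport" is a member of struct nvme_fc_lport and
 * container_of() simply subtracts the member's offset.
 */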


/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvme_fc_lock);

static LIST_HEAD(nvme_fc_lport_list);
static DEFINE_IDA(nvme_fc_local_port_cnt);
static DEFINE_IDA(nvme_fc_ctrl_cnt);

static struct workqueue_struct *nvme_fc_wq;


/* *********************** FC-NVME Port Management ************************ */

static int __nvme_fc_del_ctrl(struct nvme_fc_ctrl *);
static void __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *,
                        struct nvme_fc_queue *, unsigned int);

/**
 * nvme_fc_register_localport - transport entry point called by an
 *                              LLDD to register the existence of a
 *                              NVME host FC port.
 * @pinfo:    pointer to information about the port to be registered
 * @template: LLDD entrypoints and operational parameters for the port
 * @dev:      physical hardware device node port corresponds to. Will be
 *            used for DMA mappings
 * @portptr:  pointer to a local port pointer. Upon success, the routine
 *            will allocate a nvme_fc_local_port structure and place its
 *            address in the local port pointer. Upon failure, local port
 *            pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_localport(struct nvme_fc_port_info *pinfo,
                        struct nvme_fc_port_template *template,
                        struct device *dev,
                        struct nvme_fc_local_port **portptr)
{
        struct nvme_fc_lport *newrec;
        unsigned long flags;
        int ret, idx;

        if (!template->localport_delete || !template->remoteport_delete ||
            !template->ls_req || !template->fcp_io ||
            !template->ls_abort || !template->fcp_abort ||
            !template->max_hw_queues || !template->max_sgl_segments ||
            !template->max_dif_sgl_segments || !template->dma_boundary) {
                ret = -EINVAL;
                goto out_reghost_failed;
        }

        newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz),
                         GFP_KERNEL);
        if (!newrec) {
                ret = -ENOMEM;
                goto out_reghost_failed;
        }

        idx = ida_simple_get(&nvme_fc_local_port_cnt, 0, 0, GFP_KERNEL);
        if (idx < 0) {
                ret = -ENOSPC;
                goto out_fail_kfree;
        }

        if (!get_device(dev) && dev) {
                ret = -ENODEV;
                goto out_ida_put;
        }

        INIT_LIST_HEAD(&newrec->port_list);
        INIT_LIST_HEAD(&newrec->endp_list);
        kref_init(&newrec->ref);
        newrec->ops = template;
        newrec->dev = dev;
        ida_init(&newrec->endp_cnt);
        newrec->localport.private = &newrec[1];
        newrec->localport.node_name = pinfo->node_name;
        newrec->localport.port_name = pinfo->port_name;
        newrec->localport.port_role = pinfo->port_role;
        newrec->localport.port_id = pinfo->port_id;
        newrec->localport.port_state = FC_OBJSTATE_ONLINE;
        newrec->localport.port_num = idx;

        spin_lock_irqsave(&nvme_fc_lock, flags);
        list_add_tail(&newrec->port_list, &nvme_fc_lport_list);
        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        if (dev)
                dma_set_seg_boundary(dev, template->dma_boundary);

        *portptr = &newrec->localport;
        return 0;

out_ida_put:
        ida_simple_remove(&nvme_fc_local_port_cnt, idx);
out_fail_kfree:
        kfree(newrec);
out_reghost_failed:
        *portptr = NULL;

        return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_localport);
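
/*
 * Illustrative sketch of how an LLDD might call the entry point above
 * (hypothetical names; "my_hba", "my_lport_priv" and the template
 * contents are assumptions, not part of this driver):
 *
 *        static struct nvme_fc_port_template my_fc_template = {
 *                .localport_delete     = my_localport_delete,
 *                .remoteport_delete    = my_remoteport_delete,
 *                .ls_req               = my_ls_req,
 *                .fcp_io               = my_fcp_io,
 *                .ls_abort             = my_ls_abort,
 *                .fcp_abort            = my_fcp_abort,
 *                .max_hw_queues        = 4,
 *                .max_sgl_segments     = 64,
 *                .max_dif_sgl_segments = 64,
 *                .dma_boundary         = 0xFFFFFFFF,
 *                .local_priv_sz        = sizeof(struct my_lport_priv),
 *        };
 *
 *        struct nvme_fc_port_info pinfo = {
 *                .node_name = my_hba->wwnn,
 *                .port_name = my_hba->wwpn,
 *                .port_role = FC_PORT_ROLE_NVME_INITIATOR,
 *        };
 *        struct nvme_fc_local_port *localport;
 *        int ret = nvme_fc_register_localport(&pinfo, &my_fc_template,
 *                                             &my_hba->pdev->dev,
 *                                             &localport);
 */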

static void
nvme_fc_free_lport(struct kref *ref)
{
        struct nvme_fc_lport *lport =
                container_of(ref, struct nvme_fc_lport, ref);
        unsigned long flags;

        WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED);
        WARN_ON(!list_empty(&lport->endp_list));

        /* remove from transport list */
        spin_lock_irqsave(&nvme_fc_lock, flags);
        list_del(&lport->port_list);
        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        /* let the LLDD know we've finished tearing it down */
        lport->ops->localport_delete(&lport->localport);

        ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num);
        ida_destroy(&lport->endp_cnt);

        put_device(lport->dev);

        kfree(lport);
}

static void
nvme_fc_lport_put(struct nvme_fc_lport *lport)
{
        kref_put(&lport->ref, nvme_fc_free_lport);
}

static int
nvme_fc_lport_get(struct nvme_fc_lport *lport)
{
        return kref_get_unless_zero(&lport->ref);
}

/**
 * nvme_fc_unregister_localport - transport entry point called by an
 *                                LLDD to deregister/remove a previously
 *                                registered NVME host FC port.
 * @portptr: pointer to the (registered) local port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_localport(struct nvme_fc_local_port *portptr)
{
        struct nvme_fc_lport *lport = localport_to_lport(portptr);
        unsigned long flags;

        if (!portptr)
                return -EINVAL;

        spin_lock_irqsave(&nvme_fc_lock, flags);

        if (portptr->port_state != FC_OBJSTATE_ONLINE) {
                spin_unlock_irqrestore(&nvme_fc_lock, flags);
                return -EINVAL;
        }
        portptr->port_state = FC_OBJSTATE_DELETED;

        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        nvme_fc_lport_put(lport);

        return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_localport);

/**
 * nvme_fc_register_remoteport - transport entry point called by an
 *                               LLDD to register the existence of a NVME
 *                               subsystem FC port on its fabric.
 * @localport: pointer to the (registered) local port that the remote
 *             subsystem port is connected to.
 * @pinfo:     pointer to information about the port to be registered
 * @portptr:   pointer to a remote port pointer. Upon success, the routine
 *             will allocate a nvme_fc_remote_port structure and place its
 *             address in the remote port pointer. Upon failure, remote port
 *             pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_register_remoteport(struct nvme_fc_local_port *localport,
                                struct nvme_fc_port_info *pinfo,
                                struct nvme_fc_remote_port **portptr)
{
        struct nvme_fc_lport *lport = localport_to_lport(localport);
        struct nvme_fc_rport *newrec;
        unsigned long flags;
        int ret, idx;

        newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz),
                         GFP_KERNEL);
        if (!newrec) {
                ret = -ENOMEM;
                goto out_reghost_failed;
        }

        if (!nvme_fc_lport_get(lport)) {
                ret = -ESHUTDOWN;
                goto out_kfree_rport;
        }

        idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL);
        if (idx < 0) {
                ret = -ENOSPC;
                goto out_lport_put;
        }

        INIT_LIST_HEAD(&newrec->endp_list);
        INIT_LIST_HEAD(&newrec->ctrl_list);
        INIT_LIST_HEAD(&newrec->ls_req_list);
        kref_init(&newrec->ref);
        spin_lock_init(&newrec->lock);
        newrec->remoteport.localport = &lport->localport;
        newrec->dev = lport->dev;
        newrec->lport = lport;
        newrec->remoteport.private = &newrec[1];
        newrec->remoteport.port_role = pinfo->port_role;
        newrec->remoteport.node_name = pinfo->node_name;
        newrec->remoteport.port_name = pinfo->port_name;
        newrec->remoteport.port_id = pinfo->port_id;
        newrec->remoteport.port_state = FC_OBJSTATE_ONLINE;
        newrec->remoteport.port_num = idx;

        spin_lock_irqsave(&nvme_fc_lock, flags);
        list_add_tail(&newrec->endp_list, &lport->endp_list);
        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        *portptr = &newrec->remoteport;
        return 0;

out_lport_put:
        nvme_fc_lport_put(lport);
out_kfree_rport:
        kfree(newrec);
out_reghost_failed:
        *portptr = NULL;
        return ret;
}
EXPORT_SYMBOL_GPL(nvme_fc_register_remoteport);
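
/*
 * Continuing the sketch from nvme_fc_register_localport() above
 * (hypothetical names, for illustration only): when the LLDD discovers
 * an NVME-capable target port on the fabric, it registers it against
 * the local port:
 *
 *        struct nvme_fc_port_info rinfo = {
 *                .node_name = discovered_wwnn,
 *                .port_name = discovered_wwpn,
 *                .port_role = FC_PORT_ROLE_NVME_TARGET,
 *                .port_id   = discovered_d_id,
 *        };
 *        struct nvme_fc_remote_port *remoteport;
 *        int ret = nvme_fc_register_remoteport(localport, &rinfo,
 *                                              &remoteport);
 */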

static void
nvme_fc_free_rport(struct kref *ref)
{
        struct nvme_fc_rport *rport =
                container_of(ref, struct nvme_fc_rport, ref);
        struct nvme_fc_lport *lport =
                localport_to_lport(rport->remoteport.localport);
        unsigned long flags;

        WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED);
        WARN_ON(!list_empty(&rport->ctrl_list));

        /* remove from lport list */
        spin_lock_irqsave(&nvme_fc_lock, flags);
        list_del(&rport->endp_list);
        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        /* let the LLDD know we've finished tearing it down */
        lport->ops->remoteport_delete(&rport->remoteport);

        ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num);

        kfree(rport);

        nvme_fc_lport_put(lport);
}

static void
nvme_fc_rport_put(struct nvme_fc_rport *rport)
{
        kref_put(&rport->ref, nvme_fc_free_rport);
}

static int
nvme_fc_rport_get(struct nvme_fc_rport *rport)
{
        return kref_get_unless_zero(&rport->ref);
}

static int
nvme_fc_abort_lsops(struct nvme_fc_rport *rport)
{
        struct nvmefc_ls_req_op *lsop;
        unsigned long flags;

restart:
        spin_lock_irqsave(&rport->lock, flags);

        list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) {
                if (!(lsop->flags & FCOP_FLAGS_TERMIO)) {
                        lsop->flags |= FCOP_FLAGS_TERMIO;
                        spin_unlock_irqrestore(&rport->lock, flags);
                        rport->lport->ops->ls_abort(&rport->lport->localport,
                                                &rport->remoteport,
                                                &lsop->ls_req);
                        goto restart;
                }
        }
        spin_unlock_irqrestore(&rport->lock, flags);

        return 0;
}

/**
 * nvme_fc_unregister_remoteport - transport entry point called by an
 *                                 LLDD to deregister/remove a previously
 *                                 registered NVME subsystem FC port.
 * @portptr: pointer to the (registered) remote port that is to be
 *           deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvme_fc_unregister_remoteport(struct nvme_fc_remote_port *portptr)
{
        struct nvme_fc_rport *rport = remoteport_to_rport(portptr);
        struct nvme_fc_ctrl *ctrl;
        unsigned long flags;

        if (!portptr)
                return -EINVAL;

        spin_lock_irqsave(&rport->lock, flags);

        if (portptr->port_state != FC_OBJSTATE_ONLINE) {
                spin_unlock_irqrestore(&rport->lock, flags);
                return -EINVAL;
        }
        portptr->port_state = FC_OBJSTATE_DELETED;

        /* tear down all associations to the remote port */
        list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list)
                __nvme_fc_del_ctrl(ctrl);

        spin_unlock_irqrestore(&rport->lock, flags);

        nvme_fc_abort_lsops(rport);

        nvme_fc_rport_put(rport);
        return 0;
}
EXPORT_SYMBOL_GPL(nvme_fc_unregister_remoteport);
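
/*
 * Teardown ordering sketch (same assumed names as the examples above):
 * the LLDD unregisters in the reverse order of registration, and each
 * call only drops a reference -- the remoteport_delete()/
 * localport_delete() template callbacks fire later, once controllers
 * and in-flight LS requests have released theirs:
 *
 *        nvme_fc_unregister_remoteport(remoteport);
 *        nvme_fc_unregister_localport(localport);
 */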

/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLDDs will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * On simple mappings (returning just a dma address), we'll noop them,
 * returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
                enum dma_data_direction dir)
{
        return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
        enum dma_data_direction dir)
{
        if (dev)
                dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
                enum dma_data_direction dir)
{
        if (dev)
                dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
                enum dma_data_direction dir)
{
        if (dev)
                dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
        struct scatterlist *s;
        int i;

        WARN_ON(nents == 0 || sg[0].length == 0);

        for_each_sg(sg, s, nents, i) {
                s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
                s->dma_length = s->length;
#endif
        }
        return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
{
        return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
{
        if (dev)
                dma_unmap_sg(dev, sg, nents, dir);
}
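
/*
 * Minimal sketch of the wrappers' effect (illustrative only): with a
 * real device, fc_dma_map_single() behaves exactly like
 * dma_map_single(); with fcloop's NULL device it degenerates to a noop:
 *
 *        dma_addr_t a = fc_dma_map_single(NULL, buf, len, DMA_TO_DEVICE);
 *        (a is 0, and fc_dma_mapping_error(NULL, a) reports success)
 */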

/* *********************** FC-NVME LS Handling **************************** */

static void nvme_fc_ctrl_put(struct nvme_fc_ctrl *);
static int nvme_fc_ctrl_get(struct nvme_fc_ctrl *);


static void
__nvme_fc_finish_ls_req(struct nvmefc_ls_req_op *lsop)
{
        struct nvme_fc_rport *rport = lsop->rport;
        struct nvmefc_ls_req *lsreq = &lsop->ls_req;
        unsigned long flags;

        spin_lock_irqsave(&rport->lock, flags);

        if (!lsop->req_queued) {
                spin_unlock_irqrestore(&rport->lock, flags);
                return;
        }

        list_del(&lsop->lsreq_list);

        lsop->req_queued = false;

        spin_unlock_irqrestore(&rport->lock, flags);

        fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
                                  (lsreq->rqstlen + lsreq->rsplen),
                                  DMA_BIDIRECTIONAL);

        nvme_fc_rport_put(rport);
}

static int
__nvme_fc_send_ls_req(struct nvme_fc_rport *rport,
                struct nvmefc_ls_req_op *lsop,
                void (*done)(struct nvmefc_ls_req *req, int status))
{
        struct nvmefc_ls_req *lsreq = &lsop->ls_req;
        unsigned long flags;
        int ret = 0;

        if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
                return -ECONNREFUSED;

        if (!nvme_fc_rport_get(rport))
                return -ESHUTDOWN;

        lsreq->done = done;
        lsop->rport = rport;
        lsop->req_queued = false;
        INIT_LIST_HEAD(&lsop->lsreq_list);
        init_completion(&lsop->ls_done);

        lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr,
                                  lsreq->rqstlen + lsreq->rsplen,
                                  DMA_BIDIRECTIONAL);
        if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) {
                ret = -EFAULT;
                goto out_putrport;
        }
        lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;

        spin_lock_irqsave(&rport->lock, flags);

        list_add_tail(&lsop->lsreq_list, &rport->ls_req_list);

        lsop->req_queued = true;

        spin_unlock_irqrestore(&rport->lock, flags);

        ret = rport->lport->ops->ls_req(&rport->lport->localport,
                                        &rport->remoteport, lsreq);
        if (ret)
                goto out_unlink;

        return 0;

out_unlink:
        lsop->ls_error = ret;
        spin_lock_irqsave(&rport->lock, flags);
        lsop->req_queued = false;
        list_del(&lsop->lsreq_list);
        spin_unlock_irqrestore(&rport->lock, flags);
        fc_dma_unmap_single(rport->dev, lsreq->rqstdma,
                                  (lsreq->rqstlen + lsreq->rsplen),
                                  DMA_BIDIRECTIONAL);
out_putrport:
        nvme_fc_rport_put(rport);

        return ret;
}

static void
nvme_fc_send_ls_req_done(struct nvmefc_ls_req *lsreq, int status)
{
        struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

        lsop->ls_error = status;
        complete(&lsop->ls_done);
}

static int
nvme_fc_send_ls_req(struct nvme_fc_rport *rport, struct nvmefc_ls_req_op *lsop)
{
        struct nvmefc_ls_req *lsreq = &lsop->ls_req;
        struct fcnvme_ls_rjt *rjt = lsreq->rspaddr;
        int ret;

        ret = __nvme_fc_send_ls_req(rport, lsop, nvme_fc_send_ls_req_done);

        if (!ret) {
                /*
                 * No timeout/not interruptible as we need the struct
                 * to exist until the lldd calls us back. Thus mandate
                 * waiting until the lldd calls back; the lldd is
                 * responsible for the timeout action.
                 */
                wait_for_completion(&lsop->ls_done);

                __nvme_fc_finish_ls_req(lsop);

                ret = lsop->ls_error;
        }

        if (ret)
                return ret;

        /* ACC or RJT payload ? */
        if (rjt->w0.ls_cmd == FCNVME_LS_RJT)
                return -ENXIO;

        return 0;
}

static int
nvme_fc_send_ls_req_async(struct nvme_fc_rport *rport,
                struct nvmefc_ls_req_op *lsop,
                void (*done)(struct nvmefc_ls_req *req, int status))
{
        /* don't wait for completion */

        return __nvme_fc_send_ls_req(rport, lsop, done);
}
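
/*
 * Usage note (sketch): nvme_fc_send_ls_req() is the synchronous form
 * used for the Create Association/Connection exchanges below, where
 * the caller can block on ls_done; nvme_fc_send_ls_req_async() is used
 * for Disconnect during teardown, where the caller must not wait and
 * instead supplies a done() callback such as
 * nvme_fc_disconnect_assoc_done().
 */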

/* Validation Error indexes into the string table below */
enum {
        VERR_NO_ERROR = 0,
        VERR_LSACC = 1,
        VERR_LSDESC_RQST = 2,
        VERR_LSDESC_RQST_LEN = 3,
        VERR_ASSOC_ID = 4,
        VERR_ASSOC_ID_LEN = 5,
        VERR_CONN_ID = 6,
        VERR_CONN_ID_LEN = 7,
        VERR_CR_ASSOC = 8,
        VERR_CR_ASSOC_ACC_LEN = 9,
        VERR_CR_CONN = 10,
        VERR_CR_CONN_ACC_LEN = 11,
        VERR_DISCONN = 12,
        VERR_DISCONN_ACC_LEN = 13,
};

static char *validation_errors[] = {
        "OK",
        "Not LS_ACC",
        "Not LSDESC_RQST",
        "Bad LSDESC_RQST Length",
        "Not Association ID",
        "Bad Association ID Length",
        "Not Connection ID",
        "Bad Connection ID Length",
        "Not CR_ASSOC Rqst",
        "Bad CR_ASSOC ACC Length",
        "Not CR_CONN Rqst",
        "Bad CR_CONN ACC Length",
        "Not Disconnect Rqst",
        "Bad Disconnect ACC Length",
};

static int
nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl,
        struct nvme_fc_queue *queue, u16 qsize, u16 ersp_ratio)
{
        struct nvmefc_ls_req_op *lsop;
        struct nvmefc_ls_req *lsreq;
        struct fcnvme_ls_cr_assoc_rqst *assoc_rqst;
        struct fcnvme_ls_cr_assoc_acc *assoc_acc;
        int ret, fcret = 0;

        lsop = kzalloc((sizeof(*lsop) +
                         ctrl->lport->ops->lsrqst_priv_sz +
                         sizeof(*assoc_rqst) + sizeof(*assoc_acc)), GFP_KERNEL);
        if (!lsop) {
                ret = -ENOMEM;
                goto out_no_memory;
        }
        lsreq = &lsop->ls_req;

        lsreq->private = (void *)&lsop[1];
        assoc_rqst = (struct fcnvme_ls_cr_assoc_rqst *)
                        (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
        assoc_acc = (struct fcnvme_ls_cr_assoc_acc *)&assoc_rqst[1];

        assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION;
        assoc_rqst->desc_list_len =
                        cpu_to_be32(sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

        assoc_rqst->assoc_cmd.desc_tag =
                        cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD);
        assoc_rqst->assoc_cmd.desc_len =
                        fcnvme_lsdesc_len(
                                sizeof(struct fcnvme_lsdesc_cr_assoc_cmd));

        assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
        assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize);
        /* Linux supports only Dynamic controllers */
        assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff);
        memcpy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id,
                min_t(size_t, FCNVME_ASSOC_HOSTID_LEN, sizeof(uuid_be)));
        strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn,
                min(FCNVME_ASSOC_HOSTNQN_LEN, NVMF_NQN_SIZE));
        strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn,
                min(FCNVME_ASSOC_SUBNQN_LEN, NVMF_NQN_SIZE));

        lsop->queue = queue;
        lsreq->rqstaddr = assoc_rqst;
        lsreq->rqstlen = sizeof(*assoc_rqst);
        lsreq->rspaddr = assoc_acc;
        lsreq->rsplen = sizeof(*assoc_acc);
        lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

        ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
        if (ret)
                goto out_free_buffer;

        /* process connect LS completion */

        /* validate the ACC response */
        if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
                fcret = VERR_LSACC;
        else if (assoc_acc->hdr.desc_list_len !=
                        fcnvme_lsdesc_len(
                                sizeof(struct fcnvme_ls_cr_assoc_acc)))
                fcret = VERR_CR_ASSOC_ACC_LEN;
        else if (assoc_acc->hdr.rqst.desc_tag !=
                        cpu_to_be32(FCNVME_LSDESC_RQST))
                fcret = VERR_LSDESC_RQST;
        else if (assoc_acc->hdr.rqst.desc_len !=
                        fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
                fcret = VERR_LSDESC_RQST_LEN;
        else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION)
                fcret = VERR_CR_ASSOC;
        else if (assoc_acc->associd.desc_tag !=
                        cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
                fcret = VERR_ASSOC_ID;
        else if (assoc_acc->associd.desc_len !=
                        fcnvme_lsdesc_len(
                                sizeof(struct fcnvme_lsdesc_assoc_id)))
                fcret = VERR_ASSOC_ID_LEN;
        else if (assoc_acc->connectid.desc_tag !=
                        cpu_to_be32(FCNVME_LSDESC_CONN_ID))
                fcret = VERR_CONN_ID;
        else if (assoc_acc->connectid.desc_len !=
                        fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
                fcret = VERR_CONN_ID_LEN;

        if (fcret) {
                ret = -EBADF;
                dev_err(ctrl->dev,
                        "q %d connect failed: %s\n",
                        queue->qnum, validation_errors[fcret]);
        } else {
                ctrl->association_id =
                        be64_to_cpu(assoc_acc->associd.association_id);
                queue->connection_id =
                        be64_to_cpu(assoc_acc->connectid.connection_id);
                set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
        }

out_free_buffer:
        kfree(lsop);
out_no_memory:
        if (ret)
                dev_err(ctrl->dev,
                        "queue %d connect admin queue failed (%d).\n",
                        queue->qnum, ret);
        return ret;
}

static int
nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
                        u16 qsize, u16 ersp_ratio)
{
        struct nvmefc_ls_req_op *lsop;
        struct nvmefc_ls_req *lsreq;
        struct fcnvme_ls_cr_conn_rqst *conn_rqst;
        struct fcnvme_ls_cr_conn_acc *conn_acc;
        int ret, fcret = 0;

        lsop = kzalloc((sizeof(*lsop) +
                         ctrl->lport->ops->lsrqst_priv_sz +
                         sizeof(*conn_rqst) + sizeof(*conn_acc)), GFP_KERNEL);
        if (!lsop) {
                ret = -ENOMEM;
                goto out_no_memory;
        }
        lsreq = &lsop->ls_req;

        lsreq->private = (void *)&lsop[1];
        conn_rqst = (struct fcnvme_ls_cr_conn_rqst *)
                        (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
        conn_acc = (struct fcnvme_ls_cr_conn_acc *)&conn_rqst[1];

        conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION;
        conn_rqst->desc_list_len = cpu_to_be32(
                                sizeof(struct fcnvme_lsdesc_assoc_id) +
                                sizeof(struct fcnvme_lsdesc_cr_conn_cmd));

        conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
        conn_rqst->associd.desc_len =
                        fcnvme_lsdesc_len(
                                sizeof(struct fcnvme_lsdesc_assoc_id));
        conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);
        conn_rqst->connect_cmd.desc_tag =
                        cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD);
        conn_rqst->connect_cmd.desc_len =
                        fcnvme_lsdesc_len(
                                sizeof(struct fcnvme_lsdesc_cr_conn_cmd));
        conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio);
        conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum);
        conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize);

        lsop->queue = queue;
        lsreq->rqstaddr = conn_rqst;
        lsreq->rqstlen = sizeof(*conn_rqst);
        lsreq->rspaddr = conn_acc;
        lsreq->rsplen = sizeof(*conn_acc);
        lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

        ret = nvme_fc_send_ls_req(ctrl->rport, lsop);
        if (ret)
                goto out_free_buffer;

        /* process connect LS completion */

        /* validate the ACC response */
        if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC)
                fcret = VERR_LSACC;
        else if (conn_acc->hdr.desc_list_len !=
                        fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)))
                fcret = VERR_CR_CONN_ACC_LEN;
        else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST))
                fcret = VERR_LSDESC_RQST;
        else if (conn_acc->hdr.rqst.desc_len !=
                        fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst)))
                fcret = VERR_LSDESC_RQST_LEN;
        else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION)
                fcret = VERR_CR_CONN;
        else if (conn_acc->connectid.desc_tag !=
                        cpu_to_be32(FCNVME_LSDESC_CONN_ID))
                fcret = VERR_CONN_ID;
        else if (conn_acc->connectid.desc_len !=
                        fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_conn_id)))
                fcret = VERR_CONN_ID_LEN;

        if (fcret) {
                ret = -EBADF;
                dev_err(ctrl->dev,
                        "q %d connect failed: %s\n",
                        queue->qnum, validation_errors[fcret]);
        } else {
                queue->connection_id =
                        be64_to_cpu(conn_acc->connectid.connection_id);
                set_bit(NVME_FC_Q_CONNECTED, &queue->flags);
        }

out_free_buffer:
        kfree(lsop);
out_no_memory:
        if (ret)
                dev_err(ctrl->dev,
                        "queue %d connect command failed (%d).\n",
                        queue->qnum, ret);
        return ret;
}

static void
nvme_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
{
        struct nvmefc_ls_req_op *lsop = ls_req_to_lsop(lsreq);

        __nvme_fc_finish_ls_req(lsop);

        /* fc-nvme initiator doesn't care about success or failure of cmd */

        kfree(lsop);
}

/*
 * This routine sends a FC-NVME LS to disconnect (aka terminate)
 * the FC-NVME Association. Terminating the association also
 * terminates the FC-NVME connections (per queue, both admin and io
 * queues) that are part of the association. E.g. things are torn
 * down, and the related FC-NVME Association ID and Connection IDs
 * become invalid.
 *
 * The behavior of the fc-nvme initiator is such that its
 * understanding of the association and connections will implicitly
 * be torn down. The action is implicit as it may be due to a loss of
 * connectivity with the fc-nvme target, so you may never get a
 * response even if you tried. As such, the action of this routine
 * is to asynchronously send the LS, ignore any results of the LS, and
 * continue on with terminating the association. If the fc-nvme target
 * is present and receives the LS, it too can tear down.
 */
static void
nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)
{
        struct fcnvme_ls_disconnect_rqst *discon_rqst;
        struct fcnvme_ls_disconnect_acc *discon_acc;
        struct nvmefc_ls_req_op *lsop;
        struct nvmefc_ls_req *lsreq;
        int ret;

        lsop = kzalloc((sizeof(*lsop) +
                        ctrl->lport->ops->lsrqst_priv_sz +
                        sizeof(*discon_rqst) + sizeof(*discon_acc)),
                        GFP_KERNEL);
        if (!lsop)
                /* couldn't send it... too bad */
                return;

        lsreq = &lsop->ls_req;

        lsreq->private = (void *)&lsop[1];
        discon_rqst = (struct fcnvme_ls_disconnect_rqst *)
                        (lsreq->private + ctrl->lport->ops->lsrqst_priv_sz);
        discon_acc = (struct fcnvme_ls_disconnect_acc *)&discon_rqst[1];

        discon_rqst->w0.ls_cmd = FCNVME_LS_DISCONNECT;
        discon_rqst->desc_list_len = cpu_to_be32(
                                sizeof(struct fcnvme_lsdesc_assoc_id) +
                                sizeof(struct fcnvme_lsdesc_disconn_cmd));

        discon_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
        discon_rqst->associd.desc_len =
                        fcnvme_lsdesc_len(
                                sizeof(struct fcnvme_lsdesc_assoc_id));

        discon_rqst->associd.association_id = cpu_to_be64(ctrl->association_id);

        discon_rqst->discon_cmd.desc_tag = cpu_to_be32(
                                                FCNVME_LSDESC_DISCONN_CMD);
        discon_rqst->discon_cmd.desc_len =
                        fcnvme_lsdesc_len(
                                sizeof(struct fcnvme_lsdesc_disconn_cmd));
        discon_rqst->discon_cmd.scope = FCNVME_DISCONN_ASSOCIATION;
        discon_rqst->discon_cmd.id = cpu_to_be64(ctrl->association_id);

        lsreq->rqstaddr = discon_rqst;
        lsreq->rqstlen = sizeof(*discon_rqst);
        lsreq->rspaddr = discon_acc;
        lsreq->rsplen = sizeof(*discon_acc);
        lsreq->timeout = NVME_FC_CONNECT_TIMEOUT_SEC;

        ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop,
                                nvme_fc_disconnect_assoc_done);
        if (ret)
                kfree(lsop);

        /* the only meaningful part of terminating the association */
        ctrl->association_id = 0;
}


/* *********************** NVME Ctrl Routines **************************** */

static void __nvme_fc_final_op_cleanup(struct request *rq);

static int
nvme_fc_reinit_request(void *data, struct request *rq)
{
        struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
        struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;

        memset(cmdiu, 0, sizeof(*cmdiu));
        cmdiu->scsi_id = NVME_CMD_SCSI_ID;
        cmdiu->fc_id = NVME_CMD_FC_ID;
        cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));
        memset(&op->rsp_iu, 0, sizeof(op->rsp_iu));

        return 0;
}

static void
__nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl,
                struct nvme_fc_fcp_op *op)
{
        fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma,
                                sizeof(op->rsp_iu), DMA_FROM_DEVICE);
        fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma,
                                sizeof(op->cmd_iu), DMA_TO_DEVICE);

        atomic_set(&op->state, FCPOP_STATE_UNINIT);
}

static void
nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
                unsigned int hctx_idx)
{
        struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);

        return __nvme_fc_exit_request(set->driver_data, op);
}

static int
__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
{
        int state;

        state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
        if (state != FCPOP_STATE_ACTIVE) {
                atomic_set(&op->state, state);
                return -ECANCELED;
        }

        ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
                                        &ctrl->rport->remoteport,
                                        op->queue->lldd_handle,
                                        &op->fcp_req);

        return 0;
}

static void
nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
{
        struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
        unsigned long flags;
        int i, ret;

        for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
                if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE)
                        continue;

                spin_lock_irqsave(&ctrl->lock, flags);
                if (ctrl->flags & FCCTRL_TERMIO) {
                        ctrl->iocnt++;
                        aen_op->flags |= FCOP_FLAGS_TERMIO;
                }
                spin_unlock_irqrestore(&ctrl->lock, flags);

                ret = __nvme_fc_abort_op(ctrl, aen_op);
                if (ret) {
                        /*
                         * if __nvme_fc_abort_op failed the io wasn't
                         * active. Thus this call path is running in
                         * parallel to the io complete. Treat as non-error.
                         */

                        /* back out the flags/counters */
                        spin_lock_irqsave(&ctrl->lock, flags);
                        if (ctrl->flags & FCCTRL_TERMIO)
                                ctrl->iocnt--;
                        aen_op->flags &= ~FCOP_FLAGS_TERMIO;
                        spin_unlock_irqrestore(&ctrl->lock, flags);
                        return;
                }
        }
}

static inline int
__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
                struct nvme_fc_fcp_op *op)
{
        unsigned long flags;
        bool complete_rq = false;

        spin_lock_irqsave(&ctrl->lock, flags);
        if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
                if (ctrl->flags & FCCTRL_TERMIO)
                        ctrl->iocnt--;
        }
        if (op->flags & FCOP_FLAGS_RELEASED)
                complete_rq = true;
        else
                op->flags |= FCOP_FLAGS_COMPLETE;
        spin_unlock_irqrestore(&ctrl->lock, flags);

        return complete_rq;
}

static void
nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
{
        struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req);
        struct request *rq = op->rq;
        struct nvmefc_fcp_req *freq = &op->fcp_req;
        struct nvme_fc_ctrl *ctrl = op->ctrl;
        struct nvme_fc_queue *queue = op->queue;
        struct nvme_completion *cqe = &op->rsp_iu.cqe;
        struct nvme_command *sqe = &op->cmd_iu.sqe;
        __le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
        union nvme_result result;
        bool complete_rq;

        /*
         * WARNING:
         * The current linux implementation of a nvme controller
         * allocates a single tag set for all io queues and sizes
         * the io queues to fully hold all possible tags. Thus, the
         * implementation does not reference or care about the sqhd
         * value as it never needs to use the sqhd/sqtail pointers
         * for submission pacing.
         *
         * This affects the FC-NVME implementation in two ways:
         * 1) As the value doesn't matter, we don't need to waste
         * cycles extracting it from ERSPs and stamping it in the
         * cases where the transport fabricates CQEs on successful
         * completions.
         * 2) The FC-NVME implementation requires that delivery of
         * ERSP completions are to go back to the nvme layer in order
         * relative to the rsn, such that the sqhd value will always
         * be "in order" for the nvme layer. As the nvme layer in
         * linux doesn't care about sqhd, there's no need to return
         * them in order.
         *
         * Additionally:
         * As the core nvme layer in linux currently does not look at
         * every field in the cqe - in cases where the FC transport must
         * fabricate a CQE, the following fields will not be set as they
         * are not referenced:
         * cqe.sqid, cqe.sqhd, cqe.command_id
         */

        fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
                sizeof(op->rsp_iu), DMA_FROM_DEVICE);

        if (atomic_read(&op->state) == FCPOP_STATE_ABORTED)
                status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
        else if (freq->status)
                status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);

        /*
         * For the linux implementation, if we have an unsuccessful
         * status, the blk-mq layer can typically be called with the
         * non-zero status and the content of the cqe isn't important.
         */
        if (status)
                goto done;

        /*
         * command completed successfully relative to the wire
         * protocol. However, validate anything received and
         * extract the status and result from the cqe (create it
         * where necessary).
         */

        switch (freq->rcv_rsplen) {

        case 0:
        case NVME_FC_SIZEOF_ZEROS_RSP:
                /*
                 * No response payload or 12 bytes of payload (which
                 * should all be zeros) are considered successful and
                 * no payload in the CQE by the transport.
                 */
                if (freq->transferred_length !=
                        be32_to_cpu(op->cmd_iu.data_len)) {
                        status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
                        goto done;
                }
                result.u64 = 0;
                break;

        case sizeof(struct nvme_fc_ersp_iu):
                /*
                 * The ERSP IU contains a full completion with CQE.
                 * Validate ERSP IU and look at cqe.
                 */
                if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) !=
                                        (freq->rcv_rsplen / 4) ||
                             be32_to_cpu(op->rsp_iu.xfrd_len) !=
                                        freq->transferred_length ||
                             op->rsp_iu.status_code ||
                             sqe->common.command_id != cqe->command_id)) {
                        status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
                        goto done;
                }
                result = cqe->result;
                status = cqe->status;
                break;

        default:
                status = cpu_to_le16(NVME_SC_FC_TRANSPORT_ERROR << 1);
                goto done;
        }

done:
        if (op->flags & FCOP_FLAGS_AEN) {
                nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
                complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
                atomic_set(&op->state, FCPOP_STATE_IDLE);
                op->flags = FCOP_FLAGS_AEN; /* clear other flags */
                nvme_fc_ctrl_put(ctrl);
                return;
        }

        complete_rq = __nvme_fc_fcpop_chk_teardowns(ctrl, op);
        if (!complete_rq) {
                if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
                        status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
                        if (blk_queue_dying(rq->q))
                                status |= cpu_to_le16(NVME_SC_DNR << 1);
                }
                nvme_end_request(rq, status, result);
        } else
                __nvme_fc_final_op_cleanup(rq);
}

static int
__nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
                struct nvme_fc_queue *queue, struct nvme_fc_fcp_op *op,
                struct request *rq, u32 rqno)
{
        struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
        int ret = 0;

        memset(op, 0, sizeof(*op));
        op->fcp_req.cmdaddr = &op->cmd_iu;
        op->fcp_req.cmdlen = sizeof(op->cmd_iu);
        op->fcp_req.rspaddr = &op->rsp_iu;
        op->fcp_req.rsplen = sizeof(op->rsp_iu);
        op->fcp_req.done = nvme_fc_fcpio_done;
        op->fcp_req.first_sgl = (struct scatterlist *)&op[1];
        op->fcp_req.private = &op->fcp_req.first_sgl[SG_CHUNK_SIZE];
        op->ctrl = ctrl;
        op->queue = queue;
        op->rq = rq;
        op->rqno = rqno;

        cmdiu->scsi_id = NVME_CMD_SCSI_ID;
        cmdiu->fc_id = NVME_CMD_FC_ID;
        cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32));

        op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev,
                                &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE);
        if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
                dev_err(ctrl->dev,
                        "FCP Op failed - cmdiu dma mapping failed.\n");
                ret = -EFAULT;
                goto out_on_error;
        }

        op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev,
                                &op->rsp_iu, sizeof(op->rsp_iu),
                                DMA_FROM_DEVICE);
        if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
                dev_err(ctrl->dev,
                        "FCP Op failed - rspiu dma mapping failed.\n");
                ret = -EFAULT;
  1190. }
  1191. atomic_set(&op->state, FCPOP_STATE_IDLE);
  1192. out_on_error:
  1193. return ret;
  1194. }

static int
nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_fc_ctrl *ctrl = set->driver_data;
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_queue *queue = &ctrl->queues[hctx_idx+1];

	return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
}

static int
nvme_fc_init_admin_request(struct blk_mq_tag_set *set, struct request *rq,
		unsigned int hctx_idx, unsigned int numa_node)
{
	struct nvme_fc_ctrl *ctrl = set->driver_data;
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_queue *queue = &ctrl->queues[0];

	return __nvme_fc_init_request(ctrl, queue, op, rq, queue->rqcnt++);
}

static int
nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	struct nvme_fc_cmd_iu *cmdiu;
	struct nvme_command *sqe;
	void *private;
	int i, ret;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
		private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz,
						GFP_KERNEL);
		if (!private)
			return -ENOMEM;

		cmdiu = &aen_op->cmd_iu;
		sqe = &cmdiu->sqe;
		ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0],
				aen_op, (struct request *)NULL,
				(AEN_CMDID_BASE + i));
		if (ret) {
			kfree(private);
			return ret;
		}

		aen_op->flags = FCOP_FLAGS_AEN;
		aen_op->fcp_req.first_sgl = NULL; /* no sg list */
		aen_op->fcp_req.private = private;

		memset(sqe, 0, sizeof(*sqe));
		sqe->common.opcode = nvme_admin_async_event;
		/* Note: core layer may overwrite the sqe.command_id value */
		sqe->common.command_id = AEN_CMDID_BASE + i;
	}
	return 0;
}
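
/*
 * AEN ops deliberately differ from normal FCP ops: they have no
 * struct request (rq == NULL), no sg list, and their LLDD private
 * area is kzalloc'ed here rather than living in a blk-mq PDU. They
 * are issued later through nvme_fc_submit_async_event() and torn
 * down by nvme_fc_term_aen_ops() below.
 */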

static void
nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op;
	int i;

	aen_op = ctrl->aen_ops;
	for (i = 0; i < NVME_FC_NR_AEN_COMMANDS; i++, aen_op++) {
		if (!aen_op->fcp_req.private)
			continue;

		__nvme_fc_exit_request(ctrl, aen_op);

		kfree(aen_op->fcp_req.private);
		aen_op->fcp_req.private = NULL;
	}
}

static inline void
__nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl,
		unsigned int qidx)
{
	struct nvme_fc_queue *queue = &ctrl->queues[qidx];

	hctx->driver_data = queue;
	queue->hctx = hctx;
}

static int
nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1);

	return 0;
}

static int
nvme_fc_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_fc_ctrl *ctrl = data;

	__nvme_fc_init_hctx(hctx, ctrl, hctx_idx);

	return 0;
}

static void
nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx, size_t queue_size)
{
	struct nvme_fc_queue *queue;

	queue = &ctrl->queues[idx];
	memset(queue, 0, sizeof(*queue));
	queue->ctrl = ctrl;
	queue->qnum = idx;
	atomic_set(&queue->csn, 1);
	queue->dev = ctrl->dev;

	if (idx > 0)
		queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16;
	else
		queue->cmnd_capsule_len = sizeof(struct nvme_command);

	queue->queue_size = queue_size;

	/*
	 * Considered whether we should allocate buffers for all SQEs
	 * and CQEs and dma map them - mapping their respective entries
	 * into the request structures (kernel vm addr and dma address)
	 * thus the driver could use the buffers/mappings directly.
	 * It only makes sense if the LLDD would use them for its
	 * messaging api. It's very unlikely most adapter api's would use
	 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload
	 * structures were used instead.
	 */
}

/*
 * This routine terminates a queue at the transport level.
 * The transport has already ensured that all outstanding ios on
 * the queue have been terminated.
 * The transport will send a Disconnect LS request to terminate
 * the queue's connection. Termination of the admin queue will also
 * terminate the association at the target.
 */
static void
nvme_fc_free_queue(struct nvme_fc_queue *queue)
{
	if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
		return;

	/*
	 * Current implementation never disconnects a single queue.
	 * It always terminates a whole association. So there is never
	 * a disconnect(queue) LS sent to the target.
	 */

	queue->connection_id = 0;
	clear_bit(NVME_FC_Q_CONNECTED, &queue->flags);
}

static void
__nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx)
{
	if (ctrl->lport->ops->delete_queue)
		ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx,
				queue->lldd_handle);
	queue->lldd_handle = NULL;
}

static void
nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->queue_count; i++)
		nvme_fc_free_queue(&ctrl->queues[i]);
}

static int
__nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl,
	struct nvme_fc_queue *queue, unsigned int qidx, u16 qsize)
{
	int ret = 0;

	queue->lldd_handle = NULL;
	if (ctrl->lport->ops->create_queue)
		ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport,
				qidx, qsize, &queue->lldd_handle);

	return ret;
}
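
/*
 * A minimal sketch of the LLDD side of the create_queue hook used
 * above (hypothetical driver; the lldd_* names are illustrative only):
 *
 *	static int lldd_create_queue(struct nvme_fc_local_port *localport,
 *			unsigned int qidx, u16 qsize, void **handle)
 *	{
 *		struct lldd_hw_queue *hwq;
 *
 *		hwq = lldd_alloc_hw_queue(localport, qidx, qsize);
 *		if (!hwq)
 *			return -ENOMEM;
 *		*handle = hwq;		// stored in queue->lldd_handle
 *		return 0;
 *	}
 *
 * The transport passes the returned handle back on every fcp_io()
 * and delete_queue() call for that queue.
 */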

static void
nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_queue *queue = &ctrl->queues[ctrl->queue_count - 1];
	int i;

	for (i = ctrl->queue_count - 1; i >= 1; i--, queue--)
		__nvme_fc_delete_hw_queue(ctrl, queue, i);
}

static int
nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	struct nvme_fc_queue *queue = &ctrl->queues[1];
	int i, ret;

	for (i = 1; i < ctrl->queue_count; i++, queue++) {
		ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
		if (ret)
			goto delete_queues;
	}

	return 0;

delete_queues:
	for (; i >= 0; i--)
		__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
	return ret;
}

static int
nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
	int i, ret = 0;

	for (i = 1; i < ctrl->queue_count; i++) {
		ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize,
					(qsize / 5));
		if (ret)
			break;
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
		if (ret)
			break;
	}

	return ret;
}

static void
nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->queue_count; i++)
		nvme_fc_init_queue(ctrl, i, ctrl->ctrl.sqsize);
}

static void
nvme_fc_ctrl_free(struct kref *ref)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(ref, struct nvme_fc_ctrl, ref);
	unsigned long flags;

	if (ctrl->ctrl.tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}

	/* remove from rport list */
	spin_lock_irqsave(&ctrl->rport->lock, flags);
	list_del(&ctrl->ctrl_list);
	spin_unlock_irqrestore(&ctrl->rport->lock, flags);

	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);

	kfree(ctrl->queues);

	put_device(ctrl->dev);
	nvme_fc_rport_put(ctrl->rport);

	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
	if (ctrl->ctrl.opts)
		nvmf_free_options(ctrl->ctrl.opts);
	kfree(ctrl);
}

static void
nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl)
{
	kref_put(&ctrl->ref, nvme_fc_ctrl_free);
}

static int
nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl)
{
	return kref_get_unless_zero(&ctrl->ref);
}

/*
 * All accesses from nvme core layer done - can now free the
 * controller. Called after last nvme_put_ctrl() call
 */
static void
nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

	WARN_ON(nctrl != &ctrl->ctrl);

	nvme_fc_ctrl_put(ctrl);
}

static void
nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
{
	dev_warn(ctrl->ctrl.device,
		"NVME-FC{%d}: transport association error detected: %s\n",
		ctrl->cnum, errmsg);
	dev_warn(ctrl->ctrl.device,
		"NVME-FC{%d}: resetting controller\n", ctrl->cnum);

	/* stop the queues on error, cleanup is in reset thread */
	if (ctrl->queue_count > 1)
		nvme_stop_queues(&ctrl->ctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: error_recovery: Couldn't change state "
			"to RECONNECTING\n", ctrl->cnum);
		return;
	}

	if (!queue_work(nvme_fc_wq, &ctrl->reset_work))
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: error_recovery: Failed to schedule "
			"reset work\n", ctrl->cnum);
}

static enum blk_eh_timer_return
nvme_fc_timeout(struct request *rq, bool reserved)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	int ret;

	if (reserved)
		return BLK_EH_RESET_TIMER;

	ret = __nvme_fc_abort_op(ctrl, op);
	if (ret)
		/* io wasn't active to abort: consider it done */
		return BLK_EH_HANDLED;

	/*
	 * we can't individually ABTS an io without affecting the queue,
	 * thus killing the queue, and thus the association.
	 * So resolve by performing a controller reset, which will stop
	 * the host/io stack, terminate the association on the link,
	 * and recreate an association on the link.
	 */
	nvme_fc_error_recovery(ctrl, "io timeout error");

	return BLK_EH_HANDLED;
}

static int
nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{
	struct nvmefc_fcp_req *freq = &op->fcp_req;
	enum dma_data_direction dir;
	int ret;

	freq->sg_cnt = 0;

	if (!blk_rq_payload_bytes(rq))
		return 0;

	freq->sg_table.sgl = freq->first_sgl;
	ret = sg_alloc_table_chained(&freq->sg_table,
			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
	if (ret)
		return -ENOMEM;

	op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
	WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
	dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
				op->nents, dir);
	if (unlikely(freq->sg_cnt <= 0)) {
		sg_free_table_chained(&freq->sg_table, true);
		freq->sg_cnt = 0;
		return -EFAULT;
	}

	/*
	 * TODO: blk_integrity_rq(rq) for DIF
	 */
	return 0;
}

static void
nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
		struct nvme_fc_fcp_op *op)
{
	struct nvmefc_fcp_req *freq = &op->fcp_req;

	if (!freq->sg_cnt)
		return;

	fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents,
				((rq_data_dir(rq) == WRITE) ?
					DMA_TO_DEVICE : DMA_FROM_DEVICE));

	nvme_cleanup_cmd(rq);

	sg_free_table_chained(&freq->sg_table, true);

	freq->sg_cnt = 0;
}

/*
 * In FC, the queue is a logical thing. At transport connect, the target
 * creates its "queue" and returns a handle that is to be given to the
 * target whenever it posts something to the corresponding SQ. When an
 * SQE is sent on a SQ, FC effectively considers the SQE, or rather the
 * command contained within the SQE, an io, and assigns a FC exchange
 * to it. The SQE and the associated SQ handle are sent in the initial
 * CMD IU sent on the exchange. All transfers relative to the io occur
 * as part of the exchange. The CQE is the last thing for the io,
 * which is transferred (explicitly or implicitly) with the RSP IU
 * sent on the exchange. After the CQE is received, the FC exchange is
 * terminated and the exchange may be used on a different io.
 *
 * The transport to LLDD api has the transport making a request for a
 * new fcp io request to the LLDD. The LLDD then allocates a FC exchange
 * resource and transfers the command. The LLDD will then process all
 * steps to complete the io. Upon completion, the transport done routine
 * is called.
 *
 * So - while the operation is outstanding to the LLDD, there is a link
 * level FC exchange resource that is also outstanding. This must be
 * considered in all cleanup operations.
 */
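
/*
 * Putting the above together, the happy-path flow for one io looks
 * roughly like this (illustrative sketch, not additional code):
 *
 *	nvme_fc_queue_rq()
 *	  -> nvme_fc_start_fcp_op()	format CMD IU, map data,
 *					mark op FCPOP_STATE_ACTIVE
 *	    -> lport->ops->fcp_io()	LLDD allocates the FC exchange
 *					and sends the CMD IU
 *	  ... data transfer runs on the exchange ...
 *	LLDD -> op->fcp_req.done	i.e. nvme_fc_fcpio_done(), which
 *					validates the RSP IU / CQE and
 *					completes the block request
 */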

static int
nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
	struct nvme_fc_fcp_op *op, u32 data_len,
	enum nvmefc_fcp_datadir io_dir)
{
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	struct nvme_command *sqe = &cmdiu->sqe;
	u32 csn;
	int ret;

	/*
	 * before attempting to send the io, check to see if we believe
	 * the target device is present
	 */
	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
		return BLK_MQ_RQ_QUEUE_ERROR;

	if (!nvme_fc_ctrl_get(ctrl))
		return BLK_MQ_RQ_QUEUE_ERROR;

	/* format the FC-NVME CMD IU and fcp_req */
	cmdiu->connection_id = cpu_to_be64(queue->connection_id);
	csn = atomic_inc_return(&queue->csn);
	cmdiu->csn = cpu_to_be32(csn);
	cmdiu->data_len = cpu_to_be32(data_len);
	switch (io_dir) {
	case NVMEFC_FCP_WRITE:
		cmdiu->flags = FCNVME_CMD_FLAGS_WRITE;
		break;
	case NVMEFC_FCP_READ:
		cmdiu->flags = FCNVME_CMD_FLAGS_READ;
		break;
	case NVMEFC_FCP_NODATA:
		cmdiu->flags = 0;
		break;
	}
	op->fcp_req.payload_length = data_len;
	op->fcp_req.io_dir = io_dir;
	op->fcp_req.transferred_length = 0;
	op->fcp_req.rcv_rsplen = 0;
	op->fcp_req.status = NVME_SC_SUCCESS;
	op->fcp_req.sqid = cpu_to_le16(queue->qnum);

	/*
	 * validate per fabric rules, set fields mandated by fabric spec
	 * as well as those by FC-NVME spec.
	 */
	WARN_ON_ONCE(sqe->common.metadata);
	WARN_ON_ONCE(sqe->common.dptr.prp1);
	WARN_ON_ONCE(sqe->common.dptr.prp2);
	sqe->common.flags |= NVME_CMD_SGL_METABUF;

	/*
	 * format SQE DPTR field per FC-NVME rules
	 * type=data block descr; subtype=offset;
	 * offset is currently 0.
	 */
	sqe->rw.dptr.sgl.type = NVME_SGL_FMT_OFFSET;
	sqe->rw.dptr.sgl.length = cpu_to_le32(data_len);
	sqe->rw.dptr.sgl.addr = 0;

	if (!(op->flags & FCOP_FLAGS_AEN)) {
		ret = nvme_fc_map_data(ctrl, op->rq, op);
		if (ret < 0) {
			nvme_cleanup_cmd(op->rq);
			nvme_fc_ctrl_put(ctrl);
			return (ret == -ENOMEM || ret == -EAGAIN) ?
				BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
		}
	}

	fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma,
				  sizeof(op->cmd_iu), DMA_TO_DEVICE);

	atomic_set(&op->state, FCPOP_STATE_ACTIVE);

	if (!(op->flags & FCOP_FLAGS_AEN))
		blk_mq_start_request(op->rq);

	ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport,
					&ctrl->rport->remoteport,
					queue->lldd_handle, &op->fcp_req);

	if (ret) {
		if (op->rq) {			/* normal request */
			nvme_fc_unmap_data(ctrl, op->rq, op);
			nvme_cleanup_cmd(op->rq);
		}
		/* else - aen. no cleanup needed */

		nvme_fc_ctrl_put(ctrl);

		if (ret != -EBUSY)
			return BLK_MQ_RQ_QUEUE_ERROR;

		if (op->rq) {
			blk_mq_stop_hw_queues(op->rq->q);
			blk_mq_delay_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
		}

		return BLK_MQ_RQ_QUEUE_BUSY;
	}

	return BLK_MQ_RQ_QUEUE_OK;
}
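
/*
 * Return value mapping used above, for reference:
 *   BLK_MQ_RQ_QUEUE_ERROR - rport offline, no ctrl reference, hard
 *                           data-mapping failure, or an LLDD error
 *                           other than -EBUSY: the request is failed.
 *   BLK_MQ_RQ_QUEUE_BUSY  - transient condition (-ENOMEM/-EAGAIN from
 *                           mapping, or -EBUSY from the LLDD): blk-mq
 *                           will requeue, after the hw queue delay
 *                           when applicable.
 *   BLK_MQ_RQ_QUEUE_OK    - io handed to the LLDD; completion arrives
 *                           later via nvme_fc_fcpio_done().
 */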

static int
nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
			const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_fc_queue *queue = hctx->driver_data;
	struct nvme_fc_ctrl *ctrl = queue->ctrl;
	struct request *rq = bd->rq;
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu;
	struct nvme_command *sqe = &cmdiu->sqe;
	enum nvmefc_fcp_datadir io_dir;
	u32 data_len;
	int ret;

	ret = nvme_setup_cmd(ns, rq, sqe);
	if (ret)
		return ret;

	data_len = blk_rq_payload_bytes(rq);
	if (data_len)
		io_dir = ((rq_data_dir(rq) == WRITE) ?
					NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
	else
		io_dir = NVMEFC_FCP_NODATA;

	return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir);
}

static struct blk_mq_tags *
nvme_fc_tagset(struct nvme_fc_queue *queue)
{
	if (queue->qnum == 0)
		return queue->ctrl->admin_tag_set.tags[queue->qnum];

	return queue->ctrl->tag_set.tags[queue->qnum - 1];
}

static int
nvme_fc_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
{
	struct nvme_fc_queue *queue = hctx->driver_data;
	struct nvme_fc_ctrl *ctrl = queue->ctrl;
	struct request *req;
	struct nvme_fc_fcp_op *op;

	req = blk_mq_tag_to_rq(nvme_fc_tagset(queue), tag);
	if (!req)
		return 0;

	op = blk_mq_rq_to_pdu(req);

	if ((atomic_read(&op->state) == FCPOP_STATE_ACTIVE) &&
		 (ctrl->lport->ops->poll_queue))
		ctrl->lport->ops->poll_queue(&ctrl->lport->localport,
						 queue->lldd_handle);

	return ((atomic_read(&op->state) != FCPOP_STATE_ACTIVE));
}
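
/*
 * Poll semantics: the LLDD's optional poll_queue hook is kicked only
 * while the op is still ACTIVE; completion is then reported by
 * re-reading the op state, returning nonzero once the op has left
 * FCPOP_STATE_ACTIVE (i.e. its done handler has run).
 */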

static void
nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg);
	struct nvme_fc_fcp_op *aen_op;
	unsigned long flags;
	bool terminating = false;
	int ret;

	if (aer_idx >= NVME_FC_NR_AEN_COMMANDS)
		return;

	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->flags & FCCTRL_TERMIO)
		terminating = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (terminating)
		return;

	aen_op = &ctrl->aen_ops[aer_idx];

	ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0,
					NVMEFC_FCP_NODATA);
	if (ret)
		dev_err(ctrl->ctrl.device,
			"failed async event work [%d]\n", aer_idx);
}

static void
__nvme_fc_final_op_cleanup(struct request *rq)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;

	atomic_set(&op->state, FCPOP_STATE_IDLE);
	op->flags &= ~(FCOP_FLAGS_TERMIO | FCOP_FLAGS_RELEASED |
			FCOP_FLAGS_COMPLETE);

	nvme_cleanup_cmd(rq);
	nvme_fc_unmap_data(ctrl, rq, op);
	nvme_complete_rq(rq);
	nvme_fc_ctrl_put(ctrl);
}

static void
nvme_fc_complete_rq(struct request *rq)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;
	unsigned long flags;
	bool completed = false;

	/*
	 * the core layer, on controller resets after calling
	 * nvme_shutdown_ctrl(), calls complete_rq without our
	 * calling blk_mq_complete_request(), thus there may still
	 * be live i/o outstanding with the LLDD. Means transport has
	 * to track complete calls vs fcpio_done calls to know what
	 * path to take on completes and dones.
	 */
	spin_lock_irqsave(&ctrl->lock, flags);
	if (op->flags & FCOP_FLAGS_COMPLETE)
		completed = true;
	else
		op->flags |= FCOP_FLAGS_RELEASED;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	if (completed)
		__nvme_fc_final_op_cleanup(rq);
}
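
/*
 * The RELEASED/COMPLETE flag pair above implements a two-sided
 * handshake: whichever of the complete_rq path and the fcpio_done
 * path runs second sees the other side's flag and performs the final
 * cleanup, so the op is unmapped and released exactly once regardless
 * of ordering.
 */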

/*
 * This routine is used by the transport when it needs to find active
 * io on a queue that is to be terminated. The transport uses
 * blk_mq_tagset_busy_iter() to find the busy requests, which then
 * invokes this routine to kill them on a 1 by 1 basis.
 *
 * As FC allocates FC exchange for each io, the transport must contact
 * the LLDD to terminate the exchange, thus releasing the FC exchange.
 * After terminating the exchange the LLDD will call the transport's
 * normal io done path for the request, but it will have an aborted
 * status. The done path will return the io request back to the block
 * layer with an error status.
 */
static void
nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
{
	struct nvme_ctrl *nctrl = data;
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
	unsigned long flags;
	int status;

	if (!blk_mq_request_started(req))
		return;

	spin_lock_irqsave(&ctrl->lock, flags);
	if (ctrl->flags & FCCTRL_TERMIO) {
		ctrl->iocnt++;
		op->flags |= FCOP_FLAGS_TERMIO;
	}
	spin_unlock_irqrestore(&ctrl->lock, flags);

	status = __nvme_fc_abort_op(ctrl, op);
	if (status) {
		/*
		 * if __nvme_fc_abort_op failed the io wasn't
		 * active. Thus this call path is running in
		 * parallel to the io complete. Treat as non-error.
		 */

		/* back out the flags/counters */
		spin_lock_irqsave(&ctrl->lock, flags);
		if (ctrl->flags & FCCTRL_TERMIO)
			ctrl->iocnt--;
		op->flags &= ~FCOP_FLAGS_TERMIO;
		spin_unlock_irqrestore(&ctrl->lock, flags);
		return;
	}
}

static const struct blk_mq_ops nvme_fc_mq_ops = {
	.queue_rq	= nvme_fc_queue_rq,
	.complete	= nvme_fc_complete_rq,
	.init_request	= nvme_fc_init_request,
	.exit_request	= nvme_fc_exit_request,
	.reinit_request	= nvme_fc_reinit_request,
	.init_hctx	= nvme_fc_init_hctx,
	.poll		= nvme_fc_poll,
	.timeout	= nvme_fc_timeout,
};

static int
nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	int ret;

	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"set_queue_count failed: %d\n", ret);
		return ret;
	}

	ctrl->queue_count = opts->nr_io_queues + 1;
	if (!opts->nr_io_queues)
		return 0;

	nvme_fc_init_io_queues(ctrl);

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_fc_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
					(SG_CHUNK_SIZE *
						sizeof(struct scatterlist)) +
					ctrl->lport->ops->fcprqst_priv_sz;
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		return ret;

	ctrl->ctrl.tagset = &ctrl->tag_set;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tag_set;
	}

	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
	if (ret)
		goto out_cleanup_blk_queue;

	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
	if (ret)
		goto out_delete_hw_queues;

	return 0;

out_delete_hw_queues:
	nvme_fc_delete_hw_io_queues(ctrl);
out_cleanup_blk_queue:
	nvme_stop_keep_alive(&ctrl->ctrl);
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tag_set:
	blk_mq_free_tag_set(&ctrl->tag_set);
	nvme_fc_free_io_queues(ctrl);

	/* force put free routine to ignore io queues */
	ctrl->ctrl.tagset = NULL;

	return ret;
}

static int
nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	int ret;

	ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues);
	if (ret) {
		dev_info(ctrl->ctrl.device,
			"set_queue_count failed: %d\n", ret);
		return ret;
	}

	/* check for io queues existing */
	if (ctrl->queue_count == 1)
		return 0;

	nvme_fc_init_io_queues(ctrl);

	ret = blk_mq_reinit_tagset(&ctrl->tag_set);
	if (ret)
		goto out_free_io_queues;

	ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
	if (ret)
		goto out_free_io_queues;

	ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.opts->queue_size);
	if (ret)
		goto out_delete_hw_queues;

	return 0;

out_delete_hw_queues:
	nvme_fc_delete_hw_io_queues(ctrl);
out_free_io_queues:
	nvme_fc_free_io_queues(ctrl);
	return ret;
}

/*
 * This routine restarts the controller on the host side, and
 * on the link side, recreates the controller association.
 */
static int
nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	u32 segs;
	int ret;
	bool changed;

	++ctrl->ctrl.opts->nr_reconnects;

	/*
	 * Create the admin queue
	 */

	nvme_fc_init_queue(ctrl, 0, NVME_FC_AQ_BLKMQ_DEPTH);

	ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
				NVME_FC_AQ_BLKMQ_DEPTH);
	if (ret)
		goto out_free_queue;

	ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
				NVME_FC_AQ_BLKMQ_DEPTH,
				(NVME_FC_AQ_BLKMQ_DEPTH / 4));
	if (ret)
		goto out_delete_hw_queue;

	if (ctrl->ctrl.state != NVME_CTRL_NEW)
		blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true);

	ret = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (ret)
		goto out_disconnect_admin_queue;

	/*
	 * Check controller capabilities
	 *
	 * todo:- add code to check if ctrl attributes changed from
	 * prior connection values
	 */

	ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap);
	if (ret) {
		dev_err(ctrl->ctrl.device,
			"prop_get NVME_REG_CAP failed\n");
		goto out_disconnect_admin_queue;
	}

	ctrl->ctrl.sqsize =
		min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize);

	ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap);
	if (ret)
		goto out_disconnect_admin_queue;

	segs = min_t(u32, NVME_FC_MAX_SEGMENTS,
			ctrl->lport->ops->max_sgl_segments);
	ctrl->ctrl.max_hw_sectors = (segs - 1) << (PAGE_SHIFT - 9);

	ret = nvme_init_identify(&ctrl->ctrl);
	if (ret)
		goto out_disconnect_admin_queue;

	/* sanity checks */

	/* FC-NVME does not have other data in the capsule */
	if (ctrl->ctrl.icdoff) {
		dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
				ctrl->ctrl.icdoff);
		goto out_disconnect_admin_queue;
	}

	nvme_start_keep_alive(&ctrl->ctrl);

	/* FC-NVME supports normal SGL Data Block Descriptors */

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, reducing "
			"to ctrl maxcmd\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	ret = nvme_fc_init_aen_ops(ctrl);
	if (ret)
		goto out_term_aen_ops;

	/*
	 * Create the io queues
	 */

	if (ctrl->queue_count > 1) {
		if (ctrl->ctrl.state == NVME_CTRL_NEW)
			ret = nvme_fc_create_io_queues(ctrl);
		else
			ret = nvme_fc_reinit_io_queues(ctrl);
		if (ret)
			goto out_term_aen_ops;
	}

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	ctrl->ctrl.opts->nr_reconnects = 0;

	if (ctrl->queue_count > 1) {
		nvme_start_queues(&ctrl->ctrl);
		nvme_queue_scan(&ctrl->ctrl);
		nvme_queue_async_events(&ctrl->ctrl);
	}

	return 0;	/* Success */

out_term_aen_ops:
	nvme_fc_term_aen_ops(ctrl);
	nvme_stop_keep_alive(&ctrl->ctrl);
out_disconnect_admin_queue:
	/* send a Disconnect(association) LS to fc-nvme target */
	nvme_fc_xmt_disconnect_assoc(ctrl);
out_delete_hw_queue:
	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
out_free_queue:
	nvme_fc_free_queue(&ctrl->queues[0]);

	return ret;
}

/*
 * This routine stops operation of the controller on the host side.
 * On the host os stack side: Admin and IO queues are stopped,
 * outstanding ios on them terminated via FC ABTS.
 * On the link side: the association is terminated.
 */
static void
nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
{
	unsigned long flags;

	nvme_stop_keep_alive(&ctrl->ctrl);

	spin_lock_irqsave(&ctrl->lock, flags);
	ctrl->flags |= FCCTRL_TERMIO;
	ctrl->iocnt = 0;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	/*
	 * If io queues are present, stop them and terminate all outstanding
	 * ios on them. As FC allocates FC exchange for each io, the
	 * transport must contact the LLDD to terminate the exchange,
	 * thus releasing the FC exchange. We use blk_mq_tagset_busy_iter()
	 * to tell us what io's are busy and invoke a transport routine
	 * to kill them with the LLDD. After terminating the exchange
	 * the LLDD will call the transport's normal io done path, but it
	 * will have an aborted status. The done path will return the
	 * io requests back to the block layer as part of normal completions
	 * (but with error status).
	 */
	if (ctrl->queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);
	}

	/*
	 * Other transports, which don't have link-level contexts bound
	 * to sqe's, would try to gracefully shutdown the controller by
	 * writing the registers for shutdown and polling (call
	 * nvme_shutdown_ctrl()). Given a bunch of i/o was potentially
	 * just aborted and we will wait on those contexts, and given
	 * there was no indication of how live the controller is on the
	 * link, don't send more io to create more contexts for the
	 * shutdown. Let the controller fail via keepalive failure if
	 * it's still present.
	 */

	/*
	 * clean up the admin queue. Same thing as above.
	 * use blk_mq_tagset_busy_iter() and the transport routine to
	 * terminate the exchanges.
	 */
	blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_fc_terminate_exchange, &ctrl->ctrl);

	/* kill the aens as they are a separate path */
	nvme_fc_abort_aen_ops(ctrl);

	/* wait for all io that had to be aborted */
	spin_lock_irqsave(&ctrl->lock, flags);
	while (ctrl->iocnt) {
		spin_unlock_irqrestore(&ctrl->lock, flags);
		msleep(1000);
		spin_lock_irqsave(&ctrl->lock, flags);
	}
	ctrl->flags &= ~FCCTRL_TERMIO;
	spin_unlock_irqrestore(&ctrl->lock, flags);

	nvme_fc_term_aen_ops(ctrl);

	/*
	 * send a Disconnect(association) LS to fc-nvme target
	 * Note: could have been sent at top of process, but
	 * cleaner on link traffic if after the aborts complete.
	 * Note: if association doesn't exist, association_id will be 0
	 */
	if (ctrl->association_id)
		nvme_fc_xmt_disconnect_assoc(ctrl);

	if (ctrl->ctrl.tagset) {
		nvme_fc_delete_hw_io_queues(ctrl);
		nvme_fc_free_io_queues(ctrl);
	}

	__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
	nvme_fc_free_queue(&ctrl->queues[0]);
}

static void
nvme_fc_delete_ctrl_work(struct work_struct *work)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(work, struct nvme_fc_ctrl, delete_work);

	cancel_work_sync(&ctrl->reset_work);
	cancel_delayed_work_sync(&ctrl->connect_work);

	/*
	 * kill the association on the link side. this will block
	 * waiting for io to terminate
	 */
	nvme_fc_delete_association(ctrl);

	/*
	 * tear down the controller
	 * After the last reference on the nvme ctrl is removed,
	 * the transport nvme_fc_nvme_ctrl_freed() callback will be
	 * invoked. From there, the transport will tear down its
	 * logical queues and association.
	 */
	nvme_uninit_ctrl(&ctrl->ctrl);

	nvme_put_ctrl(&ctrl->ctrl);
}

static bool
__nvme_fc_schedule_delete_work(struct nvme_fc_ctrl *ctrl)
{
	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
		return true;

	if (!queue_work(nvme_fc_wq, &ctrl->delete_work))
		return true;

	return false;
}

static int
__nvme_fc_del_ctrl(struct nvme_fc_ctrl *ctrl)
{
	return __nvme_fc_schedule_delete_work(ctrl) ? -EBUSY : 0;
}

/*
 * Request from nvme core layer to delete the controller
 */
static int
nvme_fc_del_nvme_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
	int ret;

	if (!kref_get_unless_zero(&ctrl->ctrl.kref))
		return -EBUSY;

	ret = __nvme_fc_del_ctrl(ctrl);

	if (!ret)
		flush_workqueue(nvme_fc_wq);

	nvme_put_ctrl(&ctrl->ctrl);

	return ret;
}

static void
nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
{
	/* If we are resetting/deleting then do nothing */
	if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) {
		WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
			ctrl->ctrl.state == NVME_CTRL_LIVE);
		return;
	}

	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
		ctrl->cnum, status);

	if (nvmf_should_reconnect(&ctrl->ctrl)) {
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: Reconnect attempt in %d seconds.\n",
			ctrl->cnum, ctrl->ctrl.opts->reconnect_delay);
		queue_delayed_work(nvme_fc_wq, &ctrl->connect_work,
				ctrl->ctrl.opts->reconnect_delay * HZ);
	} else {
		dev_warn(ctrl->ctrl.device,
				"NVME-FC{%d}: Max reconnect attempts (%d) "
				"reached. Removing controller\n",
				ctrl->cnum, ctrl->ctrl.opts->nr_reconnects);
		WARN_ON(__nvme_fc_schedule_delete_work(ctrl));
	}
}

static void
nvme_fc_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_fc_ctrl *ctrl =
		container_of(work, struct nvme_fc_ctrl, reset_work);
	int ret;

	/* will block while waiting for io to terminate */
	nvme_fc_delete_association(ctrl);

	ret = nvme_fc_create_association(ctrl);
	if (ret)
		nvme_fc_reconnect_or_delete(ctrl, ret);
	else
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: controller reset complete\n", ctrl->cnum);
}

/*
 * called by the nvme core layer, for sysfs interface that requests
 * a reset of the nvme controller
 */
static int
nvme_fc_reset_nvme_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);

	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: admin requested controller reset\n", ctrl->cnum);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
		return -EBUSY;

	if (!queue_work(nvme_fc_wq, &ctrl->reset_work))
		return -EBUSY;

	flush_work(&ctrl->reset_work);

	return 0;
}

static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
	.name			= "fc",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.reset_ctrl		= nvme_fc_reset_nvme_ctrl,
	.free_ctrl		= nvme_fc_nvme_ctrl_freed,
	.submit_async_event	= nvme_fc_submit_async_event,
	.delete_ctrl		= nvme_fc_del_nvme_ctrl,
	.get_subsysnqn		= nvmf_get_subsysnqn,
	.get_address		= nvmf_get_address,
};

static void
nvme_fc_connect_ctrl_work(struct work_struct *work)
{
	int ret;

	struct nvme_fc_ctrl *ctrl =
			container_of(to_delayed_work(work),
				struct nvme_fc_ctrl, connect_work);

	ret = nvme_fc_create_association(ctrl);
	if (ret)
		nvme_fc_reconnect_or_delete(ctrl, ret);
	else
		dev_info(ctrl->ctrl.device,
			"NVME-FC{%d}: controller reconnect complete\n",
			ctrl->cnum);
}

static const struct blk_mq_ops nvme_fc_admin_mq_ops = {
	.queue_rq	= nvme_fc_queue_rq,
	.complete	= nvme_fc_complete_rq,
	.init_request	= nvme_fc_init_admin_request,
	.exit_request	= nvme_fc_exit_request,
	.reinit_request	= nvme_fc_reinit_request,
	.init_hctx	= nvme_fc_init_admin_hctx,
	.timeout	= nvme_fc_timeout,
};

static struct nvme_ctrl *
nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
	struct nvme_fc_lport *lport, struct nvme_fc_rport *rport)
{
	struct nvme_fc_ctrl *ctrl;
	unsigned long flags;
	int ret, idx;

	if (!(rport->remoteport.port_role &
	    (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
		ret = -EBADR;
		goto out_fail;
	}

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl) {
		ret = -ENOMEM;
		goto out_fail;
	}

	idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_free_ctrl;
	}

	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->ctrl_list);
	ctrl->lport = lport;
	ctrl->rport = rport;
	ctrl->dev = lport->dev;
	ctrl->cnum = idx;

	get_device(ctrl->dev);
	kref_init(&ctrl->ref);

	INIT_WORK(&ctrl->delete_work, nvme_fc_delete_ctrl_work);
	INIT_WORK(&ctrl->reset_work, nvme_fc_reset_ctrl_work);
	INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
	spin_lock_init(&ctrl->lock);

	/* io queue count */
	ctrl->queue_count = min_t(unsigned int,
				opts->nr_io_queues,
				lport->ops->max_hw_queues);
	opts->nr_io_queues = ctrl->queue_count;	/* so opts has valid value */
	ctrl->queue_count++;	/* +1 for admin queue */

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;

	ret = -ENOMEM;
	ctrl->queues = kcalloc(ctrl->queue_count, sizeof(struct nvme_fc_queue),
				GFP_KERNEL);
	if (!ctrl->queues)
		goto out_free_ida;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_FC_AQ_BLKMQ_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_fc_fcp_op) +
					(SG_CHUNK_SIZE *
						sizeof(struct scatterlist)) +
					ctrl->lport->ops->fcprqst_priv_sz;
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;

	ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (ret)
		goto out_free_queues;

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		ret = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_free_admin_tag_set;
	}

	/*
	 * Would have been nice to init io queues tag set as well.
	 * However, we require interaction from the controller
	 * for max io queue count before we can do so.
	 * Defer this to the connect path.
	 */

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
	if (ret)
		goto out_cleanup_admin_q;

	/* at this point, teardown path changes to ref counting on nvme ctrl */

	spin_lock_irqsave(&rport->lock, flags);
	list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
	spin_unlock_irqrestore(&rport->lock, flags);

	ret = nvme_fc_create_association(ctrl);
	if (ret) {
		ctrl->ctrl.opts = NULL;
		/* initiate nvme ctrl ref counting teardown */
		nvme_uninit_ctrl(&ctrl->ctrl);

		/* as we're past the point where we transition to the ref
		 * counting teardown path, if we return a bad pointer here,
		 * the calling routine, thinking it's prior to the
		 * transition, will do an rport put. Since the teardown
		 * path also does a rport put, we do an extra get here
		 * so proper order/teardown happens.
		 */
		nvme_fc_rport_get(rport);

		if (ret > 0)
			ret = -EIO;
		return ERR_PTR(ret);
	}

	kref_get(&ctrl->ctrl.kref);

	dev_info(ctrl->ctrl.device,
		"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
		ctrl->cnum, ctrl->ctrl.opts->subsysnqn);

	return &ctrl->ctrl;

out_cleanup_admin_q:
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_free_admin_tag_set:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_queues:
	kfree(ctrl->queues);
out_free_ida:
	put_device(ctrl->dev);
	ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
out_free_ctrl:
	kfree(ctrl);
out_fail:
	/* exit via here doesn't follow ctrl ref points */
	return ERR_PTR(ret);
}

enum {
	FCT_TRADDR_ERR		= 0,
	FCT_TRADDR_WWNN		= 1 << 0,
	FCT_TRADDR_WWPN		= 1 << 1,
};

struct nvmet_fc_traddr {
	u64	nn;
	u64	pn;
};

static const match_table_t traddr_opt_tokens = {
	{ FCT_TRADDR_WWNN,	"nn-%s" },
	{ FCT_TRADDR_WWPN,	"pn-%s" },
	{ FCT_TRADDR_ERR,	NULL }
};

static int
nvme_fc_parse_address(struct nvmet_fc_traddr *traddr, char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ":\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, traddr_opt_tokens, args);
		switch (token) {
		case FCT_TRADDR_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out;
			}
			traddr->nn = token64;
			break;
		case FCT_TRADDR_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out;
			}
			traddr->pn = token64;
			break;
		default:
			pr_warn("unknown traddr token or missing value '%s'\n",
					p);
			ret = -EINVAL;
			goto out;
		}
	}

out:
	kfree(options);
	return ret;
}
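
/*
 * Example input this parser accepts (the values are illustrative): a
 * colon-separated pair of WWNN/WWPN tokens, e.g.
 *
 *	"nn-0x20000090fa942779:pn-0x10000090fa942779"
 *
 * nvme_fc_create_ctrl() below parses opts->traddr (the remote port)
 * and opts->host_traddr (the local port) in this format and requires
 * both names to be nonzero.
 */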

static struct nvme_ctrl *
nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts)
{
	struct nvme_fc_lport *lport;
	struct nvme_fc_rport *rport;
	struct nvme_ctrl *ctrl;
	struct nvmet_fc_traddr laddr = { 0L, 0L };
	struct nvmet_fc_traddr raddr = { 0L, 0L };
	unsigned long flags;
	int ret;

	ret = nvme_fc_parse_address(&raddr, opts->traddr);
	if (ret || !raddr.nn || !raddr.pn)
		return ERR_PTR(-EINVAL);

	ret = nvme_fc_parse_address(&laddr, opts->host_traddr);
	if (ret || !laddr.nn || !laddr.pn)
		return ERR_PTR(-EINVAL);

	/* find the host and remote ports to connect together */
	spin_lock_irqsave(&nvme_fc_lock, flags);
	list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
		if (lport->localport.node_name != laddr.nn ||
		    lport->localport.port_name != laddr.pn)
			continue;

		list_for_each_entry(rport, &lport->endp_list, endp_list) {
			if (rport->remoteport.node_name != raddr.nn ||
			    rport->remoteport.port_name != raddr.pn)
				continue;

			/* if fail to get reference fall through. Will error */
			if (!nvme_fc_rport_get(rport))
				break;

			spin_unlock_irqrestore(&nvme_fc_lock, flags);

			ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport);
			if (IS_ERR(ctrl))
				nvme_fc_rport_put(rport);
			return ctrl;
		}
	}
	spin_unlock_irqrestore(&nvme_fc_lock, flags);

	return ERR_PTR(-ENOENT);
}

static struct nvmf_transport_ops nvme_fc_transport = {
	.name		= "fc",
	.required_opts	= NVMF_OPT_TRADDR | NVMF_OPT_HOST_TRADDR,
	.allowed_opts	= NVMF_OPT_RECONNECT_DELAY | NVMF_OPT_CTRL_LOSS_TMO,
	.create_ctrl	= nvme_fc_create_ctrl,
};
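
/*
 * From userspace, a controller is typically created through nvme-cli
 * (an illustrative example; the WWNs must match a registered
 * lport/rport pair and the NQN is site-specific):
 *
 *	nvme connect --transport=fc \
 *		--traddr="nn-0x20000090fa942779:pn-0x10000090fa942779" \
 *		--host-traddr="nn-0x20000090fa942775:pn-0x10000090fa942775" \
 *		--nqn=<subsystem NQN>
 *
 * which reaches nvme_fc_create_ctrl() via the fabrics device node.
 */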

static int __init nvme_fc_init_module(void)
{
	int ret;

	nvme_fc_wq = create_workqueue("nvme_fc_wq");
	if (!nvme_fc_wq)
		return -ENOMEM;

	ret = nvmf_register_transport(&nvme_fc_transport);
	if (ret)
		goto err;

	return 0;
err:
	destroy_workqueue(nvme_fc_wq);
	return ret;
}

static void __exit nvme_fc_exit_module(void)
{
	/* sanity check - all lports should be removed */
	if (!list_empty(&nvme_fc_lport_list))
		pr_warn("%s: localport list not empty\n", __func__);

	nvmf_unregister_transport(&nvme_fc_transport);

	destroy_workqueue(nvme_fc_wq);

	ida_destroy(&nvme_fc_local_port_cnt);
	ida_destroy(&nvme_fc_ctrl_cnt);
}

module_init(nvme_fc_init_module);
module_exit(nvme_fc_exit_module);

MODULE_LICENSE("GPL v2");