/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/blk-mq.h>
#include <linux/parser.h>
#include <linux/random.h>
#include <uapi/scsi/fc/fc_fs.h>
#include <uapi/scsi/fc/fc_els.h>

#include "nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


/* *************************** Data Structures/Defines ****************** */


#define NVMET_LS_CTX_COUNT		4

/* for this implementation, assume small single frame rqst/rsp */
#define NVME_FC_MAX_LS_BUFFER_SIZE	2048

struct nvmet_fc_tgtport;
struct nvmet_fc_tgt_assoc;

struct nvmet_fc_ls_iod {
	struct nvmefc_tgt_ls_req	*lsreq;
	struct nvmefc_tgt_fcp_req	*fcpreq;	/* only if RS */

	struct list_head		ls_list;	/* tgtport->ls_list */

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_assoc	*assoc;

	u8				*rqstbuf;
	u8				*rspbuf;
	u16				rqstdatalen;
	dma_addr_t			rspdma;

	struct scatterlist		sg[2];

	struct work_struct		work;
} __aligned(sizeof(unsigned long long));

#define NVMET_FC_MAX_KB_PER_XFR		256

enum nvmet_fcp_datadir {
	NVMET_FCP_NODATA,
	NVMET_FCP_WRITE,
	NVMET_FCP_READ,
	NVMET_FCP_ABORTED,
};

struct nvmet_fc_fcp_iod {
	struct nvmefc_tgt_fcp_req	*fcpreq;

	struct nvme_fc_cmd_iu		cmdiubuf;
	struct nvme_fc_ersp_iu		rspiubuf;
	dma_addr_t			rspdma;
	struct scatterlist		*data_sg;
	struct scatterlist		*next_sg;
	int				data_sg_cnt;
	u32				next_sg_offset;
	u32				total_length;
	u32				offset;
	enum nvmet_fcp_datadir		io_dir;
	bool				active;
	bool				abort;
	spinlock_t			flock;

	struct nvmet_req		req;
	struct work_struct		work;

	struct nvmet_fc_tgtport		*tgtport;
	struct nvmet_fc_tgt_queue	*queue;

	struct list_head		fcp_list;	/* tgtport->fcp_list */
};

struct nvmet_fc_tgtport {
	struct nvmet_fc_target_port	fc_target_port;

	struct list_head		tgt_list;	/* nvmet_fc_target_list */
	struct device			*dev;		/* dev for dma mapping */
	struct nvmet_fc_target_template	*ops;

	struct nvmet_fc_ls_iod		*iod;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct list_head		ls_busylist;
	struct list_head		assoc_list;
	struct ida			assoc_cnt;
	struct nvmet_port		*port;
	struct kref			ref;
};

struct nvmet_fc_tgt_queue {
	bool				ninetypercent;
	u16				qid;
	u16				sqsize;
	u16				ersp_ratio;
	u16				sqhd;
	int				cpu;
	atomic_t			connected;
	atomic_t			sqtail;
	atomic_t			zrspcnt;
	atomic_t			rsn;
	spinlock_t			qlock;
	struct nvmet_port		*port;
	struct nvmet_cq			nvme_cq;
	struct nvmet_sq			nvme_sq;
	struct nvmet_fc_tgt_assoc	*assoc;
	struct nvmet_fc_fcp_iod		*fod;		/* array of fcp_iods */
	struct list_head		fod_list;
	struct workqueue_struct		*work_q;
	struct kref			ref;
} __aligned(sizeof(unsigned long long));

struct nvmet_fc_tgt_assoc {
	u64				association_id;
	u32				a_id;
	struct nvmet_fc_tgtport		*tgtport;
	struct list_head		a_list;
	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES];
	struct kref			ref;
};


static inline int
nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
{
	return (iodptr - iodptr->tgtport->iod);
}

static inline int
nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
{
	return (fodptr - fodptr->queue->fod);
}


/*
 * Association and Connection IDs:
 *
 * Association ID will have random number in upper 6 bytes and zero
 * in lower 2 bytes
 *
 * Connection IDs will be Association ID with QID or'd in lower 2 bytes
 *
 * note: Association ID = Connection ID for queue 0
 */
#define BYTES_FOR_QID			sizeof(u16)
#define BYTES_FOR_QID_SHIFT		(BYTES_FOR_QID * 8)
#define NVMET_FC_QUEUEID_MASK		((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))

static inline u64
nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
{
	return (assoc->association_id | qid);
}

static inline u64
nvmet_fc_getassociationid(u64 connectionid)
{
	return connectionid & ~NVMET_FC_QUEUEID_MASK;
}

static inline u16
nvmet_fc_getqueueid(u64 connectionid)
{
	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
}
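
/*
 * Illustrative sketch (not part of the driver): how the helpers above
 * compose and decompose a connection ID. The association_id value is
 * hypothetical; only its zeroed low 2 bytes matter.
 *
 *	assoc->association_id = 0x1234567890ab0000ULL;
 *	connid = nvmet_fc_makeconnid(assoc, 3);
 *		-> 0x1234567890ab0003
 *	nvmet_fc_getassociationid(connid) -> 0x1234567890ab0000
 *	nvmet_fc_getqueueid(connid)       -> 3
 *	(for qid 0, the connection ID equals the association ID)
 */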

static inline struct nvmet_fc_tgtport *
targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
{
	return container_of(targetport, struct nvmet_fc_tgtport,
			fc_target_port);
}

static inline struct nvmet_fc_fcp_iod *
nvmet_req_to_fod(struct nvmet_req *nvme_req)
{
	return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
}


/* *************************** Globals **************************** */


static DEFINE_SPINLOCK(nvmet_fc_tgtlock);

static LIST_HEAD(nvmet_fc_target_list);
static DEFINE_IDA(nvmet_fc_tgtport_cnt);


static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
static void nvmet_fc_handle_fcp_rqst_work(struct work_struct *work);
static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);


/* *********************** FC-NVME DMA Handling **************************** */

/*
 * The fcloop device passes in a NULL device pointer. Real LLD's will
 * pass in a valid device pointer. If NULL is passed to the dma mapping
 * routines, depending on the platform, it may or may not succeed, and
 * may crash.
 *
 * As such:
 * Wrap all the dma routines and check the dev pointer.
 *
 * On simple mappings (those that return just a dma address), we'll
 * noop them, returning a dma address of 0.
 *
 * On more complex mappings (dma_map_sg), a pseudo routine fills
 * in the scatter list, setting all dma addresses to 0.
 */
static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}

static inline void
fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_single(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void
fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	if (dev)
		dma_sync_single_for_device(dev, addr, size, dir);
}

/* pseudo dma_map_sg call */
static int
fc_map_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		s->dma_address = 0L;
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
	}
	return nents;
}

static inline int
fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
}

static inline void
fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	if (dev)
		dma_unmap_sg(dev, sg, nents, dir);
}
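
/*
 * Illustrative sketch (not part of the driver): with a NULL dev, as
 * fcloop passes, the wrappers above degrade to harmless no-ops:
 *
 *	dma = fc_dma_map_single(NULL, buf, len, DMA_TO_DEVICE);
 *		-> dma == 0, no platform dma code is touched
 *	fc_dma_mapping_error(NULL, dma) -> 0 (never an error)
 *	fc_dma_map_sg(NULL, sg, nents, dir)
 *		-> fc_map_sg() sets all dma addresses to 0, returns nents
 */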

/* *********************** FC-NVME Port Management ************************ */


static int
nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	int i;

	iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
			GFP_KERNEL);
	if (!iod)
		return -ENOMEM;

	tgtport->iod = iod;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
		iod->tgtport = tgtport;
		list_add_tail(&iod->ls_list, &tgtport->ls_list);

		iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
			GFP_KERNEL);
		if (!iod->rqstbuf)
			goto out_fail;

		iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;

		iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
						NVME_FC_MAX_LS_BUFFER_SIZE,
						DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
			goto out_fail;
	}

	return 0;

out_fail:
	kfree(iod->rqstbuf);
	list_del(&iod->ls_list);
	for (iod--, i--; i >= 0; iod--, i--) {
		fc_dma_unmap_single(tgtport->dev, iod->rspdma,
			NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_list);
	}

	/* after the unwind loop, iod points one element before the
	 * array, so free via the saved base pointer
	 */
	kfree(tgtport->iod);

	return -EFAULT;
}

static void
nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod = tgtport->iod;
	int i;

	for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
		fc_dma_unmap_single(tgtport->dev,
				iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
				DMA_TO_DEVICE);
		kfree(iod->rqstbuf);
		list_del(&iod->ls_list);
	}
	kfree(tgtport->iod);
}

static struct nvmet_fc_ls_iod *
nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_ls_iod *iod;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	iod = list_first_entry_or_null(&tgtport->ls_list,
					struct nvmet_fc_ls_iod, ls_list);
	if (iod)
		list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return iod;
}

static void
nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_move(&iod->ls_list, &tgtport->ls_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
}

static void
nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		INIT_WORK(&fod->work, nvmet_fc_handle_fcp_rqst_work);
		fod->tgtport = tgtport;
		fod->queue = queue;
		fod->active = false;
		list_add_tail(&fod->fcp_list, &queue->fod_list);
		spin_lock_init(&fod->flock);

		fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
		if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
			list_del(&fod->fcp_list);
			for (fod--, i--; i >= 0; fod--, i--) {
				fc_dma_unmap_single(tgtport->dev, fod->rspdma,
						sizeof(fod->rspiubuf),
						DMA_TO_DEVICE);
				fod->rspdma = 0L;
				list_del(&fod->fcp_list);
			}

			return;
		}
	}
}

static void
nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	int i;

	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->rspdma)
			fc_dma_unmap_single(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
	}
}

static struct nvmet_fc_fcp_iod *
nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod;
	unsigned long flags;

	spin_lock_irqsave(&queue->qlock, flags);
	fod = list_first_entry_or_null(&queue->fod_list,
					struct nvmet_fc_fcp_iod, fcp_list);
	if (fod) {
		list_del(&fod->fcp_list);
		fod->active = true;
		fod->abort = false;
		/*
		 * no queue reference is taken, as it was taken by the
		 * queue lookup just prior to the allocation. The iod
		 * will "inherit" that reference.
		 */
	}
	spin_unlock_irqrestore(&queue->qlock, flags);
	return fod;
}

static void
nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
			struct nvmet_fc_fcp_iod *fod)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->qlock, flags);
	list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
	fod->active = false;
	spin_unlock_irqrestore(&queue->qlock, flags);

	/*
	 * release the reference taken at queue lookup and fod allocation
	 */
	nvmet_fc_tgt_q_put(queue);
}

static int
nvmet_fc_queue_to_cpu(struct nvmet_fc_tgtport *tgtport, int qid)
{
	int cpu, idx, cnt;

	if (!(tgtport->ops->target_features &
			NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED) ||
	    tgtport->ops->max_hw_queues == 1)
		return WORK_CPU_UNBOUND;

	/* Simple cpu selection based on qid modulo active cpu count */
	idx = !qid ? 0 : (qid - 1) % num_active_cpus();

	/* find the n'th active cpu */
	for (cpu = 0, cnt = 0; ; ) {
		if (cpu_active(cpu)) {
			if (cnt == idx)
				break;
			cnt++;
		}
		cpu = (cpu + 1) % num_possible_cpus();
	}

	return cpu;
}

static struct nvmet_fc_tgt_queue *
nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
			u16 qid, u16 sqsize)
{
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	int ret;

	if (qid >= NVMET_NR_QUEUES)
		return NULL;

	queue = kzalloc((sizeof(*queue) +
				(sizeof(struct nvmet_fc_fcp_iod) * sqsize)),
				GFP_KERNEL);
	if (!queue)
		return NULL;

	if (!nvmet_fc_tgt_a_get(assoc))
		goto out_free_queue;

	queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
				assoc->tgtport->fc_target_port.port_num,
				assoc->a_id, qid);
	if (!queue->work_q)
		goto out_a_put;

	queue->fod = (struct nvmet_fc_fcp_iod *)&queue[1];
	queue->qid = qid;
	queue->sqsize = sqsize;
	queue->assoc = assoc;
	queue->port = assoc->tgtport->port;
	queue->cpu = nvmet_fc_queue_to_cpu(assoc->tgtport, qid);
	INIT_LIST_HEAD(&queue->fod_list);
	atomic_set(&queue->connected, 0);
	atomic_set(&queue->sqtail, 0);
	atomic_set(&queue->rsn, 1);
	atomic_set(&queue->zrspcnt, 0);
	spin_lock_init(&queue->qlock);
	kref_init(&queue->ref);

	nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);

	ret = nvmet_sq_init(&queue->nvme_sq);
	if (ret)
		goto out_fail_iodlist;

	WARN_ON(assoc->queues[qid]);
	spin_lock_irqsave(&assoc->tgtport->lock, flags);
	assoc->queues[qid] = queue;
	spin_unlock_irqrestore(&assoc->tgtport->lock, flags);

	return queue;

out_fail_iodlist:
	nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
	destroy_workqueue(queue->work_q);
out_a_put:
	nvmet_fc_tgt_a_put(assoc);
out_free_queue:
	kfree(queue);
	return NULL;
}

static void
nvmet_fc_tgt_queue_free(struct kref *ref)
{
	struct nvmet_fc_tgt_queue *queue =
		container_of(ref, struct nvmet_fc_tgt_queue, ref);
	unsigned long flags;

	spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
	queue->assoc->queues[queue->qid] = NULL;
	spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);

	nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);

	nvmet_fc_tgt_a_put(queue->assoc);

	destroy_workqueue(queue->work_q);

	kfree(queue);
}

static void
nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
{
	kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
}

static int
nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
{
	return kref_get_unless_zero(&queue->ref);
}

static void
nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
			struct nvmefc_tgt_fcp_req *fcpreq)
{
	int ret;

	fcpreq->op = NVMET_FCOP_ABORT;
	fcpreq->offset = 0;
	fcpreq->timeout = 0;
	fcpreq->transfer_length = 0;
	fcpreq->transferred_length = 0;
	fcpreq->fcp_error = 0;
	fcpreq->sg_cnt = 0;

	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fcpreq);
	if (ret)
		/* should never reach here !! */
		WARN_ON(1);
}

static void
nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
{
	struct nvmet_fc_fcp_iod *fod = queue->fod;
	unsigned long flags;
	int i;
	bool disconnect;

	disconnect = atomic_xchg(&queue->connected, 0);

	spin_lock_irqsave(&queue->qlock, flags);
	/* abort outstanding io's */
	for (i = 0; i < queue->sqsize; fod++, i++) {
		if (fod->active) {
			spin_lock(&fod->flock);
			fod->abort = true;
			spin_unlock(&fod->flock);
		}
	}
	spin_unlock_irqrestore(&queue->qlock, flags);

	flush_workqueue(queue->work_q);

	if (disconnect)
		nvmet_sq_destroy(&queue->nvme_sq);

	nvmet_fc_tgt_q_put(queue);
}

static struct nvmet_fc_tgt_queue *
nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
				u64 connection_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	u64 association_id = nvmet_fc_getassociationid(connection_id);
	u16 qid = nvmet_fc_getqueueid(connection_id);
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			queue = assoc->queues[qid];
			if (queue &&
			    (!atomic_read(&queue->connected) ||
			     !nvmet_fc_tgt_q_get(queue)))
				queue = NULL;
			spin_unlock_irqrestore(&tgtport->lock, flags);
			return queue;
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
	return NULL;
}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
	unsigned long flags;
	u64 ran;
	int idx;
	bool needrandom = true;

	assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
	if (!assoc)
		return NULL;

	idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0)
		goto out_free_assoc;

	if (!nvmet_fc_tgtport_get(tgtport))
		goto out_ida_put;

	assoc->tgtport = tgtport;
	assoc->a_id = idx;
	INIT_LIST_HEAD(&assoc->a_list);
	kref_init(&assoc->ref);

	while (needrandom) {
		get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
		ran = ran << BYTES_FOR_QID_SHIFT;

		spin_lock_irqsave(&tgtport->lock, flags);
		needrandom = false;
		list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
			if (ran == tmpassoc->association_id) {
				needrandom = true;
				break;
			}
		if (!needrandom) {
			assoc->association_id = ran;
			list_add_tail(&assoc->a_list, &tgtport->assoc_list);
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);
	}

	return assoc;

out_ida_put:
	ida_simple_remove(&tgtport->assoc_cnt, idx);
out_free_assoc:
	kfree(assoc);
	return NULL;
}

static void
nvmet_fc_target_assoc_free(struct kref *ref)
{
	struct nvmet_fc_tgt_assoc *assoc =
		container_of(ref, struct nvmet_fc_tgt_assoc, ref);
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_del(&assoc->a_list);
	spin_unlock_irqrestore(&tgtport->lock, flags);
	ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
	kfree(assoc);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
{
	kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
}

static int
nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
{
	return kref_get_unless_zero(&assoc->ref);
}

static void
nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
{
	struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&tgtport->lock, flags);
	for (i = NVMET_NR_QUEUES - 1; i >= 0; i--) {
		queue = assoc->queues[i];
		if (queue) {
			if (!nvmet_fc_tgt_q_get(queue))
				continue;
			spin_unlock_irqrestore(&tgtport->lock, flags);
			nvmet_fc_delete_target_queue(queue);
			nvmet_fc_tgt_q_put(queue);
			spin_lock_irqsave(&tgtport->lock, flags);
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	nvmet_fc_tgt_a_put(assoc);
}

static struct nvmet_fc_tgt_assoc *
nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
				u64 association_id)
{
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_assoc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
		if (association_id == assoc->association_id) {
			ret = assoc;
			nvmet_fc_tgt_a_get(assoc);
			break;
		}
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);

	return ret;
}


/**
 * nvmet_fc_register_targetport - transport entry point called by an
 *                              LLDD to register the existence of a local
 *                              NVME subsystem FC port.
 * @pinfo:     pointer to information about the port to be registered
 * @template:  LLDD entrypoints and operational parameters for the port
 * @dev:       physical hardware device node port corresponds to. Will be
 *             used for DMA mappings
 * @portptr:   pointer to a local port pointer. Upon success, the routine
 *             will allocate a nvmet_fc_target_port structure and place its
 *             address in the local port pointer. Upon failure, local port
 *             pointer will be set to NULL.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
			struct nvmet_fc_target_template *template,
			struct device *dev,
			struct nvmet_fc_target_port **portptr)
{
	struct nvmet_fc_tgtport *newrec;
	unsigned long flags;
	int ret, idx;

	if (!template->xmt_ls_rsp || !template->fcp_op ||
	    !template->targetport_delete ||
	    !template->max_hw_queues || !template->max_sgl_segments ||
	    !template->max_dif_sgl_segments || !template->dma_boundary) {
		ret = -EINVAL;
		goto out_regtgt_failed;
	}

	newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
			 GFP_KERNEL);
	if (!newrec) {
		ret = -ENOMEM;
		goto out_regtgt_failed;
	}

	idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
	if (idx < 0) {
		ret = -ENOSPC;
		goto out_fail_kfree;
	}

	if (!get_device(dev) && dev) {
		ret = -ENODEV;
		goto out_ida_put;
	}

	newrec->fc_target_port.node_name = pinfo->node_name;
	newrec->fc_target_port.port_name = pinfo->port_name;
	newrec->fc_target_port.private = &newrec[1];
	newrec->fc_target_port.port_id = pinfo->port_id;
	newrec->fc_target_port.port_num = idx;
	INIT_LIST_HEAD(&newrec->tgt_list);
	newrec->dev = dev;
	newrec->ops = template;
	spin_lock_init(&newrec->lock);
	INIT_LIST_HEAD(&newrec->ls_list);
	INIT_LIST_HEAD(&newrec->ls_busylist);
	INIT_LIST_HEAD(&newrec->assoc_list);
	kref_init(&newrec->ref);
	ida_init(&newrec->assoc_cnt);

	ret = nvmet_fc_alloc_ls_iodlist(newrec);
	if (ret) {
		ret = -ENOMEM;
		goto out_free_newrec;
	}

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	*portptr = &newrec->fc_target_port;
	return 0;

out_free_newrec:
	put_device(dev);
out_ida_put:
	ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
out_fail_kfree:
	kfree(newrec);
out_regtgt_failed:
	*portptr = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
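
/*
 * Illustrative sketch (not part of the driver): roughly how an LLDD
 * might register a target port. The example_* names are hypothetical;
 * all of the template callbacks and limits must be filled in, or the
 * call fails with -EINVAL per the checks above.
 *
 *	static struct nvmet_fc_target_template example_tgt_template = {
 *		.targetport_delete	= example_targetport_delete,
 *		.xmt_ls_rsp		= example_xmt_ls_rsp,
 *		.fcp_op			= example_fcp_op,
 *		.max_hw_queues		= 4,
 *		.max_sgl_segments	= 256,
 *		.max_dif_sgl_segments	= 256,
 *		.dma_boundary		= 0xFFFFFFFF,
 *		.target_priv_sz		= sizeof(struct example_port_priv),
 *	};
 *
 *	struct nvmet_fc_target_port *targetport;
 *	struct nvmet_fc_port_info pinfo = {
 *		.node_name = wwnn,
 *		.port_name = wwpn,
 *		.port_id   = fabric_port_id,
 *	};
 *	int err = nvmet_fc_register_targetport(&pinfo,
 *			&example_tgt_template, &pdev->dev, &targetport);
 */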


static void
nvmet_fc_free_tgtport(struct kref *ref)
{
	struct nvmet_fc_tgtport *tgtport =
		container_of(ref, struct nvmet_fc_tgtport, ref);
	struct device *dev = tgtport->dev;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_del(&tgtport->tgt_list);
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

	nvmet_fc_free_ls_iodlist(tgtport);

	/* let the LLDD know we've finished tearing it down */
	tgtport->ops->targetport_delete(&tgtport->fc_target_port);

	ida_simple_remove(&nvmet_fc_tgtport_cnt,
			tgtport->fc_target_port.port_num);

	ida_destroy(&tgtport->assoc_cnt);

	kfree(tgtport);

	put_device(dev);
}

static void
nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
{
	kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
}

static int
nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
{
	return kref_get_unless_zero(&tgtport->ref);
}

static void
__nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
{
	struct nvmet_fc_tgt_assoc *assoc, *next;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	list_for_each_entry_safe(assoc, next,
				&tgtport->assoc_list, a_list) {
		if (!nvmet_fc_tgt_a_get(assoc))
			continue;
		spin_unlock_irqrestore(&tgtport->lock, flags);
		nvmet_fc_delete_target_assoc(assoc);
		nvmet_fc_tgt_a_put(assoc);
		spin_lock_irqsave(&tgtport->lock, flags);
	}
	spin_unlock_irqrestore(&tgtport->lock, flags);
}

/*
 * nvmet layer has called to terminate an association
 */
static void
nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
{
	struct nvmet_fc_tgtport *tgtport, *next;
	struct nvmet_fc_tgt_assoc *assoc;
	struct nvmet_fc_tgt_queue *queue;
	unsigned long flags;
	bool found_ctrl = false;

	/* this is a bit ugly, but don't want to make locks layered */
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
			tgt_list) {
		if (!nvmet_fc_tgtport_get(tgtport))
			continue;
		spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);

		spin_lock_irqsave(&tgtport->lock, flags);
		list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
			queue = assoc->queues[0];
			if (queue && queue->nvme_sq.ctrl == ctrl) {
				if (nvmet_fc_tgt_a_get(assoc))
					found_ctrl = true;
				break;
			}
		}
		spin_unlock_irqrestore(&tgtport->lock, flags);

		nvmet_fc_tgtport_put(tgtport);

		if (found_ctrl) {
			nvmet_fc_delete_target_assoc(assoc);
			nvmet_fc_tgt_a_put(assoc);
			return;
		}

		spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

/**
 * nvmet_fc_unregister_targetport - transport entry point called by an
 *                              LLDD to deregister/remove a previously
 *                              registered NVME subsystem FC port.
 * @target_port: pointer to the (registered) target port that is to be
 *               deregistered.
 *
 * Returns:
 * a completion status. Must be 0 upon success; a negative errno
 * (ex: -ENXIO) upon failure.
 */
int
nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);

	/* terminate any outstanding associations */
	__nvmet_fc_free_assocs(tgtport);

	nvmet_fc_tgtport_put(tgtport);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);


/* *********************** FC-NVME LS Handling **************************** */


static void
nvmet_fc_format_rsp_hdr(void *buf, u8 ls_cmd, u32 desc_len, u8 rqst_ls_cmd)
{
	struct fcnvme_ls_acc_hdr *acc = buf;

	acc->w0.ls_cmd = ls_cmd;
	acc->desc_list_len = desc_len;
	acc->rqst.desc_tag = cpu_to_be32(FCNVME_LSDESC_RQST);
	acc->rqst.desc_len =
			fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rqst));
	acc->rqst.w0.ls_cmd = rqst_ls_cmd;
}

static int
nvmet_fc_format_rjt(void *buf, u16 buflen, u8 ls_cmd,
			u8 reason, u8 explanation, u8 vendor)
{
	struct fcnvme_ls_rjt *rjt = buf;

	/* a reject response carries the LS_RJT command code */
	nvmet_fc_format_rsp_hdr(buf, FCNVME_LS_RJT,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_rjt)),
			ls_cmd);
	rjt->rjt.desc_tag = cpu_to_be32(FCNVME_LSDESC_RJT);
	rjt->rjt.desc_len = fcnvme_lsdesc_len(sizeof(struct fcnvme_lsdesc_rjt));
	rjt->rjt.reason_code = reason;
	rjt->rjt.reason_explanation = explanation;
	rjt->rjt.vendor = vendor;

	return sizeof(struct fcnvme_ls_rjt);
}

/* Validation Error indexes into the string table below */
enum {
	VERR_NO_ERROR		= 0,
	VERR_CR_ASSOC_LEN	= 1,
	VERR_CR_ASSOC_RQST_LEN	= 2,
	VERR_CR_ASSOC_CMD	= 3,
	VERR_CR_ASSOC_CMD_LEN	= 4,
	VERR_ERSP_RATIO		= 5,
	VERR_ASSOC_ALLOC_FAIL	= 6,
	VERR_QUEUE_ALLOC_FAIL	= 7,
	VERR_CR_CONN_LEN	= 8,
	VERR_CR_CONN_RQST_LEN	= 9,
	VERR_ASSOC_ID		= 10,
	VERR_ASSOC_ID_LEN	= 11,
	VERR_NO_ASSOC		= 12,
	VERR_CONN_ID		= 13,
	VERR_CONN_ID_LEN	= 14,
	VERR_NO_CONN		= 15,
	VERR_CR_CONN_CMD	= 16,
	VERR_CR_CONN_CMD_LEN	= 17,
	VERR_DISCONN_LEN	= 18,
	VERR_DISCONN_RQST_LEN	= 19,
	VERR_DISCONN_CMD	= 20,
	VERR_DISCONN_CMD_LEN	= 21,
	VERR_DISCONN_SCOPE	= 22,
	VERR_RS_LEN		= 23,
	VERR_RS_RQST_LEN	= 24,
	VERR_RS_CMD		= 25,
	VERR_RS_CMD_LEN		= 26,
	VERR_RS_RCTL		= 27,
	VERR_RS_RO		= 28,
};

static char *validation_errors[] = {
	"OK",
	"Bad CR_ASSOC Length",
	"Bad CR_ASSOC Rqst Length",
	"Not CR_ASSOC Cmd",
	"Bad CR_ASSOC Cmd Length",
	"Bad Ersp Ratio",
	"Association Allocation Failed",
	"Queue Allocation Failed",
	"Bad CR_CONN Length",
	"Bad CR_CONN Rqst Length",
	"Not Association ID",
	"Bad Association ID Length",
	"No Association",
	"Not Connection ID",
	"Bad Connection ID Length",
	"No Connection",
	"Not CR_CONN Cmd",
	"Bad CR_CONN Cmd Length",
	"Bad DISCONN Length",
	"Bad DISCONN Rqst Length",
	"Not DISCONN Cmd",
	"Bad DISCONN Cmd Length",
	"Bad Disconnect Scope",
	"Bad RS Length",
	"Bad RS Rqst Length",
	"Not RS Cmd",
	"Bad RS Cmd Length",
	"Bad RS R_CTL",
	"Bad RS Relative Offset",
};

static void
nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_assoc_rqst *rqst =
				(struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
	struct fcnvme_ls_cr_assoc_acc *acc =
				(struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_assoc_rqst))
		ret = VERR_CR_ASSOC_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_rqst)))
		ret = VERR_CR_ASSOC_RQST_LEN;
	else if (rqst->assoc_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
		ret = VERR_CR_ASSOC_CMD;
	else if (rqst->assoc_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_assoc_cmd)))
		ret = VERR_CR_ASSOC_CMD_LEN;
	else if (!rqst->assoc_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
				be16_to_cpu(rqst->assoc_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;
	else {
		/* new association w/ admin queue */
		iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
		if (!iod->assoc)
			ret = VERR_ASSOC_ALLOC_FAIL;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
					be16_to_cpu(rqst->assoc_cmd.sqsize));
			if (!queue)
				ret = VERR_QUEUE_ALLOC_FAIL;
		}
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Create Association LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				ELS_RJT_LOGIC,
				ELS_EXPL_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_assoc_acc)),
			FCNVME_LS_CREATE_ASSOCIATION);
	acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
	acc->associd.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id));
	acc->associd.association_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id = acc->associd.association_id;
}

static void
nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_cr_conn_rqst *rqst =
				(struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
	struct fcnvme_ls_cr_conn_acc *acc =
				(struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue;
	int ret = 0;

	memset(acc, 0, sizeof(*acc));

	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
		ret = VERR_CR_CONN_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_cr_conn_rqst)))
		ret = VERR_CR_CONN_RQST_LEN;
	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		ret = VERR_ASSOC_ID;
	else if (rqst->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		ret = VERR_ASSOC_ID_LEN;
	else if (rqst->connect_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
		ret = VERR_CR_CONN_CMD;
	else if (rqst->connect_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
		ret = VERR_CR_CONN_CMD_LEN;
	else if (!rqst->connect_cmd.ersp_ratio ||
		 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
				be16_to_cpu(rqst->connect_cmd.sqsize)))
		ret = VERR_ERSP_RATIO;
	else {
		/* new io queue */
		iod->assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		if (!iod->assoc)
			ret = VERR_NO_ASSOC;
		else {
			queue = nvmet_fc_alloc_target_queue(iod->assoc,
					be16_to_cpu(rqst->connect_cmd.qid),
					be16_to_cpu(rqst->connect_cmd.sqsize));
			if (!queue)
				ret = VERR_QUEUE_ALLOC_FAIL;

			/* release get taken in nvmet_fc_find_target_assoc */
			nvmet_fc_tgt_a_put(iod->assoc);
		}
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Create Connection LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					ELS_RJT_PROT : ELS_RJT_LOGIC,
				ELS_EXPL_NONE, 0);
		return;
	}

	queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
	atomic_set(&queue->connected, 1);
	queue->sqhd = 0;	/* best place to init value */

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
			FCNVME_LS_CREATE_CONNECTION);
	acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
	acc->connectid.desc_len =
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_conn_id));
	acc->connectid.connection_id =
			cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
				be16_to_cpu(rqst->connect_cmd.qid)));
}

static void
nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_disconnect_rqst *rqst =
			(struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
	struct fcnvme_ls_disconnect_acc *acc =
			(struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
	struct nvmet_fc_tgt_queue *queue;
	struct nvmet_fc_tgt_assoc *assoc;
	int ret = 0;
	bool del_assoc = false;

	memset(acc, 0, sizeof(*acc));

	if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_rqst))
		ret = VERR_DISCONN_LEN;
	else if (rqst->desc_list_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_rqst)))
		ret = VERR_DISCONN_RQST_LEN;
	else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
		ret = VERR_ASSOC_ID;
	else if (rqst->associd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_assoc_id)))
		ret = VERR_ASSOC_ID_LEN;
	else if (rqst->discon_cmd.desc_tag !=
			cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
		ret = VERR_DISCONN_CMD;
	else if (rqst->discon_cmd.desc_len !=
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_lsdesc_disconn_cmd)))
		ret = VERR_DISCONN_CMD_LEN;
	else if ((rqst->discon_cmd.scope != FCNVME_DISCONN_ASSOCIATION) &&
		 (rqst->discon_cmd.scope != FCNVME_DISCONN_CONNECTION))
		ret = VERR_DISCONN_SCOPE;
	else {
		/* match an active association */
		assoc = nvmet_fc_find_target_assoc(tgtport,
				be64_to_cpu(rqst->associd.association_id));
		iod->assoc = assoc;
		if (!assoc)
			ret = VERR_NO_ASSOC;
	}

	if (ret) {
		dev_err(tgtport->dev,
			"Disconnect LS failed: %s\n",
			validation_errors[ret]);
		iod->lsreq->rsplen = nvmet_fc_format_rjt(acc,
				NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
				(ret == VERR_NO_ASSOC) ?
					ELS_RJT_PROT : ELS_RJT_LOGIC,
				ELS_EXPL_NONE, 0);
		return;
	}

	/* format a response */

	iod->lsreq->rsplen = sizeof(*acc);

	nvmet_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
			fcnvme_lsdesc_len(
				sizeof(struct fcnvme_ls_disconnect_acc)),
			FCNVME_LS_DISCONNECT);

	if (rqst->discon_cmd.scope == FCNVME_DISCONN_CONNECTION) {
		queue = nvmet_fc_find_target_queue(tgtport,
					be64_to_cpu(rqst->discon_cmd.id));
		if (queue) {
			int qid = queue->qid;

			nvmet_fc_delete_target_queue(queue);

			/* release the get taken by find_target_queue */
			nvmet_fc_tgt_q_put(queue);

			/* tear association down if the admin queue (qid 0)
			 * was terminated
			 */
			if (!qid)
				del_assoc = true;
		}
	}

	/* release get taken in nvmet_fc_find_target_assoc */
	nvmet_fc_tgt_a_put(iod->assoc);

	if (del_assoc)
		nvmet_fc_delete_target_assoc(iod->assoc);
}


/* *********************** NVME Ctrl Routines **************************** */


static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);

static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;

static void
nvmet_fc_xmt_ls_rsp_done(struct nvmefc_tgt_ls_req *lsreq)
{
	struct nvmet_fc_ls_iod *iod = lsreq->nvmet_fc_private;
	struct nvmet_fc_tgtport *tgtport = iod->tgtport;

	fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
			NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
	nvmet_fc_free_ls_iod(tgtport, iod);
	nvmet_fc_tgtport_put(tgtport);
}

static void
nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	int ret;

	fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
			NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);

	ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsreq);
	if (ret)
		nvmet_fc_xmt_ls_rsp_done(iod->lsreq);
}

/*
 * Actual processing routine for received FC-NVME LS Requests from the LLD
 */
static void
nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod *iod)
{
	struct fcnvme_ls_rqst_w0 *w0 =
			(struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;

	iod->lsreq->nvmet_fc_private = iod;
	iod->lsreq->rspbuf = iod->rspbuf;
	iod->lsreq->rspdma = iod->rspdma;
	iod->lsreq->done = nvmet_fc_xmt_ls_rsp_done;
	/* Be preventative. handlers will later set to valid length */
	iod->lsreq->rsplen = 0;

	iod->assoc = NULL;

	/*
	 * handlers:
	 *   parse request input, execute the request, and format the
	 *   LS response
	 */
	switch (w0->ls_cmd) {
	case FCNVME_LS_CREATE_ASSOCIATION:
		/* Creates Association and initial Admin Queue/Connection */
		nvmet_fc_ls_create_association(tgtport, iod);
		break;
	case FCNVME_LS_CREATE_CONNECTION:
		/* Creates an IO Queue/Connection */
		nvmet_fc_ls_create_connection(tgtport, iod);
		break;
	case FCNVME_LS_DISCONNECT:
		/* Terminate a Queue/Connection or the Association */
		nvmet_fc_ls_disconnect(tgtport, iod);
		break;
	default:
		iod->lsreq->rsplen = nvmet_fc_format_rjt(iod->rspbuf,
				NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
				ELS_RJT_INVAL, ELS_EXPL_NONE, 0);
	}

	nvmet_fc_xmt_ls_rsp(tgtport, iod);
}

/*
 * Actual processing routine for received FC-NVME LS Requests from the LLD
 */
static void
nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
{
	struct nvmet_fc_ls_iod *iod =
		container_of(work, struct nvmet_fc_ls_iod, work);
	struct nvmet_fc_tgtport *tgtport = iod->tgtport;

	nvmet_fc_handle_ls_rqst(tgtport, iod);
}


/**
 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
 *                       upon the reception of a NVME LS request.
 *
 * The nvmet-fc layer will copy payload to an internal structure for
 * processing.  As such, upon completion of the routine, the LLDD may
 * immediately free/reuse the LS request buffer passed in the call.
 *
 * If this routine returns error, the LLDD should abort the exchange.
 *
 * @target_port:  pointer to the (registered) target port the LS was
 *                received on.
 * @lsreq:        pointer to a lsreq request structure to be used to reference
 *                the exchange corresponding to the LS.
 * @lsreqbuf:     pointer to the buffer containing the LS Request
 * @lsreqbuf_len: length, in bytes, of the received LS request
 */
int
nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_ls_req *lsreq,
			void *lsreqbuf, u32 lsreqbuf_len)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvmet_fc_ls_iod *iod;

	if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
		return -E2BIG;

	if (!nvmet_fc_tgtport_get(tgtport))
		return -ESHUTDOWN;

	iod = nvmet_fc_alloc_ls_iod(tgtport);
	if (!iod) {
		nvmet_fc_tgtport_put(tgtport);
		return -ENOENT;
	}

	iod->lsreq = lsreq;
	iod->fcpreq = NULL;
	memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
	iod->rqstdatalen = lsreqbuf_len;

	schedule_work(&iod->work);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
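
/*
 * Illustrative sketch (not part of the driver): how an LLDD might hand
 * a received LS frame to nvmet-fc. The example_* names are hypothetical.
 * Because the payload is copied before the call returns, the LLDD's
 * receive buffer may be reused immediately afterwards.
 *
 *	struct nvmefc_tgt_ls_req *lsreq = &example_exch->ls_req;
 *
 *	ret = nvmet_fc_rcv_ls_req(example_lport->targetport, lsreq,
 *				  example_exch->rcv_buf,
 *				  example_exch->rcv_len);
 *	if (ret)
 *		example_abort_exchange(example_exch);
 */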
  1293. /*
  1294. * **********************
  1295. * Start of FCP handling
  1296. * **********************
  1297. */
  1298. static int
  1299. nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
  1300. {
  1301. struct scatterlist *sg;
  1302. struct page *page;
  1303. unsigned int nent;
  1304. u32 page_len, length;
  1305. int i = 0;
  1306. length = fod->total_length;
  1307. nent = DIV_ROUND_UP(length, PAGE_SIZE);
  1308. sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL);
  1309. if (!sg)
  1310. goto out;
  1311. sg_init_table(sg, nent);
  1312. while (length) {
  1313. page_len = min_t(u32, length, PAGE_SIZE);
  1314. page = alloc_page(GFP_KERNEL);
  1315. if (!page)
  1316. goto out_free_pages;
  1317. sg_set_page(&sg[i], page, page_len, 0);
  1318. length -= page_len;
  1319. i++;
  1320. }
  1321. fod->data_sg = sg;
  1322. fod->data_sg_cnt = nent;
  1323. fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
  1324. ((fod->io_dir == NVMET_FCP_WRITE) ?
  1325. DMA_FROM_DEVICE : DMA_TO_DEVICE));
  1326. /* note: write from initiator perspective */
  1327. return 0;
  1328. out_free_pages:
  1329. while (i > 0) {
  1330. i--;
  1331. __free_page(sg_page(&sg[i]));
  1332. }
  1333. kfree(sg);
  1334. fod->data_sg = NULL;
  1335. fod->data_sg_cnt = 0;
  1336. out:
  1337. return NVME_SC_INTERNAL;
  1338. }
  1339. static void
  1340. nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
  1341. {
  1342. struct scatterlist *sg;
  1343. int count;
  1344. if (!fod->data_sg || !fod->data_sg_cnt)
  1345. return;
  1346. fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
  1347. ((fod->io_dir == NVMET_FCP_WRITE) ?
  1348. DMA_FROM_DEVICE : DMA_TO_DEVICE));
  1349. for_each_sg(fod->data_sg, sg, fod->data_sg_cnt, count)
  1350. __free_page(sg_page(sg));
  1351. kfree(fod->data_sg);
  1352. }

static bool
queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
{
	u32 sqtail, used;

	/* egad, this is ugly. And sqtail is just a best guess */
	sqtail = atomic_read(&q->sqtail) % q->sqsize;

	used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
	return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
}
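
/*
 * Worked example for the occupancy check above (values are illustrative):
 * with q->sqsize = 32, sqhd = 10 and a raw sqtail of 38, sqtail wraps to
 * 38 % 32 = 6. Since 6 < 10, used = 6 + 32 - 10 = 28 entries. The test
 * is then 28 * 10 = 280 >= (32 - 1) * 9 = 279, so the queue is treated
 * as 90% full and an ERSP will be forced.
 */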

/*
 * Prep RSP payload.
 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
 */
static void
nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod)
{
	struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
	struct nvme_completion *cqe = &ersp->cqe;
	u32 *cqewd = (u32 *)cqe;
	bool send_ersp = false;
	u32 rsn, rspcnt, xfr_length;

	if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
		xfr_length = fod->total_length;
	else
		xfr_length = fod->offset;

	/*
	 * check to see if we can send a 0's rsp.
	 *   Note: to send a 0's response, the NVME-FC host transport will
	 *   recreate the CQE. The host transport knows: sq id, SQHD (last
	 *   seen in an ersp), and command_id. Thus it will create a
	 *   zero-filled CQE with those known fields filled in. Transport
	 *   must send an ersp for any condition where the cqe won't match
	 *   this.
	 *
	 * Here are the FC-NVME mandated cases where we must send an ersp:
	 *  every N responses, where N=ersp_ratio
	 *  force fabric commands to send ersp's (not in FC-NVME but good
	 *    practice)
	 *  normal cmds: any time status is non-zero, or status is zero
	 *    but words 0 or 1 are non-zero.
	 *  the SQ is 90% or more full
	 *  the cmd is a fused command
	 *  transferred data length not equal to cmd iu length
	 */
	rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
	if (!(rspcnt % fod->queue->ersp_ratio) ||
	    sqe->opcode == nvme_fabrics_command ||
	    xfr_length != fod->total_length ||
	    (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
	    (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
	    queue_90percent_full(fod->queue, cqe->sq_head))
		send_ersp = true;

	/* re-set the fields */
	fod->fcpreq->rspaddr = ersp;
	fod->fcpreq->rspdma = fod->rspdma;

	if (!send_ersp) {
		memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
		fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
	} else {
		ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
		rsn = atomic_inc_return(&fod->queue->rsn);
		ersp->rsn = cpu_to_be32(rsn);
		ersp->xfrd_len = cpu_to_be32(xfr_length);
		fod->fcpreq->rsplen = sizeof(*ersp);
	}

	fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
}
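
/*
 * Example of the decision above (illustrative): a read that completed with
 * status 0, CQE words 0/1 zero, all data transferred, a non-fabrics and
 * non-fused opcode, a sub-90%-full SQ, and a response count that is not a
 * multiple of ersp_ratio takes the !send_ersp path and sends the short
 * all-zeros response rather than a full ERSP; the host transport then
 * rebuilds the CQE from the sq id, last-seen SQHD and command_id.
 */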

static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);

static void
nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod)
{
	int ret;

	fod->fcpreq->op = NVMET_FCOP_RSP;
	fod->fcpreq->timeout = 0;

	nvmet_fc_prep_fcp_rsp(tgtport, fod);

	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
	if (ret)
		nvmet_fc_abort_op(tgtport, fod->fcpreq);
}

static void
nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
				struct nvmet_fc_fcp_iod *fod, u8 op)
{
	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
	struct scatterlist *sg, *datasg;
	u32 tlen, sg_off;
	int ret;

	fcpreq->op = op;
	fcpreq->offset = fod->offset;
	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
	tlen = min_t(u32, (NVMET_FC_MAX_KB_PER_XFR * 1024),
			(fod->total_length - fod->offset));
	tlen = min_t(u32, tlen, NVME_FC_MAX_SEGMENTS * PAGE_SIZE);
	tlen = min_t(u32, tlen, fod->tgtport->ops->max_sgl_segments
					* PAGE_SIZE);
	fcpreq->transfer_length = tlen;
	fcpreq->transferred_length = 0;
	fcpreq->fcp_error = 0;
	fcpreq->rsplen = 0;

	fcpreq->sg_cnt = 0;

	datasg = fod->next_sg;
	sg_off = fod->next_sg_offset;

	for (sg = fcpreq->sg; tlen; sg++) {
		*sg = *datasg;
		if (sg_off) {
			sg->offset += sg_off;
			sg->length -= sg_off;
			sg->dma_address += sg_off;
			sg_off = 0;
		}
		if (tlen < sg->length) {
			sg->length = tlen;
			fod->next_sg = datasg;
			fod->next_sg_offset += tlen;
		} else if (tlen == sg->length) {
			fod->next_sg_offset = 0;
			fod->next_sg = sg_next(datasg);
		} else {
			fod->next_sg_offset = 0;
			datasg = sg_next(datasg);
		}
		tlen -= sg->length;
		fcpreq->sg_cnt++;
	}

	/*
	 * If the last READDATA request: check if LLDD supports
	 * combined xfr with response.
	 */
	if ((op == NVMET_FCOP_READDATA) &&
	    ((fod->offset + fcpreq->transfer_length) == fod->total_length) &&
	    (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
		fcpreq->op = NVMET_FCOP_READDATA_RSP;
		nvmet_fc_prep_fcp_rsp(tgtport, fod);
	}

	ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
	if (ret) {
		/*
		 * should be ok to set w/o lock as it's in the thread of
		 * execution (not an async timer routine) and doesn't
		 * contend with any clearing action
		 */
		fod->abort = true;

		if (op == NVMET_FCOP_WRITEDATA)
			nvmet_req_complete(&fod->req,
					NVME_SC_FC_TRANSPORT_ERROR);
		else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
			fcpreq->fcp_error = ret;
			fcpreq->transferred_length = 0;
			nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
		}
	}
}
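
/*
 * Illustrative numbers for the chunking above (assuming PAGE_SIZE = 4K and
 * NVMET_FC_MAX_KB_PER_XFR = 256 as defined earlier in this file): a 1 MB
 * host write is pulled over in four 256 KB WRITEDATA operations. Each pass
 * copies up to 64 sg entries into fcpreq->sg (fewer if clamped by
 * NVME_FC_MAX_SEGMENTS or the LLDD's max_sgl_segments), and fod->next_sg /
 * fod->next_sg_offset record where the next chunk must resume, including
 * resumption mid-element when tlen ends inside an sg entry.
 */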

static void
nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
{
	struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;
	unsigned long flags;
	bool abort;

	spin_lock_irqsave(&fod->flock, flags);
	abort = fod->abort;
	spin_unlock_irqrestore(&fod->flock, flags);

	/* if in the middle of an io and we need to tear down */
	if (abort && fcpreq->op != NVMET_FCOP_ABORT) {
		/* data no longer needed */
		nvmet_fc_free_tgt_pgs(fod);

		if (fcpreq->fcp_error || abort)
			nvmet_req_complete(&fod->req, fcpreq->fcp_error);

		return;
	}

	switch (fcpreq->op) {

	case NVMET_FCOP_WRITEDATA:
		if (abort || fcpreq->fcp_error ||
		    fcpreq->transferred_length != fcpreq->transfer_length) {
			nvmet_req_complete(&fod->req,
					NVME_SC_FC_TRANSPORT_ERROR);
			return;
		}

		fod->offset += fcpreq->transferred_length;
		if (fod->offset != fod->total_length) {
			/* transfer the next chunk */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_WRITEDATA);
			return;
		}

		/* data transfer complete, resume with nvmet layer */
		fod->req.execute(&fod->req);

		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		if (abort || fcpreq->fcp_error ||
		    fcpreq->transferred_length != fcpreq->transfer_length) {
			/* data no longer needed */
			nvmet_fc_free_tgt_pgs(fod);

			nvmet_fc_abort_op(tgtport, fod->fcpreq);
			return;
		}

		/* success */

		if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
			/* data no longer needed */
			nvmet_fc_free_tgt_pgs(fod);
			fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
					sizeof(fod->rspiubuf), DMA_TO_DEVICE);
			nvmet_fc_free_fcp_iod(fod->queue, fod);
			return;
		}

		fod->offset += fcpreq->transferred_length;
		if (fod->offset != fod->total_length) {
			/* transfer the next chunk */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_READDATA);
			return;
		}

		/* data transfer complete, send response */

		/* data no longer needed */
		nvmet_fc_free_tgt_pgs(fod);

		nvmet_fc_xmt_fcp_rsp(tgtport, fod);

		break;

	case NVMET_FCOP_RSP:
	case NVMET_FCOP_ABORT:
		fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
				sizeof(fod->rspiubuf), DMA_TO_DEVICE);
		nvmet_fc_free_fcp_iod(fod->queue, fod);
		break;

	default:
		nvmet_fc_free_tgt_pgs(fod);
		nvmet_fc_abort_op(tgtport, fod->fcpreq);
		break;
	}
}
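
/*
 * Summary of the per-op transitions handled above: WRITEDATA either pulls
 * the next chunk or, once all data has arrived, hands the command to the
 * nvmet layer via req.execute(); READDATA either pushes the next chunk or
 * moves on to the response (or finishes directly if the LLDD combined the
 * last read with the response); RSP/ABORT release the per-op descriptor.
 * Any error or pending abort tears the exchange down instead.
 */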

/*
 * actual completion handler after execution by the nvmet layer
 */
static void
__nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_fcp_iod *fod, int status)
{
	struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
	struct nvme_completion *cqe = &fod->rspiubuf.cqe;
	unsigned long flags;
	bool abort;

	spin_lock_irqsave(&fod->flock, flags);
	abort = fod->abort;
	spin_unlock_irqrestore(&fod->flock, flags);

	/* if we have a CQE, snoop the last sq_head value */
	if (!status)
		fod->queue->sqhd = cqe->sq_head;

	if (abort) {
		/* data no longer needed */
		nvmet_fc_free_tgt_pgs(fod);

		nvmet_fc_abort_op(tgtport, fod->fcpreq);
		return;
	}

	/* if an error handling the cmd post initial parsing */
	if (status) {
		/* fudge up a failed CQE status for our transport error */
		memset(cqe, 0, sizeof(*cqe));
		cqe->sq_head = fod->queue->sqhd;	/* echo last cqe sqhd */
		cqe->sq_id = cpu_to_le16(fod->queue->qid);
		cqe->command_id = sqe->command_id;
		cqe->status = cpu_to_le16(status);
	} else {
		/*
		 * try to push the data even if the SQE status is non-zero.
		 * There may be a status where data still was intended to
		 * be moved
		 */
		if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
			/* push the data over before sending rsp */
			nvmet_fc_transfer_fcp_data(tgtport, fod,
						NVMET_FCOP_READDATA);
			return;
		}

		/* writes & no data - fall thru */
	}

	/* data no longer needed */
	nvmet_fc_free_tgt_pgs(fod);

	nvmet_fc_xmt_fcp_rsp(tgtport, fod);
}

static void
nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
{
	struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;

	__nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
}

/*
 * Actual processing routine for received FC-NVME FCP CMD IUs from the LLDD
 */
void
nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_fcp_iod *fod)
{
	struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
	int ret;

	/*
	 * Fused commands are currently not supported in the linux
	 * implementation.
	 *
	 * As such, the FC transport implementation makes no attempt to
	 * inspect fused command pairs, hold them until both halves have
	 * arrived, or order their delivery to the upper layer by CSN.
	 */

	fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;

	fod->total_length = be32_to_cpu(cmdiu->data_len);
	if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
		fod->io_dir = NVMET_FCP_WRITE;
		if (!nvme_is_write(&cmdiu->sqe))
			goto transport_error;
	} else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
		fod->io_dir = NVMET_FCP_READ;
		if (nvme_is_write(&cmdiu->sqe))
			goto transport_error;
	} else {
		fod->io_dir = NVMET_FCP_NODATA;
		if (fod->total_length)
			goto transport_error;
	}

	fod->req.cmd = &fod->cmdiubuf.sqe;
	fod->req.rsp = &fod->rspiubuf.cqe;
	fod->req.port = fod->queue->port;

	/* ensure nvmet handlers will set cmd handler callback */
	fod->req.execute = NULL;

	/* clear any response payload */
	memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));

	ret = nvmet_req_init(&fod->req,
				&fod->queue->nvme_cq,
				&fod->queue->nvme_sq,
				&nvmet_fc_tgt_fcp_ops);
	if (!ret) {	/* bad SQE content */
		nvmet_fc_abort_op(tgtport, fod->fcpreq);
		return;
	}

	/* keep a running counter of tail position */
	atomic_inc(&fod->queue->sqtail);

	fod->data_sg = NULL;
	fod->data_sg_cnt = 0;
	if (fod->total_length) {
		ret = nvmet_fc_alloc_tgt_pgs(fod);
		if (ret) {
			nvmet_req_complete(&fod->req, ret);
			return;
		}
	}
	fod->req.sg = fod->data_sg;
	fod->req.sg_cnt = fod->data_sg_cnt;
	fod->offset = 0;
	fod->next_sg = fod->data_sg;
	fod->next_sg_offset = 0;

	if (fod->io_dir == NVMET_FCP_WRITE) {
		/* pull the data over before invoking nvmet layer */
		nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
		return;
	}

	/*
	 * Reads or no data:
	 *
	 * can invoke the nvmet_layer now. If read data, cmd completion will
	 * push the data
	 */
	fod->req.execute(&fod->req);

	return;

transport_error:
	nvmet_fc_abort_op(tgtport, fod->fcpreq);
}

/*
 * Work-queue entry point for a received FC-NVME FCP CMD IU, run outside
 * the LLDD receive context.
 */
static void
nvmet_fc_handle_fcp_rqst_work(struct work_struct *work)
{
	struct nvmet_fc_fcp_iod *fod =
		container_of(work, struct nvmet_fc_fcp_iod, work);
	struct nvmet_fc_tgtport *tgtport = fod->tgtport;

	nvmet_fc_handle_fcp_rqst(tgtport, fod);
}

/**
 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
 *                        upon the reception of an NVME FCP CMD IU.
 *
 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
 * layer for processing.
 *
 * The nvmet-fc layer will copy cmd payload to an internal structure for
 * processing. As such, upon completion of the routine, the LLDD may
 * immediately free/reuse the CMD IU buffer passed in the call.
 *
 * If this routine returns error, the LLDD should abort the exchange.
 *
 * @target_port: pointer to the (registered) target port the FCP CMD IU
 *               was received on.
 * @fcpreq:      pointer to a fcpreq request structure to be used to reference
 *               the exchange corresponding to the FCP CMD IU.
 * @cmdiubuf:    pointer to the buffer containing the FCP CMD IU
 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
 */
int
nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
			struct nvmefc_tgt_fcp_req *fcpreq,
			void *cmdiubuf, u32 cmdiubuf_len)
{
	struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
	struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
	struct nvmet_fc_tgt_queue *queue;
	struct nvmet_fc_fcp_iod *fod;

	/* validate iu, so the connection id can be used to find the queue */
	if ((cmdiubuf_len != sizeof(*cmdiu)) ||
	    (cmdiu->scsi_id != NVME_CMD_SCSI_ID) ||
	    (cmdiu->fc_id != NVME_CMD_FC_ID) ||
	    (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
		return -EIO;

	queue = nvmet_fc_find_target_queue(tgtport,
				be64_to_cpu(cmdiu->connection_id));
	if (!queue)
		return -ENOTCONN;

	/*
	 * note: reference taken by find_target_queue
	 * After successful fod allocation, the fod will inherit the
	 * ownership of that reference and will remove the reference
	 * when the fod is freed.
	 */
	fod = nvmet_fc_alloc_fcp_iod(queue);
	if (!fod) {
		/* release the queue lookup reference */
		nvmet_fc_tgt_q_put(queue);
		return -ENOENT;
	}

	fcpreq->nvmet_fc_private = fod;
	fod->fcpreq = fcpreq;
	/*
	 * put all admin cmds on hw queue id 0. All io commands go to
	 * the respective hw queue based on a modulo basis
	 */
	fcpreq->hwqid = queue->qid ?
			((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
	memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);

	queue_work_on(queue->cpu, queue->work_q, &fod->work);

	return 0;
}
EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
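
/*
 * Illustrative sketch only (not part of this driver): how an LLDD might
 * feed a received FCP CMD IU to nvmet_fc_rcv_fcp_req(). The
 * "example_lldd_*" names are assumptions; a real LLDD embeds a
 * struct nvmefc_tgt_fcp_req in its per-exchange context and aborts the
 * FC exchange if this routine fails.
 */
#if 0
struct example_lldd_fcp_ctx {
	struct nvmefc_tgt_fcp_req fcpreq;	/* transport request, embedded */
	void *cmd_iu;				/* received FCP CMD IU */
	u32 cmd_iu_len;				/* IU length in bytes */
};

static void example_lldd_recv_fcp_cmd(struct nvmet_fc_target_port *tport,
		struct example_lldd_fcp_ctx *ctx)
{
	/* the CMD IU buffer may be reused as soon as this call returns */
	if (nvmet_fc_rcv_fcp_req(tport, &ctx->fcpreq,
				 ctx->cmd_iu, ctx->cmd_iu_len))
		example_lldd_abort_fcp_exchange(ctx);	/* hypothetical */
}
#endif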

enum {
	FCT_TRADDR_ERR		= 0,
	FCT_TRADDR_WWNN		= 1 << 0,
	FCT_TRADDR_WWPN		= 1 << 1,
};

struct nvmet_fc_traddr {
	u64	nn;
	u64	pn;
};

static const match_table_t traddr_opt_tokens = {
	{ FCT_TRADDR_WWNN,	"nn-%s" },
	{ FCT_TRADDR_WWPN,	"pn-%s" },
	{ FCT_TRADDR_ERR,	NULL }
};

static int
nvmet_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, traddr_opt_tokens, args);
		switch (token) {
		case FCT_TRADDR_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out;
			}
			traddr->nn = token64;
			break;
		case FCT_TRADDR_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out;
			}
			traddr->pn = token64;
			break;
		default:
			pr_warn("unknown traddr token or missing value '%s'\n",
					p);
			ret = -EINVAL;
			goto out;
		}
	}

out:
	kfree(options);
	return ret;
}
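
/*
 * Example (illustrative values): a configfs traddr of
 *   "nn-0x20000090fadd1234,pn-0x10000090fadd1234"
 * parses to traddr.nn = 0x20000090fadd1234 and traddr.pn =
 * 0x10000090fadd1234, which nvmet_fc_add_port() below matches against a
 * registered targetport's node_name/port_name.
 */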

static int
nvmet_fc_add_port(struct nvmet_port *port)
{
	struct nvmet_fc_tgtport *tgtport;
	struct nvmet_fc_traddr traddr = { 0L, 0L };
	unsigned long flags;
	int ret;

	/* validate the address info */
	if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
	    (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
		return -EINVAL;

	/* map the traddr address info to a target port */

	ret = nvmet_fc_parse_traddr(&traddr, port->disc_addr.traddr);
	if (ret)
		return ret;

	ret = -ENXIO;
	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
		if ((tgtport->fc_target_port.node_name == traddr.nn) &&
		    (tgtport->fc_target_port.port_name == traddr.pn)) {
			/* an FC port can be bound to only one nvmet port id */
			if (!tgtport->port) {
				tgtport->port = port;
				port->priv = tgtport;
				ret = 0;
			} else
				ret = -EALREADY;
			break;
		}
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
	return ret;
}

static void
nvmet_fc_remove_port(struct nvmet_port *port)
{
	struct nvmet_fc_tgtport *tgtport = port->priv;
	unsigned long flags;

	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
	if (tgtport->port == port) {
		nvmet_fc_tgtport_put(tgtport);
		tgtport->port = NULL;
	}
	spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
}

static struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
	.owner			= THIS_MODULE,
	.type			= NVMF_TRTYPE_FC,
	.msdbd			= 1,
	.add_port		= nvmet_fc_add_port,
	.remove_port		= nvmet_fc_remove_port,
	.queue_response		= nvmet_fc_fcp_nvme_cmd_done,
	.delete_ctrl		= nvmet_fc_delete_ctrl,
};

static int __init nvmet_fc_init_module(void)
{
	return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
}

static void __exit nvmet_fc_exit_module(void)
{
	/* sanity check - all lports should be removed */
	if (!list_empty(&nvmet_fc_target_list))
		pr_warn("%s: targetport list not empty\n", __func__);

	nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);

	ida_destroy(&nvmet_fc_tgtport_cnt);
}

module_init(nvmet_fc_init_module);
module_exit(nvmet_fc_exit_module);

MODULE_LICENSE("GPL v2");