/*
 * Copyright (c) 2016 Avago Technologies. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>
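
/*
 * fcloop: a software loopback for FC-NVME. The module binds the NVMe-FC
 * host (initiator) transport directly to the NVMe-FC target transport,
 * so LS and FCP traffic can be exercised without real FC hardware.
 */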

enum {
        NVMF_OPT_ERR            = 0,
        NVMF_OPT_WWNN           = 1 << 0,
        NVMF_OPT_WWPN           = 1 << 1,
        NVMF_OPT_ROLES          = 1 << 2,
        NVMF_OPT_FCADDR         = 1 << 3,
        NVMF_OPT_LPWWNN         = 1 << 4,
        NVMF_OPT_LPWWPN         = 1 << 5,
};

struct fcloop_ctrl_options {
        int                     mask;
        u64                     wwnn;
        u64                     wwpn;
        u32                     roles;
        u32                     fcaddr;
        u64                     lpwwnn;
        u64                     lpwwpn;
};

static const match_table_t opt_tokens = {
        { NVMF_OPT_WWNN,        "wwnn=%s"       },
        { NVMF_OPT_WWPN,        "wwpn=%s"       },
        { NVMF_OPT_ROLES,       "roles=%d"      },
        { NVMF_OPT_FCADDR,      "fcaddr=%x"     },
        { NVMF_OPT_LPWWNN,      "lpwwnn=%s"     },
        { NVMF_OPT_LPWWPN,      "lpwwpn=%s"     },
        { NVMF_OPT_ERR,         NULL            }
};
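
/*
 * Parse a comma-separated option string ("wwnn=...,wwpn=...,...") into
 * @opts, OR-ing the token of each recognized option into opts->mask.
 * Returns 0 on success, -ENOMEM or -EINVAL on failure.
 */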
static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
                const char *buf)
{
        substring_t args[MAX_OPT_ARGS];
        char *options, *o, *p;
        int token, ret = 0;
        u64 token64;

        options = o = kstrdup(buf, GFP_KERNEL);
        if (!options)
                return -ENOMEM;

        while ((p = strsep(&o, ",\n")) != NULL) {
                if (!*p)
                        continue;

                token = match_token(p, opt_tokens, args);
                opts->mask |= token;
                switch (token) {
                case NVMF_OPT_WWNN:
                        if (match_u64(args, &token64)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        opts->wwnn = token64;
                        break;
                case NVMF_OPT_WWPN:
                        if (match_u64(args, &token64)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        opts->wwpn = token64;
                        break;
                case NVMF_OPT_ROLES:
                        if (match_int(args, &token)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        opts->roles = token;
                        break;
                case NVMF_OPT_FCADDR:
                        if (match_hex(args, &token)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        opts->fcaddr = token;
                        break;
                case NVMF_OPT_LPWWNN:
                        if (match_u64(args, &token64)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        opts->lpwwnn = token64;
                        break;
                case NVMF_OPT_LPWWPN:
                        if (match_u64(args, &token64)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        opts->lpwwpn = token64;
                        break;
                default:
                        pr_warn("unknown parameter or missing value '%s'\n", p);
                        ret = -EINVAL;
                        goto out_free_options;
                }
        }

out_free_options:
        kfree(options);
        return ret;
}
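
/*
 * Parse only the "wwnn" and "wwpn" options from @buf into @nname and
 * @pname. Both must be supplied; any other option is rejected.
 */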
static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
                const char *buf)
{
        substring_t args[MAX_OPT_ARGS];
        char *options, *o, *p;
        int token, ret = 0;
        u64 token64;

        *nname = -1;
        *pname = -1;

        options = o = kstrdup(buf, GFP_KERNEL);
        if (!options)
                return -ENOMEM;

        while ((p = strsep(&o, ",\n")) != NULL) {
                if (!*p)
                        continue;

                token = match_token(p, opt_tokens, args);
                switch (token) {
                case NVMF_OPT_WWNN:
                        if (match_u64(args, &token64)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        *nname = token64;
                        break;
                case NVMF_OPT_WWPN:
                        if (match_u64(args, &token64)) {
                                ret = -EINVAL;
                                goto out_free_options;
                        }
                        *pname = token64;
                        break;
                default:
                        pr_warn("unknown parameter or missing value '%s'\n", p);
                        ret = -EINVAL;
                        goto out_free_options;
                }
        }

out_free_options:
        kfree(options);

        if (!ret) {
                if (*nname == -1)
                        return -EINVAL;
                if (*pname == -1)
                        return -EINVAL;
        }

        return ret;
}

#define LPORT_OPTS      (NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS      (NVMF_OPT_WWNN | NVMF_OPT_WWPN | \
                         NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS    (NVMF_OPT_WWNN | NVMF_OPT_WWPN)


static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

struct fcloop_lport {
        struct nvme_fc_local_port *localport;
        struct list_head lport_list;
        struct completion unreg_done;
};

struct fcloop_lport_priv {
        struct fcloop_lport *lport;
};

struct fcloop_rport {
        struct nvme_fc_remote_port *remoteport;
        struct nvmet_fc_target_port *targetport;
        struct fcloop_nport *nport;
        struct fcloop_lport *lport;
};

struct fcloop_tport {
        struct nvmet_fc_target_port *targetport;
        struct nvme_fc_remote_port *remoteport;
        struct fcloop_nport *nport;
        struct fcloop_lport *lport;
};

struct fcloop_nport {
        struct fcloop_rport *rport;
        struct fcloop_tport *tport;
        struct fcloop_lport *lport;
        struct list_head nport_list;
        struct kref ref;
        u64 node_name;
        u64 port_name;
        u32 port_role;
        u32 port_id;
};

struct fcloop_lsreq {
        struct fcloop_tport             *tport;
        struct nvmefc_ls_req            *lsreq;
        struct work_struct              work;
        struct nvmefc_tgt_ls_req        tgt_ls_req;
        int                             status;
};

enum {
        INI_IO_START            = 0,
        INI_IO_ACTIVE           = 1,
        INI_IO_ABORTED          = 2,
        INI_IO_COMPLETED        = 3,
};
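
/*
 * Per-I/O state shared between the initiator and target sides of the
 * loopback. @reqlock serializes @inistate/@fcpreq updates between the
 * normal I/O, abort, and completion work items; @ref keeps the structure
 * alive until both sides are done with it.
 */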
struct fcloop_fcpreq {
        struct fcloop_tport             *tport;
        struct nvmefc_fcp_req           *fcpreq;
        spinlock_t                      reqlock;
        u16                             status;
        u32                             inistate;
        bool                            active;
        bool                            aborted;
        struct kref                     ref;
        struct work_struct              fcp_rcv_work;
        struct work_struct              abort_rcv_work;
        struct work_struct              tio_done_work;
        struct nvmefc_tgt_fcp_req       tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
        struct nvmefc_fcp_req           *fcpreq;
        struct fcloop_fcpreq            *tfcp_req;
        spinlock_t                      inilock;
};

static inline struct fcloop_lsreq *
tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
{
        return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
        return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}

static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
                        unsigned int qidx, u16 qsize,
                        void **handle)
{
        *handle = localport;
        return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
                        unsigned int idx, void *handle)
{
}

/*
 * Transmit of LS RSP done (e.g. buffers all set). Call back up the
 * initiator's "done" flow.
 */
static void
fcloop_tgt_lsrqst_done_work(struct work_struct *work)
{
        struct fcloop_lsreq *tls_req =
                container_of(work, struct fcloop_lsreq, work);
        struct fcloop_tport *tport = tls_req->tport;
        struct nvmefc_ls_req *lsreq = tls_req->lsreq;

        if (!tport || tport->remoteport)
                lsreq->done(lsreq, tls_req->status);
}

static int
fcloop_ls_req(struct nvme_fc_local_port *localport,
                        struct nvme_fc_remote_port *remoteport,
                        struct nvmefc_ls_req *lsreq)
{
        struct fcloop_lsreq *tls_req = lsreq->private;
        struct fcloop_rport *rport = remoteport->private;
        int ret = 0;

        tls_req->lsreq = lsreq;
        INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);

        if (!rport->targetport) {
                tls_req->status = -ECONNREFUSED;
                tls_req->tport = NULL;
                schedule_work(&tls_req->work);
                return ret;
        }

        tls_req->status = 0;
        tls_req->tport = rport->targetport->private;
        ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
                                 lsreq->rqstaddr, lsreq->rqstlen);

        return ret;
}

static int
fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
                        struct nvmefc_tgt_ls_req *tgt_lsreq)
{
        struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
        struct nvmefc_ls_req *lsreq = tls_req->lsreq;

        memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
                ((lsreq->rsplen < tgt_lsreq->rsplen) ?
                                lsreq->rsplen : tgt_lsreq->rsplen));
        tgt_lsreq->done(tgt_lsreq);

        schedule_work(&tls_req->work);

        return 0;
}
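
/*
 * fcloop_fcpreq lifetime helpers: the final put frees the structure.
 */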
static void
fcloop_tfcp_req_free(struct kref *ref)
{
        struct fcloop_fcpreq *tfcp_req =
                container_of(ref, struct fcloop_fcpreq, ref);

        kfree(tfcp_req);
}

static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
        kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
        return kref_get_unless_zero(&tfcp_req->ref);
}

static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
                        struct fcloop_fcpreq *tfcp_req, int status)
{
        struct fcloop_ini_fcpreq *inireq = NULL;

        if (fcpreq) {
                inireq = fcpreq->private;
                spin_lock(&inireq->inilock);
                inireq->tfcp_req = NULL;
                spin_unlock(&inireq->inilock);

                fcpreq->status = status;
                fcpreq->done(fcpreq);
        }

        /* release original io reference on tgt struct */
        fcloop_tfcp_req_put(tfcp_req);
}
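
/*
 * Work item that delivers a host FCP request to the target transport.
 * If the initiator aborted the I/O before this ran, fail it with
 * -ECANCELED instead of delivering it.
 */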
static void
fcloop_fcp_recv_work(struct work_struct *work)
{
        struct fcloop_fcpreq *tfcp_req =
                container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
        struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
        int ret = 0;
        bool aborted = false;

        spin_lock(&tfcp_req->reqlock);
        switch (tfcp_req->inistate) {
        case INI_IO_START:
                tfcp_req->inistate = INI_IO_ACTIVE;
                break;
        case INI_IO_ABORTED:
                aborted = true;
                break;
        default:
                spin_unlock(&tfcp_req->reqlock);
                WARN_ON(1);
                return;
        }
        spin_unlock(&tfcp_req->reqlock);

        if (unlikely(aborted))
                ret = -ECANCELED;
        else
                ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
                                &tfcp_req->tgt_fcp_req,
                                fcpreq->cmdaddr, fcpreq->cmdlen);
        if (ret)
                fcloop_call_host_done(fcpreq, tfcp_req, ret);
}

static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
        struct fcloop_fcpreq *tfcp_req =
                container_of(work, struct fcloop_fcpreq, abort_rcv_work);
        struct nvmefc_fcp_req *fcpreq;
        bool completed = false;

        spin_lock(&tfcp_req->reqlock);
        fcpreq = tfcp_req->fcpreq;
        switch (tfcp_req->inistate) {
        case INI_IO_ABORTED:
                break;
        case INI_IO_COMPLETED:
                completed = true;
                break;
        default:
                spin_unlock(&tfcp_req->reqlock);
                WARN_ON(1);
                return;
        }
        spin_unlock(&tfcp_req->reqlock);

        if (unlikely(completed)) {
                /* remove reference taken in original abort downcall */
                fcloop_tfcp_req_put(tfcp_req);
                return;
        }

        if (tfcp_req->tport->targetport)
                nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
                                        &tfcp_req->tgt_fcp_req);

        spin_lock(&tfcp_req->reqlock);
        tfcp_req->fcpreq = NULL;
        spin_unlock(&tfcp_req->reqlock);

        fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
        /* call_host_done releases reference for abort downcall */
}

/*
 * FCP IO operation done by target completion. Call back up the
 * initiator's "done" flow.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
        struct fcloop_fcpreq *tfcp_req =
                container_of(work, struct fcloop_fcpreq, tio_done_work);
        struct nvmefc_fcp_req *fcpreq;

        spin_lock(&tfcp_req->reqlock);
        fcpreq = tfcp_req->fcpreq;
        tfcp_req->inistate = INI_IO_COMPLETED;
        spin_unlock(&tfcp_req->reqlock);

        fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}
static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
                        struct nvme_fc_remote_port *remoteport,
                        void *hw_queue_handle,
                        struct nvmefc_fcp_req *fcpreq)
{
        struct fcloop_rport *rport = remoteport->private;
        struct fcloop_ini_fcpreq *inireq = fcpreq->private;
        struct fcloop_fcpreq *tfcp_req;

        if (!rport->targetport)
                return -ECONNREFUSED;

        tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_KERNEL);
        if (!tfcp_req)
                return -ENOMEM;

        inireq->fcpreq = fcpreq;
        inireq->tfcp_req = tfcp_req;
        spin_lock_init(&inireq->inilock);

        tfcp_req->fcpreq = fcpreq;
        tfcp_req->tport = rport->targetport->private;
        tfcp_req->inistate = INI_IO_START;
        spin_lock_init(&tfcp_req->reqlock);
        INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
        INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
        INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
        kref_init(&tfcp_req->ref);

        schedule_work(&tfcp_req->fcp_rcv_work);

        return 0;
}
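
/*
 * Copy @length bytes between the target-side scatterlist (@data_sg) and
 * the initiator-side scatterlist (@io_sg), starting @offset bytes into
 * the I/O. Direction depends on @op: WRITEDATA copies initiator->target,
 * read ops copy target->initiator.
 */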
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
                        struct scatterlist *io_sg, u32 offset, u32 length)
{
        void *data_p, *io_p;
        u32 data_len, io_len, tlen;

        io_p = sg_virt(io_sg);
        io_len = io_sg->length;

        /* skip over the first @offset bytes of the io scatterlist */
        while (offset) {
                tlen = min_t(u32, offset, io_len);
                offset -= tlen;
                io_len -= tlen;
                if (!io_len) {
                        io_sg = sg_next(io_sg);
                        io_p = sg_virt(io_sg);
                        io_len = io_sg->length;
                } else
                        io_p += tlen;
        }

        data_p = sg_virt(data_sg);
        data_len = data_sg->length;

        while (length) {
                tlen = min_t(u32, io_len, data_len);
                tlen = min_t(u32, tlen, length);

                if (op == NVMET_FCOP_WRITEDATA)
                        memcpy(data_p, io_p, tlen);
                else
                        memcpy(io_p, data_p, tlen);

                length -= tlen;

                io_len -= tlen;
                if ((!io_len) && (length)) {
                        io_sg = sg_next(io_sg);
                        io_p = sg_virt(io_sg);
                        io_len = io_sg->length;
                } else
                        io_p += tlen;

                data_len -= tlen;
                if ((!data_len) && (length)) {
                        data_sg = sg_next(data_sg);
                        data_p = sg_virt(data_sg);
                        data_len = data_sg->length;
                } else
                        data_p += tlen;
        }
}
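
/*
 * Target transport callback for FCP data/response operations. A NULL
 * tfcp_req->fcpreq means the initiator already aborted the I/O; in that
 * case the op is completed as if all went well, without moving data.
 */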
static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
                        struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
        struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
        struct nvmefc_fcp_req *fcpreq;
        u32 rsplen = 0, xfrlen = 0;
        int fcp_err = 0, active, aborted;
        u8 op = tgt_fcpreq->op;

        spin_lock(&tfcp_req->reqlock);
        fcpreq = tfcp_req->fcpreq;
        active = tfcp_req->active;
        aborted = tfcp_req->aborted;
        tfcp_req->active = true;
        spin_unlock(&tfcp_req->reqlock);

        if (unlikely(active))
                /* illegal - call while i/o active */
                return -EALREADY;

        if (unlikely(aborted)) {
                /* target transport has aborted i/o prior */
                spin_lock(&tfcp_req->reqlock);
                tfcp_req->active = false;
                spin_unlock(&tfcp_req->reqlock);
                tgt_fcpreq->transferred_length = 0;
                tgt_fcpreq->fcp_error = -ECANCELED;
                tgt_fcpreq->done(tgt_fcpreq);
                return 0;
        }

        /*
         * if fcpreq is NULL, the I/O has been aborted (from
         * initiator side). For the target side, act as if all is well
         * but don't actually move data.
         */

        switch (op) {
        case NVMET_FCOP_WRITEDATA:
                xfrlen = tgt_fcpreq->transfer_length;
                if (fcpreq) {
                        fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
                                        fcpreq->first_sgl, tgt_fcpreq->offset,
                                        xfrlen);
                        fcpreq->transferred_length += xfrlen;
                }
                break;

        case NVMET_FCOP_READDATA:
        case NVMET_FCOP_READDATA_RSP:
                xfrlen = tgt_fcpreq->transfer_length;
                if (fcpreq) {
                        fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
                                        fcpreq->first_sgl, tgt_fcpreq->offset,
                                        xfrlen);
                        fcpreq->transferred_length += xfrlen;
                }
                if (op == NVMET_FCOP_READDATA)
                        break;

                /* Fall-Thru to RSP handling */

        case NVMET_FCOP_RSP:
                if (fcpreq) {
                        rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
                                        fcpreq->rsplen : tgt_fcpreq->rsplen);
                        memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
                        if (rsplen < tgt_fcpreq->rsplen)
                                fcp_err = -E2BIG;
                        fcpreq->rcv_rsplen = rsplen;
                        fcpreq->status = 0;
                }
                tfcp_req->status = 0;
                break;

        default:
                fcp_err = -EINVAL;
                break;
        }

        spin_lock(&tfcp_req->reqlock);
        tfcp_req->active = false;
        spin_unlock(&tfcp_req->reqlock);

        tgt_fcpreq->transferred_length = xfrlen;
        tgt_fcpreq->fcp_error = fcp_err;
        tgt_fcpreq->done(tgt_fcpreq);

        return 0;
}
static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
                        struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
        struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

        /*
         * mark aborted only in case there were 2 threads in transport
         * (one doing io, other doing abort) and only kills ops posted
         * after the abort request
         */
        spin_lock(&tfcp_req->reqlock);
        tfcp_req->aborted = true;
        spin_unlock(&tfcp_req->reqlock);

        tfcp_req->status = NVME_SC_INTERNAL;

        /*
         * nothing more to do. If io wasn't active, the transport should
         * immediately call the req_release. If it was active, the op
         * will complete, and the lldd should call req_release.
         */
}

static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
                        struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
        struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

        schedule_work(&tfcp_req->tio_done_work);
}

static void
fcloop_ls_abort(struct nvme_fc_local_port *localport,
                        struct nvme_fc_remote_port *remoteport,
                        struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
                        struct nvme_fc_remote_port *remoteport,
                        void *hw_queue_handle,
                        struct nvmefc_fcp_req *fcpreq)
{
        struct fcloop_ini_fcpreq *inireq = fcpreq->private;
        struct fcloop_fcpreq *tfcp_req;
        bool abortio = true;

        spin_lock(&inireq->inilock);
        tfcp_req = inireq->tfcp_req;
        if (tfcp_req)
                fcloop_tfcp_req_get(tfcp_req);
        spin_unlock(&inireq->inilock);

        if (!tfcp_req)
                /* abort has already been called */
                return;

        /* break initiator/target relationship for io */
        spin_lock(&tfcp_req->reqlock);
        switch (tfcp_req->inistate) {
        case INI_IO_START:
        case INI_IO_ACTIVE:
                tfcp_req->inistate = INI_IO_ABORTED;
                break;
        case INI_IO_COMPLETED:
                abortio = false;
                break;
        default:
                spin_unlock(&tfcp_req->reqlock);
                WARN_ON(1);
                return;
        }
        spin_unlock(&tfcp_req->reqlock);

        if (abortio)
                /* leave the reference while the work item is scheduled */
                WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
        else {
                /*
                 * as the io has already had the done callback made,
                 * nothing more to do. So release the reference taken above
                 */
                fcloop_tfcp_req_put(tfcp_req);
        }
}
static void
fcloop_nport_free(struct kref *ref)
{
        struct fcloop_nport *nport =
                container_of(ref, struct fcloop_nport, ref);
        unsigned long flags;

        spin_lock_irqsave(&fcloop_lock, flags);
        list_del(&nport->nport_list);
        spin_unlock_irqrestore(&fcloop_lock, flags);

        kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
        kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
        return kref_get_unless_zero(&nport->ref);
}

static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
        struct fcloop_lport_priv *lport_priv = localport->private;
        struct fcloop_lport *lport = lport_priv->lport;

        /* release any threads waiting for the unreg to complete */
        complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
        struct fcloop_rport *rport = remoteport->private;

        fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
        struct fcloop_tport *tport = targetport->private;

        fcloop_nport_put(tport->nport);
}

#define FCLOOP_HW_QUEUES        4
#define FCLOOP_SGL_SEGS         256
#define FCLOOP_DMABOUND_4G      0xFFFFFFFF

static struct nvme_fc_port_template fctemplate = {
        .localport_delete       = fcloop_localport_delete,
        .remoteport_delete      = fcloop_remoteport_delete,
        .create_queue           = fcloop_create_queue,
        .delete_queue           = fcloop_delete_queue,
        .ls_req                 = fcloop_ls_req,
        .fcp_io                 = fcloop_fcp_req,
        .ls_abort               = fcloop_ls_abort,
        .fcp_abort              = fcloop_fcp_abort,
        .max_hw_queues          = FCLOOP_HW_QUEUES,
        .max_sgl_segments       = FCLOOP_SGL_SEGS,
        .max_dif_sgl_segments   = FCLOOP_SGL_SEGS,
        .dma_boundary           = FCLOOP_DMABOUND_4G,
        /* sizes of additional private data for data structures */
        .local_priv_sz          = sizeof(struct fcloop_lport_priv),
        .remote_priv_sz         = sizeof(struct fcloop_rport),
        .lsrqst_priv_sz         = sizeof(struct fcloop_lsreq),
        .fcprqst_priv_sz        = sizeof(struct fcloop_ini_fcpreq),
};

static struct nvmet_fc_target_template tgttemplate = {
        .targetport_delete      = fcloop_targetport_delete,
        .xmt_ls_rsp             = fcloop_xmt_ls_rsp,
        .fcp_op                 = fcloop_fcp_op,
        .fcp_abort              = fcloop_tgt_fcp_abort,
        .fcp_req_release        = fcloop_fcp_req_release,
        .max_hw_queues          = FCLOOP_HW_QUEUES,
        .max_sgl_segments       = FCLOOP_SGL_SEGS,
        .max_dif_sgl_segments   = FCLOOP_SGL_SEGS,
        .dma_boundary           = FCLOOP_DMABOUND_4G,
        /* optional features */
        .target_features        = 0,
        /* sizes of additional private data for data structures */
        .target_priv_sz         = sizeof(struct fcloop_tport),
};
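
/*
 * Control interface: ports are created and deleted by writing option
 * strings to the sysfs attributes of the "ctl" device registered under
 * the fcloop class at module init.
 */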
static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct nvme_fc_port_info pinfo;
        struct fcloop_ctrl_options *opts;
        struct nvme_fc_local_port *localport;
        struct fcloop_lport *lport;
        struct fcloop_lport_priv *lport_priv;
        unsigned long flags;
        int ret = -ENOMEM;

        lport = kzalloc(sizeof(*lport), GFP_KERNEL);
        if (!lport)
                return -ENOMEM;

        opts = kzalloc(sizeof(*opts), GFP_KERNEL);
        if (!opts)
                goto out_free_lport;

        ret = fcloop_parse_options(opts, buf);
        if (ret)
                goto out_free_opts;

        /* everything there ? */
        if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
                ret = -EINVAL;
                goto out_free_opts;
        }

        memset(&pinfo, 0, sizeof(pinfo));
        pinfo.node_name = opts->wwnn;
        pinfo.port_name = opts->wwpn;
        pinfo.port_role = opts->roles;
        pinfo.port_id = opts->fcaddr;

        ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
        if (!ret) {
                /* success */
                lport_priv = localport->private;
                lport_priv->lport = lport;

                lport->localport = localport;
                INIT_LIST_HEAD(&lport->lport_list);

                spin_lock_irqsave(&fcloop_lock, flags);
                list_add_tail(&lport->lport_list, &fcloop_lports);
                spin_unlock_irqrestore(&fcloop_lock, flags);
        }

out_free_opts:
        kfree(opts);
out_free_lport:
        /* free only if we're going to fail */
        if (ret)
                kfree(lport);

        return ret ? ret : count;
}

static void
__unlink_local_port(struct fcloop_lport *lport)
{
        list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
        int ret;

        init_completion(&lport->unreg_done);

        ret = nvme_fc_unregister_localport(lport->localport);

        wait_for_completion(&lport->unreg_done);

        kfree(lport);

        return ret;
}
static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct fcloop_lport *tlport, *lport = NULL;
        u64 nodename, portname;
        unsigned long flags;
        int ret;

        ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
        if (ret)
                return ret;

        spin_lock_irqsave(&fcloop_lock, flags);

        list_for_each_entry(tlport, &fcloop_lports, lport_list) {
                if (tlport->localport->node_name == nodename &&
                    tlport->localport->port_name == portname) {
                        lport = tlport;
                        __unlink_local_port(lport);
                        break;
                }
        }
        spin_unlock_irqrestore(&fcloop_lock, flags);

        if (!lport)
                return -ENOENT;

        ret = __wait_localport_unreg(lport);

        return ret ? ret : count;
}
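
/*
 * Allocate (or reuse) the fcloop_nport backing a remote/target port
 * pair. The wwnn/wwpn must not collide with a local port; for a remote
 * port, lpwwnn/lpwwpn must name an existing local port.
 */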
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
        struct fcloop_nport *newnport, *nport = NULL;
        struct fcloop_lport *tmplport, *lport = NULL;
        struct fcloop_ctrl_options *opts;
        unsigned long flags;
        u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
        int ret;

        opts = kzalloc(sizeof(*opts), GFP_KERNEL);
        if (!opts)
                return NULL;

        ret = fcloop_parse_options(opts, buf);
        if (ret)
                goto out_free_opts;

        /* everything there ? */
        if ((opts->mask & opts_mask) != opts_mask) {
                ret = -EINVAL;
                goto out_free_opts;
        }

        newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
        if (!newnport)
                goto out_free_opts;

        INIT_LIST_HEAD(&newnport->nport_list);
        newnport->node_name = opts->wwnn;
        newnport->port_name = opts->wwpn;
        if (opts->mask & NVMF_OPT_ROLES)
                newnport->port_role = opts->roles;
        if (opts->mask & NVMF_OPT_FCADDR)
                newnport->port_id = opts->fcaddr;
        kref_init(&newnport->ref);

        spin_lock_irqsave(&fcloop_lock, flags);

        list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
                if (tmplport->localport->node_name == opts->wwnn &&
                    tmplport->localport->port_name == opts->wwpn)
                        goto out_invalid_opts;

                if (tmplport->localport->node_name == opts->lpwwnn &&
                    tmplport->localport->port_name == opts->lpwwpn)
                        lport = tmplport;
        }

        if (remoteport) {
                if (!lport)
                        goto out_invalid_opts;
                newnport->lport = lport;
        }

        list_for_each_entry(nport, &fcloop_nports, nport_list) {
                if (nport->node_name == opts->wwnn &&
                    nport->port_name == opts->wwpn) {
                        if ((remoteport && nport->rport) ||
                            (!remoteport && nport->tport)) {
                                nport = NULL;
                                goto out_invalid_opts;
                        }

                        fcloop_nport_get(nport);

                        spin_unlock_irqrestore(&fcloop_lock, flags);

                        if (remoteport)
                                nport->lport = lport;
                        if (opts->mask & NVMF_OPT_ROLES)
                                nport->port_role = opts->roles;
                        if (opts->mask & NVMF_OPT_FCADDR)
                                nport->port_id = opts->fcaddr;
                        goto out_free_newnport;
                }
        }

        list_add_tail(&newnport->nport_list, &fcloop_nports);

        spin_unlock_irqrestore(&fcloop_lock, flags);

        kfree(opts);
        return newnport;

out_invalid_opts:
        spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
        kfree(newnport);
out_free_opts:
        kfree(opts);
        return nport;
}
static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct nvme_fc_remote_port *remoteport;
        struct fcloop_nport *nport;
        struct fcloop_rport *rport;
        struct nvme_fc_port_info pinfo;
        int ret;

        nport = fcloop_alloc_nport(buf, count, true);
        if (!nport)
                return -EIO;

        memset(&pinfo, 0, sizeof(pinfo));
        pinfo.node_name = nport->node_name;
        pinfo.port_name = nport->port_name;
        pinfo.port_role = nport->port_role;
        pinfo.port_id = nport->port_id;

        ret = nvme_fc_register_remoteport(nport->lport->localport,
                                                &pinfo, &remoteport);
        if (ret || !remoteport) {
                fcloop_nport_put(nport);
                return ret;
        }

        /* success */
        rport = remoteport->private;
        rport->remoteport = remoteport;
        rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
        if (nport->tport) {
                nport->tport->remoteport = remoteport;
                nport->tport->lport = nport->lport;
        }
        rport->nport = nport;
        rport->lport = nport->lport;
        nport->rport = rport;

        return count;
}

static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
        struct fcloop_rport *rport = nport->rport;

        if (rport && nport->tport)
                nport->tport->remoteport = NULL;
        nport->rport = NULL;

        return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
        if (!rport)
                return -EALREADY;

        return nvme_fc_unregister_remoteport(rport->remoteport);
}
static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct fcloop_nport *nport = NULL, *tmpport;
        struct fcloop_rport *rport = NULL;
        u64 nodename, portname;
        unsigned long flags;
        int ret;

        ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
        if (ret)
                return ret;

        spin_lock_irqsave(&fcloop_lock, flags);

        list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
                if (tmpport->node_name == nodename &&
                    tmpport->port_name == portname && tmpport->rport) {
                        nport = tmpport;
                        rport = __unlink_remote_port(nport);
                        break;
                }
        }
        spin_unlock_irqrestore(&fcloop_lock, flags);

        if (!nport)
                return -ENOENT;

        ret = __remoteport_unreg(nport, rport);

        return ret ? ret : count;
}
static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct nvmet_fc_target_port *targetport;
        struct fcloop_nport *nport;
        struct fcloop_tport *tport;
        struct nvmet_fc_port_info tinfo;
        int ret;

        nport = fcloop_alloc_nport(buf, count, false);
        if (!nport)
                return -EIO;

        tinfo.node_name = nport->node_name;
        tinfo.port_name = nport->port_name;
        tinfo.port_id = nport->port_id;

        ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
                                                &targetport);
        if (ret) {
                fcloop_nport_put(nport);
                return ret;
        }

        /* success */
        tport = targetport->private;
        tport->targetport = targetport;
        tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
        if (nport->rport)
                nport->rport->targetport = targetport;
        tport->nport = nport;
        tport->lport = nport->lport;
        nport->tport = tport;

        return count;
}

static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
        struct fcloop_tport *tport = nport->tport;

        if (tport && nport->rport)
                nport->rport->targetport = NULL;
        nport->tport = NULL;

        return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
        if (!tport)
                return -EALREADY;

        return nvmet_fc_unregister_targetport(tport->targetport);
}

static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
                const char *buf, size_t count)
{
        struct fcloop_nport *nport = NULL, *tmpport;
        struct fcloop_tport *tport = NULL;
        u64 nodename, portname;
        unsigned long flags;
        int ret;

        ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
        if (ret)
                return ret;

        spin_lock_irqsave(&fcloop_lock, flags);

        list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
                if (tmpport->node_name == nodename &&
                    tmpport->port_name == portname && tmpport->tport) {
                        nport = tmpport;
                        tport = __unlink_target_port(nport);
                        break;
                }
        }
        spin_unlock_irqrestore(&fcloop_lock, flags);

        if (!nport)
                return -ENOENT;

        ret = __targetport_unreg(nport, tport);

        return ret ? ret : count;
}

static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);

static struct attribute *fcloop_dev_attrs[] = {
        &dev_attr_add_local_port.attr,
        &dev_attr_del_local_port.attr,
        &dev_attr_add_remote_port.attr,
        &dev_attr_del_remote_port.attr,
        &dev_attr_add_target_port.attr,
        &dev_attr_del_target_port.attr,
        NULL
};

static struct attribute_group fcloop_dev_attrs_group = {
        .attrs          = fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
        &fcloop_dev_attrs_group,
        NULL,
};

static struct class *fcloop_class;
static struct device *fcloop_device;

static int __init fcloop_init(void)
{
        int ret;

        fcloop_class = class_create(THIS_MODULE, "fcloop");
        if (IS_ERR(fcloop_class)) {
                pr_err("couldn't register class fcloop\n");
                ret = PTR_ERR(fcloop_class);
                return ret;
        }

        fcloop_device = device_create_with_groups(
                                fcloop_class, NULL, MKDEV(0, 0), NULL,
                                fcloop_dev_attr_groups, "ctl");
        if (IS_ERR(fcloop_device)) {
                pr_err("couldn't create ctl device!\n");
                ret = PTR_ERR(fcloop_device);
                goto out_destroy_class;
        }

        get_device(fcloop_device);

        return 0;

out_destroy_class:
        class_destroy(fcloop_class);
        return ret;
}
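
/*
 * Module teardown: unlink and unregister every remaining nport (target
 * and remote sides) and then every local port, dropping fcloop_lock
 * around the unregister calls, before removing the ctl device and class.
 */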
static void __exit fcloop_exit(void)
{
        struct fcloop_lport *lport;
        struct fcloop_nport *nport;
        struct fcloop_tport *tport;
        struct fcloop_rport *rport;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&fcloop_lock, flags);

        for (;;) {
                nport = list_first_entry_or_null(&fcloop_nports,
                                                typeof(*nport), nport_list);
                if (!nport)
                        break;

                tport = __unlink_target_port(nport);
                rport = __unlink_remote_port(nport);

                spin_unlock_irqrestore(&fcloop_lock, flags);

                ret = __targetport_unreg(nport, tport);
                if (ret)
                        pr_warn("%s: Failed deleting target port\n", __func__);

                ret = __remoteport_unreg(nport, rport);
                if (ret)
                        pr_warn("%s: Failed deleting remote port\n", __func__);

                spin_lock_irqsave(&fcloop_lock, flags);
        }

        for (;;) {
                lport = list_first_entry_or_null(&fcloop_lports,
                                                typeof(*lport), lport_list);
                if (!lport)
                        break;

                __unlink_local_port(lport);

                spin_unlock_irqrestore(&fcloop_lock, flags);

                ret = __wait_localport_unreg(lport);
                if (ret)
                        pr_warn("%s: Failed deleting local port\n", __func__);

                spin_lock_irqsave(&fcloop_lock, flags);
        }

        spin_unlock_irqrestore(&fcloop_lock, flags);

        put_device(fcloop_device);

        device_destroy(fcloop_class, MKDEV(0, 0));
        class_destroy(fcloop_class);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");