/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 */
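
/*
 * fcloop: a software loopback for the NVMe-over-FC transports.  The
 * module registers emulated local, remote, and target ports and then
 * shuttles LS and FCP requests between the nvme-fc host transport and
 * the nvmet-fc target transport entirely in memory, so both transports
 * can be exercised without FC hardware.
 */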
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};

struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};

static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};
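
/*
 * Parse the comma/newline separated "key=value" string written to one
 * of the add_*_port sysfs attributes into a fcloop_ctrl_options struct,
 * recording which options were seen in opts->mask.
 */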
static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}
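
/*
 * Stripped-down parser for the del_*_port attributes: only wwnn= and
 * wwpn= are accepted, and both must be present.
 */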
static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}

#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);
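
/*
 * Internal port representations: a fcloop_lport wraps a registered
 * nvme_fc_local_port; a fcloop_rport pairs a remote port with the
 * targetport it loops back to; a fcloop_tport is the mirror image on
 * the target side; and a fcloop_nport ties a matching rport/tport pair
 * to the lport they hang off of.
 */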
struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
};

struct fcloop_lport_priv {
	struct fcloop_lport *lport;
};

struct fcloop_rport {
	struct nvme_fc_remote_port *remoteport;
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_tport {
	struct nvmet_fc_target_port *targetport;
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct kref ref;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};

struct fcloop_lsreq {
	struct fcloop_tport		*tport;
	struct nvmefc_ls_req		*lsreq;
	struct work_struct		work;
	struct nvmefc_tgt_ls_req	tgt_ls_req;
	int				status;
};
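
/*
 * Initiator-side state of an FCP request.  The state is updated under
 * tfcp_req->reqlock so the normal receive path and a racing abort
 * agree on which side completes the I/O.
 */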
enum {
	INI_IO_START		= 0,
	INI_IO_ACTIVE		= 1,
	INI_IO_ABORTED		= 2,
	INI_IO_COMPLETED	= 3,
};

struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;
	spinlock_t			reqlock;
	u16				status;
	u32				inistate;
	bool				active;
	bool				aborted;
	struct kref			ref;
	struct work_struct		fcp_rcv_work;
	struct work_struct		abort_rcv_work;
	struct work_struct		tio_done_work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req	*fcpreq;
	struct fcloop_fcpreq	*tfcp_req;
	spinlock_t		inilock;
};

static inline struct fcloop_lsreq *
tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}
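
/*
 * Hardware queues are purely virtual in the loopback; the queue handle
 * handed back to the host transport is simply the localport itself.
 */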
static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}

/*
 * Transmit of LS RSP done (e.g. buffers all set). Call back up
 * the initiator's "done" flows.
 */
static void
fcloop_tgt_lsrqst_done_work(struct work_struct *work)
{
	struct fcloop_lsreq *tls_req =
		container_of(work, struct fcloop_lsreq, work);
	struct fcloop_tport *tport = tls_req->tport;
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	if (tport->remoteport)
		lsreq->done(lsreq, tls_req->status);
}

static int
fcloop_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		schedule_work(&tls_req->work);
		return ret;
	}

	tls_req->status = 0;
	tls_req->tport = rport->targetport->private;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
			struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
		((lsreq->rsplen < tgt_lsreq->rsplen) ?
				lsreq->rsplen : tgt_lsreq->rsplen));
	tgt_lsreq->done(tgt_lsreq);

	schedule_work(&tls_req->work);

	return 0;
}
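
/*
 * A fcloop_fcpreq is refcounted: kref_init() supplies the reference
 * owned by the original I/O (dropped in fcloop_call_host_done), and
 * each in-flight abort takes an extra reference, so whichever path
 * finishes last frees the structure.
 */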
static void
fcloop_tfcp_req_free(struct kref *ref)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(ref, struct fcloop_fcpreq, ref);

	kfree(tfcp_req);
}

static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
	kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
	return kref_get_unless_zero(&tfcp_req->ref);
}

static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
			struct fcloop_fcpreq *tfcp_req, int status)
{
	struct fcloop_ini_fcpreq *inireq = NULL;

	if (fcpreq) {
		inireq = fcpreq->private;
		spin_lock(&inireq->inilock);
		inireq->tfcp_req = NULL;
		spin_unlock(&inireq->inilock);

		fcpreq->status = status;
		fcpreq->done(fcpreq);
	}

	/* release original io reference on tgt struct */
	fcloop_tfcp_req_put(tfcp_req);
}

static void
fcloop_fcp_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	int ret = 0;
	bool aborted = false;

	spin_lock(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
		tfcp_req->inistate = INI_IO_ACTIVE;
		break;
	case INI_IO_ABORTED:
		aborted = true;
		break;
	default:
		spin_unlock(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(aborted))
		ret = -ECANCELED;
	else
		ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
				&tfcp_req->tgt_fcp_req,
				fcpreq->cmdaddr, fcpreq->cmdlen);
	if (ret)
		fcloop_call_host_done(fcpreq, tfcp_req, ret);

	return;
}

static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	bool completed = false;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	switch (tfcp_req->inistate) {
	case INI_IO_ABORTED:
		break;
	case INI_IO_COMPLETED:
		completed = true;
		break;
	default:
		spin_unlock(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(completed)) {
		/* remove reference taken in original abort downcall */
		fcloop_tfcp_req_put(tfcp_req);
		return;
	}

	if (tfcp_req->tport->targetport)
		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
					&tfcp_req->tgt_fcp_req);

	spin_lock(&tfcp_req->reqlock);
	tfcp_req->fcpreq = NULL;
	spin_unlock(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
	/* call_host_done releases reference for abort downcall */
}

/*
 * FCP IO operation done by target completion.
 * Call back up the initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, tio_done_work);
	struct nvmefc_fcp_req *fcpreq;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	tfcp_req->inistate = INI_IO_COMPLETED;
	spin_unlock(&tfcp_req->reqlock);

	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}

static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_KERNEL);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	spin_lock_init(&inireq->inilock);

	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	tfcp_req->inistate = INI_IO_START;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
	kref_init(&tfcp_req->ref);

	schedule_work(&tfcp_req->fcp_rcv_work);

	return 0;
}
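
/*
 * Copy "length" bytes between the target-side scatterlist (data_sg)
 * and the initiator-side scatterlist (io_sg), starting "offset" bytes
 * into the io list and walking both lists element by element.  For
 * writes the host buffer is copied into the target's sg list; for
 * reads the reverse.
 */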
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}
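
/*
 * Target-side data-movement entry point: WRITEDATA/READDATA ops are
 * emulated by memcpy between the two scatterlists, and RSP ops by
 * copying the response buffer back into the initiator request.
 */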
static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock(&tfcp_req->reqlock);
		tfcp_req->active = false;
		spin_unlock(&tfcp_req->reqlock);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock(&tfcp_req->reqlock);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}

static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */
	spin_lock(&tfcp_req->reqlock);
	tfcp_req->aborted = true;
	spin_unlock(&tfcp_req->reqlock);

	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}

static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	schedule_work(&tfcp_req->tio_done_work);
}

static void
fcloop_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	bool abortio = true;

	spin_lock(&inireq->inilock);
	tfcp_req = inireq->tfcp_req;
	if (tfcp_req)
		fcloop_tfcp_req_get(tfcp_req);
	spin_unlock(&inireq->inilock);

	if (!tfcp_req)
		/* abort has already been called */
		return;

	/* break initiator/target relationship for io */
	spin_lock(&tfcp_req->reqlock);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
	case INI_IO_ACTIVE:
		tfcp_req->inistate = INI_IO_ABORTED;
		break;
	case INI_IO_COMPLETED:
		abortio = false;
		break;
	default:
		spin_unlock(&tfcp_req->reqlock);
		WARN_ON(1);
		return;
	}
	spin_unlock(&tfcp_req->reqlock);

	if (abortio)
		/* leave the reference while the work item is scheduled */
		WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
	else {
		/*
		 * as the io has already had the done callback made,
		 * nothing more to do. So release the reference taken above
		 */
		fcloop_tfcp_req_put(tfcp_req);
	}
}

static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}

static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport_priv *lport_priv = localport->private;
	struct fcloop_lport *lport = lport_priv->lport;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	fcloop_nport_put(tport->nport);
}

#define	FCLOOP_HW_QUEUES	4
#define	FCLOOP_SGL_SEGS		256
#define FCLOOP_DMABOUND_4G	0xFFFFFFFF

static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};

static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
};
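
/*
 * The module is driven entirely through sysfs.  For example, with
 * illustrative wwnn/wwpn values, a local port is created by:
 *
 *	echo "wwnn=0x10000001,wwpn=0x20000001" > \
 *		/sys/class/fcloop/ctl/add_local_port
 *
 * Remote and target ports are created the same way; a remote port must
 * also name the local port it attaches to via lpwwnn=/lpwwpn=.
 */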
static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	struct fcloop_lport_priv *lport_priv;
	unsigned long flags;
	int ret = -ENOMEM;

	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
	if (!lport)
		return -ENOMEM;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		goto out_free_lport;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		/* success */
		lport_priv = localport->private;
		lport_priv->lport = lport;

		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);
	}

out_free_opts:
	kfree(opts);
out_free_lport:
	/* free only if we're going to fail */
	if (ret)
		kfree(lport);

	return ret ? ret : count;
}

static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

	kfree(lport);

	return ret;
}

static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}
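
/*
 * Allocate, or reuse, an nport for a remote or target port being
 * created.  An existing nport with the same wwnn/wwpn is reused only
 * if the requested side (rport vs tport) is still free; name clashes
 * with a local port, or a missing lpwwnn/lpwwpn match for a remote
 * port, fail the request.
 */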
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}

static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;

	return count;
}

static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	if (!rport)
		return -EALREADY;

	return nvme_fc_unregister_remoteport(rport->remoteport);
}

static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __remoteport_unreg(nport, rport);

	return ret ? ret : count;
}

static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
						&targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;

	return count;
}

static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	if (!tport)
		return -EALREADY;

	return nvmet_fc_unregister_targetport(tport->targetport);
}

static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __targetport_unreg(nport, tport);

	return ret ? ret : count;
}

static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);

static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	NULL
};

static struct attribute_group fcloop_dev_attrs_group = {
	.attrs		= fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fcloop_dev_attrs_group,
	NULL,
};

static struct class *fcloop_class;
static struct device *fcloop_device;

static int __init fcloop_init(void)
{
	int ret;

	fcloop_class = class_create(THIS_MODULE, "fcloop");
	if (IS_ERR(fcloop_class)) {
		pr_err("couldn't register class fcloop\n");
		ret = PTR_ERR(fcloop_class);
		return ret;
	}

	fcloop_device = device_create_with_groups(
				fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_destroy(fcloop_class);
	return ret;
}

static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");