/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful.
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
 * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
 * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
 * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
 * See the GNU General Public License for more details, a copy of which
 * can be found in the file COPYING included with this package
 */
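
/*
 * fcloop: a software loopback that wires the NVMe-FC host (initiator)
 * transport directly to the NVMe-FC target transport, so FC
 * associations and I/O can be exercised without real Fibre Channel
 * hardware. Ports are created and deleted at runtime through sysfs
 * attributes on the "fcloop" class "ctl" device (see the DEVICE_ATTR
 * definitions near the end of this file).
 */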

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};

struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};

static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};
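
/*
 * An option string is a comma-separated list of the tokens above,
 * e.g. (WWN values illustrative only):
 *
 *	wwnn=0x10000090fa942779,wwpn=0x10000090fa94277a,roles=1,fcaddr=0x10
 *
 * The WWN tokens capture a string that is then parsed with
 * match_u64(); roles is decimal, fcaddr hex.
 */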

static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}

static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}

#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define ALL_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN | NVMF_OPT_ROLES | \
			 NVMF_OPT_FCADDR | NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)


static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
};

struct fcloop_rport {
	struct nvme_fc_remote_port *remoteport;
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_tport {
	struct nvmet_fc_target_port *targetport;
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct kref ref;
	struct completion rport_unreg_done;
	struct completion tport_unreg_done;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};
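
/*
 * Port topology, as reflected in the structures above: an
 * fcloop_lport wraps an NVMe-FC local port (the initiator side of the
 * link). An fcloop_nport represents one remote WWNN/WWPN pair and
 * owns, by reference count, both the remote-port view of that
 * endpoint (fcloop_rport, seen by the host transport) and its
 * target-port view (fcloop_tport, seen by the nvmet transport).
 * rport->targetport and tport->remoteport cross-link the two views so
 * each side can hand traffic directly to the other.
 */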

struct fcloop_lsreq {
	struct fcloop_tport		*tport;
	struct nvmefc_ls_req		*lsreq;
	struct work_struct		work;
	struct nvmefc_tgt_ls_req	tgt_ls_req;
	int				status;
};

struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;
	spinlock_t			reqlock;
	u16				status;
	bool				active;
	bool				aborted;
	struct work_struct		work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req		*fcpreq;
	struct fcloop_fcpreq		*tfcp_req;
	struct work_struct		iniwork;
};

static inline struct fcloop_lsreq *
tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}


static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}


/*
 * Transmit of LS RSP done (e.g. buffers all set). call back up
 * initiator "done" flows.
 */
static void
fcloop_tgt_lsrqst_done_work(struct work_struct *work)
{
	struct fcloop_lsreq *tls_req =
		container_of(work, struct fcloop_lsreq, work);
	struct fcloop_tport *tport = tls_req->tport;
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	if (tport->remoteport)
		lsreq->done(lsreq, tls_req->status);
}

static int
fcloop_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		schedule_work(&tls_req->work);
		return ret;
	}

	tls_req->status = 0;
	tls_req->tport = rport->targetport->private;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}
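
/*
 * LS round trip in the loopback: the initiator's request buffer is
 * handed straight to the target via nvmet_fc_rcv_ls_req() above; when
 * the target transmits its response, fcloop_xmt_ls_rsp() below copies
 * the response payload back into the initiator's buffer and schedules
 * the initiator's "done" callback from a work item. If no target port
 * is bound, the request completes with -ECONNREFUSED instead.
 */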

static int
fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
			struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
		((lsreq->rsplen < tgt_lsreq->rsplen) ?
				lsreq->rsplen : tgt_lsreq->rsplen));
	tgt_lsreq->done(tgt_lsreq);

	schedule_work(&tls_req->work);

	return 0;
}

/*
 * FCP IO operation done by initiator abort.
 * call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_ini_done_work(struct work_struct *work)
{
	struct fcloop_ini_fcpreq *inireq =
		container_of(work, struct fcloop_ini_fcpreq, iniwork);

	inireq->fcpreq->done(inireq->fcpreq);
}

/*
 * FCP IO operation done by target completion.
 * call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, work);
	struct fcloop_tport *tport = tfcp_req->tport;
	struct nvmefc_fcp_req *fcpreq;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	spin_unlock(&tfcp_req->reqlock);

	if (tport->remoteport && fcpreq) {
		fcpreq->status = tfcp_req->status;
		fcpreq->done(fcpreq);
	}

	kfree(tfcp_req);
}


static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	int ret = 0;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_KERNEL);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	INIT_WORK(&inireq->iniwork, fcloop_tgt_fcprqst_ini_done_work);

	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work);

	ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req,
				 fcpreq->cmdaddr, fcpreq->cmdlen);

	return ret;
}

static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}
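
/*
 * fcloop_fcp_copy_data() stands in for the DMA a real HBA would do:
 * it first skips "offset" bytes of the initiator's io scatterlist,
 * then walks both scatterlists in lockstep, copying
 * min(io_len, data_len, remaining length) bytes per step and moving
 * to the next sg element of whichever list was exhausted. The copy
 * direction is chosen by op: host-to-target for NVMET_FCOP_WRITEDATA,
 * target-to-host otherwise.
 */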

static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;

	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock(&tfcp_req->reqlock);
		tfcp_req->active = false;
		spin_unlock(&tfcp_req->reqlock);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock(&tfcp_req->reqlock);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}

static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	int active;

	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */
	spin_lock(&tfcp_req->reqlock);
	active = tfcp_req->active;
	tfcp_req->aborted = true;
	spin_unlock(&tfcp_req->reqlock);

	tfcp_req->status = NVME_SC_FC_TRANSPORT_ABORTED;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}

static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	schedule_work(&tfcp_req->work);
}

static void
fcloop_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req = inireq->tfcp_req;

	if (!tfcp_req)
		/* abort has already been called */
		return;

	if (rport->targetport)
		nvmet_fc_rcv_fcp_abort(rport->targetport,
					&tfcp_req->tgt_fcp_req);

	/* break initiator/target relationship for io */
	spin_lock(&tfcp_req->reqlock);
	inireq->tfcp_req = NULL;
	tfcp_req->fcpreq = NULL;
	spin_unlock(&tfcp_req->reqlock);

	/* post the aborted io completion */
	fcpreq->status = -ECANCELED;
	schedule_work(&inireq->iniwork);
}

static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport *lport = localport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&rport->nport->rport_unreg_done);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&tport->nport->tport_unreg_done);
}

#define	FCLOOP_HW_QUEUES	4
#define	FCLOOP_SGL_SEGS		256
#define FCLOOP_DMABOUND_4G	0xFFFFFFFF

static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};

static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= NVMET_FCTGTFEAT_CMD_IN_ISR |
				  NVMET_FCTGTFEAT_OPDONE_IN_ISR,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
};

static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		unsigned long flags;

		/* success */
		lport = localport->private;
		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);

		/* mark all of the input buffer consumed */
		ret = count;
	}

out_free_opts:
	kfree(opts);
	return ret ? ret : count;
}

static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

	return ret;
}


static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}

static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}

static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}

static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;

	return count;
}


static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}

static int
__wait_remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	int ret;

	if (!rport)
		return -EALREADY;

	init_completion(&nport->rport_unreg_done);

	ret = nvme_fc_unregister_remoteport(rport->remoteport);
	if (ret)
		return ret;

	wait_for_completion(&nport->rport_unreg_done);

	fcloop_nport_put(nport);

	return ret;
}


static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __wait_remoteport_unreg(nport, rport);

	return ret ? ret : count;
}

static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
						&targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;

	return count;
}


static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__wait_targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	int ret;

	if (!tport)
		return -EALREADY;

	init_completion(&nport->tport_unreg_done);

	ret = nvmet_fc_unregister_targetport(tport->targetport);
	if (ret)
		return ret;

	wait_for_completion(&nport->tport_unreg_done);

	fcloop_nport_put(nport);

	return ret;
}


static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __wait_targetport_unreg(nport, tport);

	return ret ? ret : count;
}

static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
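
/*
 * These attributes surface under the "ctl" device created in
 * fcloop_init() below, so a loopback topology can be built from a
 * shell. A rough sketch (WWN values illustrative only):
 *
 *	echo "wwnn=0x1,wwpn=0x1" > /sys/class/fcloop/ctl/add_local_port
 *	echo "wwnn=0x2,wwpn=0x2" > /sys/class/fcloop/ctl/add_target_port
 *	echo "wwnn=0x2,wwpn=0x2,lpwwnn=0x1,lpwwpn=0x1" > \
 *		/sys/class/fcloop/ctl/add_remote_port
 *
 * The del_* attributes take a "wwnn=...,wwpn=..." pair, as parsed by
 * fcloop_parse_nm_options().
 */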

static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	NULL
};

static struct attribute_group fcloop_dev_attrs_group = {
	.attrs		= fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fcloop_dev_attrs_group,
	NULL,
};

static struct class *fcloop_class;
static struct device *fcloop_device;


static int __init fcloop_init(void)
{
	int ret;

	fcloop_class = class_create(THIS_MODULE, "fcloop");
	if (IS_ERR(fcloop_class)) {
		pr_err("couldn't register class fcloop\n");
		ret = PTR_ERR(fcloop_class);
		return ret;
	}

	fcloop_device = device_create_with_groups(
				fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_destroy(fcloop_class);
	return ret;
}

static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __wait_remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");