/* fcloop.c - NVMe-FC loopback driver: ties an nvme-fc initiator to an
 * nvmet-fc target in software for transport testing without FC hardware.
 */
  1. /*
  2. * Copyright (c) 2016 Avago Technologies. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of version 2 of the GNU General Public License as
  6. * published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful.
  9. * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
  10. * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
  11. * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
  12. * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
  13. * See the GNU General Public License for more details, a copy of which
  14. * can be found in the file COPYING included with this package
  15. */
  16. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  17. #include <linux/module.h>
  18. #include <linux/parser.h>
  19. #include <uapi/scsi/fc/fc_fs.h>
  20. #include "../host/nvme.h"
  21. #include "../target/nvmet.h"
  22. #include <linux/nvme-fc-driver.h>
  23. #include <linux/nvme-fc.h>
/*
 * Option flags parsed from the sysfs create/delete attribute strings.
 * OR-ed into fcloop_ctrl_options.mask to record which options appeared.
 */
enum {
	NVMF_OPT_ERR		= 0,		/* match_token() miss / table terminator */
	NVMF_OPT_WWNN		= 1 << 0,	/* world-wide node name */
	NVMF_OPT_WWPN		= 1 << 1,	/* world-wide port name */
	NVMF_OPT_ROLES		= 1 << 2,	/* FC port roles */
	NVMF_OPT_FCADDR		= 1 << 3,	/* FC address (port id) */
	NVMF_OPT_LPWWNN		= 1 << 4,	/* attaching local port node name */
	NVMF_OPT_LPWWPN		= 1 << 5,	/* attaching local port port name */
};
/* parsed option values; mask says which fields are valid */
struct fcloop_ctrl_options {
	int			mask;	/* OR of NVMF_OPT_* seen in the string */
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;	/* local port names, remote-port create only */
	u64			lpwwpn;
};
/* token table for match_token(); terminated by the NVMF_OPT_ERR entry */
static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};
/*
 * Parse a comma/newline separated option string ("wwnn=...,wwpn=...")
 * into @opts, OR-ing each recognized token into opts->mask.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -EINVAL on an
 * unknown option or a malformed value.
 */
static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	/* work on a private copy: strsep() modifies the string */
	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		/* a miss yields NVMF_OPT_ERR (0), so the mask is unchanged */
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}
/*
 * Parse only wwnn/wwpn from @buf into *nname/*pname (used by the delete
 * handlers).  Both names must be present; otherwise returns -EINVAL.
 */
static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	/* all-ones acts as the "not seen" sentinel */
	*nname = -1;
	*pname = -1;

	/* work on a private copy: strsep() modifies the string */
	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		/* both names are mandatory */
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}
/* required option sets for each port-create sysfs attribute */
#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

/* protects the two global port lists below */
static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);
/* local (initiator-side) FC port, lives in the localport's private area */
struct fcloop_lport {
	struct nvme_fc_local_port *localport;	/* registered nvme-fc port */
	struct list_head lport_list;		/* entry on fcloop_lports */
	struct completion unreg_done;		/* completed by localport_delete */
};

/* remote-port private area: the initiator's view of the target */
struct fcloop_rport {
	struct nvme_fc_remote_port	*remoteport;
	struct nvmet_fc_target_port	*targetport;	/* peer; NULL if none */
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
};

/* target-port private area */
struct fcloop_tport {
	struct nvmet_fc_target_port	*targetport;
	struct nvme_fc_remote_port	*remoteport;	/* peer; NULL if none */
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
};

/* pairing object binding a remote port and target port with equal names */
struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;	/* entry on fcloop_nports */
	struct kref ref;		/* released via fcloop_nport_free() */
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};

/* in-flight LS request context (lsrqst_priv_sz private area) */
struct fcloop_lsreq {
	struct fcloop_tport		*tport;
	struct nvmefc_ls_req		*lsreq;
	struct work_struct		work;	/* fcloop_tgt_lsrqst_done_work */
	struct nvmefc_tgt_ls_req	tgt_ls_req;
	int				status;
};

/* target-side FCP request state, kzalloc'ed per I/O in fcloop_fcp_req() */
struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;  /* NULLed on initiator abort */
	spinlock_t			reqlock;  /* guards fcpreq/active/aborted */
	u16				status;
	bool				active;
	bool				aborted;
	struct work_struct		work;	/* fcloop_tgt_fcprqst_done_work */
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};

/* initiator-side FCP request private area (fcprqst_priv_sz) */
struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req	*fcpreq;
	struct fcloop_fcpreq	*tfcp_req;	/* NULLed once aborted */
	struct work_struct	iniwork;	/* fcloop_tgt_fcprqst_ini_done_work */
};
/* map a target-transport LS request back to its fcloop container */
static inline struct fcloop_lsreq *
tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
}

/* map a target-transport FCP request back to its fcloop container */
static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}
/*
 * No real hardware queues exist in loopback: hand the localport back
 * as the opaque per-queue handle.  Always succeeds.
 */
static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}
/* nothing was allocated in fcloop_create_queue(), so nothing to tear down */
static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}
/*
 * Transmit of LS RSP done (e.g. buffers all set). call back up
 * initiator "done" flows.
 */
static void
fcloop_tgt_lsrqst_done_work(struct work_struct *work)
{
	struct fcloop_lsreq *tls_req =
		container_of(work, struct fcloop_lsreq, work);
	struct fcloop_tport *tport = tls_req->tport;
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	/* skip the callback if the remote port has gone away */
	if (tport->remoteport)
		lsreq->done(lsreq, tls_req->status);
}
  262. static int
  263. fcloop_ls_req(struct nvme_fc_local_port *localport,
  264. struct nvme_fc_remote_port *remoteport,
  265. struct nvmefc_ls_req *lsreq)
  266. {
  267. struct fcloop_lsreq *tls_req = lsreq->private;
  268. struct fcloop_rport *rport = remoteport->private;
  269. int ret = 0;
  270. tls_req->lsreq = lsreq;
  271. INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);
  272. if (!rport->targetport) {
  273. tls_req->status = -ECONNREFUSED;
  274. schedule_work(&tls_req->work);
  275. return ret;
  276. }
  277. tls_req->status = 0;
  278. tls_req->tport = rport->targetport->private;
  279. ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
  280. lsreq->rqstaddr, lsreq->rqstlen);
  281. return ret;
  282. }
  283. static int
  284. fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
  285. struct nvmefc_tgt_ls_req *tgt_lsreq)
  286. {
  287. struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
  288. struct nvmefc_ls_req *lsreq = tls_req->lsreq;
  289. memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
  290. ((lsreq->rsplen < tgt_lsreq->rsplen) ?
  291. lsreq->rsplen : tgt_lsreq->rsplen));
  292. tgt_lsreq->done(tgt_lsreq);
  293. schedule_work(&tls_req->work);
  294. return 0;
  295. }
  296. /*
  297. * FCP IO operation done by initiator abort.
  298. * call back up initiator "done" flows.
  299. */
  300. static void
  301. fcloop_tgt_fcprqst_ini_done_work(struct work_struct *work)
  302. {
  303. struct fcloop_ini_fcpreq *inireq =
  304. container_of(work, struct fcloop_ini_fcpreq, iniwork);
  305. inireq->fcpreq->done(inireq->fcpreq);
  306. }
/*
 * FCP IO operation done by target completion.
 * call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, work);
	struct fcloop_tport *tport = tfcp_req->tport;
	struct nvmefc_fcp_req *fcpreq;

	/* fcpreq is cleared under reqlock when the initiator aborts */
	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	spin_unlock(&tfcp_req->reqlock);

	if (tport->remoteport && fcpreq) {
		fcpreq->status = tfcp_req->status;
		fcpreq->done(fcpreq);
	}

	/* this work item owns the final reference to the per-I/O context */
	kfree(tfcp_req);
}
  327. static int
  328. fcloop_fcp_req(struct nvme_fc_local_port *localport,
  329. struct nvme_fc_remote_port *remoteport,
  330. void *hw_queue_handle,
  331. struct nvmefc_fcp_req *fcpreq)
  332. {
  333. struct fcloop_rport *rport = remoteport->private;
  334. struct fcloop_ini_fcpreq *inireq = fcpreq->private;
  335. struct fcloop_fcpreq *tfcp_req;
  336. int ret = 0;
  337. if (!rport->targetport)
  338. return -ECONNREFUSED;
  339. tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_KERNEL);
  340. if (!tfcp_req)
  341. return -ENOMEM;
  342. inireq->fcpreq = fcpreq;
  343. inireq->tfcp_req = tfcp_req;
  344. INIT_WORK(&inireq->iniwork, fcloop_tgt_fcprqst_ini_done_work);
  345. tfcp_req->fcpreq = fcpreq;
  346. tfcp_req->tport = rport->targetport->private;
  347. spin_lock_init(&tfcp_req->reqlock);
  348. INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work);
  349. ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req,
  350. fcpreq->cmdaddr, fcpreq->cmdlen);
  351. return ret;
  352. }
/*
 * Copy @length bytes between the target-side data SG list (@data_sg)
 * and the initiator's I/O SG list (@io_sg), starting @offset bytes into
 * the I/O list.  Direction follows @op: WRITEDATA copies io -> data,
 * otherwise data -> io.
 *
 * Assumes both lists are virtually addressable via sg_virt() and span
 * at least offset+length bytes -- TODO confirm callers guarantee this.
 */
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	/* advance through the io list until @offset bytes are skipped */
	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	/* copy in chunks bounded by the current element of either list */
	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		/* step each list to its next element when exhausted */
		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}
/*
 * Target data-movement/response operation: transfer data and/or the
 * response between the target request and the matching initiator
 * request, then complete the target-side op.  Per-request state is
 * serialized via reqlock and the active flag.
 */
static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;

	/* snapshot abort/active state and claim the request */
	spin_lock(&tfcp_req->reqlock);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock(&tfcp_req->reqlock);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock(&tfcp_req->reqlock);
		tfcp_req->active = false;
		spin_unlock(&tfcp_req->reqlock);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			/* copy no more than the initiator's rsp buffer holds */
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	/* release the claim and complete the target-side op */
	spin_lock(&tfcp_req->reqlock);
	tfcp_req->active = false;
	spin_unlock(&tfcp_req->reqlock);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}
/*
 * Target-side abort: flag the request as aborted so a concurrent or
 * subsequent fcloop_fcp_op() fails it with -ECANCELED.
 */
static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */
	spin_lock(&tfcp_req->reqlock);
	tfcp_req->aborted = true;
	spin_unlock(&tfcp_req->reqlock);

	/* NOTE(review): written outside reqlock -- confirm no racing reader */
	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}
/*
 * Final target-transport release of an FCP request: defer initiator
 * completion and freeing of the context to work context.
 */
static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	schedule_work(&tfcp_req->work);
}
/* LS aborts are not supported by the loopback transport: no-op */
static void
fcloop_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
}
/*
 * Initiator-side FCP abort: forward the abort to the target side (if
 * still attached), sever the initiator/target linkage for this I/O,
 * then complete the initiator request with -ECANCELED in work context.
 */
static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	/*
	 * NOTE(review): tfcp_req is loaded and dereferenced before taking
	 * reqlock; presumably nvme-fc serializes abort against completion
	 * of the same request -- confirm, else this can race with the
	 * kfree in fcloop_tgt_fcprqst_done_work().
	 */
	struct fcloop_fcpreq *tfcp_req = inireq->tfcp_req;

	if (!tfcp_req)
		/* abort has already been called */
		return;

	if (rport->targetport)
		nvmet_fc_rcv_fcp_abort(rport->targetport,
					&tfcp_req->tgt_fcp_req);

	/* break initiator/target relationship for io */
	spin_lock(&tfcp_req->reqlock);
	inireq->tfcp_req = NULL;
	tfcp_req->fcpreq = NULL;
	spin_unlock(&tfcp_req->reqlock);

	/* post the aborted io completion */
	fcpreq->status = -ECANCELED;
	schedule_work(&inireq->iniwork);
}
/* kref release: remove the nport from the global list and free it */
static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}
/* drop a reference; frees the nport via fcloop_nport_free() at zero */
static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}
/* take a reference; returns 0 if the nport is already being freed */
static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}
/* nvme-fc callback: localport teardown finished */
static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport *lport = localport->private;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}
/* nvme-fc callback: remoteport gone -- drop its nport reference */
static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	fcloop_nport_put(rport->nport);
}
/* nvmet-fc callback: targetport gone -- drop its nport reference */
static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	fcloop_nport_put(tport->nport);
}
/* fake hardware limits advertised to both transports */
#define	FCLOOP_HW_QUEUES	4
#define	FCLOOP_SGL_SEGS		256
#define FCLOOP_DMABOUND_4G	0xFFFFFFFF

/* initiator-side (nvme-fc) operations template */
static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};
/* target-side (nvmet-fc) operations template */
static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= NVMET_FCTGTFEAT_CMD_IN_ISR |
				  NVMET_FCTGTFEAT_OPDONE_IN_ISR,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
};
  612. static ssize_t
  613. fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
  614. const char *buf, size_t count)
  615. {
  616. struct nvme_fc_port_info pinfo;
  617. struct fcloop_ctrl_options *opts;
  618. struct nvme_fc_local_port *localport;
  619. struct fcloop_lport *lport;
  620. int ret;
  621. opts = kzalloc(sizeof(*opts), GFP_KERNEL);
  622. if (!opts)
  623. return -ENOMEM;
  624. ret = fcloop_parse_options(opts, buf);
  625. if (ret)
  626. goto out_free_opts;
  627. /* everything there ? */
  628. if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
  629. ret = -EINVAL;
  630. goto out_free_opts;
  631. }
  632. memset(&pinfo, 0, sizeof(pinfo));
  633. pinfo.node_name = opts->wwnn;
  634. pinfo.port_name = opts->wwpn;
  635. pinfo.port_role = opts->roles;
  636. pinfo.port_id = opts->fcaddr;
  637. ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
  638. if (!ret) {
  639. unsigned long flags;
  640. /* success */
  641. lport = localport->private;
  642. lport->localport = localport;
  643. INIT_LIST_HEAD(&lport->lport_list);
  644. spin_lock_irqsave(&fcloop_lock, flags);
  645. list_add_tail(&lport->lport_list, &fcloop_lports);
  646. spin_unlock_irqrestore(&fcloop_lock, flags);
  647. /* mark all of the input buffer consumed */
  648. ret = count;
  649. }
  650. out_free_opts:
  651. kfree(opts);
  652. return ret ? ret : count;
  653. }
/* remove from fcloop_lports; caller holds fcloop_lock */
static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}
/*
 * Unregister the localport and block until fcloop_localport_delete()
 * signals completion.  Returns the unregister call's status.
 */
static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

	return ret;
}
/*
 * "delete local port" sysfs handler: find the lport by wwnn/wwpn,
 * unlink it under the lock, then unregister and wait for teardown.
 *
 * Returns @count on success or a negative errno.
 */
static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}
/*
 * Allocate (or reuse) an fcloop_nport for a remote- or target-port
 * create request.  @remoteport selects the required option set
 * (RPORT_OPTS vs TGTPORT_OPTS).
 *
 * If an nport with the same wwnn/wwpn already exists and the requested
 * side (rport/tport) is still free, that nport is reused with an
 * elevated refcount and refreshed options.  Returns NULL on failure.
 */
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		/* the new port's names must not collide with a local port */
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		/* for remote ports: locate the local port to attach to */
		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			/* requested side already occupied? reject */
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			/* reuse the existing nport, refreshing its options */
			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}
/*
 * "add remote port" sysfs handler: allocate/reuse an nport, register
 * an nvme-fc remote port on the named local port, and cross-link it
 * with any already-present target port of the same names.
 *
 * Returns @count on success or a negative errno.
 */
static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	/* alloc_nport(remoteport=true) guarantees nport->lport is set */
	ret = nvme_fc_register_remoteport(nport->lport->localport,
					&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success: cross-link initiator and target sides */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ?  nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;

	return count;
}
/*
 * Detach the rport from its nport and clear the target side's
 * back-pointer.  Caller holds fcloop_lock.  Returns the detached
 * rport, or NULL if none was attached.
 */
static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}
/* unregister a detached rport; -EALREADY if it was already gone */
static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	if (!rport)
		return -EALREADY;

	return nvme_fc_unregister_remoteport(rport->remoteport);
}
  822. static ssize_t
  823. fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
  824. const char *buf, size_t count)
  825. {
  826. struct fcloop_nport *nport = NULL, *tmpport;
  827. static struct fcloop_rport *rport;
  828. u64 nodename, portname;
  829. unsigned long flags;
  830. int ret;
  831. ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
  832. if (ret)
  833. return ret;
  834. spin_lock_irqsave(&fcloop_lock, flags);
  835. list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
  836. if (tmpport->node_name == nodename &&
  837. tmpport->port_name == portname && tmpport->rport) {
  838. nport = tmpport;
  839. rport = __unlink_remote_port(nport);
  840. break;
  841. }
  842. }
  843. spin_unlock_irqrestore(&fcloop_lock, flags);
  844. if (!nport)
  845. return -ENOENT;
  846. ret = __remoteport_unreg(nport, rport);
  847. return ret ? ret : count;
  848. }
  849. static ssize_t
  850. fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
  851. const char *buf, size_t count)
  852. {
  853. struct nvmet_fc_target_port *targetport;
  854. struct fcloop_nport *nport;
  855. struct fcloop_tport *tport;
  856. struct nvmet_fc_port_info tinfo;
  857. int ret;
  858. nport = fcloop_alloc_nport(buf, count, false);
  859. if (!nport)
  860. return -EIO;
  861. tinfo.node_name = nport->node_name;
  862. tinfo.port_name = nport->port_name;
  863. tinfo.port_id = nport->port_id;
  864. ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
  865. &targetport);
  866. if (ret) {
  867. fcloop_nport_put(nport);
  868. return ret;
  869. }
  870. /* success */
  871. tport = targetport->private;
  872. tport->targetport = targetport;
  873. tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
  874. if (nport->rport)
  875. nport->rport->targetport = targetport;
  876. tport->nport = nport;
  877. tport->lport = nport->lport;
  878. nport->tport = tport;
  879. return count;
  880. }
  881. static struct fcloop_tport *
  882. __unlink_target_port(struct fcloop_nport *nport)
  883. {
  884. struct fcloop_tport *tport = nport->tport;
  885. if (tport && nport->rport)
  886. nport->rport->targetport = NULL;
  887. nport->tport = NULL;
  888. return tport;
  889. }
  890. static int
  891. __targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
  892. {
  893. if (!tport)
  894. return -EALREADY;
  895. return nvmet_fc_unregister_targetport(tport->targetport);
  896. }
  897. static ssize_t
  898. fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
  899. const char *buf, size_t count)
  900. {
  901. struct fcloop_nport *nport = NULL, *tmpport;
  902. struct fcloop_tport *tport;
  903. u64 nodename, portname;
  904. unsigned long flags;
  905. int ret;
  906. ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
  907. if (ret)
  908. return ret;
  909. spin_lock_irqsave(&fcloop_lock, flags);
  910. list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
  911. if (tmpport->node_name == nodename &&
  912. tmpport->port_name == portname && tmpport->tport) {
  913. nport = tmpport;
  914. tport = __unlink_target_port(nport);
  915. break;
  916. }
  917. }
  918. spin_unlock_irqrestore(&fcloop_lock, flags);
  919. if (!nport)
  920. return -ENOENT;
  921. ret = __targetport_unreg(nport, tport);
  922. return ret ? ret : count;
  923. }
  924. static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
  925. static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
  926. static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
  927. static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
  928. static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
  929. static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
  930. static struct attribute *fcloop_dev_attrs[] = {
  931. &dev_attr_add_local_port.attr,
  932. &dev_attr_del_local_port.attr,
  933. &dev_attr_add_remote_port.attr,
  934. &dev_attr_del_remote_port.attr,
  935. &dev_attr_add_target_port.attr,
  936. &dev_attr_del_target_port.attr,
  937. NULL
  938. };
  939. static struct attribute_group fclopp_dev_attrs_group = {
  940. .attrs = fcloop_dev_attrs,
  941. };
  942. static const struct attribute_group *fcloop_dev_attr_groups[] = {
  943. &fclopp_dev_attrs_group,
  944. NULL,
  945. };
/* Class and singleton control device ("fcloop/ctl") created at module init */
static struct class *fcloop_class;
static struct device *fcloop_device;
  948. static int __init fcloop_init(void)
  949. {
  950. int ret;
  951. fcloop_class = class_create(THIS_MODULE, "fcloop");
  952. if (IS_ERR(fcloop_class)) {
  953. pr_err("couldn't register class fcloop\n");
  954. ret = PTR_ERR(fcloop_class);
  955. return ret;
  956. }
  957. fcloop_device = device_create_with_groups(
  958. fcloop_class, NULL, MKDEV(0, 0), NULL,
  959. fcloop_dev_attr_groups, "ctl");
  960. if (IS_ERR(fcloop_device)) {
  961. pr_err("couldn't create ctl device!\n");
  962. ret = PTR_ERR(fcloop_device);
  963. goto out_destroy_class;
  964. }
  965. get_device(fcloop_device);
  966. return 0;
  967. out_destroy_class:
  968. class_destroy(fcloop_class);
  969. return ret;
  970. }
/*
 * Module exit: tear down every remaining nport (target and remote ports
 * first), then every lport, then destroy the control device and class.
 *
 * The unregister calls may sleep, so each iteration unlinks the port
 * under fcloop_lock, drops the lock for the actual unregistration, and
 * re-acquires it before looking at the list head again. The list is
 * consumed via list_first_entry_or_null() rather than iterated, since
 * entries disappear while the lock is dropped.
 */
static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		/* detach both port types while still holding the lock */
		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		/* unregistration may sleep — must run unlocked */
		ret = __targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		/* waits for the transport to finish with the local port */
		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	/* drops the reference taken by get_device() in fcloop_init() */
	put_device(fcloop_device);
	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}
/* Standard module entry/exit hookup and license declaration */
module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");