/* fcloop.c - NVMe-FC loopback (initiator <-> target) test driver */
  1. /*
  2. * Copyright (c) 2016 Avago Technologies. All rights reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of version 2 of the GNU General Public License as
  6. * published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope that it will be useful.
  9. * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES,
  10. * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A
  11. * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO
  12. * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID.
  13. * See the GNU General Public License for more details, a copy of which
  14. * can be found in the file COPYING included with this package
  15. */
  16. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  17. #include <linux/module.h>
  18. #include <linux/parser.h>
  19. #include <uapi/scsi/fc/fc_fs.h>
  20. #include "../host/nvme.h"
  21. #include "../target/nvmet.h"
  22. #include <linux/nvme-fc-driver.h>
  23. #include <linux/nvme-fc.h>
  24. enum {
  25. NVMF_OPT_ERR = 0,
  26. NVMF_OPT_WWNN = 1 << 0,
  27. NVMF_OPT_WWPN = 1 << 1,
  28. NVMF_OPT_ROLES = 1 << 2,
  29. NVMF_OPT_FCADDR = 1 << 3,
  30. NVMF_OPT_LPWWNN = 1 << 4,
  31. NVMF_OPT_LPWWPN = 1 << 5,
  32. };
  33. struct fcloop_ctrl_options {
  34. int mask;
  35. u64 wwnn;
  36. u64 wwpn;
  37. u32 roles;
  38. u32 fcaddr;
  39. u64 lpwwnn;
  40. u64 lpwwpn;
  41. };
  42. static const match_table_t opt_tokens = {
  43. { NVMF_OPT_WWNN, "wwnn=%s" },
  44. { NVMF_OPT_WWPN, "wwpn=%s" },
  45. { NVMF_OPT_ROLES, "roles=%d" },
  46. { NVMF_OPT_FCADDR, "fcaddr=%x" },
  47. { NVMF_OPT_LPWWNN, "lpwwnn=%s" },
  48. { NVMF_OPT_LPWWPN, "lpwwpn=%s" },
  49. { NVMF_OPT_ERR, NULL }
  50. };
  51. static int
  52. fcloop_parse_options(struct fcloop_ctrl_options *opts,
  53. const char *buf)
  54. {
  55. substring_t args[MAX_OPT_ARGS];
  56. char *options, *o, *p;
  57. int token, ret = 0;
  58. u64 token64;
  59. options = o = kstrdup(buf, GFP_KERNEL);
  60. if (!options)
  61. return -ENOMEM;
  62. while ((p = strsep(&o, ",\n")) != NULL) {
  63. if (!*p)
  64. continue;
  65. token = match_token(p, opt_tokens, args);
  66. opts->mask |= token;
  67. switch (token) {
  68. case NVMF_OPT_WWNN:
  69. if (match_u64(args, &token64)) {
  70. ret = -EINVAL;
  71. goto out_free_options;
  72. }
  73. opts->wwnn = token64;
  74. break;
  75. case NVMF_OPT_WWPN:
  76. if (match_u64(args, &token64)) {
  77. ret = -EINVAL;
  78. goto out_free_options;
  79. }
  80. opts->wwpn = token64;
  81. break;
  82. case NVMF_OPT_ROLES:
  83. if (match_int(args, &token)) {
  84. ret = -EINVAL;
  85. goto out_free_options;
  86. }
  87. opts->roles = token;
  88. break;
  89. case NVMF_OPT_FCADDR:
  90. if (match_hex(args, &token)) {
  91. ret = -EINVAL;
  92. goto out_free_options;
  93. }
  94. opts->fcaddr = token;
  95. break;
  96. case NVMF_OPT_LPWWNN:
  97. if (match_u64(args, &token64)) {
  98. ret = -EINVAL;
  99. goto out_free_options;
  100. }
  101. opts->lpwwnn = token64;
  102. break;
  103. case NVMF_OPT_LPWWPN:
  104. if (match_u64(args, &token64)) {
  105. ret = -EINVAL;
  106. goto out_free_options;
  107. }
  108. opts->lpwwpn = token64;
  109. break;
  110. default:
  111. pr_warn("unknown parameter or missing value '%s'\n", p);
  112. ret = -EINVAL;
  113. goto out_free_options;
  114. }
  115. }
  116. out_free_options:
  117. kfree(options);
  118. return ret;
  119. }
  120. static int
  121. fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
  122. const char *buf)
  123. {
  124. substring_t args[MAX_OPT_ARGS];
  125. char *options, *o, *p;
  126. int token, ret = 0;
  127. u64 token64;
  128. *nname = -1;
  129. *pname = -1;
  130. options = o = kstrdup(buf, GFP_KERNEL);
  131. if (!options)
  132. return -ENOMEM;
  133. while ((p = strsep(&o, ",\n")) != NULL) {
  134. if (!*p)
  135. continue;
  136. token = match_token(p, opt_tokens, args);
  137. switch (token) {
  138. case NVMF_OPT_WWNN:
  139. if (match_u64(args, &token64)) {
  140. ret = -EINVAL;
  141. goto out_free_options;
  142. }
  143. *nname = token64;
  144. break;
  145. case NVMF_OPT_WWPN:
  146. if (match_u64(args, &token64)) {
  147. ret = -EINVAL;
  148. goto out_free_options;
  149. }
  150. *pname = token64;
  151. break;
  152. default:
  153. pr_warn("unknown parameter or missing value '%s'\n", p);
  154. ret = -EINVAL;
  155. goto out_free_options;
  156. }
  157. }
  158. out_free_options:
  159. kfree(options);
  160. if (!ret) {
  161. if (*nname == -1)
  162. return -EINVAL;
  163. if (*pname == -1)
  164. return -EINVAL;
  165. }
  166. return ret;
  167. }
  168. #define LPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN)
  169. #define RPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN | \
  170. NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)
  171. #define TGTPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN)
  172. #define ALL_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN | NVMF_OPT_ROLES | \
  173. NVMF_OPT_FCADDR | NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)
  174. static DEFINE_SPINLOCK(fcloop_lock);
  175. static LIST_HEAD(fcloop_lports);
  176. static LIST_HEAD(fcloop_nports);
  177. struct fcloop_lport {
  178. struct nvme_fc_local_port *localport;
  179. struct list_head lport_list;
  180. struct completion unreg_done;
  181. };
  182. struct fcloop_rport {
  183. struct nvme_fc_remote_port *remoteport;
  184. struct nvmet_fc_target_port *targetport;
  185. struct fcloop_nport *nport;
  186. struct fcloop_lport *lport;
  187. };
  188. struct fcloop_tport {
  189. struct nvmet_fc_target_port *targetport;
  190. struct nvme_fc_remote_port *remoteport;
  191. struct fcloop_nport *nport;
  192. struct fcloop_lport *lport;
  193. };
  194. struct fcloop_nport {
  195. struct fcloop_rport *rport;
  196. struct fcloop_tport *tport;
  197. struct fcloop_lport *lport;
  198. struct list_head nport_list;
  199. struct kref ref;
  200. struct completion rport_unreg_done;
  201. struct completion tport_unreg_done;
  202. u64 node_name;
  203. u64 port_name;
  204. u32 port_role;
  205. u32 port_id;
  206. };
  207. struct fcloop_lsreq {
  208. struct fcloop_tport *tport;
  209. struct nvmefc_ls_req *lsreq;
  210. struct work_struct work;
  211. struct nvmefc_tgt_ls_req tgt_ls_req;
  212. int status;
  213. };
  214. struct fcloop_fcpreq {
  215. struct fcloop_tport *tport;
  216. struct nvmefc_fcp_req *fcpreq;
  217. spinlock_t reqlock;
  218. u16 status;
  219. bool active;
  220. bool aborted;
  221. struct work_struct work;
  222. struct nvmefc_tgt_fcp_req tgt_fcp_req;
  223. };
  224. struct fcloop_ini_fcpreq {
  225. struct nvmefc_fcp_req *fcpreq;
  226. struct fcloop_fcpreq *tfcp_req;
  227. struct work_struct iniwork;
  228. };
  229. static inline struct fcloop_lsreq *
  230. tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
  231. {
  232. return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
  233. }
  234. static inline struct fcloop_fcpreq *
  235. tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
  236. {
  237. return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
  238. }
  239. static int
  240. fcloop_create_queue(struct nvme_fc_local_port *localport,
  241. unsigned int qidx, u16 qsize,
  242. void **handle)
  243. {
  244. *handle = localport;
  245. return 0;
  246. }
  247. static void
  248. fcloop_delete_queue(struct nvme_fc_local_port *localport,
  249. unsigned int idx, void *handle)
  250. {
  251. }
  252. /*
  253. * Transmit of LS RSP done (e.g. buffers all set). call back up
  254. * initiator "done" flows.
  255. */
  256. static void
  257. fcloop_tgt_lsrqst_done_work(struct work_struct *work)
  258. {
  259. struct fcloop_lsreq *tls_req =
  260. container_of(work, struct fcloop_lsreq, work);
  261. struct fcloop_tport *tport = tls_req->tport;
  262. struct nvmefc_ls_req *lsreq = tls_req->lsreq;
  263. if (tport->remoteport)
  264. lsreq->done(lsreq, tls_req->status);
  265. }
  266. static int
  267. fcloop_ls_req(struct nvme_fc_local_port *localport,
  268. struct nvme_fc_remote_port *remoteport,
  269. struct nvmefc_ls_req *lsreq)
  270. {
  271. struct fcloop_lsreq *tls_req = lsreq->private;
  272. struct fcloop_rport *rport = remoteport->private;
  273. int ret = 0;
  274. tls_req->lsreq = lsreq;
  275. INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);
  276. if (!rport->targetport) {
  277. tls_req->status = -ECONNREFUSED;
  278. schedule_work(&tls_req->work);
  279. return ret;
  280. }
  281. tls_req->status = 0;
  282. tls_req->tport = rport->targetport->private;
  283. ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
  284. lsreq->rqstaddr, lsreq->rqstlen);
  285. return ret;
  286. }
  287. static int
  288. fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
  289. struct nvmefc_tgt_ls_req *tgt_lsreq)
  290. {
  291. struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
  292. struct nvmefc_ls_req *lsreq = tls_req->lsreq;
  293. memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
  294. ((lsreq->rsplen < tgt_lsreq->rsplen) ?
  295. lsreq->rsplen : tgt_lsreq->rsplen));
  296. tgt_lsreq->done(tgt_lsreq);
  297. schedule_work(&tls_req->work);
  298. return 0;
  299. }
  300. /*
  301. * FCP IO operation done by initiator abort.
  302. * call back up initiator "done" flows.
  303. */
  304. static void
  305. fcloop_tgt_fcprqst_ini_done_work(struct work_struct *work)
  306. {
  307. struct fcloop_ini_fcpreq *inireq =
  308. container_of(work, struct fcloop_ini_fcpreq, iniwork);
  309. inireq->fcpreq->done(inireq->fcpreq);
  310. }
  311. /*
  312. * FCP IO operation done by target completion.
  313. * call back up initiator "done" flows.
  314. */
  315. static void
  316. fcloop_tgt_fcprqst_done_work(struct work_struct *work)
  317. {
  318. struct fcloop_fcpreq *tfcp_req =
  319. container_of(work, struct fcloop_fcpreq, work);
  320. struct fcloop_tport *tport = tfcp_req->tport;
  321. struct nvmefc_fcp_req *fcpreq;
  322. spin_lock(&tfcp_req->reqlock);
  323. fcpreq = tfcp_req->fcpreq;
  324. spin_unlock(&tfcp_req->reqlock);
  325. if (tport->remoteport && fcpreq) {
  326. fcpreq->status = tfcp_req->status;
  327. fcpreq->done(fcpreq);
  328. }
  329. kfree(tfcp_req);
  330. }
  331. static int
  332. fcloop_fcp_req(struct nvme_fc_local_port *localport,
  333. struct nvme_fc_remote_port *remoteport,
  334. void *hw_queue_handle,
  335. struct nvmefc_fcp_req *fcpreq)
  336. {
  337. struct fcloop_rport *rport = remoteport->private;
  338. struct fcloop_ini_fcpreq *inireq = fcpreq->private;
  339. struct fcloop_fcpreq *tfcp_req;
  340. int ret = 0;
  341. if (!rport->targetport)
  342. return -ECONNREFUSED;
  343. tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_KERNEL);
  344. if (!tfcp_req)
  345. return -ENOMEM;
  346. inireq->fcpreq = fcpreq;
  347. inireq->tfcp_req = tfcp_req;
  348. INIT_WORK(&inireq->iniwork, fcloop_tgt_fcprqst_ini_done_work);
  349. tfcp_req->fcpreq = fcpreq;
  350. tfcp_req->tport = rport->targetport->private;
  351. spin_lock_init(&tfcp_req->reqlock);
  352. INIT_WORK(&tfcp_req->work, fcloop_tgt_fcprqst_done_work);
  353. ret = nvmet_fc_rcv_fcp_req(rport->targetport, &tfcp_req->tgt_fcp_req,
  354. fcpreq->cmdaddr, fcpreq->cmdlen);
  355. return ret;
  356. }
  357. static void
  358. fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
  359. struct scatterlist *io_sg, u32 offset, u32 length)
  360. {
  361. void *data_p, *io_p;
  362. u32 data_len, io_len, tlen;
  363. io_p = sg_virt(io_sg);
  364. io_len = io_sg->length;
  365. for ( ; offset; ) {
  366. tlen = min_t(u32, offset, io_len);
  367. offset -= tlen;
  368. io_len -= tlen;
  369. if (!io_len) {
  370. io_sg = sg_next(io_sg);
  371. io_p = sg_virt(io_sg);
  372. io_len = io_sg->length;
  373. } else
  374. io_p += tlen;
  375. }
  376. data_p = sg_virt(data_sg);
  377. data_len = data_sg->length;
  378. for ( ; length; ) {
  379. tlen = min_t(u32, io_len, data_len);
  380. tlen = min_t(u32, tlen, length);
  381. if (op == NVMET_FCOP_WRITEDATA)
  382. memcpy(data_p, io_p, tlen);
  383. else
  384. memcpy(io_p, data_p, tlen);
  385. length -= tlen;
  386. io_len -= tlen;
  387. if ((!io_len) && (length)) {
  388. io_sg = sg_next(io_sg);
  389. io_p = sg_virt(io_sg);
  390. io_len = io_sg->length;
  391. } else
  392. io_p += tlen;
  393. data_len -= tlen;
  394. if ((!data_len) && (length)) {
  395. data_sg = sg_next(data_sg);
  396. data_p = sg_virt(data_sg);
  397. data_len = data_sg->length;
  398. } else
  399. data_p += tlen;
  400. }
  401. }
  402. static int
  403. fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
  404. struct nvmefc_tgt_fcp_req *tgt_fcpreq)
  405. {
  406. struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
  407. struct nvmefc_fcp_req *fcpreq;
  408. u32 rsplen = 0, xfrlen = 0;
  409. int fcp_err = 0, active, aborted;
  410. u8 op = tgt_fcpreq->op;
  411. spin_lock(&tfcp_req->reqlock);
  412. fcpreq = tfcp_req->fcpreq;
  413. active = tfcp_req->active;
  414. aborted = tfcp_req->aborted;
  415. tfcp_req->active = true;
  416. spin_unlock(&tfcp_req->reqlock);
  417. if (unlikely(active))
  418. /* illegal - call while i/o active */
  419. return -EALREADY;
  420. if (unlikely(aborted)) {
  421. /* target transport has aborted i/o prior */
  422. spin_lock(&tfcp_req->reqlock);
  423. tfcp_req->active = false;
  424. spin_unlock(&tfcp_req->reqlock);
  425. tgt_fcpreq->transferred_length = 0;
  426. tgt_fcpreq->fcp_error = -ECANCELED;
  427. tgt_fcpreq->done(tgt_fcpreq);
  428. return 0;
  429. }
  430. /*
  431. * if fcpreq is NULL, the I/O has been aborted (from
  432. * initiator side). For the target side, act as if all is well
  433. * but don't actually move data.
  434. */
  435. switch (op) {
  436. case NVMET_FCOP_WRITEDATA:
  437. xfrlen = tgt_fcpreq->transfer_length;
  438. if (fcpreq) {
  439. fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
  440. fcpreq->first_sgl, tgt_fcpreq->offset,
  441. xfrlen);
  442. fcpreq->transferred_length += xfrlen;
  443. }
  444. break;
  445. case NVMET_FCOP_READDATA:
  446. case NVMET_FCOP_READDATA_RSP:
  447. xfrlen = tgt_fcpreq->transfer_length;
  448. if (fcpreq) {
  449. fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
  450. fcpreq->first_sgl, tgt_fcpreq->offset,
  451. xfrlen);
  452. fcpreq->transferred_length += xfrlen;
  453. }
  454. if (op == NVMET_FCOP_READDATA)
  455. break;
  456. /* Fall-Thru to RSP handling */
  457. case NVMET_FCOP_RSP:
  458. if (fcpreq) {
  459. rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
  460. fcpreq->rsplen : tgt_fcpreq->rsplen);
  461. memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
  462. if (rsplen < tgt_fcpreq->rsplen)
  463. fcp_err = -E2BIG;
  464. fcpreq->rcv_rsplen = rsplen;
  465. fcpreq->status = 0;
  466. }
  467. tfcp_req->status = 0;
  468. break;
  469. default:
  470. fcp_err = -EINVAL;
  471. break;
  472. }
  473. spin_lock(&tfcp_req->reqlock);
  474. tfcp_req->active = false;
  475. spin_unlock(&tfcp_req->reqlock);
  476. tgt_fcpreq->transferred_length = xfrlen;
  477. tgt_fcpreq->fcp_error = fcp_err;
  478. tgt_fcpreq->done(tgt_fcpreq);
  479. return 0;
  480. }
  481. static void
  482. fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
  483. struct nvmefc_tgt_fcp_req *tgt_fcpreq)
  484. {
  485. struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
  486. /*
  487. * mark aborted only in case there were 2 threads in transport
  488. * (one doing io, other doing abort) and only kills ops posted
  489. * after the abort request
  490. */
  491. spin_lock(&tfcp_req->reqlock);
  492. tfcp_req->aborted = true;
  493. spin_unlock(&tfcp_req->reqlock);
  494. tfcp_req->status = NVME_SC_FC_TRANSPORT_ABORTED;
  495. /*
  496. * nothing more to do. If io wasn't active, the transport should
  497. * immediately call the req_release. If it was active, the op
  498. * will complete, and the lldd should call req_release.
  499. */
  500. }
  501. static void
  502. fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
  503. struct nvmefc_tgt_fcp_req *tgt_fcpreq)
  504. {
  505. struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
  506. schedule_work(&tfcp_req->work);
  507. }
  508. static void
  509. fcloop_ls_abort(struct nvme_fc_local_port *localport,
  510. struct nvme_fc_remote_port *remoteport,
  511. struct nvmefc_ls_req *lsreq)
  512. {
  513. }
  514. static void
  515. fcloop_fcp_abort(struct nvme_fc_local_port *localport,
  516. struct nvme_fc_remote_port *remoteport,
  517. void *hw_queue_handle,
  518. struct nvmefc_fcp_req *fcpreq)
  519. {
  520. struct fcloop_rport *rport = remoteport->private;
  521. struct fcloop_ini_fcpreq *inireq = fcpreq->private;
  522. struct fcloop_fcpreq *tfcp_req = inireq->tfcp_req;
  523. if (!tfcp_req)
  524. /* abort has already been called */
  525. return;
  526. if (rport->targetport)
  527. nvmet_fc_rcv_fcp_abort(rport->targetport,
  528. &tfcp_req->tgt_fcp_req);
  529. /* break initiator/target relationship for io */
  530. spin_lock(&tfcp_req->reqlock);
  531. inireq->tfcp_req = NULL;
  532. tfcp_req->fcpreq = NULL;
  533. spin_unlock(&tfcp_req->reqlock);
  534. /* post the aborted io completion */
  535. fcpreq->status = -ECANCELED;
  536. schedule_work(&inireq->iniwork);
  537. }
  538. static void
  539. fcloop_localport_delete(struct nvme_fc_local_port *localport)
  540. {
  541. struct fcloop_lport *lport = localport->private;
  542. /* release any threads waiting for the unreg to complete */
  543. complete(&lport->unreg_done);
  544. }
  545. static void
  546. fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
  547. {
  548. struct fcloop_rport *rport = remoteport->private;
  549. /* release any threads waiting for the unreg to complete */
  550. complete(&rport->nport->rport_unreg_done);
  551. }
  552. static void
  553. fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
  554. {
  555. struct fcloop_tport *tport = targetport->private;
  556. /* release any threads waiting for the unreg to complete */
  557. complete(&tport->nport->tport_unreg_done);
  558. }
  559. #define FCLOOP_HW_QUEUES 4
  560. #define FCLOOP_SGL_SEGS 256
  561. #define FCLOOP_DMABOUND_4G 0xFFFFFFFF
  562. static struct nvme_fc_port_template fctemplate = {
  563. .localport_delete = fcloop_localport_delete,
  564. .remoteport_delete = fcloop_remoteport_delete,
  565. .create_queue = fcloop_create_queue,
  566. .delete_queue = fcloop_delete_queue,
  567. .ls_req = fcloop_ls_req,
  568. .fcp_io = fcloop_fcp_req,
  569. .ls_abort = fcloop_ls_abort,
  570. .fcp_abort = fcloop_fcp_abort,
  571. .max_hw_queues = FCLOOP_HW_QUEUES,
  572. .max_sgl_segments = FCLOOP_SGL_SEGS,
  573. .max_dif_sgl_segments = FCLOOP_SGL_SEGS,
  574. .dma_boundary = FCLOOP_DMABOUND_4G,
  575. /* sizes of additional private data for data structures */
  576. .local_priv_sz = sizeof(struct fcloop_lport),
  577. .remote_priv_sz = sizeof(struct fcloop_rport),
  578. .lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
  579. .fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq),
  580. };
  581. static struct nvmet_fc_target_template tgttemplate = {
  582. .targetport_delete = fcloop_targetport_delete,
  583. .xmt_ls_rsp = fcloop_xmt_ls_rsp,
  584. .fcp_op = fcloop_fcp_op,
  585. .fcp_abort = fcloop_tgt_fcp_abort,
  586. .fcp_req_release = fcloop_fcp_req_release,
  587. .max_hw_queues = FCLOOP_HW_QUEUES,
  588. .max_sgl_segments = FCLOOP_SGL_SEGS,
  589. .max_dif_sgl_segments = FCLOOP_SGL_SEGS,
  590. .dma_boundary = FCLOOP_DMABOUND_4G,
  591. /* optional features */
  592. .target_features = NVMET_FCTGTFEAT_CMD_IN_ISR |
  593. NVMET_FCTGTFEAT_OPDONE_IN_ISR,
  594. /* sizes of additional private data for data structures */
  595. .target_priv_sz = sizeof(struct fcloop_tport),
  596. };
  597. static ssize_t
  598. fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
  599. const char *buf, size_t count)
  600. {
  601. struct nvme_fc_port_info pinfo;
  602. struct fcloop_ctrl_options *opts;
  603. struct nvme_fc_local_port *localport;
  604. struct fcloop_lport *lport;
  605. int ret;
  606. opts = kzalloc(sizeof(*opts), GFP_KERNEL);
  607. if (!opts)
  608. return -ENOMEM;
  609. ret = fcloop_parse_options(opts, buf);
  610. if (ret)
  611. goto out_free_opts;
  612. /* everything there ? */
  613. if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
  614. ret = -EINVAL;
  615. goto out_free_opts;
  616. }
  617. pinfo.node_name = opts->wwnn;
  618. pinfo.port_name = opts->wwpn;
  619. pinfo.port_role = opts->roles;
  620. pinfo.port_id = opts->fcaddr;
  621. ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
  622. if (!ret) {
  623. unsigned long flags;
  624. /* success */
  625. lport = localport->private;
  626. lport->localport = localport;
  627. INIT_LIST_HEAD(&lport->lport_list);
  628. spin_lock_irqsave(&fcloop_lock, flags);
  629. list_add_tail(&lport->lport_list, &fcloop_lports);
  630. spin_unlock_irqrestore(&fcloop_lock, flags);
  631. /* mark all of the input buffer consumed */
  632. ret = count;
  633. }
  634. out_free_opts:
  635. kfree(opts);
  636. return ret ? ret : count;
  637. }
  638. static void
  639. __unlink_local_port(struct fcloop_lport *lport)
  640. {
  641. list_del(&lport->lport_list);
  642. }
  643. static int
  644. __wait_localport_unreg(struct fcloop_lport *lport)
  645. {
  646. int ret;
  647. init_completion(&lport->unreg_done);
  648. ret = nvme_fc_unregister_localport(lport->localport);
  649. wait_for_completion(&lport->unreg_done);
  650. return ret;
  651. }
  652. static ssize_t
  653. fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
  654. const char *buf, size_t count)
  655. {
  656. struct fcloop_lport *tlport, *lport = NULL;
  657. u64 nodename, portname;
  658. unsigned long flags;
  659. int ret;
  660. ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
  661. if (ret)
  662. return ret;
  663. spin_lock_irqsave(&fcloop_lock, flags);
  664. list_for_each_entry(tlport, &fcloop_lports, lport_list) {
  665. if (tlport->localport->node_name == nodename &&
  666. tlport->localport->port_name == portname) {
  667. lport = tlport;
  668. __unlink_local_port(lport);
  669. break;
  670. }
  671. }
  672. spin_unlock_irqrestore(&fcloop_lock, flags);
  673. if (!lport)
  674. return -ENOENT;
  675. ret = __wait_localport_unreg(lport);
  676. return ret ? ret : count;
  677. }
  678. static void
  679. fcloop_nport_free(struct kref *ref)
  680. {
  681. struct fcloop_nport *nport =
  682. container_of(ref, struct fcloop_nport, ref);
  683. unsigned long flags;
  684. spin_lock_irqsave(&fcloop_lock, flags);
  685. list_del(&nport->nport_list);
  686. spin_unlock_irqrestore(&fcloop_lock, flags);
  687. kfree(nport);
  688. }
  689. static void
  690. fcloop_nport_put(struct fcloop_nport *nport)
  691. {
  692. kref_put(&nport->ref, fcloop_nport_free);
  693. }
  694. static int
  695. fcloop_nport_get(struct fcloop_nport *nport)
  696. {
  697. return kref_get_unless_zero(&nport->ref);
  698. }
  699. static struct fcloop_nport *
  700. fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
  701. {
  702. struct fcloop_nport *newnport, *nport = NULL;
  703. struct fcloop_lport *tmplport, *lport = NULL;
  704. struct fcloop_ctrl_options *opts;
  705. unsigned long flags;
  706. u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
  707. int ret;
  708. opts = kzalloc(sizeof(*opts), GFP_KERNEL);
  709. if (!opts)
  710. return NULL;
  711. ret = fcloop_parse_options(opts, buf);
  712. if (ret)
  713. goto out_free_opts;
  714. /* everything there ? */
  715. if ((opts->mask & opts_mask) != opts_mask) {
  716. ret = -EINVAL;
  717. goto out_free_opts;
  718. }
  719. newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
  720. if (!newnport)
  721. goto out_free_opts;
  722. INIT_LIST_HEAD(&newnport->nport_list);
  723. newnport->node_name = opts->wwnn;
  724. newnport->port_name = opts->wwpn;
  725. if (opts->mask & NVMF_OPT_ROLES)
  726. newnport->port_role = opts->roles;
  727. if (opts->mask & NVMF_OPT_FCADDR)
  728. newnport->port_id = opts->fcaddr;
  729. kref_init(&newnport->ref);
  730. spin_lock_irqsave(&fcloop_lock, flags);
  731. list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
  732. if (tmplport->localport->node_name == opts->wwnn &&
  733. tmplport->localport->port_name == opts->wwpn)
  734. goto out_invalid_opts;
  735. if (tmplport->localport->node_name == opts->lpwwnn &&
  736. tmplport->localport->port_name == opts->lpwwpn)
  737. lport = tmplport;
  738. }
  739. if (remoteport) {
  740. if (!lport)
  741. goto out_invalid_opts;
  742. newnport->lport = lport;
  743. }
  744. list_for_each_entry(nport, &fcloop_nports, nport_list) {
  745. if (nport->node_name == opts->wwnn &&
  746. nport->port_name == opts->wwpn) {
  747. if ((remoteport && nport->rport) ||
  748. (!remoteport && nport->tport)) {
  749. nport = NULL;
  750. goto out_invalid_opts;
  751. }
  752. fcloop_nport_get(nport);
  753. spin_unlock_irqrestore(&fcloop_lock, flags);
  754. if (remoteport)
  755. nport->lport = lport;
  756. if (opts->mask & NVMF_OPT_ROLES)
  757. nport->port_role = opts->roles;
  758. if (opts->mask & NVMF_OPT_FCADDR)
  759. nport->port_id = opts->fcaddr;
  760. goto out_free_newnport;
  761. }
  762. }
  763. list_add_tail(&newnport->nport_list, &fcloop_nports);
  764. spin_unlock_irqrestore(&fcloop_lock, flags);
  765. kfree(opts);
  766. return newnport;
  767. out_invalid_opts:
  768. spin_unlock_irqrestore(&fcloop_lock, flags);
  769. out_free_newnport:
  770. kfree(newnport);
  771. out_free_opts:
  772. kfree(opts);
  773. return nport;
  774. }
  775. static ssize_t
  776. fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
  777. const char *buf, size_t count)
  778. {
  779. struct nvme_fc_remote_port *remoteport;
  780. struct fcloop_nport *nport;
  781. struct fcloop_rport *rport;
  782. struct nvme_fc_port_info pinfo;
  783. int ret;
  784. nport = fcloop_alloc_nport(buf, count, true);
  785. if (!nport)
  786. return -EIO;
  787. pinfo.node_name = nport->node_name;
  788. pinfo.port_name = nport->port_name;
  789. pinfo.port_role = nport->port_role;
  790. pinfo.port_id = nport->port_id;
  791. ret = nvme_fc_register_remoteport(nport->lport->localport,
  792. &pinfo, &remoteport);
  793. if (ret || !remoteport) {
  794. fcloop_nport_put(nport);
  795. return ret;
  796. }
  797. /* success */
  798. rport = remoteport->private;
  799. rport->remoteport = remoteport;
  800. rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
  801. if (nport->tport) {
  802. nport->tport->remoteport = remoteport;
  803. nport->tport->lport = nport->lport;
  804. }
  805. rport->nport = nport;
  806. rport->lport = nport->lport;
  807. nport->rport = rport;
  808. return count;
  809. }
  810. static struct fcloop_rport *
  811. __unlink_remote_port(struct fcloop_nport *nport)
  812. {
  813. struct fcloop_rport *rport = nport->rport;
  814. if (rport && nport->tport)
  815. nport->tport->remoteport = NULL;
  816. nport->rport = NULL;
  817. return rport;
  818. }
  819. static int
  820. __wait_remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
  821. {
  822. int ret;
  823. if (!rport)
  824. return -EALREADY;
  825. init_completion(&nport->rport_unreg_done);
  826. ret = nvme_fc_unregister_remoteport(rport->remoteport);
  827. if (ret)
  828. return ret;
  829. wait_for_completion(&nport->rport_unreg_done);
  830. fcloop_nport_put(nport);
  831. return ret;
  832. }
  833. static ssize_t
  834. fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
  835. const char *buf, size_t count)
  836. {
  837. struct fcloop_nport *nport = NULL, *tmpport;
  838. static struct fcloop_rport *rport;
  839. u64 nodename, portname;
  840. unsigned long flags;
  841. int ret;
  842. ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
  843. if (ret)
  844. return ret;
  845. spin_lock_irqsave(&fcloop_lock, flags);
  846. list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
  847. if (tmpport->node_name == nodename &&
  848. tmpport->port_name == portname && tmpport->rport) {
  849. nport = tmpport;
  850. rport = __unlink_remote_port(nport);
  851. break;
  852. }
  853. }
  854. spin_unlock_irqrestore(&fcloop_lock, flags);
  855. if (!nport)
  856. return -ENOENT;
  857. ret = __wait_remoteport_unreg(nport, rport);
  858. return ret ? ret : count;
  859. }
  860. static ssize_t
  861. fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
  862. const char *buf, size_t count)
  863. {
  864. struct nvmet_fc_target_port *targetport;
  865. struct fcloop_nport *nport;
  866. struct fcloop_tport *tport;
  867. struct nvmet_fc_port_info tinfo;
  868. int ret;
  869. nport = fcloop_alloc_nport(buf, count, false);
  870. if (!nport)
  871. return -EIO;
  872. tinfo.node_name = nport->node_name;
  873. tinfo.port_name = nport->port_name;
  874. tinfo.port_id = nport->port_id;
  875. ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
  876. &targetport);
  877. if (ret) {
  878. fcloop_nport_put(nport);
  879. return ret;
  880. }
  881. /* success */
  882. tport = targetport->private;
  883. tport->targetport = targetport;
  884. tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
  885. if (nport->rport)
  886. nport->rport->targetport = targetport;
  887. tport->nport = nport;
  888. tport->lport = nport->lport;
  889. nport->tport = tport;
  890. return count;
  891. }
  892. static struct fcloop_tport *
  893. __unlink_target_port(struct fcloop_nport *nport)
  894. {
  895. struct fcloop_tport *tport = nport->tport;
  896. if (tport && nport->rport)
  897. nport->rport->targetport = NULL;
  898. nport->tport = NULL;
  899. return tport;
  900. }
  901. static int
  902. __wait_targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
  903. {
  904. int ret;
  905. if (!tport)
  906. return -EALREADY;
  907. init_completion(&nport->tport_unreg_done);
  908. ret = nvmet_fc_unregister_targetport(tport->targetport);
  909. if (ret)
  910. return ret;
  911. wait_for_completion(&nport->tport_unreg_done);
  912. fcloop_nport_put(nport);
  913. return ret;
  914. }
  915. static ssize_t
  916. fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
  917. const char *buf, size_t count)
  918. {
  919. struct fcloop_nport *nport = NULL, *tmpport;
  920. struct fcloop_tport *tport;
  921. u64 nodename, portname;
  922. unsigned long flags;
  923. int ret;
  924. ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
  925. if (ret)
  926. return ret;
  927. spin_lock_irqsave(&fcloop_lock, flags);
  928. list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
  929. if (tmpport->node_name == nodename &&
  930. tmpport->port_name == portname && tmpport->tport) {
  931. nport = tmpport;
  932. tport = __unlink_target_port(nport);
  933. break;
  934. }
  935. }
  936. spin_unlock_irqrestore(&fcloop_lock, flags);
  937. if (!nport)
  938. return -ENOENT;
  939. ret = __wait_targetport_unreg(nport, tport);
  940. return ret ? ret : count;
  941. }
  942. static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
  943. static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
  944. static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
  945. static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
  946. static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
  947. static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
  948. static struct attribute *fcloop_dev_attrs[] = {
  949. &dev_attr_add_local_port.attr,
  950. &dev_attr_del_local_port.attr,
  951. &dev_attr_add_remote_port.attr,
  952. &dev_attr_del_remote_port.attr,
  953. &dev_attr_add_target_port.attr,
  954. &dev_attr_del_target_port.attr,
  955. NULL
  956. };
  957. static struct attribute_group fclopp_dev_attrs_group = {
  958. .attrs = fcloop_dev_attrs,
  959. };
  960. static const struct attribute_group *fcloop_dev_attr_groups[] = {
  961. &fclopp_dev_attrs_group,
  962. NULL,
  963. };
/* sysfs class and the single "ctl" device carrying the port attributes */
static struct class *fcloop_class;
static struct device *fcloop_device;
  966. static int __init fcloop_init(void)
  967. {
  968. int ret;
  969. fcloop_class = class_create(THIS_MODULE, "fcloop");
  970. if (IS_ERR(fcloop_class)) {
  971. pr_err("couldn't register class fcloop\n");
  972. ret = PTR_ERR(fcloop_class);
  973. return ret;
  974. }
  975. fcloop_device = device_create_with_groups(
  976. fcloop_class, NULL, MKDEV(0, 0), NULL,
  977. fcloop_dev_attr_groups, "ctl");
  978. if (IS_ERR(fcloop_device)) {
  979. pr_err("couldn't create ctl device!\n");
  980. ret = PTR_ERR(fcloop_device);
  981. goto out_destroy_class;
  982. }
  983. get_device(fcloop_device);
  984. return 0;
  985. out_destroy_class:
  986. class_destroy(fcloop_class);
  987. return ret;
  988. }
/*
 * Module unload: tear down every nport (target then remote side) and
 * every lport, then remove the sysfs ctl device and class.  The lock
 * is dropped around each unregister because the __wait_* helpers
 * sleep on completions.
 */
static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	/* drain fcloop_nports: unlink under the lock, unregister without it */
	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __wait_remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		/* retake the lock before re-examining the list head */
		spin_lock_irqsave(&fcloop_lock, flags);
	}

	/* drain fcloop_lports the same way */
	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	/* drop the extra reference taken in fcloop_init(), then tear down */
	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}
/* Register module entry/exit points and license. */
module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");