tcm_loop.c 34 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323
  1. /*******************************************************************************
  2. *
  3. * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
  4. * for emulated SAS initiator ports
  5. *
  6. * © Copyright 2011-2013 Datera, Inc.
  7. *
  8. * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
  9. *
  10. * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
  11. *
  12. * This program is free software; you can redistribute it and/or modify
  13. * it under the terms of the GNU General Public License as published by
  14. * the Free Software Foundation; either version 2 of the License, or
  15. * (at your option) any later version.
  16. *
  17. * This program is distributed in the hope that it will be useful,
  18. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  19. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  20. * GNU General Public License for more details.
  21. ****************************************************************************/
  22. #include <linux/module.h>
  23. #include <linux/moduleparam.h>
  24. #include <linux/init.h>
  25. #include <linux/slab.h>
  26. #include <linux/types.h>
  27. #include <linux/configfs.h>
  28. #include <scsi/scsi.h>
  29. #include <scsi/scsi_tcq.h>
  30. #include <scsi/scsi_host.h>
  31. #include <scsi/scsi_device.h>
  32. #include <scsi/scsi_cmnd.h>
  33. #include <target/target_core_base.h>
  34. #include <target/target_core_fabric.h>
  35. #include <target/target_core_fabric_configfs.h>
  36. #include "tcm_loop.h"
  37. #define to_tcm_loop_hba(hba) container_of(hba, struct tcm_loop_hba, dev)
  38. static struct workqueue_struct *tcm_loop_workqueue;
  39. static struct kmem_cache *tcm_loop_cmd_cache;
  40. static int tcm_loop_hba_no_cnt;
  41. static int tcm_loop_queue_status(struct se_cmd *se_cmd);
  42. /*
  43. * Called from struct target_core_fabric_ops->check_stop_free()
  44. */
  45. static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
  46. {
  47. /*
  48. * Do not release struct se_cmd's containing a valid TMR
  49. * pointer. These will be released directly in tcm_loop_device_reset()
  50. * with transport_generic_free_cmd().
  51. */
  52. if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
  53. return 0;
  54. /*
  55. * Release the struct se_cmd, which will make a callback to release
  56. * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
  57. */
  58. transport_generic_free_cmd(se_cmd, 0);
  59. return 1;
  60. }
  61. static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
  62. {
  63. struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
  64. struct tcm_loop_cmd, tl_se_cmd);
  65. kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
  66. }
  67. static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
  68. {
  69. seq_printf(m, "tcm_loop_proc_info()\n");
  70. return 0;
  71. }
  72. static int tcm_loop_driver_probe(struct device *);
  73. static int tcm_loop_driver_remove(struct device *);
  74. static int pseudo_lld_bus_match(struct device *dev,
  75. struct device_driver *dev_driver)
  76. {
  77. return 1;
  78. }
/* Virtual bus for the emulated adapters; all devices bind to tcm_loop. */
static struct bus_type tcm_loop_lld_bus = {
	.name = "tcm_loop_bus",
	.match = pseudo_lld_bus_match,
	.probe = tcm_loop_driver_probe,
	.remove = tcm_loop_driver_remove,
};
/* Driver-model entry bound to every device on tcm_loop_lld_bus. */
static struct device_driver tcm_loop_driverfs = {
	.name = "tcm_loop",
	.bus = &tcm_loop_lld_bus,
};
  89. /*
  90. * Used with root_device_register() in tcm_loop_alloc_core_bus() below
  91. */
  92. static struct device *tcm_loop_primary;
/*
 * Workqueue handler: submit a queued scsi_cmnd into target core.
 * Runs in process context, deferred from tcm_loop_queuecommand().
 */
static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0, transfer_length;
	int rc;

	/* The emulated HBA pointer is stashed in Scsi_Host private data. */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		goto out_done;
	}
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}
	/* For BIDI commands, hand the read-direction SGL to the core too. */
	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;
	}

	transfer_length = scsi_transfer_length(sc);
	if (!scsi_prot_sg_count(sc) &&
	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
		se_cmd->prot_pto = true;
		/*
		 * loopback transport doesn't support
		 * WRITE_GENERATE, READ_STRIP protection
		 * information operations, go ahead unprotected.
		 */
		transfer_length = scsi_bufflen(sc);
	}

	se_cmd->tag = tl_cmd->sc_cmd_tag;
	/* Map the scsi_cmnd's SGLs directly into se_cmd and submit. */
	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
			transfer_length, TCM_SIMPLE_TAG,
			sc->sc_data_direction, 0,
			scsi_sglist(sc), scsi_sg_count(sc),
			sgl_bidi, sgl_bidi_count,
			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
	if (rc < 0) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	return;

out_done:
	/* Command never reached the core: free it and complete to SCSI ML. */
	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	sc->scsi_done(sc);
	return;
}
  161. /*
  162. * ->queuecommand can be and usually is called from interrupt context, so
  163. * defer the actual submission to a workqueue.
  164. */
static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
	struct tcm_loop_cmd *tl_cmd;

	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%llu got CDB: 0x%02x"
		" scsi_buf_len: %u\n", sc->device->host->host_no,
		sc->device->id, sc->device->channel, sc->device->lun,
		sc->cmnd[0], scsi_bufflen(sc));

	/* GFP_ATOMIC: this path may run in interrupt context. */
	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
	if (!tl_cmd) {
		pr_err("Unable to allocate struct tcm_loop_cmd\n");
		set_host_byte(sc, DID_ERROR);
		sc->scsi_done(sc);
		return 0;
	}

	tl_cmd->sc = sc;
	tl_cmd->sc_cmd_tag = sc->request->tag;
	/* Defer the actual submission to process context. */
	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
	queue_work(tcm_loop_workqueue, &tl_cmd->work);
	return 0;
}
  185. /*
  186. * Called from SCSI EH process context to issue a LUN_RESET TMR
  187. * to struct scsi_device
  188. */
/*
 * Build and execute a Task Management Request (e.g. ABORT_TASK or
 * LUN_RESET) against the given TPG/LUN, then sleep until target core
 * completes it via tcm_loop_queue_tm_rsp().
 *
 * Returns a TMR_* response code; TMR_FUNCTION_FAILED on setup failure.
 */
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
		u64 lun, int task, enum tcm_tmreq_table tmr)
{
	struct se_cmd *se_cmd = NULL;
	struct se_session *se_sess;
	struct se_portal_group *se_tpg;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_cmd *tl_cmd = NULL;
	struct tcm_loop_tmr *tl_tmr = NULL;
	int ret = TMR_FUNCTION_FAILED, rc;

	/*
	 * Locate the tl_nexus and se_sess pointers
	 */
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		pr_err("Unable to perform device reset without"
				" active I_T Nexus\n");
		return ret;
	}

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
	if (!tl_cmd) {
		pr_err("Unable to allocate memory for tl_cmd\n");
		return ret;
	}

	tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
	if (!tl_tmr) {
		pr_err("Unable to allocate memory for tl_tmr\n");
		goto release;
	}
	/* Completion is signalled on this waitqueue by queue_tm_rsp(). */
	init_waitqueue_head(&tl_tmr->tl_tmr_wait);

	se_cmd = &tl_cmd->tl_se_cmd;
	se_tpg = &tl_tpg->tl_se_tpg;
	se_sess = tl_tpg->tl_nexus->se_sess;
	/*
	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
				DMA_NONE, TCM_SIMPLE_TAG,
				&tl_cmd->tl_sense_buf[0]);

	rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
	if (rc < 0)
		goto release;

	if (tmr == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = task;

	/*
	 * Locate the underlying TCM struct se_lun
	 */
	if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
		ret = TMR_LUN_DOES_NOT_EXIST;
		goto release;
	}
	/*
	 * Queue the TMR to TCM Core and sleep waiting for
	 * tcm_loop_queue_tm_rsp() to wake us up.
	 */
	transport_generic_handle_tmr(se_cmd);
	wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
	/*
	 * The TMR LUN_RESET has completed, check the response status and
	 * then release allocations.
	 */
	ret = se_cmd->se_tmr_req->response;
release:
	/*
	 * Once se_cmd has been initialized, transport_generic_free_cmd()
	 * owns the tl_cmd release; otherwise free the slab object directly.
	 */
	if (se_cmd)
		transport_generic_free_cmd(se_cmd, 1);
	else
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	kfree(tl_tmr);
	return ret;
}
  259. static int tcm_loop_abort_task(struct scsi_cmnd *sc)
  260. {
  261. struct tcm_loop_hba *tl_hba;
  262. struct tcm_loop_tpg *tl_tpg;
  263. int ret = FAILED;
  264. /*
  265. * Locate the tcm_loop_hba_t pointer
  266. */
  267. tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
  268. tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
  269. ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
  270. sc->request->tag, TMR_ABORT_TASK);
  271. return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
  272. }
  273. /*
  274. * Called from SCSI EH process context to issue a LUN_RESET TMR
  275. * to struct scsi_device
  276. */
  277. static int tcm_loop_device_reset(struct scsi_cmnd *sc)
  278. {
  279. struct tcm_loop_hba *tl_hba;
  280. struct tcm_loop_tpg *tl_tpg;
  281. int ret = FAILED;
  282. /*
  283. * Locate the tcm_loop_hba_t pointer
  284. */
  285. tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
  286. tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
  287. ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
  288. 0, TMR_LUN_RESET);
  289. return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
  290. }
  291. static int tcm_loop_target_reset(struct scsi_cmnd *sc)
  292. {
  293. struct tcm_loop_hba *tl_hba;
  294. struct tcm_loop_tpg *tl_tpg;
  295. /*
  296. * Locate the tcm_loop_hba_t pointer
  297. */
  298. tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
  299. if (!tl_hba) {
  300. pr_err("Unable to perform device reset without"
  301. " active I_T Nexus\n");
  302. return FAILED;
  303. }
  304. /*
  305. * Locate the tl_tpg pointer from TargetID in sc->device->id
  306. */
  307. tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
  308. if (tl_tpg) {
  309. tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
  310. return SUCCESS;
  311. }
  312. return FAILED;
  313. }
  314. static int tcm_loop_slave_alloc(struct scsi_device *sd)
  315. {
  316. set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
  317. return 0;
  318. }
/*
 * SCSI mid-layer host template for the emulated loopback LLD.
 * queuecommand defers into a workqueue; EH handlers are routed into
 * target core TMRs.
 */
static struct scsi_host_template tcm_loop_driver_template = {
	.show_info = tcm_loop_show_info,
	.proc_name = "tcm_loopback",
	.name = "TCM_Loopback",
	.queuecommand = tcm_loop_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.eh_abort_handler = tcm_loop_abort_task,
	.eh_device_reset_handler = tcm_loop_device_reset,
	.eh_target_reset_handler = tcm_loop_target_reset,
	.can_queue = 1024,
	.this_id = -1,
	.sg_tablesize = 256,
	.cmd_per_lun = 1024,
	.max_sectors = 0xFFFF,
	.use_clustering = DISABLE_CLUSTERING,
	.slave_alloc = tcm_loop_slave_alloc,
	.module = THIS_MODULE,
	.use_blk_tags = 1,
	.track_queue_depth = 1,
};
  339. static int tcm_loop_driver_probe(struct device *dev)
  340. {
  341. struct tcm_loop_hba *tl_hba;
  342. struct Scsi_Host *sh;
  343. int error, host_prot;
  344. tl_hba = to_tcm_loop_hba(dev);
  345. sh = scsi_host_alloc(&tcm_loop_driver_template,
  346. sizeof(struct tcm_loop_hba));
  347. if (!sh) {
  348. pr_err("Unable to allocate struct scsi_host\n");
  349. return -ENODEV;
  350. }
  351. tl_hba->sh = sh;
  352. /*
  353. * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
  354. */
  355. *((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
  356. /*
  357. * Setup single ID, Channel and LUN for now..
  358. */
  359. sh->max_id = 2;
  360. sh->max_lun = 0;
  361. sh->max_channel = 0;
  362. sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
  363. host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
  364. SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
  365. SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
  366. scsi_host_set_prot(sh, host_prot);
  367. scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);
  368. error = scsi_add_host(sh, &tl_hba->dev);
  369. if (error) {
  370. pr_err("%s: scsi_add_host failed\n", __func__);
  371. scsi_host_put(sh);
  372. return -ENODEV;
  373. }
  374. return 0;
  375. }
  376. static int tcm_loop_driver_remove(struct device *dev)
  377. {
  378. struct tcm_loop_hba *tl_hba;
  379. struct Scsi_Host *sh;
  380. tl_hba = to_tcm_loop_hba(dev);
  381. sh = tl_hba->sh;
  382. scsi_remove_host(sh);
  383. scsi_host_put(sh);
  384. return 0;
  385. }
  386. static void tcm_loop_release_adapter(struct device *dev)
  387. {
  388. struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);
  389. kfree(tl_hba);
  390. }
  391. /*
  392. * Called from tcm_loop_make_scsi_hba() in tcm_loop_configfs.c
  393. */
  394. static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
  395. {
  396. int ret;
  397. tl_hba->dev.bus = &tcm_loop_lld_bus;
  398. tl_hba->dev.parent = tcm_loop_primary;
  399. tl_hba->dev.release = &tcm_loop_release_adapter;
  400. dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);
  401. ret = device_register(&tl_hba->dev);
  402. if (ret) {
  403. pr_err("device_register() failed for"
  404. " tl_hba->dev: %d\n", ret);
  405. return -ENODEV;
  406. }
  407. return 0;
  408. }
  409. /*
  410. * Called from tcm_loop_fabric_init() in tcl_loop_fabric.c to load the emulated
  411. * tcm_loop SCSI bus.
  412. */
  413. static int tcm_loop_alloc_core_bus(void)
  414. {
  415. int ret;
  416. tcm_loop_primary = root_device_register("tcm_loop_0");
  417. if (IS_ERR(tcm_loop_primary)) {
  418. pr_err("Unable to allocate tcm_loop_primary\n");
  419. return PTR_ERR(tcm_loop_primary);
  420. }
  421. ret = bus_register(&tcm_loop_lld_bus);
  422. if (ret) {
  423. pr_err("bus_register() failed for tcm_loop_lld_bus\n");
  424. goto dev_unreg;
  425. }
  426. ret = driver_register(&tcm_loop_driverfs);
  427. if (ret) {
  428. pr_err("driver_register() failed for"
  429. "tcm_loop_driverfs\n");
  430. goto bus_unreg;
  431. }
  432. pr_debug("Initialized TCM Loop Core Bus\n");
  433. return ret;
  434. bus_unreg:
  435. bus_unregister(&tcm_loop_lld_bus);
  436. dev_unreg:
  437. root_device_unregister(tcm_loop_primary);
  438. return ret;
  439. }
  440. static void tcm_loop_release_core_bus(void)
  441. {
  442. driver_unregister(&tcm_loop_driverfs);
  443. bus_unregister(&tcm_loop_lld_bus);
  444. root_device_unregister(tcm_loop_primary);
  445. pr_debug("Releasing TCM Loop Core BUS\n");
  446. }
  447. static char *tcm_loop_get_fabric_name(void)
  448. {
  449. return "loopback";
  450. }
/* Map a generic se_portal_group back to its embedding tcm_loop_tpg. */
static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
{
	return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
}
  455. static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
  456. {
  457. /*
  458. * Return the passed NAA identifier for the Target Port
  459. */
  460. return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
  461. }
  462. static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
  463. {
  464. /*
  465. * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
  466. * to represent the SCSI Target Port.
  467. */
  468. return tl_tpg(se_tpg)->tl_tpgt;
  469. }
  470. /*
  471. * Returning (1) here allows for target_core_mod struct se_node_acl to be generated
  472. * based upon the incoming fabric dependent SCSI Initiator Port
  473. */
  474. static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
  475. {
  476. return 1;
  477. }
  478. static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
  479. {
  480. return 0;
  481. }
  482. /*
  483. * Allow I_T Nexus full READ-WRITE access without explict Initiator Node ACLs for
  484. * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
  485. */
  486. static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
  487. {
  488. return 0;
  489. }
  490. /*
  491. * Because TCM_Loop does not use explict ACLs and MappedLUNs, this will
  492. * never be called for TCM_Loop by target_core_fabric_configfs.c code.
  493. * It has been added here as a nop for target_fabric_tf_ops_check()
  494. */
  495. static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
  496. {
  497. return 0;
  498. }
/* Report the configfs-configured fabric DIF protection type for this TPG. */
static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	return tl_tpg->tl_fabric_prot_type;
}
  505. static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
  506. {
  507. return 1;
  508. }
  509. static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
  510. {
  511. return 1;
  512. }
  513. static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
  514. {
  515. return;
  516. }
/* Return the fabric-tracked command state for the embedding tcm_loop_cmd. */
static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	return tl_cmd->sc_cmd_state;
}
  523. static int tcm_loop_shutdown_session(struct se_session *se_sess)
  524. {
  525. return 0;
  526. }
  527. static void tcm_loop_close_session(struct se_session *se_sess)
  528. {
  529. return;
  530. };
/* Fabric ->write_pending(): WRITE data is already mapped, execute now. */
static int tcm_loop_write_pending(struct se_cmd *se_cmd)
{
	/*
	 * Since Linux/SCSI has already sent down a struct scsi_cmnd
	 * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array
	 * memory, and memory has already been mapped to struct se_cmd->t_mem_list
	 * format with transport_generic_map_mem_to_cmd().
	 *
	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
	 * object execution queue.
	 */
	target_execute_cmd(se_cmd);
	return 0;
}
  545. static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
  546. {
  547. return 0;
  548. }
/*
 * Completion callback for DATA-IN: payload already sits in the
 * scsi_cmnd's SGLs, so set status/residual and complete to SCSI ML.
 */
static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
		" cdb: 0x%02x\n", sc, sc->cmnd[0]);

	sc->result = SAM_STAT_GOOD;
	set_host_byte(sc, DID_OK);
	/* Report any over/underflow residual back to the midlayer. */
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}
/*
 * Completion callback for status (and sense): translate se_cmd status
 * into the scsi_cmnd result and complete to the SCSI midlayer.
 */
static int tcm_loop_queue_status(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
		" cdb: 0x%02x\n", sc, sc->cmnd[0]);

	/* Copy sense data for CHECK CONDITION; otherwise pass raw status. */
	if (se_cmd->sense_buffer &&
	    ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	     (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {

		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
				SCSI_SENSE_BUFFERSIZE);
		sc->result = SAM_STAT_CHECK_CONDITION;
		set_driver_byte(sc, DRIVER_SENSE);
	} else
		sc->result = se_cmd->scsi_status;

	set_host_byte(sc, DID_OK);
	/* Report any over/underflow residual back to the midlayer. */
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}
/* TMR completion callback: wake the EH thread sleeping in issue_tmr(). */
static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
	/*
	 * The SCSI EH thread will be sleeping on se_tmr->tl_tmr_wait, go ahead
	 * and wake up the wait_queue_head_t in tcm_loop_device_reset()
	 */
	atomic_set(&tl_tmr->tmr_complete, 1);
	wake_up(&tl_tmr->tl_tmr_wait);
}
  598. static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
  599. {
  600. return;
  601. }
  602. static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
  603. {
  604. switch (tl_hba->tl_proto_id) {
  605. case SCSI_PROTOCOL_SAS:
  606. return "SAS";
  607. case SCSI_PROTOCOL_FCP:
  608. return "FCP";
  609. case SCSI_PROTOCOL_ISCSI:
  610. return "iSCSI";
  611. default:
  612. break;
  613. }
  614. return "Unknown";
  615. }
  616. /* Start items for tcm_loop_port_cit */
/*
 * configfs port-link callback: surface a new LUN to the SCSI midlayer
 * as a scsi_device on the emulated host.
 */
static int tcm_loop_port_link(
	struct se_portal_group *se_tpg,
	struct se_lun *lun)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	/* Pin the TPG while a port is linked (checked in drop_nexus). */
	atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
	/*
	 * Add Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);

	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
	return 0;
}
/*
 * configfs port-unlink callback: remove the scsi_device that
 * tcm_loop_port_link() surfaced for this LUN.
 */
static void tcm_loop_port_unlink(
	struct se_portal_group *se_tpg,
	struct se_lun *se_lun)
{
	struct scsi_device *sd;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
	tl_hba = tl_tpg->tl_hba;

	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
				se_lun->unpacked_lun);
	if (!sd) {
		pr_err("Unable to locate struct scsi_device for %d:%d:"
			"%llu\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
		return;
	}
	/*
	 * Remove Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_remove_device(sd);
	/* Drop the reference taken by scsi_device_lookup(). */
	scsi_device_put(sd);

	atomic_dec_mb(&tl_tpg->tl_tpg_port_count);

	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}
  656. /* End items for tcm_loop_port_cit */
  657. static ssize_t tcm_loop_tpg_attrib_show_fabric_prot_type(
  658. struct se_portal_group *se_tpg,
  659. char *page)
  660. {
  661. struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
  662. tl_se_tpg);
  663. return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
  664. }
  665. static ssize_t tcm_loop_tpg_attrib_store_fabric_prot_type(
  666. struct se_portal_group *se_tpg,
  667. const char *page,
  668. size_t count)
  669. {
  670. struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
  671. tl_se_tpg);
  672. unsigned long val;
  673. int ret = kstrtoul(page, 0, &val);
  674. if (ret) {
  675. pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
  676. return ret;
  677. }
  678. if (val != 0 && val != 1 && val != 3) {
  679. pr_err("Invalid qla2xxx fabric_prot_type: %lu\n", val);
  680. return -EINVAL;
  681. }
  682. tl_tpg->tl_fabric_prot_type = val;
  683. return count;
  684. }
/* Instantiate the configfs attribute and its NULL-terminated table. */
TF_TPG_ATTRIB_ATTR(tcm_loop, fabric_prot_type, S_IRUGO | S_IWUSR);

static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
	&tcm_loop_tpg_attrib_fabric_prot_type.attr,
	NULL,
};
  690. /* Start items for tcm_loop_nexus_cit */
/*
 * Create and register the single I_T nexus (se_session) for a TPG,
 * using 'name' as the emulated SCSI Initiator Port name.
 *
 * Returns 0 on success, -EEXIST if a nexus already exists, or a
 * negative errno on allocation/session setup failure.
 */
static int tcm_loop_make_nexus(
	struct tcm_loop_tpg *tl_tpg,
	const char *name)
{
	struct se_portal_group *se_tpg;
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	struct tcm_loop_nexus *tl_nexus;
	int ret = -ENOMEM;

	if (tl_tpg->tl_nexus) {
		pr_debug("tl_tpg->tl_nexus already exists\n");
		return -EEXIST;
	}
	se_tpg = &tl_tpg->tl_se_tpg;

	tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
	if (!tl_nexus) {
		pr_err("Unable to allocate struct tcm_loop_nexus\n");
		return -ENOMEM;
	}
	/*
	 * Initialize the struct se_session pointer
	 */
	tl_nexus->se_sess = transport_init_session(
				TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
	if (IS_ERR(tl_nexus->se_sess)) {
		ret = PTR_ERR(tl_nexus->se_sess);
		goto out;
	}
	/*
	 * Since we are running in 'demo mode' this call will generate a
	 * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
	 * Initiator port name of the passed configfs group 'name'.
	 */
	tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
				se_tpg, (unsigned char *)name);
	if (!tl_nexus->se_sess->se_node_acl) {
		transport_free_session(tl_nexus->se_sess);
		goto out;	/* falls back to the -ENOMEM initializer */
	}
	/* Now, register the I_T Nexus as active. */
	transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
			tl_nexus->se_sess, tl_nexus);
	tl_tpg->tl_nexus = tl_nexus;
	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
		name);
	return 0;

out:
	kfree(tl_nexus);
	return ret;
}
/*
 * Tear down the TPG's active I_T nexus.  Refused (-EPERM) while any
 * port link still pins the TPG; -ENODEV if no nexus exists.
 */
static int tcm_loop_drop_nexus(
	struct tcm_loop_tpg *tpg)
{
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus;

	tl_nexus = tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	se_sess = tl_nexus->se_sess;
	if (!se_sess)
		return -ENODEV;

	/* Ports must be unlinked first (see tcm_loop_port_link/unlink). */
	if (atomic_read(&tpg->tl_tpg_port_count)) {
		pr_err("Unable to remove TCM_Loop I_T Nexus with"
			" active TPG port count: %d\n",
			atomic_read(&tpg->tl_tpg_port_count));
		return -EPERM;
	}

	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
		tl_nexus->se_sess->se_node_acl->initiatorname);
	/*
	 * Release the SCSI I_T Nexus to the emulated Target Port
	 */
	transport_deregister_session(tl_nexus->se_sess);
	tpg->tl_nexus = NULL;
	kfree(tl_nexus);
	return 0;
}
  769. /* End items for tcm_loop_nexus_cit */
  770. static ssize_t tcm_loop_tpg_show_nexus(
  771. struct se_portal_group *se_tpg,
  772. char *page)
  773. {
  774. struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
  775. struct tcm_loop_tpg, tl_se_tpg);
  776. struct tcm_loop_nexus *tl_nexus;
  777. ssize_t ret;
  778. tl_nexus = tl_tpg->tl_nexus;
  779. if (!tl_nexus)
  780. return -ENODEV;
  781. ret = snprintf(page, PAGE_SIZE, "%s\n",
  782. tl_nexus->se_sess->se_node_acl->initiatorname);
  783. return ret;
  784. }
/*
 * configfs 'nexus' store handler: create or destroy the I_T nexus for
 * this TPG from the string written by userspace.
 *
 * Writing "NULL" drops the active nexus.  Otherwise the string must be
 * an initiator WWN whose prefix ("naa.", "fc." or "iqn.") matches the
 * fabric protocol chosen in tcm_loop_make_scsi_hba(), and a new nexus
 * is created via tcm_loop_make_nexus().
 *
 * Returns 'count' on success or a negative errno.
 */
static ssize_t tcm_loop_tpg_store_nexus(
	struct se_portal_group *se_tpg,
	const char *page,
	size_t count)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = tcm_loop_drop_nexus(tl_tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
	 * tcm_loop_make_nexus()
	 */
	if (strlen(page) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA Sas Address: %s, exceeds"
			" max: %d\n", page, TL_WWN_ADDR_LEN);
		return -EINVAL;
	}
	/* Take a bounded local copy so the trailing newline can be stripped */
	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		/* "naa." implies SAS; reject if the HBA was created for another proto */
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		/*
		 * NOTE(review): skips a fixed 3 bytes from the buffer start,
		 * which assumes the "fc." prefix is at offset 0 even though
		 * strstr() would also match it mid-string.
		 */
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	/* No recognized WWN prefix was found */
	pr_err("Unable to locate prefix for emulated Initiator Port:"
		" %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}
/* Generates the read/write 'nexus' configfs attribute for the TPG */
TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR);
  861. static ssize_t tcm_loop_tpg_show_transport_status(
  862. struct se_portal_group *se_tpg,
  863. char *page)
  864. {
  865. struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
  866. struct tcm_loop_tpg, tl_se_tpg);
  867. const char *status = NULL;
  868. ssize_t ret = -EINVAL;
  869. switch (tl_tpg->tl_transport_status) {
  870. case TCM_TRANSPORT_ONLINE:
  871. status = "online";
  872. break;
  873. case TCM_TRANSPORT_OFFLINE:
  874. status = "offline";
  875. break;
  876. default:
  877. break;
  878. }
  879. if (status)
  880. ret = snprintf(page, PAGE_SIZE, "%s\n", status);
  881. return ret;
  882. }
  883. static ssize_t tcm_loop_tpg_store_transport_status(
  884. struct se_portal_group *se_tpg,
  885. const char *page,
  886. size_t count)
  887. {
  888. struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
  889. struct tcm_loop_tpg, tl_se_tpg);
  890. if (!strncmp(page, "online", 6)) {
  891. tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
  892. return count;
  893. }
  894. if (!strncmp(page, "offline", 7)) {
  895. tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
  896. if (tl_tpg->tl_nexus) {
  897. struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;
  898. core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
  899. }
  900. return count;
  901. }
  902. return -EINVAL;
  903. }
/* Generates the read/write 'transport_status' configfs attribute for the TPG */
TF_TPG_BASE_ATTR(tcm_loop, transport_status, S_IRUGO | S_IWUSR);

/* Attribute table exposed under each tpgt_N/ configfs directory */
static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
	&tcm_loop_tpg_nexus.attr,
	&tcm_loop_tpg_transport_status.attr,
	NULL,
};
  910. /* Start items for tcm_loop_naa_cit */
  911. static struct se_portal_group *tcm_loop_make_naa_tpg(
  912. struct se_wwn *wwn,
  913. struct config_group *group,
  914. const char *name)
  915. {
  916. struct tcm_loop_hba *tl_hba = container_of(wwn,
  917. struct tcm_loop_hba, tl_hba_wwn);
  918. struct tcm_loop_tpg *tl_tpg;
  919. int ret;
  920. unsigned long tpgt;
  921. if (strstr(name, "tpgt_") != name) {
  922. pr_err("Unable to locate \"tpgt_#\" directory"
  923. " group\n");
  924. return ERR_PTR(-EINVAL);
  925. }
  926. if (kstrtoul(name+5, 10, &tpgt))
  927. return ERR_PTR(-EINVAL);
  928. if (tpgt >= TL_TPGS_PER_HBA) {
  929. pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA:"
  930. " %u\n", tpgt, TL_TPGS_PER_HBA);
  931. return ERR_PTR(-EINVAL);
  932. }
  933. tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
  934. tl_tpg->tl_hba = tl_hba;
  935. tl_tpg->tl_tpgt = tpgt;
  936. /*
  937. * Register the tl_tpg as a emulated TCM Target Endpoint
  938. */
  939. ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
  940. if (ret < 0)
  941. return ERR_PTR(-ENOMEM);
  942. pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
  943. " Target Port %s,t,0x%04lx\n", tcm_loop_dump_proto_id(tl_hba),
  944. config_item_name(&wwn->wwn_group.cg_item), tpgt);
  945. return &tl_tpg->tl_se_tpg;
  946. }
/*
 * configfs callback for rmdir of a "tpgt_N" directory: drop any active
 * I_T nexus, deregister the TPG from target-core, and reset the
 * tcm_loop_tpg slot for reuse.
 */
static void tcm_loop_drop_naa_tpg(
	struct se_portal_group *se_tpg)
{
	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba;
	unsigned short tpgt;

	/* Save hba/tag before the fields are cleared below; the final
	 * pr_debug() needs them after tl_tpg has been reset. */
	tl_hba = tl_tpg->tl_hba;
	tpgt = tl_tpg->tl_tpgt;
	/*
	 * Release the I_T Nexus for the Virtual target link if present
	 */
	tcm_loop_drop_nexus(tl_tpg);
	/*
	 * Deregister the tl_tpg as a emulated TCM Target Endpoint
	 */
	core_tpg_deregister(se_tpg);

	tl_tpg->tl_hba = NULL;
	tl_tpg->tl_tpgt = 0;

	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
		config_item_name(&wwn->wwn_group.cg_item), tpgt);
}
  971. /* End items for tcm_loop_naa_cit */
  972. /* Start items for tcm_loop_cit */
/*
 * configfs callback for mkdir of a WWN directory under
 * /sys/kernel/config/target/loopback/: allocate a tcm_loop_hba,
 * derive the fabric protocol from the directory-name prefix
 * ("naa." = SAS, "fc." = FCP, "iqn." = iSCSI), and register an
 * emulated Scsi_Host for it.
 *
 * Returns the new se_wwn on success or ERR_PTR(-EINVAL/-ENOMEM/...)
 * on failure.
 */
static struct se_wwn *tcm_loop_make_scsi_hba(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	char *ptr;
	int ret, off = 0;

	tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
	if (!tl_hba) {
		pr_err("Unable to allocate struct tcm_loop_hba\n");
		return ERR_PTR(-ENOMEM);
	}
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (!ptr) {
		pr_err("Unable to locate prefix for emulated Target "
				"Port: %s\n", name);
		ret = -EINVAL;
		goto out;
	}
	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;

check_len:
	/* The stored WWN must fit in tl_wwn_address[TL_WWN_ADDR_LEN] */
	if (strlen(name) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA %s Address: %s, exceeds"
			" max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
			TL_WWN_ADDR_LEN);
		ret = -EINVAL;
		goto out;
	}
	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);

	/*
	 * Call device_register(tl_hba->dev) to register the emulated
	 * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after
	 * device_register() callbacks in tcm_loop_driver_probe()
	 */
	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
	if (ret)
		goto out;

	sh = tl_hba->sh;
	/* Monotonic host-number counter for the next emulated HBA */
	tcm_loop_hba_no_cnt++;
	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
		" %s Address: %s at Linux/SCSI Host ID: %d\n",
		tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
	return &tl_hba->tl_hba_wwn;
out:
	kfree(tl_hba);
	return ERR_PTR(ret);
}
/*
 * configfs callback for rmdir of a WWN directory: unregister the
 * emulated HBA device.  Note tl_hba itself is NOT freed here.
 */
static void tcm_loop_drop_scsi_hba(
	struct se_wwn *wwn)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
				struct tcm_loop_hba, tl_hba_wwn);

	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
		" %s Address: %s at Linux/SCSI Host ID: %d\n",
		tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
		tl_hba->sh->host_no);
	/*
	 * Call device_unregister() on the original tl_hba->dev.
	 * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
	 * release *tl_hba;
	 */
	device_unregister(&tl_hba->dev);
}
  1053. /* Start items for tcm_loop_cit */
  1054. static ssize_t tcm_loop_wwn_show_attr_version(
  1055. struct target_fabric_configfs *tf,
  1056. char *page)
  1057. {
  1058. return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
  1059. }
/* Generates the read-only 'version' configfs attribute */
TF_WWN_ATTR_RO(tcm_loop, version);

/* Attribute table exposed at the fabric's top-level configfs directory */
static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
	&tcm_loop_wwn_version.attr,
	NULL,
};
  1065. /* End items for tcm_loop_cit */
/*
 * Fabric ops table registered with target-core: wires the tcm_loop
 * callbacks defined in this file (and earlier in it) into the generic
 * target fabric configfs/transport machinery.
 */
static const struct target_core_fabric_ops loop_ops = {
	.module				= THIS_MODULE,
	.name				= "loopback",
	.get_fabric_name		= tcm_loop_get_fabric_name,
	.tpg_get_wwn			= tcm_loop_get_endpoint_wwn,
	.tpg_get_tag			= tcm_loop_get_tag,
	.tpg_check_demo_mode		= tcm_loop_check_demo_mode,
	.tpg_check_demo_mode_cache	= tcm_loop_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect =
				tcm_loop_check_demo_mode_write_protect,
	.tpg_check_prod_mode_write_protect =
				tcm_loop_check_prod_mode_write_protect,
	.tpg_check_prot_fabric_only	= tcm_loop_check_prot_fabric_only,
	.tpg_get_inst_index		= tcm_loop_get_inst_index,
	.check_stop_free		= tcm_loop_check_stop_free,
	.release_cmd			= tcm_loop_release_cmd,
	.shutdown_session		= tcm_loop_shutdown_session,
	.close_session			= tcm_loop_close_session,
	.sess_get_index			= tcm_loop_sess_get_index,
	.write_pending			= tcm_loop_write_pending,
	.write_pending_status		= tcm_loop_write_pending_status,
	.set_default_node_attributes	= tcm_loop_set_default_node_attributes,
	.get_cmd_state			= tcm_loop_get_cmd_state,
	.queue_data_in			= tcm_loop_queue_data_in,
	.queue_status			= tcm_loop_queue_status,
	.queue_tm_rsp			= tcm_loop_queue_tm_rsp,
	.aborted_task			= tcm_loop_aborted_task,
	/* configfs mkdir/rmdir hooks for WWNs and TPGs */
	.fabric_make_wwn		= tcm_loop_make_scsi_hba,
	.fabric_drop_wwn		= tcm_loop_drop_scsi_hba,
	.fabric_make_tpg		= tcm_loop_make_naa_tpg,
	.fabric_drop_tpg		= tcm_loop_drop_naa_tpg,
	.fabric_post_link		= tcm_loop_port_link,
	.fabric_pre_unlink		= tcm_loop_port_unlink,
	/* configfs attribute tables defined above */
	.tfc_wwn_attrs			= tcm_loop_wwn_attrs,
	.tfc_tpg_base_attrs		= tcm_loop_tpg_attrs,
	.tfc_tpg_attrib_attrs		= tcm_loop_tpg_attrib_attrs,
};
/*
 * Module init: set up the workqueue, the per-command kmem cache, the
 * emulated core bus, and finally register the fabric template.
 * Resources are released in reverse order on failure via the goto
 * ladder below.
 */
static int __init tcm_loop_fabric_init(void)
{
	int ret = -ENOMEM;

	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
	if (!tcm_loop_workqueue)
		goto out;

	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
				sizeof(struct tcm_loop_cmd),
				__alignof__(struct tcm_loop_cmd),
				0, NULL);
	if (!tcm_loop_cmd_cache) {
		pr_debug("kmem_cache_create() for"
			" tcm_loop_cmd_cache failed\n");
		goto out_destroy_workqueue;
	}

	ret = tcm_loop_alloc_core_bus();
	if (ret)
		goto out_destroy_cache;

	/* Last step: make the fabric visible to target-core/configfs */
	ret = target_register_template(&loop_ops);
	if (ret)
		goto out_release_core_bus;

	return 0;

out_release_core_bus:
	tcm_loop_release_core_bus();
out_destroy_cache:
	kmem_cache_destroy(tcm_loop_cmd_cache);
out_destroy_workqueue:
	destroy_workqueue(tcm_loop_workqueue);
out:
	return ret;
}
/*
 * Module exit: tear everything down in the reverse order of
 * tcm_loop_fabric_init().
 */
static void __exit tcm_loop_fabric_exit(void)
{
	target_unregister_template(&loop_ops);
	tcm_loop_release_core_bus();
	kmem_cache_destroy(tcm_loop_cmd_cache);
	destroy_workqueue(tcm_loop_workqueue);
}
/* Standard module metadata and entry/exit point registration */
MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
MODULE_LICENSE("GPL");
module_init(tcm_loop_fabric_init);
module_exit(tcm_loop_fabric_exit);