tcm_loop.c

/*******************************************************************************
 *
 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
 * for emulated SAS initiator ports
 *
 * © Copyright 2011-2013 Datera, Inc.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "tcm_loop.h"

#define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)

static struct workqueue_struct *tcm_loop_workqueue;
static struct kmem_cache *tcm_loop_cmd_cache;

static int tcm_loop_hba_no_cnt;

static int tcm_loop_queue_status(struct se_cmd *se_cmd);

/*
 * Called from struct target_core_fabric_ops->check_stop_free()
 */
static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
{
	return transport_generic_free_cmd(se_cmd, 0);
}

static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
}

static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	seq_puts(m, "tcm_loop_proc_info()\n");
	return 0;
}

static int tcm_loop_driver_probe(struct device *);
static int tcm_loop_driver_remove(struct device *);

static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type tcm_loop_lld_bus = {
	.name = "tcm_loop_bus",
	.match = pseudo_lld_bus_match,
	.probe = tcm_loop_driver_probe,
	.remove = tcm_loop_driver_remove,
};

static struct device_driver tcm_loop_driverfs = {
	.name = "tcm_loop",
	.bus = &tcm_loop_lld_bus,
};

/*
 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
 */
static struct device *tcm_loop_primary;

static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0, transfer_length;
	int rc;
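
	/*
	 * tcm_loop_driver_probe() stashed a pointer to the owning tcm_loop_hba
	 * in the Scsi_Host private data; recover the emulated HBA (and from it
	 * the TPG indexed by the SCSI target id) before dispatching to the
	 * target core.
	 */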
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		goto out_done;
	}
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc,
			    "TCM_Loop I_T Nexus does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}
	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;
	}

	transfer_length = scsi_transfer_length(sc);
	if (!scsi_prot_sg_count(sc) &&
	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
		se_cmd->prot_pto = true;
		/*
		 * loopback transport doesn't support
		 * WRITE_GENERATE, READ_STRIP protection
		 * information operations, go ahead unprotected.
		 */
		transfer_length = scsi_bufflen(sc);
	}

	se_cmd->tag = tl_cmd->sc_cmd_tag;
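	/*
	 * Hand the command to the target core. target_submit_cmd_map_sgls()
	 * maps the Linux/SCSI data, BIDI and protection scatterlists directly
	 * into the se_cmd (no data copy); completion is signalled back later
	 * through tcm_loop_queue_data_in()/tcm_loop_queue_status().
	 */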
	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
			transfer_length, TCM_SIMPLE_TAG,
			sc->sc_data_direction, 0,
			scsi_sglist(sc), scsi_sg_count(sc),
			sgl_bidi, sgl_bidi_count,
			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
	if (rc < 0) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	return;

out_done:
	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	sc->scsi_done(sc);
}

/*
 * ->queuecommand can be and usually is called from interrupt context, so
 * defer the actual submission to a workqueue.
 */
static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
	struct tcm_loop_cmd *tl_cmd;

	pr_debug("%s() %d:%d:%d:%llu got CDB: 0x%02x scsi_buf_len: %u\n",
		 __func__, sc->device->host->host_no, sc->device->id,
		 sc->device->channel, sc->device->lun, sc->cmnd[0],
		 scsi_bufflen(sc));

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
	if (!tl_cmd) {
		set_host_byte(sc, DID_ERROR);
		sc->scsi_done(sc);
		return 0;
	}
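
	/*
	 * Remember the incoming scsi_cmnd and its block layer request tag: the
	 * tag becomes se_cmd->tag at submission time and is what
	 * tcm_loop_abort_task() passes when it issues an ABORT_TASK TMR for
	 * this command.
	 */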
	tl_cmd->sc = sc;
	tl_cmd->sc_cmd_tag = sc->request->tag;
	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
	queue_work(tcm_loop_workqueue, &tl_cmd->work);
	return 0;
}

/*
 * Called from SCSI EH process context to issue a TMR (ABORT_TASK or
 * LUN_RESET) to struct scsi_device
 */
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
			      u64 lun, int task, enum tcm_tmreq_table tmr)
{
	struct se_cmd *se_cmd;
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_cmd *tl_cmd;
	int ret = TMR_FUNCTION_FAILED, rc;

	/*
	 * Locate the tl_nexus and se_sess pointers
	 */
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		pr_err("Unable to perform device reset without active I_T Nexus\n");
		return ret;
	}

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
	if (!tl_cmd)
		return ret;

	init_completion(&tl_cmd->tmr_done);

	se_cmd = &tl_cmd->tl_se_cmd;
	se_sess = tl_tpg->tl_nexus->se_sess;

	rc = target_submit_tmr(se_cmd, se_sess, tl_cmd->tl_sense_buf, lun,
			       NULL, tmr, GFP_KERNEL, task,
			       TARGET_SCF_ACK_KREF);
	if (rc < 0)
		goto release;
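
	/*
	 * Wait in SCSI EH process context until the target core has completed
	 * the TMR; tcm_loop_queue_tm_rsp() fires tmr_done once the response
	 * has been set.
	 */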
	wait_for_completion(&tl_cmd->tmr_done);
	ret = se_cmd->se_tmr_req->response;
	target_put_sess_cmd(se_cmd);

out:
	return ret;

release:
	if (se_cmd)
		transport_generic_free_cmd(se_cmd, 0);
	else
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	goto out;
}

static int tcm_loop_abort_task(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	int ret = FAILED;

	/*
	 * Locate the tcm_loop_hba_t pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
				 sc->request->tag, TMR_ABORT_TASK);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}

/*
 * Called from SCSI EH process context to issue a LUN_RESET TMR
 * to struct scsi_device
 */
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	int ret = FAILED;

	/*
	 * Locate the tcm_loop_hba_t pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
				 0, TMR_LUN_RESET);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}

static int tcm_loop_target_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	/*
	 * Locate the tcm_loop_hba_t pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	if (!tl_hba) {
		pr_err("Unable to perform device reset without active I_T Nexus\n");
		return FAILED;
	}
	/*
	 * Locate the tl_tpg pointer from TargetID in sc->device->id
	 */
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	if (tl_tpg) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return SUCCESS;
	}
	return FAILED;
}

static int tcm_loop_slave_alloc(struct scsi_device *sd)
{
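	/* Mark the request queue as capable of bidirectional (BIDI) commands. */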
	set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
	return 0;
}

static struct scsi_host_template tcm_loop_driver_template = {
	.show_info = tcm_loop_show_info,
	.proc_name = "tcm_loopback",
	.name = "TCM_Loopback",
	.queuecommand = tcm_loop_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.eh_abort_handler = tcm_loop_abort_task,
	.eh_device_reset_handler = tcm_loop_device_reset,
	.eh_target_reset_handler = tcm_loop_target_reset,
	.can_queue = 1024,
	.this_id = -1,
	.sg_tablesize = 256,
	.cmd_per_lun = 1024,
	.max_sectors = 0xFFFF,
	.use_clustering = DISABLE_CLUSTERING,
	.slave_alloc = tcm_loop_slave_alloc,
	.module = THIS_MODULE,
	.track_queue_depth = 1,
};

static int tcm_loop_driver_probe(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	int error, host_prot;

	tl_hba = to_tcm_loop_hba(dev);

	sh = scsi_host_alloc(&tcm_loop_driver_template,
			     sizeof(struct tcm_loop_hba));
	if (!sh) {
		pr_err("Unable to allocate struct scsi_host\n");
		return -ENODEV;
	}
	tl_hba->sh = sh;

	/*
	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
	 */
	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
	/*
	 * Set up a single ID, Channel and LUN for now.
	 */
	sh->max_id = 2;
	sh->max_lun = 0;
	sh->max_channel = 0;
	sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;
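
	/*
	 * Advertise support for all T10-PI DIF/DIX protection types with CRC
	 * guard tags; this lets protection-enabled backstores be exercised
	 * through the loopback LLD.
	 */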
	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

	scsi_host_set_prot(sh, host_prot);
	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);

	error = scsi_add_host(sh, &tl_hba->dev);
	if (error) {
		pr_err("%s: scsi_add_host failed\n", __func__);
		scsi_host_put(sh);
		return -ENODEV;
	}
	return 0;
}

static int tcm_loop_driver_remove(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;

	tl_hba = to_tcm_loop_hba(dev);
	sh = tl_hba->sh;

	scsi_remove_host(sh);
	scsi_host_put(sh);
	return 0;
}

static void tcm_loop_release_adapter(struct device *dev)
{
	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);

	kfree(tl_hba);
}

/*
 * Called from tcm_loop_make_scsi_hba() below
 */
static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
{
	int ret;

	tl_hba->dev.bus = &tcm_loop_lld_bus;
	tl_hba->dev.parent = tcm_loop_primary;
	tl_hba->dev.release = &tcm_loop_release_adapter;
	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);

	ret = device_register(&tl_hba->dev);
	if (ret) {
		pr_err("device_register() failed for tl_hba->dev: %d\n", ret);
		return -ENODEV;
	}

	return 0;
}

/*
 * Called from tcm_loop_fabric_init() below to register the emulated
 * tcm_loop SCSI bus.
 */
static int tcm_loop_alloc_core_bus(void)
{
	int ret;

	tcm_loop_primary = root_device_register("tcm_loop_0");
	if (IS_ERR(tcm_loop_primary)) {
		pr_err("Unable to allocate tcm_loop_primary\n");
		return PTR_ERR(tcm_loop_primary);
	}

	ret = bus_register(&tcm_loop_lld_bus);
	if (ret) {
		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
		goto dev_unreg;
	}

	ret = driver_register(&tcm_loop_driverfs);
	if (ret) {
		pr_err("driver_register() failed for tcm_loop_driverfs\n");
		goto bus_unreg;
	}

	pr_debug("Initialized TCM Loop Core Bus\n");
	return ret;

bus_unreg:
	bus_unregister(&tcm_loop_lld_bus);
dev_unreg:
	root_device_unregister(tcm_loop_primary);
	return ret;
}

static void tcm_loop_release_core_bus(void)
{
	driver_unregister(&tcm_loop_driverfs);
	bus_unregister(&tcm_loop_lld_bus);
	root_device_unregister(tcm_loop_primary);

	pr_debug("Releasing TCM Loop Core BUS\n");
}

static char *tcm_loop_get_fabric_name(void)
{
	return "loopback";
}

static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
{
	return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
}

static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
	/*
	 * Return the passed NAA identifier for the Target Port
	 */
	return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
}

static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
{
	/*
	 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
	 * to represent the SCSI Target Port.
	 */
	return tl_tpg(se_tpg)->tl_tpgt;
}

/*
 * Returning (1) here allows for target_core_mod struct se_node_acl to be generated
 * based upon the incoming fabric dependent SCSI Initiator Port
 */
static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
{
	return 1;
}

static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs
 * for local virtual Linux/SCSI LLD passthrough into a VM hypervisor guest
 */
static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
 * It has been added here as a nop for target_fabric_tf_ops_check()
 */
static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	return tl_tpg->tl_fabric_prot_type;
}

static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
{
	return 1;
}

static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
{
	return;
}

static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	return tl_cmd->sc_cmd_state;
}

static int tcm_loop_write_pending(struct se_cmd *se_cmd)
{
	/*
	 * Since Linux/SCSI has already sent down a struct scsi_cmnd
	 * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array
	 * memory, and memory has already been mapped to struct se_cmd->t_mem_list
	 * format with transport_generic_map_mem_to_cmd().
	 *
	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
	 * object execution queue.
	 */
	target_execute_cmd(se_cmd);
	return 0;
}

static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}

static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
		 __func__, sc, sc->cmnd[0]);

	sc->result = SAM_STAT_GOOD;
	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}

static int tcm_loop_queue_status(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
		 __func__, sc, sc->cmnd[0]);

	if (se_cmd->sense_buffer &&
	    ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	     (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
		sc->result = SAM_STAT_CHECK_CONDITION;
		set_driver_byte(sc, DRIVER_SENSE);
	} else
		sc->result = se_cmd->scsi_status;

	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}

static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	/* Wake up tcm_loop_issue_tmr(). */
	complete(&tl_cmd->tmr_done);
}

static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
{
	switch (tl_hba->tl_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

/* Start items for tcm_loop_port_cit */

static int tcm_loop_port_link(
	struct se_portal_group *se_tpg,
	struct se_lun *lun)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
	/*
	 * Add Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);

	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
	return 0;
}

static void tcm_loop_port_unlink(
	struct se_portal_group *se_tpg,
	struct se_lun *se_lun)
{
	struct scsi_device *sd;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
	tl_hba = tl_tpg->tl_hba;

	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
				se_lun->unpacked_lun);
	if (!sd) {
		pr_err("Unable to locate struct scsi_device for %d:%d:%llu\n",
		       0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
		return;
	}
	/*
	 * Remove Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_remove_device(sd);
	scsi_device_put(sd);

	atomic_dec_mb(&tl_tpg->tl_tpg_port_count);

	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}

/* End items for tcm_loop_port_cit */

static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
		struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);

	return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
}

static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
		struct config_item *item, const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	unsigned long val;
	int ret = kstrtoul(page, 0, &val);

	if (ret) {
		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
		return ret;
	}
	if (val != 0 && val != 1 && val != 3) {
		pr_err("Invalid fabric_prot_type: %lu\n", val);
		return -EINVAL;
	}
	tl_tpg->tl_fabric_prot_type = val;
	return count;
}

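/*
 * CONFIGFS_ATTR() pairs the _show/_store handlers above into a single
 * configfs_attribute, tcm_loop_tpg_attrib_attr_fabric_prot_type, which is
 * referenced from the attribute list that follows.
 */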
CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);

static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
	&tcm_loop_tpg_attrib_attr_fabric_prot_type,
	NULL,
};

/* Start items for tcm_loop_nexus_cit */

static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
				  struct se_session *se_sess, void *p)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);

	tl_tpg->tl_nexus = p;
	return 0;
}

static int tcm_loop_make_nexus(
	struct tcm_loop_tpg *tl_tpg,
	const char *name)
{
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	struct tcm_loop_nexus *tl_nexus;
	int ret;

	if (tl_tpg->tl_nexus) {
		pr_debug("tl_tpg->tl_nexus already exists\n");
		return -EEXIST;
	}

	tl_nexus = kzalloc(sizeof(*tl_nexus), GFP_KERNEL);
	if (!tl_nexus)
		return -ENOMEM;
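
	/*
	 * target_alloc_session() sets up the se_session and invokes
	 * tcm_loop_alloc_sess_cb() to point tl_tpg->tl_nexus at this nexus as
	 * part of session setup.
	 */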
	tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0,
					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
					name, tl_nexus, tcm_loop_alloc_sess_cb);
	if (IS_ERR(tl_nexus->se_sess)) {
		ret = PTR_ERR(tl_nexus->se_sess);
		kfree(tl_nexus);
		return ret;
	}

	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated %s Initiator Port: %s\n",
		 tcm_loop_dump_proto_id(tl_hba), name);
	return 0;
}

static int tcm_loop_drop_nexus(
	struct tcm_loop_tpg *tpg)
{
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus;

	tl_nexus = tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	se_sess = tl_nexus->se_sess;
	if (!se_sess)
		return -ENODEV;

	if (atomic_read(&tpg->tl_tpg_port_count)) {
		pr_err("Unable to remove TCM_Loop I_T Nexus with active TPG port count: %d\n",
		       atomic_read(&tpg->tl_tpg_port_count));
		return -EPERM;
	}

	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated %s Initiator Port: %s\n",
		 tcm_loop_dump_proto_id(tpg->tl_hba),
		 tl_nexus->se_sess->se_node_acl->initiatorname);
	/*
	 * Release the SCSI I_T Nexus to the emulated Target Port
	 */
	transport_deregister_session(tl_nexus->se_sess);
	tpg->tl_nexus = NULL;
	kfree(tl_nexus);
	return 0;
}

/* End items for tcm_loop_nexus_cit */

static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_nexus *tl_nexus;
	ssize_t ret;

	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	ret = snprintf(page, PAGE_SIZE, "%s\n",
		       tl_nexus->se_sess->se_node_acl->initiatorname);
	return ret;
}

static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = tcm_loop_drop_nexus(tl_tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
	 * tcm_loop_make_nexus()
	 */
	if (strlen(page) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA Sas Address: %s, exceeds max: %d\n",
		       page, TL_WWN_ADDR_LEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port: %s\n",
	       i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	const char *status = NULL;
	ssize_t ret = -EINVAL;

	switch (tl_tpg->tl_transport_status) {
	case TCM_TRANSPORT_ONLINE:
		status = "online";
		break;
	case TCM_TRANSPORT_OFFLINE:
		status = "offline";
		break;
	default:
		break;
	}

	if (status)
		ret = snprintf(page, PAGE_SIZE, "%s\n", status);

	return ret;
}

static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);

	if (!strncmp(page, "online", 6)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return count;
	}
	if (!strncmp(page, "offline", 7)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
		if (tl_tpg->tl_nexus) {
			struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;
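
			/*
			 * Queue an I_T nexus loss Unit Attention on the
			 * session's node ACL so the initiator side is told
			 * about the transport going offline.
			 */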
			core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
		}
		return count;
	}
	return -EINVAL;
}

static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
			tl_hba->sh->host_no, tl_tpg->tl_tpgt);
}

CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
CONFIGFS_ATTR_RO(tcm_loop_tpg_, address);

static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
	&tcm_loop_tpg_attr_nexus,
	&tcm_loop_tpg_attr_transport_status,
	&tcm_loop_tpg_attr_address,
	NULL,
};

/* Start items for tcm_loop_naa_cit */

static struct se_portal_group *tcm_loop_make_naa_tpg(
	struct se_wwn *wwn,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
			struct tcm_loop_hba, tl_hba_wwn);
	struct tcm_loop_tpg *tl_tpg;
	int ret;
	unsigned long tpgt;

	if (strstr(name, "tpgt_") != name) {
		pr_err("Unable to locate \"tpgt_#\" directory group\n");
		return ERR_PTR(-EINVAL);
	}
	if (kstrtoul(name+5, 10, &tpgt))
		return ERR_PTR(-EINVAL);

	if (tpgt >= TL_TPGS_PER_HBA) {
		pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA: %u\n",
		       tpgt, TL_TPGS_PER_HBA);
		return ERR_PTR(-EINVAL);
	}
	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
	tl_tpg->tl_hba = tl_hba;
	tl_tpg->tl_tpgt = tpgt;
	/*
	 * Register the tl_tpg as an emulated TCM Target Endpoint
	 */
	ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
	if (ret < 0)
		return ERR_PTR(-ENOMEM);

	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s Target Port %s,t,0x%04lx\n",
		 tcm_loop_dump_proto_id(tl_hba),
		 config_item_name(&wwn->wwn_group.cg_item), tpgt);
	return &tl_tpg->tl_se_tpg;
}

static void tcm_loop_drop_naa_tpg(
	struct se_portal_group *se_tpg)
{
	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba;
	unsigned short tpgt;

	tl_hba = tl_tpg->tl_hba;
	tpgt = tl_tpg->tl_tpgt;
	/*
	 * Release the I_T Nexus for the Virtual target link if present
	 */
	tcm_loop_drop_nexus(tl_tpg);
	/*
	 * Deregister the tl_tpg as an emulated TCM Target Endpoint
	 */
	core_tpg_deregister(se_tpg);

	tl_tpg->tl_hba = NULL;
	tl_tpg->tl_tpgt = 0;

	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s Target Port %s,t,0x%04x\n",
		 tcm_loop_dump_proto_id(tl_hba),
		 config_item_name(&wwn->wwn_group.cg_item), tpgt);
}

/* End items for tcm_loop_naa_cit */

/* Start items for tcm_loop_cit */

static struct se_wwn *tcm_loop_make_scsi_hba(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	char *ptr;
	int ret, off = 0;

	tl_hba = kzalloc(sizeof(*tl_hba), GFP_KERNEL);
	if (!tl_hba)
		return ERR_PTR(-ENOMEM);

	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (!ptr) {
		pr_err("Unable to locate prefix for emulated Target Port: %s\n",
		       name);
		ret = -EINVAL;
		goto out;
	}
	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;

check_len:
	if (strlen(name) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA %s Address: %s, exceeds max: %d\n",
		       tcm_loop_dump_proto_id(tl_hba), name, TL_WWN_ADDR_LEN);
		ret = -EINVAL;
		goto out;
	}
	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);

	/*
	 * Call device_register(tl_hba->dev) to register the emulated
	 * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after
	 * device_register() callbacks in tcm_loop_driver_probe()
	 */
	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
	if (ret)
		goto out;

	sh = tl_hba->sh;
	tcm_loop_hba_no_cnt++;
	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
		 tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
	return &tl_hba->tl_hba_wwn;
out:
	kfree(tl_hba);
	return ERR_PTR(ret);
}

static void tcm_loop_drop_scsi_hba(
	struct se_wwn *wwn)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
				struct tcm_loop_hba, tl_hba_wwn);

	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
		 tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
		 tl_hba->sh->host_no);
	/*
	 * Call device_unregister() on the original tl_hba->dev.
	 * tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
	 * release *tl_hba;
	 */
	device_unregister(&tl_hba->dev);
}

/* Start items for tcm_loop_cit */

static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
}

CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);

static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
	&tcm_loop_wwn_attr_version,
	NULL,
};

/* End items for tcm_loop_cit */

static const struct target_core_fabric_ops loop_ops = {
	.module = THIS_MODULE,
	.name = "loopback",
	.get_fabric_name = tcm_loop_get_fabric_name,
	.tpg_get_wwn = tcm_loop_get_endpoint_wwn,
	.tpg_get_tag = tcm_loop_get_tag,
	.tpg_check_demo_mode = tcm_loop_check_demo_mode,
	.tpg_check_demo_mode_cache = tcm_loop_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect =
				tcm_loop_check_demo_mode_write_protect,
	.tpg_check_prod_mode_write_protect =
				tcm_loop_check_prod_mode_write_protect,
	.tpg_check_prot_fabric_only = tcm_loop_check_prot_fabric_only,
	.tpg_get_inst_index = tcm_loop_get_inst_index,
	.check_stop_free = tcm_loop_check_stop_free,
	.release_cmd = tcm_loop_release_cmd,
	.sess_get_index = tcm_loop_sess_get_index,
	.write_pending = tcm_loop_write_pending,
	.write_pending_status = tcm_loop_write_pending_status,
	.set_default_node_attributes = tcm_loop_set_default_node_attributes,
	.get_cmd_state = tcm_loop_get_cmd_state,
	.queue_data_in = tcm_loop_queue_data_in,
	.queue_status = tcm_loop_queue_status,
	.queue_tm_rsp = tcm_loop_queue_tm_rsp,
	.aborted_task = tcm_loop_aborted_task,
	.fabric_make_wwn = tcm_loop_make_scsi_hba,
	.fabric_drop_wwn = tcm_loop_drop_scsi_hba,
	.fabric_make_tpg = tcm_loop_make_naa_tpg,
	.fabric_drop_tpg = tcm_loop_drop_naa_tpg,
	.fabric_post_link = tcm_loop_port_link,
	.fabric_pre_unlink = tcm_loop_port_unlink,
	.tfc_wwn_attrs = tcm_loop_wwn_attrs,
	.tfc_tpg_base_attrs = tcm_loop_tpg_attrs,
	.tfc_tpg_attrib_attrs = tcm_loop_tpg_attrib_attrs,
};

static int __init tcm_loop_fabric_init(void)
{
	int ret = -ENOMEM;
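
	/*
	 * Bring up the deferred-submission workqueue and the per-command slab
	 * cache first, then register the core bus and the fabric template;
	 * the error path below tears everything down in reverse order.
	 */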
	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
	if (!tcm_loop_workqueue)
		goto out;

	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
					       sizeof(struct tcm_loop_cmd),
					       __alignof__(struct tcm_loop_cmd),
					       0, NULL);
	if (!tcm_loop_cmd_cache) {
		pr_debug("kmem_cache_create() for tcm_loop_cmd_cache failed\n");
		goto out_destroy_workqueue;
	}

	ret = tcm_loop_alloc_core_bus();
	if (ret)
		goto out_destroy_cache;

	ret = target_register_template(&loop_ops);
	if (ret)
		goto out_release_core_bus;

	return 0;

out_release_core_bus:
	tcm_loop_release_core_bus();
out_destroy_cache:
	kmem_cache_destroy(tcm_loop_cmd_cache);
out_destroy_workqueue:
	destroy_workqueue(tcm_loop_workqueue);
out:
	return ret;
}

static void __exit tcm_loop_fabric_exit(void)
{
	target_unregister_template(&loop_ops);
	tcm_loop_release_core_bus();
	kmem_cache_destroy(tcm_loop_cmd_cache);
	destroy_workqueue(tcm_loop_workqueue);
}

MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
MODULE_LICENSE("GPL");
module_init(tcm_loop_fabric_init);
module_exit(tcm_loop_fabric_exit);