tcm_loop.c

/*******************************************************************************
 *
 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
 * for emulated SAS initiator ports
 *
 * © Copyright 2011-2013 Datera, Inc.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "tcm_loop.h"

#define to_tcm_loop_hba(hba)    container_of(hba, struct tcm_loop_hba, dev)

static struct workqueue_struct *tcm_loop_workqueue;
static struct kmem_cache *tcm_loop_cmd_cache;

static int tcm_loop_hba_no_cnt;

static int tcm_loop_queue_status(struct se_cmd *se_cmd);

/*
 * Called from struct target_core_fabric_ops->check_stop_free()
 */
static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
{
        return transport_generic_free_cmd(se_cmd, 0);
}

static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
{
        struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
                        struct tcm_loop_cmd, tl_se_cmd);

        kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
}

static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
{
        seq_printf(m, "tcm_loop_proc_info()\n");
        return 0;
}

static int tcm_loop_driver_probe(struct device *);
static int tcm_loop_driver_remove(struct device *);

static int pseudo_lld_bus_match(struct device *dev,
                struct device_driver *dev_driver)
{
        return 1;
}

static struct bus_type tcm_loop_lld_bus = {
        .name = "tcm_loop_bus",
        .match = pseudo_lld_bus_match,
        .probe = tcm_loop_driver_probe,
        .remove = tcm_loop_driver_remove,
};

static struct device_driver tcm_loop_driverfs = {
        .name = "tcm_loop",
        .bus = &tcm_loop_lld_bus,
};

/*
 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
 */
static struct device *tcm_loop_primary;
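
/*
 * Workqueue handler for a queued struct scsi_cmnd: map the command's data,
 * bidi and protection scatterlists onto the embedded struct se_cmd and hand
 * it to the target core via target_submit_cmd_map_sgls().
 */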
static void tcm_loop_submission_work(struct work_struct *work)
{
        struct tcm_loop_cmd *tl_cmd =
                container_of(work, struct tcm_loop_cmd, work);
        struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
        struct scsi_cmnd *sc = tl_cmd->sc;
        struct tcm_loop_nexus *tl_nexus;
        struct tcm_loop_hba *tl_hba;
        struct tcm_loop_tpg *tl_tpg;
        struct scatterlist *sgl_bidi = NULL;
        u32 sgl_bidi_count = 0, transfer_length;
        int rc;

        tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
        tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

        /*
         * Ensure that this tl_tpg reference from the incoming sc->device->id
         * has already been configured via tcm_loop_make_naa_tpg().
         */
        if (!tl_tpg->tl_hba) {
                set_host_byte(sc, DID_NO_CONNECT);
                goto out_done;
        }
        if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
                set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
                goto out_done;
        }
        tl_nexus = tl_tpg->tl_nexus;
        if (!tl_nexus) {
                scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
                            " does not exist\n");
                set_host_byte(sc, DID_ERROR);
                goto out_done;
        }
        if (scsi_bidi_cmnd(sc)) {
                struct scsi_data_buffer *sdb = scsi_in(sc);

                sgl_bidi = sdb->table.sgl;
                sgl_bidi_count = sdb->table.nents;
                se_cmd->se_cmd_flags |= SCF_BIDI;
        }

        transfer_length = scsi_transfer_length(sc);
        if (!scsi_prot_sg_count(sc) &&
            scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
                se_cmd->prot_pto = true;
                /*
                 * loopback transport doesn't support
                 * WRITE_GENERATE, READ_STRIP protection
                 * information operations, go ahead unprotected.
                 */
                transfer_length = scsi_bufflen(sc);
        }

        se_cmd->tag = tl_cmd->sc_cmd_tag;
        rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
                        &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
                        transfer_length, TCM_SIMPLE_TAG,
                        sc->sc_data_direction, 0,
                        scsi_sglist(sc), scsi_sg_count(sc),
                        sgl_bidi, sgl_bidi_count,
                        scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
        if (rc < 0) {
                set_host_byte(sc, DID_NO_CONNECT);
                goto out_done;
        }
        return;

out_done:
        kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
        sc->scsi_done(sc);
        return;
}

/*
 * ->queuecommand can be and usually is called from interrupt context, so
 * defer the actual submission to a workqueue.
 */
static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
        struct tcm_loop_cmd *tl_cmd;

        pr_debug("tcm_loop_queuecommand() %d:%d:%d:%llu got CDB: 0x%02x"
                 " scsi_buf_len: %u\n", sc->device->host->host_no,
                 sc->device->id, sc->device->channel, sc->device->lun,
                 sc->cmnd[0], scsi_bufflen(sc));

        tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
        if (!tl_cmd) {
                pr_err("Unable to allocate struct tcm_loop_cmd\n");
                set_host_byte(sc, DID_ERROR);
                sc->scsi_done(sc);
                return 0;
        }

        tl_cmd->sc = sc;
        tl_cmd->sc_cmd_tag = sc->request->tag;
        INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
        queue_work(tcm_loop_workqueue, &tl_cmd->work);
        return 0;
}

/*
 * Called from SCSI EH process context to issue a TMR (ABORT_TASK or
 * LUN_RESET) against the I_T nexus for struct scsi_device
 */
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
                              u64 lun, int task, enum tcm_tmreq_table tmr)
{
        struct se_cmd *se_cmd = NULL;
        struct se_session *se_sess;
        struct tcm_loop_nexus *tl_nexus;
        struct tcm_loop_cmd *tl_cmd = NULL;
        int ret = TMR_FUNCTION_FAILED, rc;

        /*
         * Locate the tl_nexus and se_sess pointers
         */
        tl_nexus = tl_tpg->tl_nexus;
        if (!tl_nexus) {
                pr_err("Unable to perform device reset without"
                       " active I_T Nexus\n");
                return ret;
        }

        tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
        if (!tl_cmd) {
                pr_err("Unable to allocate memory for tl_cmd\n");
                return ret;
        }

        init_completion(&tl_cmd->tmr_done);

        se_cmd = &tl_cmd->tl_se_cmd;
        se_sess = tl_tpg->tl_nexus->se_sess;

        rc = target_submit_tmr(se_cmd, se_sess, tl_cmd->tl_sense_buf, lun,
                               NULL, tmr, GFP_KERNEL, task,
                               TARGET_SCF_ACK_KREF);
        if (rc < 0)
                goto release;
        wait_for_completion(&tl_cmd->tmr_done);
        ret = se_cmd->se_tmr_req->response;
        target_put_sess_cmd(se_cmd);

out:
        return ret;

release:
        if (se_cmd)
                transport_generic_free_cmd(se_cmd, 0);
        else
                kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
        goto out;
}
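
/*
 * SCSI error handler entry point: abort a single outstanding command by
 * issuing a TMR_ABORT_TASK for its request tag.
 */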
static int tcm_loop_abort_task(struct scsi_cmnd *sc)
{
        struct tcm_loop_hba *tl_hba;
        struct tcm_loop_tpg *tl_tpg;
        int ret = FAILED;

        /*
         * Locate the struct tcm_loop_hba pointer
         */
        tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
        tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
        ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
                                 sc->request->tag, TMR_ABORT_TASK);
        return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}

/*
 * Called from SCSI EH process context to issue a LUN_RESET TMR
 * to struct scsi_device
 */
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
        struct tcm_loop_hba *tl_hba;
        struct tcm_loop_tpg *tl_tpg;
        int ret = FAILED;

        /*
         * Locate the struct tcm_loop_hba pointer
         */
        tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
        tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
        ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
                                 0, TMR_LUN_RESET);
        return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
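
/*
 * SCSI error handler entry point: the loopback "target reset" simply marks
 * the TPG transport status online again.
 */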
static int tcm_loop_target_reset(struct scsi_cmnd *sc)
{
        struct tcm_loop_hba *tl_hba;
        struct tcm_loop_tpg *tl_tpg;

        /*
         * Locate the struct tcm_loop_hba pointer
         */
        tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
        if (!tl_hba) {
                pr_err("Unable to perform device reset without"
                       " active I_T Nexus\n");
                return FAILED;
        }
        /*
         * Locate the tl_tpg pointer from TargetID in sc->device->id
         */
        tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
        if (tl_tpg) {
                tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
                return SUCCESS;
        }
        return FAILED;
}
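
/*
 * Mark the request queue BIDI capable so the block layer will accept
 * bidirectional SCSI commands for this device.
 */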
static int tcm_loop_slave_alloc(struct scsi_device *sd)
{
        set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
        return 0;
}

static struct scsi_host_template tcm_loop_driver_template = {
        .show_info = tcm_loop_show_info,
        .proc_name = "tcm_loopback",
        .name = "TCM_Loopback",
        .queuecommand = tcm_loop_queuecommand,
        .change_queue_depth = scsi_change_queue_depth,
        .eh_abort_handler = tcm_loop_abort_task,
        .eh_device_reset_handler = tcm_loop_device_reset,
        .eh_target_reset_handler = tcm_loop_target_reset,
        .can_queue = 1024,
        .this_id = -1,
        .sg_tablesize = 256,
        .cmd_per_lun = 1024,
        .max_sectors = 0xFFFF,
        .use_clustering = DISABLE_CLUSTERING,
        .slave_alloc = tcm_loop_slave_alloc,
        .module = THIS_MODULE,
        .track_queue_depth = 1,
};
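
/*
 * Bus probe callback: allocate and register the emulated struct Scsi_Host
 * for this adapter, advertising DIF/DIX protection types and CRC guard
 * support.
 */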
static int tcm_loop_driver_probe(struct device *dev)
{
        struct tcm_loop_hba *tl_hba;
        struct Scsi_Host *sh;
        int error, host_prot;

        tl_hba = to_tcm_loop_hba(dev);

        sh = scsi_host_alloc(&tcm_loop_driver_template,
                             sizeof(struct tcm_loop_hba));
        if (!sh) {
                pr_err("Unable to allocate struct scsi_host\n");
                return -ENODEV;
        }
        tl_hba->sh = sh;

        /*
         * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
         */
        *((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
        /*
         * Setup single ID, Channel and LUN for now..
         */
        sh->max_id = 2;
        sh->max_lun = 0;
        sh->max_channel = 0;
        sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;

        host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
                    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
                    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

        scsi_host_set_prot(sh, host_prot);
        scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);

        error = scsi_add_host(sh, &tl_hba->dev);
        if (error) {
                pr_err("%s: scsi_add_host failed\n", __func__);
                scsi_host_put(sh);
                return -ENODEV;
        }
        return 0;
}

static int tcm_loop_driver_remove(struct device *dev)
{
        struct tcm_loop_hba *tl_hba;
        struct Scsi_Host *sh;

        tl_hba = to_tcm_loop_hba(dev);
        sh = tl_hba->sh;

        scsi_remove_host(sh);
        scsi_host_put(sh);
        return 0;
}

static void tcm_loop_release_adapter(struct device *dev)
{
        struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);

        kfree(tl_hba);
}

/*
 * Called from tcm_loop_make_scsi_hba() below
 */
static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
{
        int ret;

        tl_hba->dev.bus = &tcm_loop_lld_bus;
        tl_hba->dev.parent = tcm_loop_primary;
        tl_hba->dev.release = &tcm_loop_release_adapter;
        dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);

        ret = device_register(&tl_hba->dev);
        if (ret) {
                pr_err("device_register() failed for"
                       " tl_hba->dev: %d\n", ret);
                return -ENODEV;
        }

        return 0;
}

/*
 * Called from tcm_loop_fabric_init() below to load the emulated
 * tcm_loop SCSI bus.
 */
static int tcm_loop_alloc_core_bus(void)
{
        int ret;

        tcm_loop_primary = root_device_register("tcm_loop_0");
        if (IS_ERR(tcm_loop_primary)) {
                pr_err("Unable to allocate tcm_loop_primary\n");
                return PTR_ERR(tcm_loop_primary);
        }

        ret = bus_register(&tcm_loop_lld_bus);
        if (ret) {
                pr_err("bus_register() failed for tcm_loop_lld_bus\n");
                goto dev_unreg;
        }

        ret = driver_register(&tcm_loop_driverfs);
        if (ret) {
                pr_err("driver_register() failed for"
                       " tcm_loop_driverfs\n");
                goto bus_unreg;
        }

        pr_debug("Initialized TCM Loop Core Bus\n");
        return ret;

bus_unreg:
        bus_unregister(&tcm_loop_lld_bus);
dev_unreg:
        root_device_unregister(tcm_loop_primary);
        return ret;
}

static void tcm_loop_release_core_bus(void)
{
        driver_unregister(&tcm_loop_driverfs);
        bus_unregister(&tcm_loop_lld_bus);
        root_device_unregister(tcm_loop_primary);

        pr_debug("Releasing TCM Loop Core BUS\n");
}

static char *tcm_loop_get_fabric_name(void)
{
        return "loopback";
}

static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
{
        return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
}

static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
        /*
         * Return the passed NAA identifier for the Target Port
         */
        return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
}

static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
{
        /*
         * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
         * to represent the SCSI Target Port.
         */
        return tl_tpg(se_tpg)->tl_tpgt;
}

/*
 * Returning (1) here allows for target_core_mod struct se_node_acl to be
 * generated based upon the incoming fabric dependent SCSI Initiator Port
 */
static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
{
        return 1;
}

static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
        return 0;
}

/*
 * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs
 * for local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
 */
static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
{
        return 0;
}

/*
 * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
 * It has been added here as a nop for target_fabric_tf_ops_check()
 */
static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
{
        return 0;
}

static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
        struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
                                                   tl_se_tpg);

        return tl_tpg->tl_fabric_prot_type;
}

static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
{
        return 1;
}

static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
{
        return 1;
}

static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
{
        return;
}

static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
{
        struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
                        struct tcm_loop_cmd, tl_se_cmd);

        return tl_cmd->sc_cmd_state;
}

static int tcm_loop_write_pending(struct se_cmd *se_cmd)
{
        /*
         * Since Linux/SCSI has already sent down a struct scsi_cmnd
         * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array
         * memory, and memory has already been mapped to struct se_cmd->t_mem_list
         * format with transport_generic_map_mem_to_cmd().
         *
         * We now tell TCM to add this WRITE CDB directly into the TCM storage
         * object execution queue.
         */
        target_execute_cmd(se_cmd);
        return 0;
}

static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
{
        return 0;
}
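
/*
 * Data-in completion from the target core: report GOOD status, propagate any
 * residual count, and complete the struct scsi_cmnd back to the SCSI midlayer.
 */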
static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
{
        struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
                        struct tcm_loop_cmd, tl_se_cmd);
        struct scsi_cmnd *sc = tl_cmd->sc;

        pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
                 " cdb: 0x%02x\n", sc, sc->cmnd[0]);

        sc->result = SAM_STAT_GOOD;
        set_host_byte(sc, DID_OK);
        if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
            (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
                scsi_set_resid(sc, se_cmd->residual_count);
        sc->scsi_done(sc);
        return 0;
}
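
/*
 * Status completion from the target core: copy any sense data into the
 * struct scsi_cmnd, otherwise pass through the SCSI status byte, then
 * complete the command back to the SCSI midlayer.
 */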
static int tcm_loop_queue_status(struct se_cmd *se_cmd)
{
        struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
                        struct tcm_loop_cmd, tl_se_cmd);
        struct scsi_cmnd *sc = tl_cmd->sc;

        pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
                 " cdb: 0x%02x\n", sc, sc->cmnd[0]);

        if (se_cmd->sense_buffer &&
            ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
             (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {

                memcpy(sc->sense_buffer, se_cmd->sense_buffer,
                       SCSI_SENSE_BUFFERSIZE);
                sc->result = SAM_STAT_CHECK_CONDITION;
                set_driver_byte(sc, DRIVER_SENSE);
        } else
                sc->result = se_cmd->scsi_status;

        set_host_byte(sc, DID_OK);
        if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
            (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
                scsi_set_resid(sc, se_cmd->residual_count);
        sc->scsi_done(sc);
        return 0;
}

static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
        struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
                        struct tcm_loop_cmd, tl_se_cmd);

        /* Wake up tcm_loop_issue_tmr(). */
        complete(&tl_cmd->tmr_done);
}

static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
{
        return;
}

static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
{
        switch (tl_hba->tl_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return "SAS";
        case SCSI_PROTOCOL_FCP:
                return "FCP";
        case SCSI_PROTOCOL_ISCSI:
                return "iSCSI";
        default:
                break;
        }

        return "Unknown";
}

/* Start items for tcm_loop_port_cit */

static int tcm_loop_port_link(
        struct se_portal_group *se_tpg,
        struct se_lun *lun)
{
        struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
                        struct tcm_loop_tpg, tl_se_tpg);
        struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

        atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
        /*
         * Add Linux/SCSI struct scsi_device by HCTL
         */
        scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);

        pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
        return 0;
}

static void tcm_loop_port_unlink(
        struct se_portal_group *se_tpg,
        struct se_lun *se_lun)
{
        struct scsi_device *sd;
        struct tcm_loop_hba *tl_hba;
        struct tcm_loop_tpg *tl_tpg;

        tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
        tl_hba = tl_tpg->tl_hba;

        sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
                                se_lun->unpacked_lun);
        if (!sd) {
                pr_err("Unable to locate struct scsi_device for %d:%d:"
                       "%llu\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
                return;
        }
        /*
         * Remove Linux/SCSI struct scsi_device by HCTL
         */
        scsi_remove_device(sd);
        scsi_device_put(sd);

        atomic_dec_mb(&tl_tpg->tl_tpg_port_count);

        pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}

/* End items for tcm_loop_port_cit */

static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
                struct config_item *item, char *page)
{
        struct se_portal_group *se_tpg = attrib_to_tpg(item);
        struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
                                                   tl_se_tpg);

        return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
}

static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
                struct config_item *item, const char *page, size_t count)
{
        struct se_portal_group *se_tpg = attrib_to_tpg(item);
        struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
                                                   tl_se_tpg);
        unsigned long val;
        int ret = kstrtoul(page, 0, &val);

        if (ret) {
                pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
                return ret;
        }
        if (val != 0 && val != 1 && val != 3) {
                pr_err("Invalid loopback fabric_prot_type: %lu\n", val);
                return -EINVAL;
        }
        tl_tpg->tl_fabric_prot_type = val;
        return count;
}

CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);

static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
        &tcm_loop_tpg_attrib_attr_fabric_prot_type,
        NULL,
};

/* Start items for tcm_loop_nexus_cit */

static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
                                  struct se_session *se_sess, void *p)
{
        struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
                        struct tcm_loop_tpg, tl_se_tpg);

        tl_tpg->tl_nexus = p;
        return 0;
}

static int tcm_loop_make_nexus(
        struct tcm_loop_tpg *tl_tpg,
        const char *name)
{
        struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
        struct tcm_loop_nexus *tl_nexus;
        int ret;

        if (tl_tpg->tl_nexus) {
                pr_debug("tl_tpg->tl_nexus already exists\n");
                return -EEXIST;
        }

        tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
        if (!tl_nexus) {
                pr_err("Unable to allocate struct tcm_loop_nexus\n");
                return -ENOMEM;
        }

        tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0,
                        TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
                        name, tl_nexus, tcm_loop_alloc_sess_cb);
        if (IS_ERR(tl_nexus->se_sess)) {
                ret = PTR_ERR(tl_nexus->se_sess);
                kfree(tl_nexus);
                return ret;
        }

        pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
                 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
                 name);
        return 0;
}

static int tcm_loop_drop_nexus(
        struct tcm_loop_tpg *tpg)
{
        struct se_session *se_sess;
        struct tcm_loop_nexus *tl_nexus;

        tl_nexus = tpg->tl_nexus;
        if (!tl_nexus)
                return -ENODEV;

        se_sess = tl_nexus->se_sess;
        if (!se_sess)
                return -ENODEV;

        if (atomic_read(&tpg->tl_tpg_port_count)) {
                pr_err("Unable to remove TCM_Loop I_T Nexus with"
                       " active TPG port count: %d\n",
                       atomic_read(&tpg->tl_tpg_port_count));
                return -EPERM;
        }

        pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
                 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
                 tl_nexus->se_sess->se_node_acl->initiatorname);
        /*
         * Release the SCSI I_T Nexus to the emulated Target Port
         */
        transport_deregister_session(tl_nexus->se_sess);
        tpg->tl_nexus = NULL;
        kfree(tl_nexus);
        return 0;
}

/* End items for tcm_loop_nexus_cit */

static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
{
        struct se_portal_group *se_tpg = to_tpg(item);
        struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
                        struct tcm_loop_tpg, tl_se_tpg);
        struct tcm_loop_nexus *tl_nexus;
        ssize_t ret;

        tl_nexus = tl_tpg->tl_nexus;
        if (!tl_nexus)
                return -ENODEV;

        ret = snprintf(page, PAGE_SIZE, "%s\n",
                       tl_nexus->se_sess->se_node_acl->initiatorname);

        return ret;
}
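
/*
 * Writing "NULL" drops the active I_T nexus; otherwise the initiator WWN must
 * carry a "naa.", "fc." or "iqn." prefix matching the HBA protocol identifier
 * before a new nexus is created via tcm_loop_make_nexus().
 */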
static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
                const char *page, size_t count)
{
        struct se_portal_group *se_tpg = to_tpg(item);
        struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
                        struct tcm_loop_tpg, tl_se_tpg);
        struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
        unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
        int ret;

        /*
         * Shutdown the active I_T nexus if 'NULL' is passed..
         */
        if (!strncmp(page, "NULL", 4)) {
                ret = tcm_loop_drop_nexus(tl_tpg);
                return (!ret) ? count : ret;
        }
        /*
         * Otherwise make sure the passed virtual Initiator port WWN matches
         * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
         * tcm_loop_make_nexus()
         */
        if (strlen(page) >= TL_WWN_ADDR_LEN) {
                pr_err("Emulated NAA SAS Address: %s, exceeds"
                       " max: %d\n", page, TL_WWN_ADDR_LEN);
                return -EINVAL;
        }
        snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);

        ptr = strstr(i_port, "naa.");
        if (ptr) {
                if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
                        pr_err("Passed SAS Initiator Port %s does not"
                               " match target port protoid: %s\n", i_port,
                               tcm_loop_dump_proto_id(tl_hba));
                        return -EINVAL;
                }
                port_ptr = &i_port[0];
                goto check_newline;
        }
        ptr = strstr(i_port, "fc.");
        if (ptr) {
                if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
                        pr_err("Passed FCP Initiator Port %s does not"
                               " match target port protoid: %s\n", i_port,
                               tcm_loop_dump_proto_id(tl_hba));
                        return -EINVAL;
                }
                port_ptr = &i_port[3]; /* Skip over "fc." */
                goto check_newline;
        }
        ptr = strstr(i_port, "iqn.");
        if (ptr) {
                if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
                        pr_err("Passed iSCSI Initiator Port %s does not"
                               " match target port protoid: %s\n", i_port,
                               tcm_loop_dump_proto_id(tl_hba));
                        return -EINVAL;
                }
                port_ptr = &i_port[0];
                goto check_newline;
        }
        pr_err("Unable to locate prefix for emulated Initiator Port:"
               " %s\n", i_port);
        return -EINVAL;
        /*
         * Clear any trailing newline for the NAA WWN
         */
check_newline:
        if (i_port[strlen(i_port)-1] == '\n')
                i_port[strlen(i_port)-1] = '\0';

        ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
        if (ret < 0)
                return ret;

        return count;
}

static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
                char *page)
{
        struct se_portal_group *se_tpg = to_tpg(item);
        struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
                        struct tcm_loop_tpg, tl_se_tpg);
        const char *status = NULL;
        ssize_t ret = -EINVAL;

        switch (tl_tpg->tl_transport_status) {
        case TCM_TRANSPORT_ONLINE:
                status = "online";
                break;
        case TCM_TRANSPORT_OFFLINE:
                status = "offline";
                break;
        default:
                break;
        }

        if (status)
                ret = snprintf(page, PAGE_SIZE, "%s\n", status);

        return ret;
}

static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
                const char *page, size_t count)
{
        struct se_portal_group *se_tpg = to_tpg(item);
        struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
                        struct tcm_loop_tpg, tl_se_tpg);

        if (!strncmp(page, "online", 6)) {
                tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
                return count;
        }
        if (!strncmp(page, "offline", 7)) {
                tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
                if (tl_tpg->tl_nexus) {
                        struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;

                        core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
                }
                return count;
        }
        return -EINVAL;
}

static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
                char *page)
{
        struct se_portal_group *se_tpg = to_tpg(item);
        struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
                        struct tcm_loop_tpg, tl_se_tpg);
        struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

        return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
                        tl_hba->sh->host_no, tl_tpg->tl_tpgt);
}

CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
CONFIGFS_ATTR_RO(tcm_loop_tpg_, address);

static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
        &tcm_loop_tpg_attr_nexus,
        &tcm_loop_tpg_attr_transport_status,
        &tcm_loop_tpg_attr_address,
        NULL,
};

/* Start items for tcm_loop_naa_cit */

static struct se_portal_group *tcm_loop_make_naa_tpg(
        struct se_wwn *wwn,
        struct config_group *group,
        const char *name)
{
        struct tcm_loop_hba *tl_hba = container_of(wwn,
                        struct tcm_loop_hba, tl_hba_wwn);
        struct tcm_loop_tpg *tl_tpg;
        int ret;
        unsigned long tpgt;

        if (strstr(name, "tpgt_") != name) {
                pr_err("Unable to locate \"tpgt_#\" directory"
                       " group\n");
                return ERR_PTR(-EINVAL);
        }
        if (kstrtoul(name+5, 10, &tpgt))
                return ERR_PTR(-EINVAL);

        if (tpgt >= TL_TPGS_PER_HBA) {
                pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA:"
                       " %u\n", tpgt, TL_TPGS_PER_HBA);
                return ERR_PTR(-EINVAL);
        }
        tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
        tl_tpg->tl_hba = tl_hba;
        tl_tpg->tl_tpgt = tpgt;
        /*
         * Register the tl_tpg as an emulated TCM Target Endpoint
         */
        ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
        if (ret < 0)
                return ERR_PTR(-ENOMEM);

        pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
                 " Target Port %s,t,0x%04lx\n", tcm_loop_dump_proto_id(tl_hba),
                 config_item_name(&wwn->wwn_group.cg_item), tpgt);
        return &tl_tpg->tl_se_tpg;
}

static void tcm_loop_drop_naa_tpg(
        struct se_portal_group *se_tpg)
{
        struct se_wwn *wwn = se_tpg->se_tpg_wwn;
        struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
                        struct tcm_loop_tpg, tl_se_tpg);
        struct tcm_loop_hba *tl_hba;
        unsigned short tpgt;

        tl_hba = tl_tpg->tl_hba;
        tpgt = tl_tpg->tl_tpgt;
        /*
         * Release the I_T Nexus for the Virtual target link if present
         */
        tcm_loop_drop_nexus(tl_tpg);
        /*
         * Deregister the tl_tpg as an emulated TCM Target Endpoint
         */
        core_tpg_deregister(se_tpg);

        tl_tpg->tl_hba = NULL;
        tl_tpg->tl_tpgt = 0;

        pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
                 " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
                 config_item_name(&wwn->wwn_group.cg_item), tpgt);
}

/* End items for tcm_loop_naa_cit */

/* Start items for tcm_loop_cit */

static struct se_wwn *tcm_loop_make_scsi_hba(
        struct target_fabric_configfs *tf,
        struct config_group *group,
        const char *name)
{
        struct tcm_loop_hba *tl_hba;
        struct Scsi_Host *sh;
        char *ptr;
        int ret, off = 0;

        tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
        if (!tl_hba) {
                pr_err("Unable to allocate struct tcm_loop_hba\n");
                return ERR_PTR(-ENOMEM);
        }

        /*
         * Determine the emulated Protocol Identifier and Target Port Name
         * based on the incoming configfs directory name.
         */
        ptr = strstr(name, "naa.");
        if (ptr) {
                tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
                goto check_len;
        }
        ptr = strstr(name, "fc.");
        if (ptr) {
                tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
                off = 3; /* Skip over "fc." */
                goto check_len;
        }
        ptr = strstr(name, "iqn.");
        if (!ptr) {
                pr_err("Unable to locate prefix for emulated Target "
                       "Port: %s\n", name);
                ret = -EINVAL;
                goto out;
        }
        tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;

check_len:
        if (strlen(name) >= TL_WWN_ADDR_LEN) {
                pr_err("Emulated NAA %s Address: %s, exceeds"
                       " max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
                       TL_WWN_ADDR_LEN);
                ret = -EINVAL;
                goto out;
        }
        snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);

        /*
         * Call device_register(tl_hba->dev) to register the emulated
         * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after
         * device_register() callbacks in tcm_loop_driver_probe()
         */
        ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
        if (ret)
                goto out;

        sh = tl_hba->sh;
        tcm_loop_hba_no_cnt++;
        pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
                 " %s Address: %s at Linux/SCSI Host ID: %d\n",
                 tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);

        return &tl_hba->tl_hba_wwn;
out:
        kfree(tl_hba);
        return ERR_PTR(ret);
}

static void tcm_loop_drop_scsi_hba(
        struct se_wwn *wwn)
{
        struct tcm_loop_hba *tl_hba = container_of(wwn,
                        struct tcm_loop_hba, tl_hba_wwn);

        pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
                 " %s Address: %s at Linux/SCSI Host ID: %d\n",
                 tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
                 tl_hba->sh->host_no);
        /*
         * Call device_unregister() on the original tl_hba->dev.
         * tcm_loop_release_adapter() will release *tl_hba;
         */
        device_unregister(&tl_hba->dev);
}

/* Start items for tcm_loop_cit */

static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
{
        return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
}

CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);

static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
        &tcm_loop_wwn_attr_version,
        NULL,
};

/* End items for tcm_loop_cit */
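
/*
 * Fabric API callbacks registered with the target core via
 * target_register_template() in tcm_loop_fabric_init() below.
 */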
static const struct target_core_fabric_ops loop_ops = {
        .module = THIS_MODULE,
        .name = "loopback",
        .get_fabric_name = tcm_loop_get_fabric_name,
        .tpg_get_wwn = tcm_loop_get_endpoint_wwn,
        .tpg_get_tag = tcm_loop_get_tag,
        .tpg_check_demo_mode = tcm_loop_check_demo_mode,
        .tpg_check_demo_mode_cache = tcm_loop_check_demo_mode_cache,
        .tpg_check_demo_mode_write_protect =
                                tcm_loop_check_demo_mode_write_protect,
        .tpg_check_prod_mode_write_protect =
                                tcm_loop_check_prod_mode_write_protect,
        .tpg_check_prot_fabric_only = tcm_loop_check_prot_fabric_only,
        .tpg_get_inst_index = tcm_loop_get_inst_index,
        .check_stop_free = tcm_loop_check_stop_free,
        .release_cmd = tcm_loop_release_cmd,
        .sess_get_index = tcm_loop_sess_get_index,
        .write_pending = tcm_loop_write_pending,
        .write_pending_status = tcm_loop_write_pending_status,
        .set_default_node_attributes = tcm_loop_set_default_node_attributes,
        .get_cmd_state = tcm_loop_get_cmd_state,
        .queue_data_in = tcm_loop_queue_data_in,
        .queue_status = tcm_loop_queue_status,
        .queue_tm_rsp = tcm_loop_queue_tm_rsp,
        .aborted_task = tcm_loop_aborted_task,
        .fabric_make_wwn = tcm_loop_make_scsi_hba,
        .fabric_drop_wwn = tcm_loop_drop_scsi_hba,
        .fabric_make_tpg = tcm_loop_make_naa_tpg,
        .fabric_drop_tpg = tcm_loop_drop_naa_tpg,
        .fabric_post_link = tcm_loop_port_link,
        .fabric_pre_unlink = tcm_loop_port_unlink,
        .tfc_wwn_attrs = tcm_loop_wwn_attrs,
        .tfc_tpg_base_attrs = tcm_loop_tpg_attrs,
        .tfc_tpg_attrib_attrs = tcm_loop_tpg_attrib_attrs,
};
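
/*
 * Module init: set up the submission workqueue and command cache, register
 * the emulated SCSI core bus, then register the fabric template with the
 * target core.
 */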
static int __init tcm_loop_fabric_init(void)
{
        int ret = -ENOMEM;

        tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
        if (!tcm_loop_workqueue)
                goto out;

        tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
                                sizeof(struct tcm_loop_cmd),
                                __alignof__(struct tcm_loop_cmd),
                                0, NULL);
        if (!tcm_loop_cmd_cache) {
                pr_debug("kmem_cache_create() for"
                         " tcm_loop_cmd_cache failed\n");
                goto out_destroy_workqueue;
        }

        ret = tcm_loop_alloc_core_bus();
        if (ret)
                goto out_destroy_cache;

        ret = target_register_template(&loop_ops);
        if (ret)
                goto out_release_core_bus;

        return 0;

out_release_core_bus:
        tcm_loop_release_core_bus();
out_destroy_cache:
        kmem_cache_destroy(tcm_loop_cmd_cache);
out_destroy_workqueue:
        destroy_workqueue(tcm_loop_workqueue);
out:
        return ret;
}

static void __exit tcm_loop_fabric_exit(void)
{
        target_unregister_template(&loop_ops);
        tcm_loop_release_core_bus();
        kmem_cache_destroy(tcm_loop_cmd_cache);
        destroy_workqueue(tcm_loop_workqueue);
}

MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
MODULE_LICENSE("GPL");
module_init(tcm_loop_fabric_init);
module_exit(tcm_loop_fabric_exit);