tcm_loop.c

/*******************************************************************************
 *
 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
 * for emulated SAS initiator ports
 *
 * © Copyright 2011-2013 Datera, Inc.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include "tcm_loop.h"

#define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)

static struct workqueue_struct *tcm_loop_workqueue;
static struct kmem_cache *tcm_loop_cmd_cache;

static int tcm_loop_hba_no_cnt;

static int tcm_loop_queue_status(struct se_cmd *se_cmd);

/*
 * Called from struct target_core_fabric_ops->check_stop_free()
 */
static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
{
	return transport_generic_free_cmd(se_cmd, 0);
}
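
/*
 * ->release_cmd() callback: invoked by the target core once its final
 * reference to the se_cmd is dropped; returns the tcm_loop_cmd descriptor
 * to tcm_loop_cmd_cache.
 */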
static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
}

static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	seq_printf(m, "tcm_loop_proc_info()\n");
	return 0;
}

static int tcm_loop_driver_probe(struct device *);
static int tcm_loop_driver_remove(struct device *);

static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type tcm_loop_lld_bus = {
	.name		= "tcm_loop_bus",
	.match		= pseudo_lld_bus_match,
	.probe		= tcm_loop_driver_probe,
	.remove		= tcm_loop_driver_remove,
};

static struct device_driver tcm_loop_driverfs = {
	.name		= "tcm_loop",
	.bus		= &tcm_loop_lld_bus,
};

/*
 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
 */
static struct device *tcm_loop_primary;
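
/*
 * Workqueue callback set up by tcm_loop_queuecommand(); runs in process
 * context, maps the struct scsi_cmnd onto the embedded struct se_cmd and
 * submits it to the target core via target_submit_cmd_map_sgls().
 */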
static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0, transfer_length;
	int rc;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		goto out_done;
	}
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc,
			    "TCM_Loop I_T Nexus does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}
	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;
	}

	transfer_length = scsi_transfer_length(sc);
	if (!scsi_prot_sg_count(sc) &&
	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
		se_cmd->prot_pto = true;
		/*
		 * loopback transport doesn't support
		 * WRITE_GENERATE, READ_STRIP protection
		 * information operations, go ahead unprotected.
		 */
		transfer_length = scsi_bufflen(sc);
	}

	se_cmd->tag = tl_cmd->sc_cmd_tag;
	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
			transfer_length, TCM_SIMPLE_TAG,
			sc->sc_data_direction, 0,
			scsi_sglist(sc), scsi_sg_count(sc),
			sgl_bidi, sgl_bidi_count,
			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
	if (rc < 0) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	return;

out_done:
	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	sc->scsi_done(sc);
	return;
}

/*
 * ->queuecommand can be and usually is called from interrupt context, so
 * defer the actual submission to a workqueue.
 */
static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
	struct tcm_loop_cmd *tl_cmd;

	pr_debug("%s() %d:%d:%d:%llu got CDB: 0x%02x scsi_buf_len: %u\n",
		 __func__, sc->device->host->host_no, sc->device->id,
		 sc->device->channel, sc->device->lun, sc->cmnd[0],
		 scsi_bufflen(sc));

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
	if (!tl_cmd) {
		set_host_byte(sc, DID_ERROR);
		sc->scsi_done(sc);
		return 0;
	}

	tl_cmd->sc = sc;
	tl_cmd->sc_cmd_tag = sc->request->tag;
	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
	queue_work(tcm_loop_workqueue, &tl_cmd->work);
	return 0;
}

/*
 * Called from SCSI EH process context to issue a TMR (ABORT_TASK or
 * LUN_RESET) against the emulated SCSI target
 */
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
			      u64 lun, int task, enum tcm_tmreq_table tmr)
{
	struct se_cmd *se_cmd = NULL;
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_cmd *tl_cmd = NULL;
	int ret = TMR_FUNCTION_FAILED, rc;

	/*
	 * Locate the tl_nexus and se_sess pointers
	 */
	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus) {
		pr_err("Unable to issue TMR without active I_T Nexus\n");
		return ret;
	}

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
	if (!tl_cmd)
		return ret;

	init_completion(&tl_cmd->tmr_done);

	se_cmd = &tl_cmd->tl_se_cmd;
	se_sess = tl_tpg->tl_nexus->se_sess;

	rc = target_submit_tmr(se_cmd, se_sess, tl_cmd->tl_sense_buf, lun,
			       NULL, tmr, GFP_KERNEL, task,
			       TARGET_SCF_ACK_KREF);
	if (rc < 0)
		goto release;
	wait_for_completion(&tl_cmd->tmr_done);
	ret = se_cmd->se_tmr_req->response;
	target_put_sess_cmd(se_cmd);

out:
	return ret;

release:
	if (se_cmd)
		transport_generic_free_cmd(se_cmd, 0);
	else
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	goto out;
}
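
/*
 * Called from SCSI EH process context to abort an individual struct
 * scsi_cmnd by issuing an ABORT_TASK TMR against the emulated target port.
 */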
static int tcm_loop_abort_task(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	int ret = FAILED;

	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
				 sc->request->tag, TMR_ABORT_TASK);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}

/*
 * Called from SCSI EH process context to issue a LUN_RESET TMR
 * to struct scsi_device
 */
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	int ret = FAILED;

	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
				 0, TMR_LUN_RESET);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
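
/*
 * Called from SCSI EH process context; marks the TPG transport status as
 * ONLINE again so that subsequent commands are accepted.
 */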
static int tcm_loop_target_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	/*
	 * Locate the struct tcm_loop_hba pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	if (!tl_hba) {
		pr_err("Unable to perform target reset without active I_T Nexus\n");
		return FAILED;
	}
	/*
	 * Locate the tl_tpg pointer from TargetID in sc->device->id
	 */
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	if (tl_tpg) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return SUCCESS;
	}
	return FAILED;
}
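
/*
 * Enable bidirectional command support on the request queue of every
 * struct scsi_device attached to the emulated host.
 */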
static int tcm_loop_slave_alloc(struct scsi_device *sd)
{
	set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
	return 0;
}

static struct scsi_host_template tcm_loop_driver_template = {
	.show_info		= tcm_loop_show_info,
	.proc_name		= "tcm_loopback",
	.name			= "TCM_Loopback",
	.queuecommand		= tcm_loop_queuecommand,
	.change_queue_depth	= scsi_change_queue_depth,
	.eh_abort_handler	= tcm_loop_abort_task,
	.eh_device_reset_handler = tcm_loop_device_reset,
	.eh_target_reset_handler = tcm_loop_target_reset,
	.can_queue		= 1024,
	.this_id		= -1,
	.sg_tablesize		= 256,
	.cmd_per_lun		= 1024,
	.max_sectors		= 0xFFFF,
	.use_clustering		= DISABLE_CLUSTERING,
	.slave_alloc		= tcm_loop_slave_alloc,
	.module			= THIS_MODULE,
	.track_queue_depth	= 1,
};
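
/*
 * Bus probe callback for tcm_loop_lld_bus: allocates and registers the
 * struct Scsi_Host backing the emulated HBA, including its DIF/DIX
 * protection capabilities.
 */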
static int tcm_loop_driver_probe(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	int error, host_prot;

	tl_hba = to_tcm_loop_hba(dev);

	sh = scsi_host_alloc(&tcm_loop_driver_template,
			     sizeof(struct tcm_loop_hba));
	if (!sh) {
		pr_err("Unable to allocate struct scsi_host\n");
		return -ENODEV;
	}
	tl_hba->sh = sh;

	/*
	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
	 */
	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
	/*
	 * Setup single ID, Channel and LUN for now..
	 */
	sh->max_id = 2;
	sh->max_lun = 0;
	sh->max_channel = 0;
	sh->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;

	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

	scsi_host_set_prot(sh, host_prot);
	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);

	error = scsi_add_host(sh, &tl_hba->dev);
	if (error) {
		pr_err("%s: scsi_add_host failed\n", __func__);
		scsi_host_put(sh);
		return -ENODEV;
	}
	return 0;
}

static int tcm_loop_driver_remove(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;

	tl_hba = to_tcm_loop_hba(dev);
	sh = tl_hba->sh;

	scsi_remove_host(sh);
	scsi_host_put(sh);
	return 0;
}

static void tcm_loop_release_adapter(struct device *dev)
{
	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);

	kfree(tl_hba);
}

/*
 * Called from tcm_loop_make_scsi_hba() below
 */
static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
{
	int ret;

	tl_hba->dev.bus = &tcm_loop_lld_bus;
	tl_hba->dev.parent = tcm_loop_primary;
	tl_hba->dev.release = &tcm_loop_release_adapter;
	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);

	ret = device_register(&tl_hba->dev);
	if (ret) {
		pr_err("device_register() failed for tl_hba->dev: %d\n", ret);
		return -ENODEV;
	}

	return 0;
}

/*
 * Called from tcm_loop_fabric_init() below to load the emulated
 * tcm_loop SCSI bus.
 */
static int tcm_loop_alloc_core_bus(void)
{
	int ret;

	tcm_loop_primary = root_device_register("tcm_loop_0");
	if (IS_ERR(tcm_loop_primary)) {
		pr_err("Unable to allocate tcm_loop_primary\n");
		return PTR_ERR(tcm_loop_primary);
	}

	ret = bus_register(&tcm_loop_lld_bus);
	if (ret) {
		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
		goto dev_unreg;
	}

	ret = driver_register(&tcm_loop_driverfs);
	if (ret) {
		pr_err("driver_register() failed for tcm_loop_driverfs\n");
		goto bus_unreg;
	}

	pr_debug("Initialized TCM Loop Core Bus\n");
	return ret;

bus_unreg:
	bus_unregister(&tcm_loop_lld_bus);
dev_unreg:
	root_device_unregister(tcm_loop_primary);
	return ret;
}

static void tcm_loop_release_core_bus(void)
{
	driver_unregister(&tcm_loop_driverfs);
	bus_unregister(&tcm_loop_lld_bus);
	root_device_unregister(tcm_loop_primary);

	pr_debug("Releasing TCM Loop Core Bus\n");
}

static char *tcm_loop_get_fabric_name(void)
{
	return "loopback";
}

static inline struct tcm_loop_tpg *tl_tpg(struct se_portal_group *se_tpg)
{
	return container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
}

static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
	/*
	 * Return the passed NAA identifier for the Target Port
	 */
	return &tl_tpg(se_tpg)->tl_hba->tl_wwn_address[0];
}

static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
{
	/*
	 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
	 * to represent the SCSI Target Port.
	 */
	return tl_tpg(se_tpg)->tl_tpgt;
}

/*
 * Returning (1) here allows for target_core_mod struct se_node_acl to be generated
 * based upon the incoming fabric dependent SCSI Initiator Port
 */
static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
{
	return 1;
}

static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs for
 * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
 */
static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
 * It has been added here as a nop for target_fabric_tf_ops_check()
 */
static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

static int tcm_loop_check_prot_fabric_only(struct se_portal_group *se_tpg)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	return tl_tpg->tl_fabric_prot_type;
}

static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
{
	return 1;
}

static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
{
	return;
}

static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	return tl_cmd->sc_cmd_state;
}

static int tcm_loop_write_pending(struct se_cmd *se_cmd)
{
	/*
	 * Since Linux/SCSI has already sent down a struct scsi_cmnd
	 * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array
	 * memory, and memory has already been mapped to struct se_cmd->t_mem_list
	 * format with transport_generic_map_mem_to_cmd().
	 *
	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
	 * object execution queue.
	 */
	target_execute_cmd(se_cmd);
	return 0;
}

static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}
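
/*
 * Called by the target core to hand completed READ data back to the
 * initiating struct scsi_cmnd and complete it toward the SCSI midlayer.
 */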
static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
		 __func__, sc, sc->cmnd[0]);

	sc->result = SAM_STAT_GOOD;
	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}
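
/*
 * Called by the target core to return SCSI status (and sense data, if
 * present) to the initiating struct scsi_cmnd.
 */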
static int tcm_loop_queue_status(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("%s() called for scsi_cmnd: %p cdb: 0x%02x\n",
		 __func__, sc, sc->cmnd[0]);

	if (se_cmd->sense_buffer &&
	    ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	     (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {
		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
		       SCSI_SENSE_BUFFERSIZE);
		sc->result = SAM_STAT_CHECK_CONDITION;
		set_driver_byte(sc, DRIVER_SENSE);
	} else
		sc->result = se_cmd->scsi_status;

	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}

static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	/* Wake up tcm_loop_issue_tmr(). */
	complete(&tl_cmd->tmr_done);
}

static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
{
	switch (tl_hba->tl_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

/* Start items for tcm_loop_port_cit */

static int tcm_loop_port_link(
	struct se_portal_group *se_tpg,
	struct se_lun *lun)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
	/*
	 * Add Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);

	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
	return 0;
}

static void tcm_loop_port_unlink(
	struct se_portal_group *se_tpg,
	struct se_lun *se_lun)
{
	struct scsi_device *sd;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
	tl_hba = tl_tpg->tl_hba;

	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
				se_lun->unpacked_lun);
	if (!sd) {
		pr_err("Unable to locate struct scsi_device for %d:%d:%llu\n",
		       0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
		return;
	}
	/*
	 * Remove Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_remove_device(sd);
	scsi_device_put(sd);

	atomic_dec_mb(&tl_tpg->tl_tpg_port_count);

	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}

/* End items for tcm_loop_port_cit */

static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_show(
		struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);

	return sprintf(page, "%d\n", tl_tpg->tl_fabric_prot_type);
}

static ssize_t tcm_loop_tpg_attrib_fabric_prot_type_store(
		struct config_item *item, const char *page, size_t count)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg, struct tcm_loop_tpg,
						   tl_se_tpg);
	unsigned long val;
	int ret = kstrtoul(page, 0, &val);

	if (ret) {
		pr_err("kstrtoul() returned %d for fabric_prot_type\n", ret);
		return ret;
	}
	if (val != 0 && val != 1 && val != 3) {
		pr_err("Invalid tcm_loop fabric_prot_type: %lu\n", val);
		return -EINVAL;
	}
	tl_tpg->tl_fabric_prot_type = val;

	return count;
}

CONFIGFS_ATTR(tcm_loop_tpg_attrib_, fabric_prot_type);

static struct configfs_attribute *tcm_loop_tpg_attrib_attrs[] = {
	&tcm_loop_tpg_attrib_attr_fabric_prot_type,
	NULL,
};

/* Start items for tcm_loop_nexus_cit */

static int tcm_loop_alloc_sess_cb(struct se_portal_group *se_tpg,
				  struct se_session *se_sess, void *p)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);

	tl_tpg->tl_nexus = p;
	return 0;
}
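
/*
 * Establish the emulated I_T nexus for a TPG by allocating a target core
 * session for the supplied initiator WWN.
 */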
static int tcm_loop_make_nexus(
	struct tcm_loop_tpg *tl_tpg,
	const char *name)
{
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	struct tcm_loop_nexus *tl_nexus;
	int ret;

	if (tl_tpg->tl_nexus) {
		pr_debug("tl_tpg->tl_nexus already exists\n");
		return -EEXIST;
	}

	tl_nexus = kzalloc(sizeof(*tl_nexus), GFP_KERNEL);
	if (!tl_nexus)
		return -ENOMEM;

	tl_nexus->se_sess = target_alloc_session(&tl_tpg->tl_se_tpg, 0, 0,
					TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS,
					name, tl_nexus, tcm_loop_alloc_sess_cb);
	if (IS_ERR(tl_nexus->se_sess)) {
		ret = PTR_ERR(tl_nexus->se_sess);
		kfree(tl_nexus);
		return ret;
	}

	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated %s Initiator Port: %s\n",
		 tcm_loop_dump_proto_id(tl_hba), name);
	return 0;
}
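
/*
 * Tear down the emulated I_T nexus, refusing to do so while any TPG port
 * (LUN mapping) is still active.
 */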
static int tcm_loop_drop_nexus(
	struct tcm_loop_tpg *tpg)
{
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus;

	tl_nexus = tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	se_sess = tl_nexus->se_sess;
	if (!se_sess)
		return -ENODEV;

	if (atomic_read(&tpg->tl_tpg_port_count)) {
		pr_err("Unable to remove TCM_Loop I_T Nexus with active TPG port count: %d\n",
		       atomic_read(&tpg->tl_tpg_port_count));
		return -EPERM;
	}

	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated %s Initiator Port: %s\n",
		 tcm_loop_dump_proto_id(tpg->tl_hba),
		 tl_nexus->se_sess->se_node_acl->initiatorname);
	/*
	 * Release the SCSI I_T Nexus to the emulated Target Port
	 */
	transport_deregister_session(tl_nexus->se_sess);
	tpg->tl_nexus = NULL;
	kfree(tl_nexus);
	return 0;
}

/* End items for tcm_loop_nexus_cit */

static ssize_t tcm_loop_tpg_nexus_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_nexus *tl_nexus;
	ssize_t ret;

	tl_nexus = tl_tpg->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	ret = snprintf(page, PAGE_SIZE, "%s\n",
		       tl_nexus->se_sess->se_node_acl->initiatorname);

	return ret;
}

static ssize_t tcm_loop_tpg_nexus_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
	int ret;

	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = tcm_loop_drop_nexus(tl_tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
	 * tcm_loop_make_nexus()
	 */
	if (strlen(page) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA SAS Address: %s, exceeds max: %d\n",
		       page, TL_WWN_ADDR_LEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not match target port protoid: %s\n",
			       i_port, tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port: %s\n",
	       i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

static ssize_t tcm_loop_tpg_transport_status_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	const char *status = NULL;
	ssize_t ret = -EINVAL;

	switch (tl_tpg->tl_transport_status) {
	case TCM_TRANSPORT_ONLINE:
		status = "online";
		break;
	case TCM_TRANSPORT_OFFLINE:
		status = "offline";
		break;
	default:
		break;
	}

	if (status)
		ret = snprintf(page, PAGE_SIZE, "%s\n", status);

	return ret;
}

static ssize_t tcm_loop_tpg_transport_status_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);

	if (!strncmp(page, "online", 6)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return count;
	}
	if (!strncmp(page, "offline", 7)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
		if (tl_tpg->tl_nexus) {
			struct se_session *tl_sess = tl_tpg->tl_nexus->se_sess;

			core_allocate_nexus_loss_ua(tl_sess->se_node_acl);
		}
		return count;
	}
	return -EINVAL;
}

static ssize_t tcm_loop_tpg_address_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	return snprintf(page, PAGE_SIZE, "%d:0:%d\n",
			tl_hba->sh->host_no, tl_tpg->tl_tpgt);
}

CONFIGFS_ATTR(tcm_loop_tpg_, nexus);
CONFIGFS_ATTR(tcm_loop_tpg_, transport_status);
CONFIGFS_ATTR_RO(tcm_loop_tpg_, address);

static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
	&tcm_loop_tpg_attr_nexus,
	&tcm_loop_tpg_attr_transport_status,
	&tcm_loop_tpg_attr_address,
	NULL,
};

/* Start items for tcm_loop_naa_cit */

static struct se_portal_group *tcm_loop_make_naa_tpg(
	struct se_wwn *wwn,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
				struct tcm_loop_hba, tl_hba_wwn);
	struct tcm_loop_tpg *tl_tpg;
	int ret;
	unsigned long tpgt;

	if (strstr(name, "tpgt_") != name) {
		pr_err("Unable to locate \"tpgt_#\" directory group\n");
		return ERR_PTR(-EINVAL);
	}
	if (kstrtoul(name+5, 10, &tpgt))
		return ERR_PTR(-EINVAL);

	if (tpgt >= TL_TPGS_PER_HBA) {
		pr_err("Passed tpgt: %lu exceeds TL_TPGS_PER_HBA: %u\n",
		       tpgt, TL_TPGS_PER_HBA);
		return ERR_PTR(-EINVAL);
	}
	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
	tl_tpg->tl_hba = tl_hba;
	tl_tpg->tl_tpgt = tpgt;
	/*
	 * Register the tl_tpg as an emulated TCM Target Endpoint
	 */
	ret = core_tpg_register(wwn, &tl_tpg->tl_se_tpg, tl_hba->tl_proto_id);
	if (ret < 0)
		return ERR_PTR(-ENOMEM);

	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s Target Port %s,t,0x%04lx\n",
		 tcm_loop_dump_proto_id(tl_hba),
		 config_item_name(&wwn->wwn_group.cg_item), tpgt);
	return &tl_tpg->tl_se_tpg;
}

static void tcm_loop_drop_naa_tpg(
	struct se_portal_group *se_tpg)
{
	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba;
	unsigned short tpgt;

	tl_hba = tl_tpg->tl_hba;
	tpgt = tl_tpg->tl_tpgt;
	/*
	 * Release the I_T Nexus for the Virtual target link if present
	 */
	tcm_loop_drop_nexus(tl_tpg);
	/*
	 * Deregister the tl_tpg as an emulated TCM Target Endpoint
	 */
	core_tpg_deregister(se_tpg);

	tl_tpg->tl_hba = NULL;
	tl_tpg->tl_tpgt = 0;

	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s Target Port %s,t,0x%04x\n",
		 tcm_loop_dump_proto_id(tl_hba),
		 config_item_name(&wwn->wwn_group.cg_item), tpgt);
}

/* End items for tcm_loop_naa_cit */

/* Start items for tcm_loop_cit */

static struct se_wwn *tcm_loop_make_scsi_hba(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	char *ptr;
	int ret, off = 0;

	tl_hba = kzalloc(sizeof(*tl_hba), GFP_KERNEL);
	if (!tl_hba)
		return ERR_PTR(-ENOMEM);

	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (!ptr) {
		pr_err("Unable to locate prefix for emulated Target Port: %s\n",
		       name);
		ret = -EINVAL;
		goto out;
	}
	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;

check_len:
	if (strlen(name) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA %s Address: %s, exceeds max: %d\n",
		       tcm_loop_dump_proto_id(tl_hba), name, TL_WWN_ADDR_LEN);
		ret = -EINVAL;
		goto out;
	}
	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);

	/*
	 * Call device_register(tl_hba->dev) to register the emulated
	 * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after
	 * device_register() callbacks in tcm_loop_driver_probe()
	 */
	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
	if (ret)
		goto out;

	sh = tl_hba->sh;
	tcm_loop_hba_no_cnt++;
	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
		 tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
	return &tl_hba->tl_hba_wwn;
out:
	kfree(tl_hba);
	return ERR_PTR(ret);
}

static void tcm_loop_drop_scsi_hba(
	struct se_wwn *wwn)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
				struct tcm_loop_hba, tl_hba_wwn);

	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target %s Address: %s at Linux/SCSI Host ID: %d\n",
		 tcm_loop_dump_proto_id(tl_hba), tl_hba->tl_wwn_address,
		 tl_hba->sh->host_no);
	/*
	 * Call device_unregister() on the original tl_hba->dev.
	 * tcm_loop_release_adapter() above will release *tl_hba;
	 */
	device_unregister(&tl_hba->dev);
}

/* Start items for tcm_loop_cit */

static ssize_t tcm_loop_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
}

CONFIGFS_ATTR_RO(tcm_loop_wwn_, version);

static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
	&tcm_loop_wwn_attr_version,
	NULL,
};

/* End items for tcm_loop_cit */

static const struct target_core_fabric_ops loop_ops = {
	.module				= THIS_MODULE,
	.name				= "loopback",
	.get_fabric_name		= tcm_loop_get_fabric_name,
	.tpg_get_wwn			= tcm_loop_get_endpoint_wwn,
	.tpg_get_tag			= tcm_loop_get_tag,
	.tpg_check_demo_mode		= tcm_loop_check_demo_mode,
	.tpg_check_demo_mode_cache	= tcm_loop_check_demo_mode_cache,
	.tpg_check_demo_mode_write_protect =
				tcm_loop_check_demo_mode_write_protect,
	.tpg_check_prod_mode_write_protect =
				tcm_loop_check_prod_mode_write_protect,
	.tpg_check_prot_fabric_only	= tcm_loop_check_prot_fabric_only,
	.tpg_get_inst_index		= tcm_loop_get_inst_index,
	.check_stop_free		= tcm_loop_check_stop_free,
	.release_cmd			= tcm_loop_release_cmd,
	.sess_get_index			= tcm_loop_sess_get_index,
	.write_pending			= tcm_loop_write_pending,
	.write_pending_status		= tcm_loop_write_pending_status,
	.set_default_node_attributes	= tcm_loop_set_default_node_attributes,
	.get_cmd_state			= tcm_loop_get_cmd_state,
	.queue_data_in			= tcm_loop_queue_data_in,
	.queue_status			= tcm_loop_queue_status,
	.queue_tm_rsp			= tcm_loop_queue_tm_rsp,
	.aborted_task			= tcm_loop_aborted_task,
	.fabric_make_wwn		= tcm_loop_make_scsi_hba,
	.fabric_drop_wwn		= tcm_loop_drop_scsi_hba,
	.fabric_make_tpg		= tcm_loop_make_naa_tpg,
	.fabric_drop_tpg		= tcm_loop_drop_naa_tpg,
	.fabric_post_link		= tcm_loop_port_link,
	.fabric_pre_unlink		= tcm_loop_port_unlink,
	.tfc_wwn_attrs			= tcm_loop_wwn_attrs,
	.tfc_tpg_base_attrs		= tcm_loop_tpg_attrs,
	.tfc_tpg_attrib_attrs		= tcm_loop_tpg_attrib_attrs,
};
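
/*
 * Module init: create the submission workqueue and command cache, register
 * the pseudo bus/driver pair, then register the loopback fabric template
 * with the target core.
 */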
static int __init tcm_loop_fabric_init(void)
{
	int ret = -ENOMEM;

	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
	if (!tcm_loop_workqueue)
		goto out;

	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
					sizeof(struct tcm_loop_cmd),
					__alignof__(struct tcm_loop_cmd),
					0, NULL);
	if (!tcm_loop_cmd_cache) {
		pr_debug("kmem_cache_create() for tcm_loop_cmd_cache failed\n");
		goto out_destroy_workqueue;
	}

	ret = tcm_loop_alloc_core_bus();
	if (ret)
		goto out_destroy_cache;

	ret = target_register_template(&loop_ops);
	if (ret)
		goto out_release_core_bus;

	return 0;

out_release_core_bus:
	tcm_loop_release_core_bus();
out_destroy_cache:
	kmem_cache_destroy(tcm_loop_cmd_cache);
out_destroy_workqueue:
	destroy_workqueue(tcm_loop_workqueue);
out:
	return ret;
}

static void __exit tcm_loop_fabric_exit(void)
{
	target_unregister_template(&loop_ops);
	tcm_loop_release_core_bus();
	kmem_cache_destroy(tcm_loop_cmd_cache);
	destroy_workqueue(tcm_loop_workqueue);
}

MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
MODULE_LICENSE("GPL");
module_init(tcm_loop_fabric_init);
module_exit(tcm_loop_fabric_exit);