tcm_loop.c

/*******************************************************************************
 *
 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
 * for emulated SAS initiator ports
 *
 * © Copyright 2011-2013 Datera, Inc.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>

#include "tcm_loop.h"

#define to_tcm_loop_hba(hba)	container_of(hba, struct tcm_loop_hba, dev)

/* Local pointer to allocated TCM configfs fabric module */
static struct target_fabric_configfs *tcm_loop_fabric_configfs;

static struct workqueue_struct *tcm_loop_workqueue;
static struct kmem_cache *tcm_loop_cmd_cache;

static int tcm_loop_hba_no_cnt;

static int tcm_loop_queue_status(struct se_cmd *se_cmd);

/*
 * Called from struct target_core_fabric_ops->check_stop_free()
 */
static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
{
	/*
	 * Do not release struct se_cmd's containing a valid TMR
	 * pointer. These will be released directly in tcm_loop_device_reset()
	 * with transport_generic_free_cmd().
	 */
	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
		return 0;
	/*
	 * Release the struct se_cmd, which will make a callback to release
	 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
	 */
	transport_generic_free_cmd(se_cmd, 0);
	return 1;
}
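
/*
 * ->release_cmd() callback from TCM core: return the per-command
 * struct tcm_loop_cmd back to tcm_loop_cmd_cache.
 */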
static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);

	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
}

static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	seq_printf(m, "tcm_loop_proc_info()\n");
	return 0;
}

static int tcm_loop_driver_probe(struct device *);
static int tcm_loop_driver_remove(struct device *);

static int pseudo_lld_bus_match(struct device *dev,
				struct device_driver *dev_driver)
{
	return 1;
}

static struct bus_type tcm_loop_lld_bus = {
	.name		= "tcm_loop_bus",
	.match		= pseudo_lld_bus_match,
	.probe		= tcm_loop_driver_probe,
	.remove		= tcm_loop_driver_remove,
};

static struct device_driver tcm_loop_driverfs = {
	.name		= "tcm_loop",
	.bus		= &tcm_loop_lld_bus,
};

/*
 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
 */
struct device *tcm_loop_primary;

/*
 * Copied from drivers/scsi/libfc/fc_fcp.c:fc_change_queue_depth() and
 * drivers/scsi/libiscsi.c:iscsi_change_queue_depth()
 */
static int tcm_loop_change_queue_depth(
	struct scsi_device *sdev,
	int depth,
	int reason)
{
	switch (reason) {
	case SCSI_QDEPTH_DEFAULT:
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
		break;
	case SCSI_QDEPTH_QFULL:
		scsi_track_queue_full(sdev, depth);
		break;
	case SCSI_QDEPTH_RAMP_UP:
		scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
		break;
	default:
		return -EOPNOTSUPP;
	}
	return sdev->queue_depth;
}
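
/*
 * struct scsi_host_template->change_queue_type callback: switch the
 * struct scsi_device between tagged and untagged command queueing via
 * scsi_activate_tcq()/scsi_deactivate_tcq().
 */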
static int tcm_loop_change_queue_type(struct scsi_device *sdev, int tag)
{
	if (sdev->tagged_supported) {
		scsi_set_tag_type(sdev, tag);

		if (tag)
			scsi_activate_tcq(sdev, sdev->queue_depth);
		else
			scsi_deactivate_tcq(sdev, sdev->queue_depth);
	} else
		tag = 0;

	return tag;
}

/*
 * Locate the SAM Task Attr from struct scsi_cmnd *
 */
static int tcm_loop_sam_attr(struct scsi_cmnd *sc)
{
	if (sc->device->tagged_supported) {
		switch (sc->tag) {
		case HEAD_OF_QUEUE_TAG:
			return MSG_HEAD_TAG;
		case ORDERED_QUEUE_TAG:
			return MSG_ORDERED_TAG;
		default:
			break;
		}
	}

	return MSG_SIMPLE_TAG;
}
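
/*
 * Workqueue handler that maps an incoming struct scsi_cmnd onto its
 * struct se_cmd and submits it to TCM core via target_submit_cmd_map_sgls(),
 * reusing the Linux/SCSI scatterlists directly.
 */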
static void tcm_loop_submission_work(struct work_struct *work)
{
	struct tcm_loop_cmd *tl_cmd =
		container_of(work, struct tcm_loop_cmd, work);
	struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
	struct scsi_cmnd *sc = tl_cmd->sc;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;
	struct scatterlist *sgl_bidi = NULL;
	u32 sgl_bidi_count = 0, transfer_length;
	int rc;

	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

	/*
	 * Ensure that this tl_tpg reference from the incoming sc->device->id
	 * has already been configured via tcm_loop_make_naa_tpg().
	 */
	if (!tl_tpg->tl_hba) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
		set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
		goto out_done;
	}
	tl_nexus = tl_hba->tl_nexus;
	if (!tl_nexus) {
		scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
				" does not exist\n");
		set_host_byte(sc, DID_ERROR);
		goto out_done;
	}
	if (scsi_bidi_cmnd(sc)) {
		struct scsi_data_buffer *sdb = scsi_in(sc);

		sgl_bidi = sdb->table.sgl;
		sgl_bidi_count = sdb->table.nents;
		se_cmd->se_cmd_flags |= SCF_BIDI;
	}

	transfer_length = scsi_transfer_length(sc);
	if (!scsi_prot_sg_count(sc) &&
	    scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
		se_cmd->prot_pto = true;
		/*
		 * loopback transport doesn't support
		 * WRITE_GENERATE, READ_STRIP protection
		 * information operations, go ahead unprotected.
		 */
		transfer_length = scsi_bufflen(sc);
	}

	rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
			&tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
			transfer_length, tcm_loop_sam_attr(sc),
			sc->sc_data_direction, 0,
			scsi_sglist(sc), scsi_sg_count(sc),
			sgl_bidi, sgl_bidi_count,
			scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
	if (rc < 0) {
		set_host_byte(sc, DID_NO_CONNECT);
		goto out_done;
	}
	return;

out_done:
	kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	sc->scsi_done(sc);
	return;
}

/*
 * ->queuecommand can be and usually is called from interrupt context, so
 * defer the actual submission to a workqueue.
 */
static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
	struct tcm_loop_cmd *tl_cmd;

	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
		" scsi_buf_len: %u\n", sc->device->host->host_no,
		sc->device->id, sc->device->channel, sc->device->lun,
		sc->cmnd[0], scsi_bufflen(sc));

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
	if (!tl_cmd) {
		pr_err("Unable to allocate struct tcm_loop_cmd\n");
		set_host_byte(sc, DID_ERROR);
		sc->scsi_done(sc);
		return 0;
	}

	tl_cmd->sc = sc;
	tl_cmd->sc_cmd_tag = sc->tag;
	INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
	queue_work(tcm_loop_workqueue, &tl_cmd->work);
	return 0;
}

/*
 * Called from SCSI EH process context to issue a TMR (ABORT_TASK or
 * LUN_RESET) against the struct se_lun backing a struct scsi_device
 */
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
			      struct tcm_loop_nexus *tl_nexus,
			      int lun, int task, enum tcm_tmreq_table tmr)
{
	struct se_cmd *se_cmd = NULL;
	struct se_session *se_sess;
	struct se_portal_group *se_tpg;
	struct tcm_loop_cmd *tl_cmd = NULL;
	struct tcm_loop_tmr *tl_tmr = NULL;
	int ret = TMR_FUNCTION_FAILED, rc;

	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
	if (!tl_cmd) {
		pr_err("Unable to allocate memory for tl_cmd\n");
		return ret;
	}

	tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
	if (!tl_tmr) {
		pr_err("Unable to allocate memory for tl_tmr\n");
		goto release;
	}
	init_waitqueue_head(&tl_tmr->tl_tmr_wait);

	se_cmd = &tl_cmd->tl_se_cmd;
	se_tpg = &tl_tpg->tl_se_tpg;
	se_sess = tl_nexus->se_sess;
	/*
	 * Initialize struct se_cmd descriptor from target_core_mod infrastructure
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
				DMA_NONE, MSG_SIMPLE_TAG,
				&tl_cmd->tl_sense_buf[0]);

	rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
	if (rc < 0)
		goto release;

	if (tmr == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = task;

	/*
	 * Locate the underlying TCM struct se_lun
	 */
	if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
		ret = TMR_LUN_DOES_NOT_EXIST;
		goto release;
	}
	/*
	 * Queue the TMR to TCM Core and sleep waiting for
	 * tcm_loop_queue_tm_rsp() to wake us up.
	 */
	transport_generic_handle_tmr(se_cmd);
	wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
	/*
	 * The TMR LUN_RESET has completed, check the response status and
	 * then release allocations.
	 */
	ret = se_cmd->se_tmr_req->response;
release:
	if (se_cmd)
		transport_generic_free_cmd(se_cmd, 1);
	else
		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
	kfree(tl_tmr);
	return ret;
}
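
/*
 * SCSI EH abort handler: aborts a single outstanding command by issuing
 * a TMR_ABORT_TASK referencing sc->tag through tcm_loop_issue_tmr().
 */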
static int tcm_loop_abort_task(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_tpg *tl_tpg;
	int ret = FAILED;

	/*
	 * Locate the tcm_loop_hba_t pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	/*
	 * Locate the tl_nexus and se_sess pointers
	 */
	tl_nexus = tl_hba->tl_nexus;
	if (!tl_nexus) {
		pr_err("Unable to abort task without"
			" active I_T Nexus\n");
		return FAILED;
	}
	/*
	 * Locate the tl_tpg pointer from TargetID in sc->device->id
	 */
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
				 sc->tag, TMR_ABORT_TASK);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}

/*
 * Called from SCSI EH process context to issue a LUN_RESET TMR
 * to struct scsi_device
 */
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_tpg *tl_tpg;
	int ret = FAILED;

	/*
	 * Locate the tcm_loop_hba_t pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	/*
	 * Locate the tl_nexus and se_sess pointers
	 */
	tl_nexus = tl_hba->tl_nexus;
	if (!tl_nexus) {
		pr_err("Unable to perform device reset without"
			" active I_T Nexus\n");
		return FAILED;
	}
	/*
	 * Locate the tl_tpg pointer from TargetID in sc->device->id
	 */
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
				 0, TMR_LUN_RESET);
	return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
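
/*
 * SCSI EH target reset handler: flips the matching TPG back to
 * TCM_TRANSPORT_ONLINE so incoming commands are accepted again.
 */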
static int tcm_loop_target_reset(struct scsi_cmnd *sc)
{
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	/*
	 * Locate the tcm_loop_hba_t pointer
	 */
	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
	if (!tl_hba) {
		pr_err("Unable to perform target reset without"
			" active I_T Nexus\n");
		return FAILED;
	}
	/*
	 * Locate the tl_tpg pointer from TargetID in sc->device->id
	 */
	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
	if (tl_tpg) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return SUCCESS;
	}
	return FAILED;
}
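
/*
 * slave_alloc callback: mark the request_queue as BIDI capable so
 * bidirectional commands can be queued to this emulated HBA.
 */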
static int tcm_loop_slave_alloc(struct scsi_device *sd)
{
	set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
	return 0;
}

static int tcm_loop_slave_configure(struct scsi_device *sd)
{
	if (sd->tagged_supported) {
		scsi_activate_tcq(sd, sd->queue_depth);
		scsi_adjust_queue_depth(sd, MSG_SIMPLE_TAG,
					sd->host->cmd_per_lun);
	} else {
		scsi_adjust_queue_depth(sd, 0,
					sd->host->cmd_per_lun);
	}

	return 0;
}

static struct scsi_host_template tcm_loop_driver_template = {
	.show_info		= tcm_loop_show_info,
	.proc_name		= "tcm_loopback",
	.name			= "TCM_Loopback",
	.queuecommand		= tcm_loop_queuecommand,
	.change_queue_depth	= tcm_loop_change_queue_depth,
	.change_queue_type	= tcm_loop_change_queue_type,
	.eh_abort_handler	= tcm_loop_abort_task,
	.eh_device_reset_handler = tcm_loop_device_reset,
	.eh_target_reset_handler = tcm_loop_target_reset,
	.can_queue		= 1024,
	.this_id		= -1,
	.sg_tablesize		= 256,
	.cmd_per_lun		= 1024,
	.max_sectors		= 0xFFFF,
	.use_clustering		= DISABLE_CLUSTERING,
	.slave_alloc		= tcm_loop_slave_alloc,
	.slave_configure	= tcm_loop_slave_configure,
	.module			= THIS_MODULE,
};
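
/*
 * Bus probe callback: allocates the struct Scsi_Host for this emulated
 * HBA, advertises DIF/DIX protection support, and registers the host
 * with the SCSI midlayer via scsi_add_host().
 */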
static int tcm_loop_driver_probe(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	int error, host_prot;

	tl_hba = to_tcm_loop_hba(dev);

	sh = scsi_host_alloc(&tcm_loop_driver_template,
			sizeof(struct tcm_loop_hba));
	if (!sh) {
		pr_err("Unable to allocate struct scsi_host\n");
		return -ENODEV;
	}
	tl_hba->sh = sh;

	/*
	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
	 */
	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
	/*
	 * Setup single ID, Channel and LUN for now..
	 */
	sh->max_id = 2;
	sh->max_lun = 0;
	sh->max_channel = 0;
	sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN;

	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

	scsi_host_set_prot(sh, host_prot);
	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);

	error = scsi_add_host(sh, &tl_hba->dev);
	if (error) {
		pr_err("%s: scsi_add_host failed\n", __func__);
		scsi_host_put(sh);
		return -ENODEV;
	}
	return 0;
}

static int tcm_loop_driver_remove(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;

	tl_hba = to_tcm_loop_hba(dev);
	sh = tl_hba->sh;

	scsi_remove_host(sh);
	scsi_host_put(sh);
	return 0;
}

static void tcm_loop_release_adapter(struct device *dev)
{
	struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);

	kfree(tl_hba);
}

/*
 * Called from tcm_loop_make_scsi_hba() below in this file
 */
static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
{
	int ret;

	tl_hba->dev.bus = &tcm_loop_lld_bus;
	tl_hba->dev.parent = tcm_loop_primary;
	tl_hba->dev.release = &tcm_loop_release_adapter;
	dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);

	ret = device_register(&tl_hba->dev);
	if (ret) {
		pr_err("device_register() failed for"
				" tl_hba->dev: %d\n", ret);
		return -ENODEV;
	}

	return 0;
}

/*
 * Called from tcm_loop_fabric_init() below to load the emulated
 * tcm_loop SCSI bus.
 */
static int tcm_loop_alloc_core_bus(void)
{
	int ret;

	tcm_loop_primary = root_device_register("tcm_loop_0");
	if (IS_ERR(tcm_loop_primary)) {
		pr_err("Unable to allocate tcm_loop_primary\n");
		return PTR_ERR(tcm_loop_primary);
	}

	ret = bus_register(&tcm_loop_lld_bus);
	if (ret) {
		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
		goto dev_unreg;
	}

	ret = driver_register(&tcm_loop_driverfs);
	if (ret) {
		pr_err("driver_register() failed for"
				" tcm_loop_driverfs\n");
		goto bus_unreg;
	}

	pr_debug("Initialized TCM Loop Core Bus\n");
	return ret;

bus_unreg:
	bus_unregister(&tcm_loop_lld_bus);
dev_unreg:
	root_device_unregister(tcm_loop_primary);
	return ret;
}

static void tcm_loop_release_core_bus(void)
{
	driver_unregister(&tcm_loop_driverfs);
	bus_unregister(&tcm_loop_lld_bus);
	root_device_unregister(tcm_loop_primary);

	pr_debug("Releasing TCM Loop Core BUS\n");
}
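
/*
 * Fabric name reported to TCM core; matches the "loopback" name passed
 * to target_fabric_configfs_init() in tcm_loop_register_configfs() below.
 */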
static char *tcm_loop_get_fabric_name(void)
{
	return "loopback";
}

static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	/*
	 * tl_proto_id is set at tcm_loop_make_scsi_hba() time based on
	 * the protocol dependent prefix of the passed configfs group.
	 *
	 * Based upon tl_proto_id, TCM_Loop emulates the requested fabric
	 * ProtocolID using target_core_fabric_lib.c symbols.
	 */
	switch (tl_hba->tl_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_fabric_proto_ident(se_tpg);
	case SCSI_PROTOCOL_FCP:
		return fc_get_fabric_proto_ident(se_tpg);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_fabric_proto_ident(se_tpg);
	default:
		pr_err("Unknown tl_proto_id: 0x%02x, using"
			" SAS emulation\n", tl_hba->tl_proto_id);
		break;
	}

	return sas_get_fabric_proto_ident(se_tpg);
}

static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
	/*
	 * Return the passed NAA identifier for the SAS Target Port
	 */
	return &tl_tpg->tl_hba->tl_wwn_address[0];
}

static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
{
	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
	/*
	 * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
	 * to represent the SCSI Target Port.
	 */
	return tl_tpg->tl_tpgt;
}

static u32 tcm_loop_get_default_depth(struct se_portal_group *se_tpg)
{
	return 1;
}

static u32 tcm_loop_get_pr_transport_id(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code,
	unsigned char *buf)
{
	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	switch (tl_hba->tl_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	case SCSI_PROTOCOL_FCP:
		return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					format_code, buf);
	default:
		pr_err("Unknown tl_proto_id: 0x%02x, using"
			" SAS emulation\n", tl_hba->tl_proto_id);
		break;
	}

	return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
			format_code, buf);
}

static u32 tcm_loop_get_pr_transport_id_len(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code)
{
	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	switch (tl_hba->tl_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	case SCSI_PROTOCOL_FCP:
		return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
					format_code);
	default:
		pr_err("Unknown tl_proto_id: 0x%02x, using"
			" SAS emulation\n", tl_hba->tl_proto_id);
		break;
	}

	return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
			format_code);
}

/*
 * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
 * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
 */
static char *tcm_loop_parse_pr_out_transport_id(
	struct se_portal_group *se_tpg,
	const char *buf,
	u32 *out_tid_len,
	char **port_nexus_ptr)
{
	struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	switch (tl_hba->tl_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	case SCSI_PROTOCOL_FCP:
		return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	case SCSI_PROTOCOL_ISCSI:
		return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
					port_nexus_ptr);
	default:
		pr_err("Unknown tl_proto_id: 0x%02x, using"
			" SAS emulation\n", tl_hba->tl_proto_id);
		break;
	}

	return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
			port_nexus_ptr);
}

/*
 * Returning (1) here allows for target_core_mod struct se_node_acl to be generated
 * based upon the incoming fabric dependent SCSI Initiator Port
 */
static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
{
	return 1;
}

static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs for
 * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
 */
static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

/*
 * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
 * It has been added here as a nop for target_fabric_tf_ops_check()
 */
static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
{
	return 0;
}

static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl(
	struct se_portal_group *se_tpg)
{
	struct tcm_loop_nacl *tl_nacl;

	tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL);
	if (!tl_nacl) {
		pr_err("Unable to allocate struct tcm_loop_nacl\n");
		return NULL;
	}

	return &tl_nacl->se_node_acl;
}

static void tcm_loop_tpg_release_fabric_acl(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl)
{
	struct tcm_loop_nacl *tl_nacl = container_of(se_nacl,
				struct tcm_loop_nacl, se_node_acl);

	kfree(tl_nacl);
}

static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
{
	return 1;
}

static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
{
	return;
}

static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
			struct tcm_loop_cmd, tl_se_cmd);

	return tl_cmd->sc_cmd_tag;
}

static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
			struct tcm_loop_cmd, tl_se_cmd);

	return tl_cmd->sc_cmd_state;
}

static int tcm_loop_shutdown_session(struct se_session *se_sess)
{
	return 0;
}

static void tcm_loop_close_session(struct se_session *se_sess)
{
	return;
}

static int tcm_loop_write_pending(struct se_cmd *se_cmd)
{
	/*
	 * Since Linux/SCSI has already sent down a struct scsi_cmnd
	 * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array
	 * memory, and memory has already been mapped to struct se_cmd->t_mem_list
	 * format with transport_generic_map_mem_to_cmd().
	 *
	 * We now tell TCM to add this WRITE CDB directly into the TCM storage
	 * object execution queue.
	 */
	target_execute_cmd(se_cmd);
	return 0;
}

static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}
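
/*
 * Completion path for READ data: called by TCM core once data is in the
 * mapped scatterlists; sets GOOD status and any residual, then completes
 * the struct scsi_cmnd back to the SCSI midlayer.
 */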
static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
		" cdb: 0x%02x\n", sc, sc->cmnd[0]);

	sc->result = SAM_STAT_GOOD;
	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}
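
/*
 * Completion path for SCSI status: copies any sense data from the
 * struct se_cmd into sc->sense_buffer, sets the SAM status and residual,
 * and completes the struct scsi_cmnd back to the SCSI midlayer.
 */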
static int tcm_loop_queue_status(struct se_cmd *se_cmd)
{
	struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
				struct tcm_loop_cmd, tl_se_cmd);
	struct scsi_cmnd *sc = tl_cmd->sc;

	pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
			" cdb: 0x%02x\n", sc, sc->cmnd[0]);

	if (se_cmd->sense_buffer &&
	   ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	    (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {

		memcpy(sc->sense_buffer, se_cmd->sense_buffer,
				SCSI_SENSE_BUFFERSIZE);
		sc->result = SAM_STAT_CHECK_CONDITION;
		set_driver_byte(sc, DRIVER_SENSE);
	} else
		sc->result = se_cmd->scsi_status;

	set_host_byte(sc, DID_OK);
	if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
	    (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
		scsi_set_resid(sc, se_cmd->residual_count);
	sc->scsi_done(sc);
	return 0;
}

static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
	/*
	 * The SCSI EH thread will be sleeping on tl_tmr->tl_tmr_wait, go ahead
	 * and wake up the wait_queue_head_t in tcm_loop_issue_tmr()
	 */
	atomic_set(&tl_tmr->tmr_complete, 1);
	wake_up(&tl_tmr->tl_tmr_wait);
}

static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
{
	return;
}

static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
{
	switch (tl_hba->tl_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}

/* Start items for tcm_loop_port_cit */
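
/*
 * configfs fabric_post_link() callback: bumps the TPG port count and hot
 * adds the matching struct scsi_device (channel 0, target id == tl_tpgt,
 * lun == unpacked_lun) via scsi_add_device().
 */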
static int tcm_loop_port_link(
	struct se_portal_group *se_tpg,
	struct se_lun *lun)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

	atomic_inc(&tl_tpg->tl_tpg_port_count);
	smp_mb__after_atomic();
	/*
	 * Add Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);

	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
	return 0;
}

static void tcm_loop_port_unlink(
	struct se_portal_group *se_tpg,
	struct se_lun *se_lun)
{
	struct scsi_device *sd;
	struct tcm_loop_hba *tl_hba;
	struct tcm_loop_tpg *tl_tpg;

	tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
	tl_hba = tl_tpg->tl_hba;

	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
				se_lun->unpacked_lun);
	if (!sd) {
		pr_err("Unable to locate struct scsi_device for %d:%d:"
			"%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
		return;
	}
	/*
	 * Remove Linux/SCSI struct scsi_device by HCTL
	 */
	scsi_remove_device(sd);
	scsi_device_put(sd);

	atomic_dec(&tl_tpg->tl_tpg_port_count);
	smp_mb__after_atomic();

	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}

/* End items for tcm_loop_port_cit */

/* Start items for tcm_loop_nexus_cit */
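
/*
 * Create the single emulated I_T nexus for this HBA: allocates a
 * struct tcm_loop_nexus, initializes a struct se_session (TARGET_PROT_ALL),
 * resolves the demo-mode node ACL for 'name', and registers the session
 * with TCM core.
 */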
static int tcm_loop_make_nexus(
	struct tcm_loop_tpg *tl_tpg,
	const char *name)
{
	struct se_portal_group *se_tpg;
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	struct tcm_loop_nexus *tl_nexus;
	int ret = -ENOMEM;

	if (tl_tpg->tl_hba->tl_nexus) {
		pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
		return -EEXIST;
	}
	se_tpg = &tl_tpg->tl_se_tpg;

	tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
	if (!tl_nexus) {
		pr_err("Unable to allocate struct tcm_loop_nexus\n");
		return -ENOMEM;
	}
	/*
	 * Initialize the struct se_session pointer
	 */
	tl_nexus->se_sess = transport_init_session(TARGET_PROT_ALL);
	if (IS_ERR(tl_nexus->se_sess)) {
		ret = PTR_ERR(tl_nexus->se_sess);
		goto out;
	}
	/*
	 * Since we are running in 'demo mode' this call will generate a
	 * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
	 * Initiator port name of the passed configfs group 'name'.
	 */
	tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
				se_tpg, (unsigned char *)name);
	if (!tl_nexus->se_sess->se_node_acl) {
		transport_free_session(tl_nexus->se_sess);
		goto out;
	}
	/*
	 * Now, register the SAS I_T Nexus as active with the call to
	 * transport_register_session()
	 */
	__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
			tl_nexus->se_sess, tl_nexus);
	tl_tpg->tl_hba->tl_nexus = tl_nexus;
	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
		name);
	return 0;

out:
	kfree(tl_nexus);
	return ret;
}

static int tcm_loop_drop_nexus(
	struct tcm_loop_tpg *tpg)
{
	struct se_session *se_sess;
	struct tcm_loop_nexus *tl_nexus;
	struct tcm_loop_hba *tl_hba = tpg->tl_hba;

	if (!tl_hba)
		return -ENODEV;

	tl_nexus = tl_hba->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	se_sess = tl_nexus->se_sess;
	if (!se_sess)
		return -ENODEV;

	if (atomic_read(&tpg->tl_tpg_port_count)) {
		pr_err("Unable to remove TCM_Loop I_T Nexus with"
			" active TPG port count: %d\n",
			atomic_read(&tpg->tl_tpg_port_count));
		return -EPERM;
	}

	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
		tl_nexus->se_sess->se_node_acl->initiatorname);
	/*
	 * Release the SCSI I_T Nexus to the emulated SAS Target Port
	 */
	transport_deregister_session(tl_nexus->se_sess);
	tpg->tl_hba->tl_nexus = NULL;
	kfree(tl_nexus);
	return 0;
}

/* End items for tcm_loop_nexus_cit */

static ssize_t tcm_loop_tpg_show_nexus(
	struct se_portal_group *se_tpg,
	char *page)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_nexus *tl_nexus;
	ssize_t ret;

	tl_nexus = tl_tpg->tl_hba->tl_nexus;
	if (!tl_nexus)
		return -ENODEV;

	ret = snprintf(page, PAGE_SIZE, "%s\n",
		tl_nexus->se_sess->se_node_acl->initiatorname);

	return ret;
}

static ssize_t tcm_loop_tpg_store_nexus(
	struct se_portal_group *se_tpg,
	const char *page,
	size_t count)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
	unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed..
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = tcm_loop_drop_nexus(tl_tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
	 * tcm_loop_make_nexus()
	 */
	if (strlen(page) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated NAA SAS Address: %s, exceeds"
				" max: %d\n", page, TL_WWN_ADDR_LEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not"
				" match target port protoid: %s\n", i_port,
				tcm_loop_dump_proto_id(tl_hba));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port:"
			" %s\n", i_port);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port)-1] == '\n')
		i_port[strlen(i_port)-1] = '\0';

	ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR);

static ssize_t tcm_loop_tpg_show_transport_status(
	struct se_portal_group *se_tpg,
	char *page)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);
	const char *status = NULL;
	ssize_t ret = -EINVAL;

	switch (tl_tpg->tl_transport_status) {
	case TCM_TRANSPORT_ONLINE:
		status = "online";
		break;
	case TCM_TRANSPORT_OFFLINE:
		status = "offline";
		break;
	default:
		break;
	}

	if (status)
		ret = snprintf(page, PAGE_SIZE, "%s\n", status);

	return ret;
}

static ssize_t tcm_loop_tpg_store_transport_status(
	struct se_portal_group *se_tpg,
	const char *page,
	size_t count)
{
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
			struct tcm_loop_tpg, tl_se_tpg);

	if (!strncmp(page, "online", 6)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
		return count;
	}
	if (!strncmp(page, "offline", 7)) {
		tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
		return count;
	}
	return -EINVAL;
}

TF_TPG_BASE_ATTR(tcm_loop, transport_status, S_IRUGO | S_IWUSR);

static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
	&tcm_loop_tpg_nexus.attr,
	&tcm_loop_tpg_transport_status.attr,
	NULL,
};

/* Start items for tcm_loop_naa_cit */
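
/*
 * configfs fabric_make_tpg() callback: parses the "tpgt_<N>" directory
 * name, validates N against TL_TPGS_PER_HBA, and registers the TPG with
 * TCM core via core_tpg_register().
 */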
static struct se_portal_group *tcm_loop_make_naa_tpg(
	struct se_wwn *wwn,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
			struct tcm_loop_hba, tl_hba_wwn);
	struct tcm_loop_tpg *tl_tpg;
	char *tpgt_str, *end_ptr;
	int ret;
	unsigned short int tpgt;

	tpgt_str = strstr(name, "tpgt_");
	if (!tpgt_str) {
		pr_err("Unable to locate \"tpgt_#\" directory"
				" group\n");
		return ERR_PTR(-EINVAL);
	}
	tpgt_str += 5; /* Skip ahead of "tpgt_" */
	tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);

	if (tpgt >= TL_TPGS_PER_HBA) {
		pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
				" %u\n", tpgt, TL_TPGS_PER_HBA);
		return ERR_PTR(-EINVAL);
	}
	tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
	tl_tpg->tl_hba = tl_hba;
	tl_tpg->tl_tpgt = tpgt;
	/*
	 * Register the tl_tpg as an emulated SAS TCM Target Endpoint
	 */
	ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops,
			wwn, &tl_tpg->tl_se_tpg, tl_tpg,
			TRANSPORT_TPG_TYPE_NORMAL);
	if (ret < 0)
		return ERR_PTR(-ENOMEM);

	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
		config_item_name(&wwn->wwn_group.cg_item), tpgt);

	return &tl_tpg->tl_se_tpg;
}

static void tcm_loop_drop_naa_tpg(
	struct se_portal_group *se_tpg)
{
	struct se_wwn *wwn = se_tpg->se_tpg_wwn;
	struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
				struct tcm_loop_tpg, tl_se_tpg);
	struct tcm_loop_hba *tl_hba;
	unsigned short tpgt;

	tl_hba = tl_tpg->tl_hba;
	tpgt = tl_tpg->tl_tpgt;
	/*
	 * Release the I_T Nexus for the Virtual SAS link if present
	 */
	tcm_loop_drop_nexus(tl_tpg);
	/*
	 * Deregister the tl_tpg as an emulated SAS TCM Target Endpoint
	 */
	core_tpg_deregister(se_tpg);

	tl_tpg->tl_hba = NULL;
	tl_tpg->tl_tpgt = 0;

	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
		config_item_name(&wwn->wwn_group.cg_item), tpgt);
}

/* End items for tcm_loop_naa_cit */

/* Start items for tcm_loop_cit */
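
/*
 * configfs fabric_make_wwn() callback: determines the emulated protocol
 * (SAS "naa.", FCP "fc." or iSCSI "iqn." prefix) from the configfs group
 * name, allocates the struct tcm_loop_hba, and brings up the virtual
 * Scsi_Host through tcm_loop_setup_hba_bus().
 */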
static struct se_wwn *tcm_loop_make_scsi_hba(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	char *ptr;
	int ret, off = 0;

	tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
	if (!tl_hba) {
		pr_err("Unable to allocate struct tcm_loop_hba\n");
		return ERR_PTR(-ENOMEM);
	}
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (!ptr) {
		pr_err("Unable to locate prefix for emulated Target "
				"Port: %s\n", name);
		ret = -EINVAL;
		goto out;
	}
	tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;

check_len:
	if (strlen(name) >= TL_WWN_ADDR_LEN) {
		pr_err("Emulated %s Address: %s, exceeds"
			" max: %d\n", tcm_loop_dump_proto_id(tl_hba), name,
			TL_WWN_ADDR_LEN);
		ret = -EINVAL;
		goto out;
	}
	snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);

	/*
	 * Call device_register(tl_hba->dev) to register the emulated
	 * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after
	 * device_register() callbacks in tcm_loop_driver_probe()
	 */
	ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
	if (ret)
		goto out;

	sh = tl_hba->sh;
	tcm_loop_hba_no_cnt++;
	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
		" %s Address: %s at Linux/SCSI Host ID: %d\n",
		tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);

	return &tl_hba->tl_hba_wwn;
out:
	kfree(tl_hba);
	return ERR_PTR(ret);
}

static void tcm_loop_drop_scsi_hba(
	struct se_wwn *wwn)
{
	struct tcm_loop_hba *tl_hba = container_of(wwn,
				struct tcm_loop_hba, tl_hba_wwn);

	pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
		" SAS Address: %s at Linux/SCSI Host ID: %d\n",
		tl_hba->tl_wwn_address, tl_hba->sh->host_no);
	/*
	 * Call device_unregister() on the original tl_hba->dev.
	 * tcm_loop_release_adapter() will release *tl_hba;
	 */
	device_unregister(&tl_hba->dev);
}

static ssize_t tcm_loop_wwn_show_attr_version(
	struct target_fabric_configfs *tf,
	char *page)
{
	return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
}

TF_WWN_ATTR_RO(tcm_loop, version);

static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
	&tcm_loop_wwn_version.attr,
	NULL,
};

/* End items for tcm_loop_cit */

static int tcm_loop_register_configfs(void)
{
	struct target_fabric_configfs *fabric;
	int ret;
	/*
	 * Set the TCM Loop HBA counter to zero
	 */
	tcm_loop_hba_no_cnt = 0;
	/*
	 * Register the top level struct config_item_type with TCM core
	 */
	fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
	if (IS_ERR(fabric)) {
		pr_err("tcm_loop_register_configfs() failed!\n");
		return PTR_ERR(fabric);
	}
	/*
	 * Setup the fabric API of function pointers used by target_core_mod
	 */
	fabric->tf_ops.get_fabric_name = &tcm_loop_get_fabric_name;
	fabric->tf_ops.get_fabric_proto_ident = &tcm_loop_get_fabric_proto_ident;
	fabric->tf_ops.tpg_get_wwn = &tcm_loop_get_endpoint_wwn;
	fabric->tf_ops.tpg_get_tag = &tcm_loop_get_tag;
	fabric->tf_ops.tpg_get_default_depth = &tcm_loop_get_default_depth;
	fabric->tf_ops.tpg_get_pr_transport_id = &tcm_loop_get_pr_transport_id;
	fabric->tf_ops.tpg_get_pr_transport_id_len =
					&tcm_loop_get_pr_transport_id_len;
	fabric->tf_ops.tpg_parse_pr_out_transport_id =
					&tcm_loop_parse_pr_out_transport_id;
	fabric->tf_ops.tpg_check_demo_mode = &tcm_loop_check_demo_mode;
	fabric->tf_ops.tpg_check_demo_mode_cache =
					&tcm_loop_check_demo_mode_cache;
	fabric->tf_ops.tpg_check_demo_mode_write_protect =
					&tcm_loop_check_demo_mode_write_protect;
	fabric->tf_ops.tpg_check_prod_mode_write_protect =
					&tcm_loop_check_prod_mode_write_protect;
	/*
	 * The TCM loopback fabric module runs in demo-mode to a local
	 * virtual SCSI device, so fabric dependent initiator ACLs are
	 * not required.
	 */
	fabric->tf_ops.tpg_alloc_fabric_acl = &tcm_loop_tpg_alloc_fabric_acl;
	fabric->tf_ops.tpg_release_fabric_acl =
					&tcm_loop_tpg_release_fabric_acl;
	fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index;
	/*
	 * Used for setting up remaining TCM resources in process context
	 */
	fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free;
	fabric->tf_ops.release_cmd = &tcm_loop_release_cmd;
	fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session;
	fabric->tf_ops.close_session = &tcm_loop_close_session;
	fabric->tf_ops.sess_get_index = &tcm_loop_sess_get_index;
	fabric->tf_ops.sess_get_initiator_sid = NULL;
	fabric->tf_ops.write_pending = &tcm_loop_write_pending;
	fabric->tf_ops.write_pending_status = &tcm_loop_write_pending_status;
	/*
	 * Not used for TCM loopback
	 */
	fabric->tf_ops.set_default_node_attributes =
					&tcm_loop_set_default_node_attributes;
	fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag;
	fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state;
	fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
	fabric->tf_ops.queue_status = &tcm_loop_queue_status;
	fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
	fabric->tf_ops.aborted_task = &tcm_loop_aborted_task;
	/*
	 * Setup function pointers for generic logic in target_core_fabric_configfs.c
	 */
	fabric->tf_ops.fabric_make_wwn = &tcm_loop_make_scsi_hba;
	fabric->tf_ops.fabric_drop_wwn = &tcm_loop_drop_scsi_hba;
	fabric->tf_ops.fabric_make_tpg = &tcm_loop_make_naa_tpg;
	fabric->tf_ops.fabric_drop_tpg = &tcm_loop_drop_naa_tpg;
	/*
	 * fabric_post_link() and fabric_pre_unlink() are used for
	 * registration and release of TCM Loop Virtual SCSI LUNs.
	 */
	fabric->tf_ops.fabric_post_link = &tcm_loop_port_link;
	fabric->tf_ops.fabric_pre_unlink = &tcm_loop_port_unlink;
	fabric->tf_ops.fabric_make_np = NULL;
	fabric->tf_ops.fabric_drop_np = NULL;
	/*
	 * Setup default attribute lists for various fabric->tf_cit_tmpl
	 */
	fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs;
	fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs;
	fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
	fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
	/*
	 * Once fabric->tf_ops has been setup, now register the fabric for
	 * use within TCM
	 */
	ret = target_fabric_configfs_register(fabric);
	if (ret < 0) {
		pr_err("target_fabric_configfs_register() for"
				" TCM_Loop failed!\n");
		target_fabric_configfs_free(fabric);
		return -1;
	}
	/*
	 * Setup our local pointer to *fabric.
	 */
	tcm_loop_fabric_configfs = fabric;
	pr_debug("TCM_LOOP[0] - Set fabric ->"
			" tcm_loop_fabric_configfs\n");
	return 0;
}

static void tcm_loop_deregister_configfs(void)
{
	if (!tcm_loop_fabric_configfs)
		return;

	target_fabric_configfs_deregister(tcm_loop_fabric_configfs);
	tcm_loop_fabric_configfs = NULL;
	pr_debug("TCM_LOOP[0] - Cleared"
			" tcm_loop_fabric_configfs\n");
}
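
/*
 * Module init: bring up the submission workqueue and tcm_loop_cmd kmem
 * cache first, then the virtual core bus, and finally register the
 * loopback fabric with target_core configfs. Error unwinding happens in
 * reverse order.
 */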
static int __init tcm_loop_fabric_init(void)
{
	int ret = -ENOMEM;

	tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
	if (!tcm_loop_workqueue)
		goto out;

	tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
				sizeof(struct tcm_loop_cmd),
				__alignof__(struct tcm_loop_cmd),
				0, NULL);
	if (!tcm_loop_cmd_cache) {
		pr_debug("kmem_cache_create() for"
			" tcm_loop_cmd_cache failed\n");
		goto out_destroy_workqueue;
	}

	ret = tcm_loop_alloc_core_bus();
	if (ret)
		goto out_destroy_cache;

	ret = tcm_loop_register_configfs();
	if (ret)
		goto out_release_core_bus;

	return 0;

out_release_core_bus:
	tcm_loop_release_core_bus();
out_destroy_cache:
	kmem_cache_destroy(tcm_loop_cmd_cache);
out_destroy_workqueue:
	destroy_workqueue(tcm_loop_workqueue);
out:
	return ret;
}

static void __exit tcm_loop_fabric_exit(void)
{
	tcm_loop_deregister_configfs();
	tcm_loop_release_core_bus();
	kmem_cache_destroy(tcm_loop_cmd_cache);
	destroy_workqueue(tcm_loop_workqueue);
}

MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
MODULE_LICENSE("GPL");
module_init(tcm_loop_fabric_init);
module_exit(tcm_loop_fabric_exit);