tcm_loop.c

/*******************************************************************************
 *
 * This file contains the Linux/SCSI LLD virtual SCSI initiator driver
 * for emulated SAS initiator ports
 *
 * © Copyright 2011-2013 Datera, Inc.
 *
 * Licensed to the Linux Foundation under the General Public License (GPL) version 2.
 *
 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 ****************************************************************************/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>

#include "tcm_loop.h"

#define to_tcm_loop_hba(hba)    container_of(hba, struct tcm_loop_hba, dev)

/* Local pointer to allocated TCM configfs fabric module */
static struct target_fabric_configfs *tcm_loop_fabric_configfs;

static struct workqueue_struct *tcm_loop_workqueue;
static struct kmem_cache *tcm_loop_cmd_cache;

static int tcm_loop_hba_no_cnt;

static int tcm_loop_queue_status(struct se_cmd *se_cmd);

/*
 * Called from struct target_core_fabric_ops->check_stop_free()
 */
static int tcm_loop_check_stop_free(struct se_cmd *se_cmd)
{
        /*
         * Do not release struct se_cmd's containing a valid TMR
         * pointer. These will be released directly in tcm_loop_device_reset()
         * with transport_generic_free_cmd().
         */
        if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
                return 0;
        /*
         * Release the struct se_cmd, which will make a callback to release
         * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()
         */
        transport_generic_free_cmd(se_cmd, 0);
        return 1;
}

static void tcm_loop_release_cmd(struct se_cmd *se_cmd)
{
        struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
                                struct tcm_loop_cmd, tl_se_cmd);

        kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
}

static int tcm_loop_show_info(struct seq_file *m, struct Scsi_Host *host)
{
        seq_printf(m, "tcm_loop_proc_info()\n");
        return 0;
}

static int tcm_loop_driver_probe(struct device *);
static int tcm_loop_driver_remove(struct device *);

static int pseudo_lld_bus_match(struct device *dev,
                                struct device_driver *dev_driver)
{
        return 1;
}

static struct bus_type tcm_loop_lld_bus = {
        .name   = "tcm_loop_bus",
        .match  = pseudo_lld_bus_match,
        .probe  = tcm_loop_driver_probe,
        .remove = tcm_loop_driver_remove,
};

static struct device_driver tcm_loop_driverfs = {
        .name   = "tcm_loop",
        .bus    = &tcm_loop_lld_bus,
};

/*
 * Used with root_device_register() in tcm_loop_alloc_core_bus() below
 */
struct device *tcm_loop_primary;

/*
 * Copied from drivers/scsi/libfc/fc_fcp.c:fc_change_queue_depth() and
 * drivers/scsi/libiscsi.c:iscsi_change_queue_depth()
 */
static int tcm_loop_change_queue_depth(
        struct scsi_device *sdev,
        int depth,
        int reason)
{
        switch (reason) {
        case SCSI_QDEPTH_DEFAULT:
                scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
                break;
        case SCSI_QDEPTH_QFULL:
                scsi_track_queue_full(sdev, depth);
                break;
        case SCSI_QDEPTH_RAMP_UP:
                scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
                break;
        default:
                return -EOPNOTSUPP;
        }
        return sdev->queue_depth;
}

static int tcm_loop_change_queue_type(struct scsi_device *sdev, int tag)
{
        if (sdev->tagged_supported) {
                scsi_set_tag_type(sdev, tag);

                if (tag)
                        scsi_activate_tcq(sdev, sdev->queue_depth);
                else
                        scsi_deactivate_tcq(sdev, sdev->queue_depth);
        } else
                tag = 0;

        return tag;
}

/*
 * Locate the SAM Task Attr from struct scsi_cmnd *
 */
static int tcm_loop_sam_attr(struct scsi_cmnd *sc)
{
        if (sc->device->tagged_supported) {
                switch (sc->tag) {
                case HEAD_OF_QUEUE_TAG:
                        return MSG_HEAD_TAG;
                case ORDERED_QUEUE_TAG:
                        return MSG_ORDERED_TAG;
                default:
                        break;
                }
        }

        return MSG_SIMPLE_TAG;
}
static void tcm_loop_submission_work(struct work_struct *work)
{
        struct tcm_loop_cmd *tl_cmd =
                container_of(work, struct tcm_loop_cmd, work);
        struct se_cmd *se_cmd = &tl_cmd->tl_se_cmd;
        struct scsi_cmnd *sc = tl_cmd->sc;
        struct tcm_loop_nexus *tl_nexus;
        struct tcm_loop_hba *tl_hba;
        struct tcm_loop_tpg *tl_tpg;
        struct scatterlist *sgl_bidi = NULL;
        u32 sgl_bidi_count = 0, transfer_length;
        int rc;

        tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
        tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

        /*
         * Ensure that this tl_tpg reference from the incoming sc->device->id
         * has already been configured via tcm_loop_make_naa_tpg().
         */
        if (!tl_tpg->tl_hba) {
                set_host_byte(sc, DID_NO_CONNECT);
                goto out_done;
        }
        if (tl_tpg->tl_transport_status == TCM_TRANSPORT_OFFLINE) {
                set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
                goto out_done;
        }
        tl_nexus = tl_hba->tl_nexus;
        if (!tl_nexus) {
                scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
                                " does not exist\n");
                set_host_byte(sc, DID_ERROR);
                goto out_done;
        }
        if (scsi_bidi_cmnd(sc)) {
                struct scsi_data_buffer *sdb = scsi_in(sc);

                sgl_bidi = sdb->table.sgl;
                sgl_bidi_count = sdb->table.nents;
                se_cmd->se_cmd_flags |= SCF_BIDI;
        }

        transfer_length = scsi_transfer_length(sc);
        if (!scsi_prot_sg_count(sc) &&
            scsi_get_prot_op(sc) != SCSI_PROT_NORMAL) {
                se_cmd->prot_pto = true;
                /*
                 * loopback transport doesn't support
                 * WRITE_GENERATE, READ_STRIP protection
                 * information operations, go ahead unprotected.
                 */
                transfer_length = scsi_bufflen(sc);
        }

        rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
                        &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
                        transfer_length, tcm_loop_sam_attr(sc),
                        sc->sc_data_direction, 0,
                        scsi_sglist(sc), scsi_sg_count(sc),
                        sgl_bidi, sgl_bidi_count,
                        scsi_prot_sglist(sc), scsi_prot_sg_count(sc));
        if (rc < 0) {
                set_host_byte(sc, DID_NO_CONNECT);
                goto out_done;
        }
        return;

out_done:
        sc->scsi_done(sc);
        return;
}
/*
 * ->queuecommand can be and usually is called from interrupt context, so
 * defer the actual submission to a workqueue.
 */
static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
{
        struct tcm_loop_cmd *tl_cmd;

        pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
                " scsi_buf_len: %u\n", sc->device->host->host_no,
                sc->device->id, sc->device->channel, sc->device->lun,
                sc->cmnd[0], scsi_bufflen(sc));

        tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
        if (!tl_cmd) {
                pr_err("Unable to allocate struct tcm_loop_cmd\n");
                set_host_byte(sc, DID_ERROR);
                sc->scsi_done(sc);
                return 0;
        }

        tl_cmd->sc = sc;
        tl_cmd->sc_cmd_tag = sc->tag;
        INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
        queue_work(tcm_loop_workqueue, &tl_cmd->work);
        return 0;
}
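
/*
 * Completion path for the deferred submission above: once TCM core has
 * finished the command, tcm_loop_queue_data_in() or tcm_loop_queue_status()
 * fills in sc->result (plus any sense data and residual) and calls
 * sc->scsi_done() to hand the struct scsi_cmnd back to the SCSI midlayer.
 */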
/*
 * Called from SCSI EH process context to issue a LUN_RESET TMR
 * to struct scsi_device
 */
static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
                              struct tcm_loop_nexus *tl_nexus,
                              int lun, int task, enum tcm_tmreq_table tmr)
{
        struct se_cmd *se_cmd = NULL;
        struct se_session *se_sess;
        struct se_portal_group *se_tpg;
        struct tcm_loop_cmd *tl_cmd = NULL;
        struct tcm_loop_tmr *tl_tmr = NULL;
        int ret = TMR_FUNCTION_FAILED, rc;

        tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
        if (!tl_cmd) {
                pr_err("Unable to allocate memory for tl_cmd\n");
                return ret;
        }

        tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
        if (!tl_tmr) {
                pr_err("Unable to allocate memory for tl_tmr\n");
                goto release;
        }
        init_waitqueue_head(&tl_tmr->tl_tmr_wait);

        se_cmd = &tl_cmd->tl_se_cmd;
        se_tpg = &tl_tpg->tl_se_tpg;
        se_sess = tl_nexus->se_sess;
        /*
         * Initialize struct se_cmd descriptor from target_core_mod infrastructure
         */
        transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
                              DMA_NONE, MSG_SIMPLE_TAG,
                              &tl_cmd->tl_sense_buf[0]);

        rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
        if (rc < 0)
                goto release;

        if (tmr == TMR_ABORT_TASK)
                se_cmd->se_tmr_req->ref_task_tag = task;

        /*
         * Locate the underlying TCM struct se_lun
         */
        if (transport_lookup_tmr_lun(se_cmd, lun) < 0) {
                ret = TMR_LUN_DOES_NOT_EXIST;
                goto release;
        }
        /*
         * Queue the TMR to TCM Core and sleep waiting for
         * tcm_loop_queue_tm_rsp() to wake us up.
         */
        transport_generic_handle_tmr(se_cmd);
        wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete));
        /*
         * The TMR LUN_RESET has completed, check the response status and
         * then release allocations.
         */
        ret = se_cmd->se_tmr_req->response;
release:
        if (se_cmd)
                transport_generic_free_cmd(se_cmd, 1);
        else
                kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
        kfree(tl_tmr);
        return ret;
}
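
/*
 * SCSI error-handler entry points below: the abort and LUN reset handlers
 * resolve the I_T nexus for the failing struct scsi_cmnd and then block in
 * tcm_loop_issue_tmr() until tcm_loop_queue_tm_rsp() reports completion,
 * while the target reset handler simply forces the TPG back online.
 */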
static int tcm_loop_abort_task(struct scsi_cmnd *sc)
{
        struct tcm_loop_hba *tl_hba;
        struct tcm_loop_nexus *tl_nexus;
        struct tcm_loop_tpg *tl_tpg;
        int ret = FAILED;

        /*
         * Locate the tcm_loop_hba_t pointer
         */
        tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
        /*
         * Locate the tl_nexus and se_sess pointers
         */
        tl_nexus = tl_hba->tl_nexus;
        if (!tl_nexus) {
                pr_err("Unable to perform task abort without"
                       " active I_T Nexus\n");
                return FAILED;
        }
        /*
         * Locate the tl_tpg pointer from TargetID in sc->device->id
         */
        tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

        ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
                                 sc->tag, TMR_ABORT_TASK);
        return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
/*
 * Called from SCSI EH process context to issue a LUN_RESET TMR
 * to struct scsi_device
 */
static int tcm_loop_device_reset(struct scsi_cmnd *sc)
{
        struct tcm_loop_hba *tl_hba;
        struct tcm_loop_nexus *tl_nexus;
        struct tcm_loop_tpg *tl_tpg;
        int ret = FAILED;

        /*
         * Locate the tcm_loop_hba_t pointer
         */
        tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
        /*
         * Locate the tl_nexus and se_sess pointers
         */
        tl_nexus = tl_hba->tl_nexus;
        if (!tl_nexus) {
                pr_err("Unable to perform device reset without"
                       " active I_T Nexus\n");
                return FAILED;
        }
        /*
         * Locate the tl_tpg pointer from TargetID in sc->device->id
         */
        tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];

        ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
                                 0, TMR_LUN_RESET);
        return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
}
static int tcm_loop_target_reset(struct scsi_cmnd *sc)
{
        struct tcm_loop_hba *tl_hba;
        struct tcm_loop_tpg *tl_tpg;

        /*
         * Locate the tcm_loop_hba_t pointer
         */
        tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
        if (!tl_hba) {
                pr_err("Unable to perform target reset without"
                       " active HBA\n");
                return FAILED;
        }
        /*
         * Locate the tl_tpg pointer from TargetID in sc->device->id
         */
        tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
        if (tl_tpg) {
                tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
                return SUCCESS;
        }
        return FAILED;
}
static int tcm_loop_slave_alloc(struct scsi_device *sd)
{
        set_bit(QUEUE_FLAG_BIDI, &sd->request_queue->queue_flags);
        return 0;
}

static int tcm_loop_slave_configure(struct scsi_device *sd)
{
        if (sd->tagged_supported) {
                scsi_activate_tcq(sd, sd->queue_depth);
                scsi_adjust_queue_depth(sd, MSG_SIMPLE_TAG,
                                        sd->host->cmd_per_lun);
        } else {
                scsi_adjust_queue_depth(sd, 0,
                                        sd->host->cmd_per_lun);
        }
        return 0;
}

static struct scsi_host_template tcm_loop_driver_template = {
        .show_info               = tcm_loop_show_info,
        .proc_name               = "tcm_loopback",
        .name                    = "TCM_Loopback",
        .queuecommand            = tcm_loop_queuecommand,
        .change_queue_depth      = tcm_loop_change_queue_depth,
        .change_queue_type       = tcm_loop_change_queue_type,
        .eh_abort_handler        = tcm_loop_abort_task,
        .eh_device_reset_handler = tcm_loop_device_reset,
        .eh_target_reset_handler = tcm_loop_target_reset,
        .can_queue               = 1024,
        .this_id                 = -1,
        .sg_tablesize            = 256,
        .cmd_per_lun             = 1024,
        .max_sectors             = 0xFFFF,
        .use_clustering          = DISABLE_CLUSTERING,
        .slave_alloc             = tcm_loop_slave_alloc,
        .slave_configure         = tcm_loop_slave_configure,
        .module                  = THIS_MODULE,
};
static int tcm_loop_driver_probe(struct device *dev)
{
        struct tcm_loop_hba *tl_hba;
        struct Scsi_Host *sh;
        int error, host_prot;

        tl_hba = to_tcm_loop_hba(dev);

        sh = scsi_host_alloc(&tcm_loop_driver_template,
                             sizeof(struct tcm_loop_hba));
        if (!sh) {
                pr_err("Unable to allocate struct scsi_host\n");
                return -ENODEV;
        }
        tl_hba->sh = sh;

        /*
         * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
         */
        *((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
        /*
         * Setup single ID, Channel and LUN for now..
         */
        sh->max_id = 2;
        sh->max_lun = 0;
        sh->max_channel = 0;
        sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN;
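        /*
         * Advertise every T10-PI DIF/DIX protection type plus CRC guard tags
         * to the SCSI midlayer, so target-side protection emulation can be
         * exercised through this virtual host.
         */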
        host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
                    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
                    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

        scsi_host_set_prot(sh, host_prot);
        scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);

        error = scsi_add_host(sh, &tl_hba->dev);
        if (error) {
                pr_err("%s: scsi_add_host failed\n", __func__);
                scsi_host_put(sh);
                return -ENODEV;
        }
        return 0;
}

static int tcm_loop_driver_remove(struct device *dev)
{
        struct tcm_loop_hba *tl_hba;
        struct Scsi_Host *sh;

        tl_hba = to_tcm_loop_hba(dev);
        sh = tl_hba->sh;

        scsi_remove_host(sh);
        scsi_host_put(sh);
        return 0;
}

static void tcm_loop_release_adapter(struct device *dev)
{
        struct tcm_loop_hba *tl_hba = to_tcm_loop_hba(dev);

        kfree(tl_hba);
}
/*
 * Called from tcm_loop_make_scsi_hba() below
 */
static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host_id)
{
        int ret;

        tl_hba->dev.bus = &tcm_loop_lld_bus;
        tl_hba->dev.parent = tcm_loop_primary;
        tl_hba->dev.release = &tcm_loop_release_adapter;
        dev_set_name(&tl_hba->dev, "tcm_loop_adapter_%d", tcm_loop_host_id);

        ret = device_register(&tl_hba->dev);
        if (ret) {
                pr_err("device_register() failed for"
                       " tl_hba->dev: %d\n", ret);
                return -ENODEV;
        }

        return 0;
}
/*
 * Called from tcm_loop_fabric_init() below to load the emulated
 * tcm_loop SCSI bus.
 */
static int tcm_loop_alloc_core_bus(void)
{
        int ret;

        tcm_loop_primary = root_device_register("tcm_loop_0");
        if (IS_ERR(tcm_loop_primary)) {
                pr_err("Unable to allocate tcm_loop_primary\n");
                return PTR_ERR(tcm_loop_primary);
        }

        ret = bus_register(&tcm_loop_lld_bus);
        if (ret) {
                pr_err("bus_register() failed for tcm_loop_lld_bus\n");
                goto dev_unreg;
        }

        ret = driver_register(&tcm_loop_driverfs);
        if (ret) {
                pr_err("driver_register() failed for"
                       " tcm_loop_driverfs\n");
                goto bus_unreg;
        }

        pr_debug("Initialized TCM Loop Core Bus\n");
        return ret;

bus_unreg:
        bus_unregister(&tcm_loop_lld_bus);
dev_unreg:
        root_device_unregister(tcm_loop_primary);
        return ret;
}

static void tcm_loop_release_core_bus(void)
{
        driver_unregister(&tcm_loop_driverfs);
        bus_unregister(&tcm_loop_lld_bus);
        root_device_unregister(tcm_loop_primary);

        pr_debug("Releasing TCM Loop Core BUS\n");
}
static char *tcm_loop_get_fabric_name(void)
{
        return "loopback";
}

static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
        struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
        struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
        /*
         * tl_proto_id is set at tcm_loop_make_scsi_hba() time based on the
         * protocol dependent prefix of the passed configfs group.
         *
         * Based upon tl_proto_id, TCM_Loop emulates the requested fabric
         * ProtocolID using target_core_fabric_lib.c symbols.
         */
        switch (tl_hba->tl_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_fabric_proto_ident(se_tpg);
        case SCSI_PROTOCOL_FCP:
                return fc_get_fabric_proto_ident(se_tpg);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_fabric_proto_ident(se_tpg);
        default:
                pr_err("Unknown tl_proto_id: 0x%02x, using"
                       " SAS emulation\n", tl_hba->tl_proto_id);
                break;
        }

        return sas_get_fabric_proto_ident(se_tpg);
}
static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
{
        struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
        /*
         * Return the passed NAA identifier for the SAS Target Port
         */
        return &tl_tpg->tl_hba->tl_wwn_address[0];
}

static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
{
        struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
        /*
         * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
         * to represent the SCSI Target Port.
         */
        return tl_tpg->tl_tpgt;
}

static u32 tcm_loop_get_default_depth(struct se_portal_group *se_tpg)
{
        return 1;
}

static u32 tcm_loop_get_pr_transport_id(
        struct se_portal_group *se_tpg,
        struct se_node_acl *se_nacl,
        struct t10_pr_registration *pr_reg,
        int *format_code,
        unsigned char *buf)
{
        struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
        struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

        switch (tl_hba->tl_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                format_code, buf);
        case SCSI_PROTOCOL_FCP:
                return fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                format_code, buf);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                                format_code, buf);
        default:
                pr_err("Unknown tl_proto_id: 0x%02x, using"
                       " SAS emulation\n", tl_hba->tl_proto_id);
                break;
        }

        return sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
                        format_code, buf);
}

static u32 tcm_loop_get_pr_transport_id_len(
        struct se_portal_group *se_tpg,
        struct se_node_acl *se_nacl,
        struct t10_pr_registration *pr_reg,
        int *format_code)
{
        struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
        struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

        switch (tl_hba->tl_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                format_code);
        case SCSI_PROTOCOL_FCP:
                return fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                format_code);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                                format_code);
        default:
                pr_err("Unknown tl_proto_id: 0x%02x, using"
                       " SAS emulation\n", tl_hba->tl_proto_id);
                break;
        }

        return sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
                        format_code);
}

/*
 * Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
 * Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
 */
static char *tcm_loop_parse_pr_out_transport_id(
        struct se_portal_group *se_tpg,
        const char *buf,
        u32 *out_tid_len,
        char **port_nexus_ptr)
{
        struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
        struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

        switch (tl_hba->tl_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                port_nexus_ptr);
        case SCSI_PROTOCOL_FCP:
                return fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                port_nexus_ptr);
        case SCSI_PROTOCOL_ISCSI:
                return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                                port_nexus_ptr);
        default:
                pr_err("Unknown tl_proto_id: 0x%02x, using"
                       " SAS emulation\n", tl_hba->tl_proto_id);
                break;
        }

        return sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
                        port_nexus_ptr);
}
/*
 * Returning (1) here allows for target_core_mod struct se_node_acl to be generated
 * based upon the incoming fabric dependent SCSI Initiator Port
 */
static int tcm_loop_check_demo_mode(struct se_portal_group *se_tpg)
{
        return 1;
}

static int tcm_loop_check_demo_mode_cache(struct se_portal_group *se_tpg)
{
        return 0;
}

/*
 * Allow I_T Nexus full READ-WRITE access without explicit Initiator Node ACLs for
 * local virtual Linux/SCSI LLD passthrough into VM hypervisor guest
 */
static int tcm_loop_check_demo_mode_write_protect(struct se_portal_group *se_tpg)
{
        return 0;
}

/*
 * Because TCM_Loop does not use explicit ACLs and MappedLUNs, this will
 * never be called for TCM_Loop by target_core_fabric_configfs.c code.
 * It has been added here as a nop for target_fabric_tf_ops_check()
 */
static int tcm_loop_check_prod_mode_write_protect(struct se_portal_group *se_tpg)
{
        return 0;
}
static struct se_node_acl *tcm_loop_tpg_alloc_fabric_acl(
        struct se_portal_group *se_tpg)
{
        struct tcm_loop_nacl *tl_nacl;

        tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL);
        if (!tl_nacl) {
                pr_err("Unable to allocate struct tcm_loop_nacl\n");
                return NULL;
        }

        return &tl_nacl->se_node_acl;
}

static void tcm_loop_tpg_release_fabric_acl(
        struct se_portal_group *se_tpg,
        struct se_node_acl *se_nacl)
{
        struct tcm_loop_nacl *tl_nacl = container_of(se_nacl,
                                struct tcm_loop_nacl, se_node_acl);

        kfree(tl_nacl);
}

static u32 tcm_loop_get_inst_index(struct se_portal_group *se_tpg)
{
        return 1;
}

static u32 tcm_loop_sess_get_index(struct se_session *se_sess)
{
        return 1;
}

static void tcm_loop_set_default_node_attributes(struct se_node_acl *se_acl)
{
        return;
}

static u32 tcm_loop_get_task_tag(struct se_cmd *se_cmd)
{
        struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
                        struct tcm_loop_cmd, tl_se_cmd);

        return tl_cmd->sc_cmd_tag;
}

static int tcm_loop_get_cmd_state(struct se_cmd *se_cmd)
{
        struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
                        struct tcm_loop_cmd, tl_se_cmd);

        return tl_cmd->sc_cmd_state;
}

static int tcm_loop_shutdown_session(struct se_session *se_sess)
{
        return 0;
}

static void tcm_loop_close_session(struct se_session *se_sess)
{
        return;
}
static int tcm_loop_write_pending(struct se_cmd *se_cmd)
{
        /*
         * Since Linux/SCSI has already sent down a struct scsi_cmnd
         * sc->sc_data_direction of DMA_TO_DEVICE with struct scatterlist array
         * memory, and memory has already been mapped to struct se_cmd->t_mem_list
         * format with transport_generic_map_mem_to_cmd().
         *
         * We now tell TCM to add this WRITE CDB directly into the TCM storage
         * object execution queue.
         */
        target_execute_cmd(se_cmd);
        return 0;
}

static int tcm_loop_write_pending_status(struct se_cmd *se_cmd)
{
        return 0;
}
static int tcm_loop_queue_data_in(struct se_cmd *se_cmd)
{
        struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
                                struct tcm_loop_cmd, tl_se_cmd);
        struct scsi_cmnd *sc = tl_cmd->sc;

        pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
                 " cdb: 0x%02x\n", sc, sc->cmnd[0]);

        sc->result = SAM_STAT_GOOD;
        set_host_byte(sc, DID_OK);
        if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
            (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
                scsi_set_resid(sc, se_cmd->residual_count);
        sc->scsi_done(sc);
        return 0;
}

static int tcm_loop_queue_status(struct se_cmd *se_cmd)
{
        struct tcm_loop_cmd *tl_cmd = container_of(se_cmd,
                                struct tcm_loop_cmd, tl_se_cmd);
        struct scsi_cmnd *sc = tl_cmd->sc;

        pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
                 " cdb: 0x%02x\n", sc, sc->cmnd[0]);

        if (se_cmd->sense_buffer &&
           ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
            (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE))) {

                memcpy(sc->sense_buffer, se_cmd->sense_buffer,
                       SCSI_SENSE_BUFFERSIZE);
                sc->result = SAM_STAT_CHECK_CONDITION;
                set_driver_byte(sc, DRIVER_SENSE);
        } else
                sc->result = se_cmd->scsi_status;

        set_host_byte(sc, DID_OK);
        if ((se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) ||
            (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT))
                scsi_set_resid(sc, se_cmd->residual_count);
        sc->scsi_done(sc);
        return 0;
}
static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd)
{
        struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
        struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr;
        /*
         * The SCSI EH thread will be sleeping on tl_tmr->tl_tmr_wait, go ahead
         * and wake up the wait_queue_head_t in tcm_loop_issue_tmr()
         */
        atomic_set(&tl_tmr->tmr_complete, 1);
        wake_up(&tl_tmr->tl_tmr_wait);
}
static void tcm_loop_aborted_task(struct se_cmd *se_cmd)
{
        return;
}

static char *tcm_loop_dump_proto_id(struct tcm_loop_hba *tl_hba)
{
        switch (tl_hba->tl_proto_id) {
        case SCSI_PROTOCOL_SAS:
                return "SAS";
        case SCSI_PROTOCOL_FCP:
                return "FCP";
        case SCSI_PROTOCOL_ISCSI:
                return "iSCSI";
        default:
                break;
        }

        return "Unknown";
}
/* Start items for tcm_loop_port_cit */

static int tcm_loop_port_link(
        struct se_portal_group *se_tpg,
        struct se_lun *lun)
{
        struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
                                struct tcm_loop_tpg, tl_se_tpg);
        struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;

        atomic_inc(&tl_tpg->tl_tpg_port_count);
        smp_mb__after_atomic();
        /*
         * Add Linux/SCSI struct scsi_device by HCTL
         */
        scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);

        pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
        return 0;
}

static void tcm_loop_port_unlink(
        struct se_portal_group *se_tpg,
        struct se_lun *se_lun)
{
        struct scsi_device *sd;
        struct tcm_loop_hba *tl_hba;
        struct tcm_loop_tpg *tl_tpg;

        tl_tpg = container_of(se_tpg, struct tcm_loop_tpg, tl_se_tpg);
        tl_hba = tl_tpg->tl_hba;

        sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
                                se_lun->unpacked_lun);
        if (!sd) {
                pr_err("Unable to locate struct scsi_device for %d:%d:"
                       "%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
                return;
        }
        /*
         * Remove Linux/SCSI struct scsi_device by HCTL
         */
        scsi_remove_device(sd);
        scsi_device_put(sd);

        atomic_dec(&tl_tpg->tl_tpg_port_count);
        smp_mb__after_atomic();

        pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
}

/* End items for tcm_loop_port_cit */
/* Start items for tcm_loop_nexus_cit */

static int tcm_loop_make_nexus(
        struct tcm_loop_tpg *tl_tpg,
        const char *name)
{
        struct se_portal_group *se_tpg;
        struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
        struct tcm_loop_nexus *tl_nexus;
        int ret = -ENOMEM;

        if (tl_tpg->tl_hba->tl_nexus) {
                pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
                return -EEXIST;
        }
        se_tpg = &tl_tpg->tl_se_tpg;

        tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
        if (!tl_nexus) {
                pr_err("Unable to allocate struct tcm_loop_nexus\n");
                return -ENOMEM;
        }
        /*
         * Initialize the struct se_session pointer
         */
        tl_nexus->se_sess = transport_init_session(TARGET_PROT_ALL);
        if (IS_ERR(tl_nexus->se_sess)) {
                ret = PTR_ERR(tl_nexus->se_sess);
                goto out;
        }
        /*
         * Since we are running in 'demo mode' this call will generate a
         * struct se_node_acl for the tcm_loop struct se_portal_group with the SCSI
         * Initiator port name of the passed configfs group 'name'.
         */
        tl_nexus->se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
                                se_tpg, (unsigned char *)name);
        if (!tl_nexus->se_sess->se_node_acl) {
                transport_free_session(tl_nexus->se_sess);
                goto out;
        }
        /*
         * Now, register the SAS I_T Nexus as active with the call to
         * transport_register_session()
         */
        __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
                        tl_nexus->se_sess, tl_nexus);
        tl_tpg->tl_hba->tl_nexus = tl_nexus;
        pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
                 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
                 name);
        return 0;

out:
        kfree(tl_nexus);
        return ret;
}
static int tcm_loop_drop_nexus(
        struct tcm_loop_tpg *tpg)
{
        struct se_session *se_sess;
        struct tcm_loop_nexus *tl_nexus;
        struct tcm_loop_hba *tl_hba = tpg->tl_hba;

        if (!tl_hba)
                return -ENODEV;

        tl_nexus = tl_hba->tl_nexus;
        if (!tl_nexus)
                return -ENODEV;

        se_sess = tl_nexus->se_sess;
        if (!se_sess)
                return -ENODEV;

        if (atomic_read(&tpg->tl_tpg_port_count)) {
                pr_err("Unable to remove TCM_Loop I_T Nexus with"
                       " active TPG port count: %d\n",
                       atomic_read(&tpg->tl_tpg_port_count));
                return -EPERM;
        }

        pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
                 " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
                 tl_nexus->se_sess->se_node_acl->initiatorname);
        /*
         * Release the SCSI I_T Nexus to the emulated SAS Target Port
         */
        transport_deregister_session(tl_nexus->se_sess);
        tpg->tl_hba->tl_nexus = NULL;
        kfree(tl_nexus);
        return 0;
}

/* End items for tcm_loop_nexus_cit */
static ssize_t tcm_loop_tpg_show_nexus(
        struct se_portal_group *se_tpg,
        char *page)
{
        struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
                        struct tcm_loop_tpg, tl_se_tpg);
        struct tcm_loop_nexus *tl_nexus;
        ssize_t ret;

        tl_nexus = tl_tpg->tl_hba->tl_nexus;
        if (!tl_nexus)
                return -ENODEV;

        ret = snprintf(page, PAGE_SIZE, "%s\n",
                tl_nexus->se_sess->se_node_acl->initiatorname);

        return ret;
}

static ssize_t tcm_loop_tpg_store_nexus(
        struct se_portal_group *se_tpg,
        const char *page,
        size_t count)
{
        struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
                        struct tcm_loop_tpg, tl_se_tpg);
        struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
        unsigned char i_port[TL_WWN_ADDR_LEN], *ptr, *port_ptr;
        int ret;
        /*
         * Shutdown the active I_T nexus if 'NULL' is passed..
         */
        if (!strncmp(page, "NULL", 4)) {
                ret = tcm_loop_drop_nexus(tl_tpg);
                return (!ret) ? count : ret;
        }
        /*
         * Otherwise make sure the passed virtual Initiator port WWN matches
         * the fabric protocol_id set in tcm_loop_make_scsi_hba(), and call
         * tcm_loop_make_nexus()
         */
        if (strlen(page) >= TL_WWN_ADDR_LEN) {
                pr_err("Emulated NAA SAS Address: %s, exceeds"
                       " max: %d\n", page, TL_WWN_ADDR_LEN);
                return -EINVAL;
        }
        snprintf(&i_port[0], TL_WWN_ADDR_LEN, "%s", page);

        ptr = strstr(i_port, "naa.");
        if (ptr) {
                if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
                        pr_err("Passed SAS Initiator Port %s does not"
                               " match target port protoid: %s\n", i_port,
                               tcm_loop_dump_proto_id(tl_hba));
                        return -EINVAL;
                }
                port_ptr = &i_port[0];
                goto check_newline;
        }
        ptr = strstr(i_port, "fc.");
        if (ptr) {
                if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
                        pr_err("Passed FCP Initiator Port %s does not"
                               " match target port protoid: %s\n", i_port,
                               tcm_loop_dump_proto_id(tl_hba));
                        return -EINVAL;
                }
                port_ptr = &i_port[3]; /* Skip over "fc." */
                goto check_newline;
        }
        ptr = strstr(i_port, "iqn.");
        if (ptr) {
                if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
                        pr_err("Passed iSCSI Initiator Port %s does not"
                               " match target port protoid: %s\n", i_port,
                               tcm_loop_dump_proto_id(tl_hba));
                        return -EINVAL;
                }
                port_ptr = &i_port[0];
                goto check_newline;
        }
        pr_err("Unable to locate prefix for emulated Initiator Port:"
               " %s\n", i_port);
        return -EINVAL;
        /*
         * Clear any trailing newline for the NAA WWN
         */
check_newline:
        if (i_port[strlen(i_port)-1] == '\n')
                i_port[strlen(i_port)-1] = '\0';

        ret = tcm_loop_make_nexus(tl_tpg, port_ptr);
        if (ret < 0)
                return ret;

        return count;
}
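
/*
 * Sketch of the intended configfs usage for the "nexus" attribute below
 * (hypothetical WWNs shown; the exact path depends on the WWN and TPG that
 * were created, assuming the standard configfs mount at /sys/kernel/config):
 *
 *   echo naa.60014052cc9d8000 > \
 *     /sys/kernel/config/target/loopback/naa.60014053ae0a0001/tpgt_1/nexus
 *
 * Writing "NULL" drops the active I_T nexus again via tcm_loop_drop_nexus().
 */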
TF_TPG_BASE_ATTR(tcm_loop, nexus, S_IRUGO | S_IWUSR);

static ssize_t tcm_loop_tpg_show_transport_status(
        struct se_portal_group *se_tpg,
        char *page)
{
        struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
                        struct tcm_loop_tpg, tl_se_tpg);
        const char *status = NULL;
        ssize_t ret = -EINVAL;

        switch (tl_tpg->tl_transport_status) {
        case TCM_TRANSPORT_ONLINE:
                status = "online";
                break;
        case TCM_TRANSPORT_OFFLINE:
                status = "offline";
                break;
        default:
                break;
        }

        if (status)
                ret = snprintf(page, PAGE_SIZE, "%s\n", status);

        return ret;
}

static ssize_t tcm_loop_tpg_store_transport_status(
        struct se_portal_group *se_tpg,
        const char *page,
        size_t count)
{
        struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
                        struct tcm_loop_tpg, tl_se_tpg);

        if (!strncmp(page, "online", 6)) {
                tl_tpg->tl_transport_status = TCM_TRANSPORT_ONLINE;
                return count;
        }
        if (!strncmp(page, "offline", 7)) {
                tl_tpg->tl_transport_status = TCM_TRANSPORT_OFFLINE;
                return count;
        }
        return -EINVAL;
}
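
/*
 * Example (hypothetical path, mirroring the "nexus" attribute above): writing
 * "offline" makes tcm_loop_submission_work() fail new commands with
 * DID_TRANSPORT_DISRUPTED until "online" is written back:
 *
 *   echo offline > \
 *     /sys/kernel/config/target/loopback/naa.60014053ae0a0001/tpgt_1/transport_status
 */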
TF_TPG_BASE_ATTR(tcm_loop, transport_status, S_IRUGO | S_IWUSR);

static struct configfs_attribute *tcm_loop_tpg_attrs[] = {
        &tcm_loop_tpg_nexus.attr,
        &tcm_loop_tpg_transport_status.attr,
        NULL,
};

/* Start items for tcm_loop_naa_cit */

static struct se_portal_group *tcm_loop_make_naa_tpg(
        struct se_wwn *wwn,
        struct config_group *group,
        const char *name)
{
        struct tcm_loop_hba *tl_hba = container_of(wwn,
                        struct tcm_loop_hba, tl_hba_wwn);
        struct tcm_loop_tpg *tl_tpg;
        char *tpgt_str, *end_ptr;
        int ret;
        unsigned short int tpgt;

        tpgt_str = strstr(name, "tpgt_");
        if (!tpgt_str) {
                pr_err("Unable to locate \"tpgt_#\" directory"
                       " group\n");
                return ERR_PTR(-EINVAL);
        }
        tpgt_str += 5; /* Skip ahead of "tpgt_" */
        tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);

        if (tpgt >= TL_TPGS_PER_HBA) {
                pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
                       " %u\n", tpgt, TL_TPGS_PER_HBA);
                return ERR_PTR(-EINVAL);
        }
        tl_tpg = &tl_hba->tl_hba_tpgs[tpgt];
        tl_tpg->tl_hba = tl_hba;
        tl_tpg->tl_tpgt = tpgt;
        /*
         * Register the tl_tpg as an emulated SAS TCM Target Endpoint
         */
        ret = core_tpg_register(&tcm_loop_fabric_configfs->tf_ops,
                        wwn, &tl_tpg->tl_se_tpg, tl_tpg,
                        TRANSPORT_TPG_TYPE_NORMAL);
        if (ret < 0)
                return ERR_PTR(-ENOMEM);

        pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
                 " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
                 config_item_name(&wwn->wwn_group.cg_item), tpgt);

        return &tl_tpg->tl_se_tpg;
}
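
/*
 * Minimal configfs sketch (hypothetical WWN, assuming the standard configfs
 * mount at /sys/kernel/config): creating the tpgt_N directory under an
 * existing loopback WWN invokes tcm_loop_make_naa_tpg() above, and removing
 * it invokes tcm_loop_drop_naa_tpg() below:
 *
 *   mkdir /sys/kernel/config/target/loopback/naa.60014053ae0a0001/tpgt_1
 *   rmdir /sys/kernel/config/target/loopback/naa.60014053ae0a0001/tpgt_1
 */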
static void tcm_loop_drop_naa_tpg(
        struct se_portal_group *se_tpg)
{
        struct se_wwn *wwn = se_tpg->se_tpg_wwn;
        struct tcm_loop_tpg *tl_tpg = container_of(se_tpg,
                                struct tcm_loop_tpg, tl_se_tpg);
        struct tcm_loop_hba *tl_hba;
        unsigned short tpgt;

        tl_hba = tl_tpg->tl_hba;
        tpgt = tl_tpg->tl_tpgt;
        /*
         * Release the I_T Nexus for the Virtual SAS link if present
         */
        tcm_loop_drop_nexus(tl_tpg);
        /*
         * Deregister the tl_tpg as an emulated SAS TCM Target Endpoint
         */
        core_tpg_deregister(se_tpg);

        tl_tpg->tl_hba = NULL;
        tl_tpg->tl_tpgt = 0;

        pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
                 " Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
                 config_item_name(&wwn->wwn_group.cg_item), tpgt);
}

/* End items for tcm_loop_naa_cit */
/* Start items for tcm_loop_cit */

static struct se_wwn *tcm_loop_make_scsi_hba(
        struct target_fabric_configfs *tf,
        struct config_group *group,
        const char *name)
{
        struct tcm_loop_hba *tl_hba;
        struct Scsi_Host *sh;
        char *ptr;
        int ret, off = 0;

        tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
        if (!tl_hba) {
                pr_err("Unable to allocate struct tcm_loop_hba\n");
                return ERR_PTR(-ENOMEM);
        }
        /*
         * Determine the emulated Protocol Identifier and Target Port Name
         * based on the incoming configfs directory name.
         */
        ptr = strstr(name, "naa.");
        if (ptr) {
                tl_hba->tl_proto_id = SCSI_PROTOCOL_SAS;
                goto check_len;
        }
        ptr = strstr(name, "fc.");
        if (ptr) {
                tl_hba->tl_proto_id = SCSI_PROTOCOL_FCP;
                off = 3; /* Skip over "fc." */
                goto check_len;
        }
        ptr = strstr(name, "iqn.");
        if (!ptr) {
                pr_err("Unable to locate prefix for emulated Target "
                       "Port: %s\n", name);
                ret = -EINVAL;
                goto out;
        }
        tl_hba->tl_proto_id = SCSI_PROTOCOL_ISCSI;

check_len:
        if (strlen(name) >= TL_WWN_ADDR_LEN) {
                pr_err("Emulated NAA %s Address: %s, exceeds"
                       " max: %d\n", tcm_loop_dump_proto_id(tl_hba), name,
                       TL_WWN_ADDR_LEN);
                ret = -EINVAL;
                goto out;
        }
        snprintf(&tl_hba->tl_wwn_address[0], TL_WWN_ADDR_LEN, "%s", &name[off]);

        /*
         * Call device_register(tl_hba->dev) to register the emulated
         * Linux/SCSI LLD of type struct Scsi_Host at tl_hba->sh after
         * device_register() callbacks in tcm_loop_driver_probe()
         */
        ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt);
        if (ret)
                goto out;

        sh = tl_hba->sh;
        tcm_loop_hba_no_cnt++;
        pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
                 " %s Address: %s at Linux/SCSI Host ID: %d\n",
                 tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);

        return &tl_hba->tl_hba_wwn;
out:
        kfree(tl_hba);
        return ERR_PTR(ret);
}
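
/*
 * Minimal configfs sketch (hypothetical WWNs, assuming the standard configfs
 * mount at /sys/kernel/config): creating a WWN directory under the loopback
 * fabric calls tcm_loop_make_scsi_hba(), which registers a new virtual SCSI
 * host; the directory name prefix selects the emulated protocol:
 *
 *   mkdir /sys/kernel/config/target/loopback/naa.60014053ae0a0001   # SAS
 *   mkdir /sys/kernel/config/target/loopback/fc.20000000c9a1b2c3    # FCP
 *   mkdir /sys/kernel/config/target/loopback/iqn.2003-01.org.example.host  # iSCSI
 */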
static void tcm_loop_drop_scsi_hba(
        struct se_wwn *wwn)
{
        struct tcm_loop_hba *tl_hba = container_of(wwn,
                                struct tcm_loop_hba, tl_hba_wwn);

        pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
                 " SAS Address: %s at Linux/SCSI Host ID: %d\n",
                 tl_hba->tl_wwn_address, tl_hba->sh->host_no);
        /*
         * Call device_unregister() on the original tl_hba->dev.
         * tcm_loop_release_adapter() above will release *tl_hba.
         */
        device_unregister(&tl_hba->dev);
}
/* Start items for tcm_loop_cit */

static ssize_t tcm_loop_wwn_show_attr_version(
        struct target_fabric_configfs *tf,
        char *page)
{
        return sprintf(page, "TCM Loopback Fabric module %s\n", TCM_LOOP_VERSION);
}

TF_WWN_ATTR_RO(tcm_loop, version);

static struct configfs_attribute *tcm_loop_wwn_attrs[] = {
        &tcm_loop_wwn_version.attr,
        NULL,
};

/* End items for tcm_loop_cit */
static int tcm_loop_register_configfs(void)
{
        struct target_fabric_configfs *fabric;
        int ret;
        /*
         * Set the TCM Loop HBA counter to zero
         */
        tcm_loop_hba_no_cnt = 0;

        /*
         * Register the top level struct config_item_type with TCM core
         */
        fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
        if (IS_ERR(fabric)) {
                pr_err("tcm_loop_register_configfs() failed!\n");
                return PTR_ERR(fabric);
        }
        /*
         * Setup the fabric API of function pointers used by target_core_mod
         */
        fabric->tf_ops.get_fabric_name = &tcm_loop_get_fabric_name;
        fabric->tf_ops.get_fabric_proto_ident = &tcm_loop_get_fabric_proto_ident;
        fabric->tf_ops.tpg_get_wwn = &tcm_loop_get_endpoint_wwn;
        fabric->tf_ops.tpg_get_tag = &tcm_loop_get_tag;
        fabric->tf_ops.tpg_get_default_depth = &tcm_loop_get_default_depth;
        fabric->tf_ops.tpg_get_pr_transport_id = &tcm_loop_get_pr_transport_id;
        fabric->tf_ops.tpg_get_pr_transport_id_len =
                                        &tcm_loop_get_pr_transport_id_len;
        fabric->tf_ops.tpg_parse_pr_out_transport_id =
                                        &tcm_loop_parse_pr_out_transport_id;
        fabric->tf_ops.tpg_check_demo_mode = &tcm_loop_check_demo_mode;
        fabric->tf_ops.tpg_check_demo_mode_cache =
                                        &tcm_loop_check_demo_mode_cache;
        fabric->tf_ops.tpg_check_demo_mode_write_protect =
                                        &tcm_loop_check_demo_mode_write_protect;
        fabric->tf_ops.tpg_check_prod_mode_write_protect =
                                        &tcm_loop_check_prod_mode_write_protect;
        /*
         * The TCM loopback fabric module runs in demo-mode to a local
         * virtual SCSI device, so fabric dependent initiator ACLs are
         * not required.
         */
        fabric->tf_ops.tpg_alloc_fabric_acl = &tcm_loop_tpg_alloc_fabric_acl;
        fabric->tf_ops.tpg_release_fabric_acl =
                                        &tcm_loop_tpg_release_fabric_acl;
        fabric->tf_ops.tpg_get_inst_index = &tcm_loop_get_inst_index;
        /*
         * Used for setting up remaining TCM resources in process context
         */
        fabric->tf_ops.check_stop_free = &tcm_loop_check_stop_free;
        fabric->tf_ops.release_cmd = &tcm_loop_release_cmd;
        fabric->tf_ops.shutdown_session = &tcm_loop_shutdown_session;
        fabric->tf_ops.close_session = &tcm_loop_close_session;
        fabric->tf_ops.sess_get_index = &tcm_loop_sess_get_index;
        fabric->tf_ops.sess_get_initiator_sid = NULL;
        fabric->tf_ops.write_pending = &tcm_loop_write_pending;
        fabric->tf_ops.write_pending_status = &tcm_loop_write_pending_status;
        /*
         * Not used for TCM loopback
         */
        fabric->tf_ops.set_default_node_attributes =
                                        &tcm_loop_set_default_node_attributes;
        fabric->tf_ops.get_task_tag = &tcm_loop_get_task_tag;
        fabric->tf_ops.get_cmd_state = &tcm_loop_get_cmd_state;
        fabric->tf_ops.queue_data_in = &tcm_loop_queue_data_in;
        fabric->tf_ops.queue_status = &tcm_loop_queue_status;
        fabric->tf_ops.queue_tm_rsp = &tcm_loop_queue_tm_rsp;
        fabric->tf_ops.aborted_task = &tcm_loop_aborted_task;
        /*
         * Setup function pointers for generic logic in target_core_fabric_configfs.c
         */
        fabric->tf_ops.fabric_make_wwn = &tcm_loop_make_scsi_hba;
        fabric->tf_ops.fabric_drop_wwn = &tcm_loop_drop_scsi_hba;
        fabric->tf_ops.fabric_make_tpg = &tcm_loop_make_naa_tpg;
        fabric->tf_ops.fabric_drop_tpg = &tcm_loop_drop_naa_tpg;
        /*
         * fabric_post_link() and fabric_pre_unlink() are used for
         * registration and release of TCM Loop Virtual SCSI LUNs.
         */
        fabric->tf_ops.fabric_post_link = &tcm_loop_port_link;
        fabric->tf_ops.fabric_pre_unlink = &tcm_loop_port_unlink;
        fabric->tf_ops.fabric_make_np = NULL;
        fabric->tf_ops.fabric_drop_np = NULL;
        /*
         * Setup default attribute lists for various fabric->tf_cit_tmpl
         */
        fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_loop_wwn_attrs;
        fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_loop_tpg_attrs;
        fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
        fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
        fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
        /*
         * Once fabric->tf_ops has been setup, now register the fabric for
         * use within TCM
         */
        ret = target_fabric_configfs_register(fabric);
        if (ret < 0) {
                pr_err("target_fabric_configfs_register() for"
                       " TCM_Loop failed!\n");
                target_fabric_configfs_free(fabric);
                return -1;
        }
        /*
         * Setup our local pointer to *fabric.
         */
        tcm_loop_fabric_configfs = fabric;
        pr_debug("TCM_LOOP[0] - Set fabric ->"
                 " tcm_loop_fabric_configfs\n");
        return 0;
}
static void tcm_loop_deregister_configfs(void)
{
        if (!tcm_loop_fabric_configfs)
                return;

        target_fabric_configfs_deregister(tcm_loop_fabric_configfs);
        tcm_loop_fabric_configfs = NULL;
        pr_debug("TCM_LOOP[0] - Cleared"
                 " tcm_loop_fabric_configfs\n");
}

static int __init tcm_loop_fabric_init(void)
{
        int ret = -ENOMEM;

        tcm_loop_workqueue = alloc_workqueue("tcm_loop", 0, 0);
        if (!tcm_loop_workqueue)
                goto out;

        tcm_loop_cmd_cache = kmem_cache_create("tcm_loop_cmd_cache",
                                sizeof(struct tcm_loop_cmd),
                                __alignof__(struct tcm_loop_cmd),
                                0, NULL);
        if (!tcm_loop_cmd_cache) {
                pr_debug("kmem_cache_create() for"
                         " tcm_loop_cmd_cache failed\n");
                goto out_destroy_workqueue;
        }

        ret = tcm_loop_alloc_core_bus();
        if (ret)
                goto out_destroy_cache;

        ret = tcm_loop_register_configfs();
        if (ret)
                goto out_release_core_bus;

        return 0;

out_release_core_bus:
        tcm_loop_release_core_bus();
out_destroy_cache:
        kmem_cache_destroy(tcm_loop_cmd_cache);
out_destroy_workqueue:
        destroy_workqueue(tcm_loop_workqueue);
out:
        return ret;
}

static void __exit tcm_loop_fabric_exit(void)
{
        tcm_loop_deregister_configfs();
        tcm_loop_release_core_bus();
        kmem_cache_destroy(tcm_loop_cmd_cache);
        destroy_workqueue(tcm_loop_workqueue);
}

MODULE_DESCRIPTION("TCM loopback virtual Linux/SCSI fabric module");
MODULE_AUTHOR("Nicholas A. Bellinger <nab@risingtidesystems.com>");
MODULE_LICENSE("GPL");
module_init(tcm_loop_fabric_init);
module_exit(tcm_loop_fabric_exit);