target_core_device.c

/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

DEFINE_MUTEX(g_device_mutex);
LIST_HEAD(g_device_list);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_device *dev;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return TCM_NON_EXISTENT_LUN;

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		struct se_dev_entry *deve = se_cmd->se_deve;

		deve->total_cmds++;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
			return TCM_WRITE_PROTECTED;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			deve->write_bytes += se_cmd->data_length;
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			deve->read_bytes += se_cmd->data_length;

		se_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;

	dev = se_lun->lun_se_dev;
	atomic_long_inc(&dev->num_cmds);
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length, &dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length, &dev->read_bytes);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
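
/*
 * Illustrative sketch, not part of this file: a fabric driver that has
 * initialized an se_cmd for a newly received SCSI command resolves the
 * LUN with transport_lookup_cmd_lun() before submitting the CDB. The
 * completion helper named below is one plausible error path and an
 * assumption of this sketch, not something this file defines:
 *
 *	sense_reason_t rc;
 *
 *	rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
 *	if (rc) {
 *		transport_send_check_condition_and_sense(se_cmd, rc, 0);
 *		return;
 *	}
 */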
int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return -ENODEV;

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	deve = se_cmd->se_deve;

	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_tmr->tmr_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);
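
/*
 * Illustrative sketch, not part of this file: the TMR variant returns
 * int/-ENODEV rather than a sense_reason_t, so a fabric driver handling
 * an incoming task management request typically maps a lookup failure
 * to a TMR response code (assumed usage, not defined here):
 *
 *	if (transport_lookup_tmr_lun(se_cmd, unpacked_lun) < 0) {
 *		se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
 *		goto failure;
 *	}
 */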
/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_port *port;
	struct se_portal_group *tpg = nacl->se_tpg;
	u32 i;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!port) {
			pr_err("%s device entries port pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (port->sep_rtpi != rtpi)
			continue;

		atomic_inc_mb(&deve->pr_ref_count);
		spin_unlock_irq(&nacl->device_list_lock);

		return deve;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return NULL;
}
int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	if (!nacl->device_list)
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		spin_unlock_irq(&nacl->device_list_lock);
		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
	nacl->device_list = NULL;

	return 0;
}

void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = nacl->device_list[mapped_lun];
	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/*	core_enable_device_list_for_node():
 *
 *
 */
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = nacl->device_list[mapped_lun];
	/*
	 * Check if the call is handling demo mode -> explicit LUN ACL
	 * transition.  This transition must be for the same struct se_lun
	 * + mapped_lun that was setup in demo mode..
	 */
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		if (deve->se_lun_acl != NULL) {
			pr_err("struct se_dev_entry->se_lun_acl"
				" already set for demo mode -> explicit"
				" LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		if (deve->se_lun != lun) {
			pr_err("struct se_dev_entry->se_lun does not"
				" match passed struct se_lun for demo mode"
				" -> explicit LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		deve->se_lun_acl = lun_acl;

		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}

		spin_unlock_irq(&nacl->device_list_lock);
		return 0;
	}

	deve->se_lun = lun;
	deve->se_lun_acl = lun_acl;
	deve->mapped_lun = mapped_lun;
	deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;

	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}

	deve->creation_time = get_jiffies_64();
	deve->attach_count++;
	spin_unlock_irq(&nacl->device_list_lock);

	spin_lock_bh(&port->sep_alua_lock);
	list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
	spin_unlock_bh(&port->sep_alua_lock);

	return 0;
}
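
/*
 * Worked example of the lun_flags arithmetic above: enabling a
 * read-only mapping on a deve whose flags are currently
 * (INITIATOR_ACCESS | READ_WRITE) clears READ_WRITE and sets READ_ONLY,
 * leaving (INITIATOR_ACCESS | READ_ONLY). Once INITIATOR_ACCESS is set,
 * exactly one of the READ_WRITE/READ_ONLY bits is ever active.
 */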
/*	core_disable_device_list_for_node():
 *
 *
 */
int core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve = nacl->device_list[mapped_lun];

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->alua_port_list from
	 * port->sep_alua_list. This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below..
	 */
	spin_lock_bh(&port->sep_alua_lock);
	list_del(&deve->alua_port_list);
	spin_unlock_bh(&port->sep_alua_lock);
	/*
	 * Wait for any in-flight SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
	while (atomic_read(&deve->pr_ref_count) != 0)
		cpu_relax();

	spin_lock_irq(&nacl->device_list_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(deve);
	deve->se_lun = NULL;
	deve->se_lun_acl = NULL;
	deve->lun_flags = 0;
	deve->creation_time = 0;
	deve->attach_count--;
	spin_unlock_irq(&nacl->device_list_lock);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
	return 0;
}

/*	core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		spin_unlock_irq(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			spin_unlock_irq(&nacl->device_list_lock);

			core_disable_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_irq(&tpg->acl_node_lock);
	}
	spin_unlock_irq(&tpg->acl_node_lock);
}

static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!port) {
		pr_err("Unable to allocate struct se_port\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
			" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		return ERR_PTR(-ENOSPC);
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code			Description
	 * 0h			Reserved
	 * 1h			Relative port 1, historically known as port A
	 * 2h			Relative port 2, historically known as port B
	 * 3h to FFFFh		Relative port 3 through 65 535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!port->sep_rtpi)
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap..
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}
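
/*
 * Worked example of the RTPI allocation above: RTPI 0h is reserved by
 * SPC-4, so once the 16-bit counter wraps and yields 0, the first
 * "goto again" skips it. If the next candidate (1h) is still held by a
 * port on dev_sep_list, the duplicate scan triggers another retry, and
 * so on until a free non-zero identifier is found.
 */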
static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			pr_err("Unable to allocate t10_alua_tg_pt"
				"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
			dev->t10_alua.default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_debug("%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}

/*
 * Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	if (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}

int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port;

	port = core_alloc_port(dev);
	if (IS_ERR(port))
		return PTR_ERR(port);

	lun->lun_se_dev = dev;

	spin_lock(&hba->device_lock);
	dev->export_count++;
	spin_unlock(&hba->device_lock);

	core_export_port(dev, tpg, port, lun);
	return 0;
}

void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	spin_lock(&hba->device_lock);
	dev->export_count--;
	spin_unlock(&hba->device_lock);

	lun->lun_sep = NULL;
	lun->lun_se_dev = NULL;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;

	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}
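
/*
 * Worked example for se_dev_align_max_sectors(): with 4 KiB pages and a
 * 512-byte block size, alignment = 4096 / 512 = 8, so max_sectors =
 * 1010 rounds down to 1008 (and the pr_info above fires). With a 4 KiB
 * block size the alignment is 1 and the value passes through unchanged;
 * the max(1ul, ...) guards the degenerate block_size > PAGE_SIZE case.
 */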
int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
		dev, dev->dev_attrib.max_unmap_lba_count);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_max_unmap_lba_count);

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	dev->dev_attrib.max_unmap_block_desc_count =
		max_unmap_block_desc_count;
	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
		dev, dev->dev_attrib.max_unmap_block_desc_count);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_max_unmap_block_desc_count);

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	dev->dev_attrib.unmap_granularity = unmap_granularity;
	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
		dev, dev->dev_attrib.unmap_granularity);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_unmap_granularity);

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
		dev, dev->dev_attrib.unmap_granularity_alignment);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_unmap_granularity_alignment);

int se_dev_set_max_write_same_len(
	struct se_device *dev,
	u32 max_write_same_len)
{
	dev->dev_attrib.max_write_same_len = max_write_same_len;
	pr_debug("dev[%p]: Set max_write_same_len: %u\n",
		dev, dev->dev_attrib.max_write_same_len);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_max_write_same_len);

static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
	const char *configname;

	configname = config_item_name(&dev->dev_group.cg_item);
	if (strlen(configname) >= 16) {
		pr_warn("dev[%p]: Backstore name '%s' is too long for"
			" INQUIRY_MODEL, truncating to 16 bytes\n", dev,
			configname);
	}
	snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
}

int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change model alias"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		dev_set_t10_wwn_model_alias(dev);
	} else {
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
	}
	dev->dev_attrib.emulate_model_alias = flag;

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_model_alias);

int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("dpo_emulated not supported\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_dpo);

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	dev->dev_attrib.emulate_fua_write = flag;
	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
		dev, dev->dev_attrib.emulate_fua_write);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_fua_write);

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("fua read emulation not supported\n");
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_fua_read);

int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (flag &&
	    dev->transport->get_write_cache) {
		pr_err("emulate_write_cache not supported for this device\n");
		return -EINVAL;
	}

	dev->dev_attrib.emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
		dev, dev->dev_attrib.emulate_write_cache);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_write_cache);

int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, dev->dev_attrib.emulate_ua_intlck_ctrl);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_ua_intlck_ctrl);

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_tas);

int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
		dev, flag);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_tpu);

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
		dev, flag);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_tpws);

int se_dev_set_emulate_caw(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_caw = flag;
	pr_debug("dev[%p]: SE Device CompareAndWrite (AtomicTestandSet): %d\n",
		dev, flag);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_caw);

int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_3pc = flag;
	pr_debug("dev[%p]: SE Device 3rd Party Copy (EXTENDED_COPY): %d\n",
		dev, flag);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_3pc);

int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
{
	int rc, old_prot = dev->dev_attrib.pi_prot_type;

	if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
		pr_err("Illegal value %d for pi_prot_type\n", flag);
		return -EINVAL;
	}
	if (flag == 2) {
		pr_err("DIF TYPE2 protection currently not supported\n");
		return -ENOSYS;
	}
	if (dev->dev_attrib.hw_pi_prot_type) {
		pr_warn("DIF protection enabled on underlying hardware,"
			" ignoring\n");
		return 0;
	}
	if (!dev->transport->init_prot || !dev->transport->free_prot) {
		/* 0 is only allowed value for non-supporting backends */
		if (flag == 0)
			return 0;

		pr_err("DIF protection not supported by backend: %s\n",
			dev->transport->name);
		return -ENOSYS;
	}
	if (!(dev->dev_flags & DF_CONFIGURED)) {
		pr_err("DIF protection requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device PROT type while"
			" export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	dev->dev_attrib.pi_prot_type = flag;

	if (flag && !old_prot) {
		rc = dev->transport->init_prot(dev);
		if (rc) {
			dev->dev_attrib.pi_prot_type = old_prot;
			return rc;
		}
	} else if (!flag && old_prot) {
		dev->transport->free_prot(dev);
	}
	pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_pi_prot_type);

int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
{
	int rc;

	if (!flag)
		return 0;

	if (flag != 1) {
		pr_err("Illegal value %d for pi_prot_format\n", flag);
		return -EINVAL;
	}
	if (!dev->transport->format_prot) {
		pr_err("DIF protection format not supported by backend %s\n",
			dev->transport->name);
		return -ENOSYS;
	}
	if (!(dev->dev_flags & DF_CONFIGURED)) {
		pr_err("DIF protection format requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to format SE Device PROT type while"
			" export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	rc = dev->transport->format_prot(dev);
	if (rc)
		return rc;

	pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);

	return 0;
}
EXPORT_SYMBOL(se_dev_set_pi_prot_format);

int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.enforce_pr_isids = flag;
	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}
EXPORT_SYMBOL(se_dev_set_enforce_pr_isids);

int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to set force_pr_aptpl while"
			" export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	dev->dev_attrib.force_pr_aptpl = flag;
	pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_force_pr_aptpl);

int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.is_nonrot = flag;
	pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
		dev, flag);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_is_nonrot);

int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
	if (flag != 0) {
		pr_err("dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", dev);
		return -ENOSYS;
	}
	dev->dev_attrib.emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_emulate_rest_reord);

/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!queue_depth) {
		pr_err("dev[%p]: Illegal ZERO value for queue_depth\n", dev);
		return -EINVAL;
	}

	if (queue_depth > dev->dev_attrib.queue_depth) {
		if (queue_depth > dev->dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth:"
				" %u exceeds TCM/SE_Device MAX"
				" TCQ: %u\n", dev, queue_depth,
				dev->dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	}
	dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
		dev, queue_depth);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_queue_depth);

int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than hw_max_sectors: %u\n", dev,
			optimal_sectors, dev->dev_attrib.hw_max_sectors);
		return -EINVAL;
	}

	dev->dev_attrib.optimal_sectors = optimal_sectors;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
		dev, optimal_sectors);
	return 0;
}
EXPORT_SYMBOL(se_dev_set_optimal_sectors);

int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		pr_err("dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -EINVAL;
	}

	dev->dev_attrib.block_size = block_size;
	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
		dev, block_size);

	if (dev->dev_attrib.max_bytes_per_io)
		dev->dev_attrib.hw_max_sectors =
			dev->dev_attrib.max_bytes_per_io / block_size;

	return 0;
}
EXPORT_SYMBOL(se_dev_set_block_size);
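
/*
 * Worked example for the hw_max_sectors recalculation above: a backend
 * reporting max_bytes_per_io = 1048576 (1 MiB, a hypothetical value)
 * yields hw_max_sectors = 2048 at 512-byte blocks and 256 after
 * switching block_size to 4096, so the per-I/O byte limit stays
 * constant across block sizes.
 */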
struct se_lun *core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	u32 unpacked_lun)
{
	struct se_lun *lun;
	int rc;

	lun = core_tpg_alloc_lun(tpg, unpacked_lun);
	if (IS_ERR(lun))
		return lun;

	rc = core_tpg_add_lun(tpg, lun,
				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
	if (rc < 0)
		return ERR_PTR(rc);

	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;
		spin_lock_irq(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				spin_unlock_irq(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_irq(&tpg->acl_node_lock);
			}
		}
		spin_unlock_irq(&tpg->acl_node_lock);
	}

	return lun;
}
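
/*
 * Illustrative sketch, not part of this file: core_dev_add_lun() is
 * what a fabric's configfs glue ultimately invokes when a backstore is
 * linked under a TPG LUN directory. Callers follow the ERR_PTR()
 * convention:
 *
 *	struct se_lun *lun;
 *
 *	lun = core_dev_add_lun(se_tpg, se_dev, unpacked_lun);
 *	if (IS_ERR(lun))
 *		return PTR_ERR(lun);
 */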
/*	core_dev_del_lun():
 *
 *
 */
void core_dev_del_lun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivating %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	core_tpg_remove_lun(tpg, lun);
}

struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
		pr_err("%s Logical Unit Number: %u is not free on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

/*	core_dev_get_lun():
 *
 *
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
			"_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u32 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}

	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	INIT_LIST_HEAD(&lacl->lacl_list);
	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
		nacl->initiatorname);

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	u32 unpacked_lun,
	u32 lun_access)
{
	struct se_lun *lun;
	struct se_node_acl *nacl;

	lun = core_dev_get_lun(tpg, unpacked_lun);
	if (!lun) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return -EINVAL;
	}

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg) < 0)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
	atomic_inc_mb(&lun->lun_acl_count);
	spin_unlock(&lun->lun_acl_lock);

	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL..
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, nacl,
		lacl->mapped_lun);
	return 0;
}
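
/*
 * Illustrative sketch, not part of this file: MappedLUN ACL setup pairs
 * the two helpers above -- allocate the se_lun_acl, then bind it to an
 * active LUN with the requested access mode, releasing it on failure
 * via core_dev_free_initiator_node_lun_acl() further below:
 *
 *	struct se_lun_acl *lacl;
 *	int ret;
 *
 *	lacl = core_dev_init_initiator_node_lun_acl(tpg, nacl,
 *			mapped_lun, &ret);
 *	if (!lacl)
 *		return ret;
 *
 *	ret = core_dev_add_initiator_node_lun_acl(tpg, lacl, unpacked_lun,
 *			TRANSPORT_LUNFLAGS_READ_WRITE);
 *	if (ret < 0)
 *		core_dev_free_initiator_node_lun_acl(tpg, lacl);
 */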
/*	core_dev_del_initiator_node_lun_acl():
 *
 *
 */
int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_del(&lacl->lacl_list);
	atomic_dec_mb(&lun->lun_acl_count);
	spin_unlock(&lun->lun_acl_lock);

	core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

	lacl->se_lun = NULL;

	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}

struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;

	dev = hba->transport->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
	dev->se_hba = hba;
	dev->transport = hba->transport;
	dev->prot_length = sizeof(struct se_dif_v1_tuple);

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	INIT_LIST_HEAD(&dev->g_dev_node);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	atomic_set(&dev->dev_ordered_id, 0);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
	dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
	dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
		DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;

	xcopy_lun = &dev->xcopy_lun;
	xcopy_lun->lun_se_dev = dev;
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_acl_list);
	spin_lock_init(&xcopy_lun->lun_acl_lock);
	spin_lock_init(&xcopy_lun->lun_sep_lock);
	init_completion(&xcopy_lun->lun_ref_comp);

	return dev;
}

int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret;

	if (dev->dev_flags & DF_CONFIGURED) {
		pr_err("se_dev->se_dev_ptr already set for storage"
			" object\n");
		return -EEXIST;
	}

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out;
	dev->dev_flags |= DF_CONFIGURED;

	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);
	dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	mutex_lock(&g_device_mutex);
	list_add_tail(&dev->g_dev_node, &g_device_list);
	mutex_unlock(&g_device_mutex);

	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}
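
/*
 * Illustrative sketch, not part of this file: the backend device
 * lifecycle pairs the two functions above with target_free_device()
 * below; core_dev_setup_virtual_lun0() at the end of this file is the
 * in-tree example of the same sequence against the rd_mcp backend:
 *
 *	dev = target_alloc_device(hba, "some_dev");
 *	if (!dev)
 *		return -ENOMEM;
 *
 *	hba->transport->set_configfs_dev_params(dev, params, strlen(params));
 *
 *	ret = target_configure_device(dev);
 *	if (ret) {
 *		target_free_device(dev);
 *		return ret;
 *	}
 */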
void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (dev->dev_flags & DF_CONFIGURED) {
		destroy_workqueue(dev->tmr_wq);

		mutex_lock(&g_device_mutex);
		list_del(&dev->g_dev_node);
		mutex_unlock(&g_device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	dev->transport->free_device(dev);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;
	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);

	core_delete_hba(hba);
}