/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

DEFINE_MUTEX(g_device_mutex);
LIST_HEAD(g_device_list);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;

sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_device *dev;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return TCM_NON_EXISTENT_LUN;

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		struct se_dev_entry *deve = se_cmd->se_deve;

		deve->total_cmds++;

		if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
			return TCM_WRITE_PROTECTED;
		}

		if (se_cmd->data_direction == DMA_TO_DEVICE)
			deve->write_bytes += se_cmd->data_length;
		else if (se_cmd->data_direction == DMA_FROM_DEVICE)
			deve->read_bytes += se_cmd->data_length;

		se_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		/*
		 * Use the se_portal_group->tpg_virt_lun0 to allow for
		 * REPORT_LUNS, et al to be returned when no active
		 * MappedLUN=0 exists for this Initiator Port.
		 */
		if (unpacked_lun != 0) {
			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
				" Access for 0x%08x\n",
				se_cmd->se_tfo->get_fabric_name(),
				unpacked_lun);
			return TCM_NON_EXISTENT_LUN;
		}
		/*
		 * Force WRITE PROTECT for virtual LUN 0
		 */
		if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
		    (se_cmd->data_direction != DMA_NONE))
			return TCM_WRITE_PROTECTED;

		se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
		se_cmd->orig_fe_lun = 0;
		se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;

		percpu_ref_get(&se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;

	dev = se_lun->lun_se_dev;
	atomic_long_inc(&dev->num_cmds);
	if (se_cmd->data_direction == DMA_TO_DEVICE)
		atomic_long_add(se_cmd->data_length, &dev->write_bytes);
	else if (se_cmd->data_direction == DMA_FROM_DEVICE)
		atomic_long_add(se_cmd->data_length, &dev->read_bytes);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
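
/*
 * Illustrative sketch (not part of the original file): a fabric module
 * that has already set up se_cmd->se_sess would typically map the
 * incoming LUN before submitting I/O, along the lines of:
 *
 *	sense_reason_t rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
 *	if (rc)
 *		transport_send_check_condition_and_sense(se_cmd, rc, 0);
 *
 * The zero return here means se_cmd->se_lun and se_cmd->se_dev are now
 * valid and a lun_ref has been taken.
 */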

int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u32 unpacked_lun)
{
	struct se_dev_entry *deve;
	struct se_lun *se_lun = NULL;
	struct se_session *se_sess = se_cmd->se_sess;
	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
	unsigned long flags;

	if (unpacked_lun >= TRANSPORT_MAX_LUNS_PER_TPG)
		return -ENODEV;

	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
	deve = se_cmd->se_deve;

	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		se_tmr->tmr_lun = deve->se_lun;
		se_cmd->se_lun = deve->se_lun;
		se_lun = deve->se_lun;
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
	}
	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);

	if (!se_lun) {
		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
			" Access for 0x%08x\n",
			se_cmd->se_tfo->get_fabric_name(),
			unpacked_lun);
		return -ENODEV;
	}

	/* Directly associate cmd with se_dev */
	se_cmd->se_dev = se_lun->lun_se_dev;
	se_tmr->tmr_dev = se_lun->lun_se_dev;

	spin_lock_irqsave(&se_tmr->tmr_dev->se_tmr_lock, flags);
	list_add_tail(&se_tmr->tmr_list, &se_tmr->tmr_dev->dev_tmr_list);
	spin_unlock_irqrestore(&se_tmr->tmr_dev->se_tmr_lock, flags);

	return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);

/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
	struct se_node_acl *nacl,
	u16 rtpi)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	struct se_port *port;
	struct se_portal_group *tpg = nacl->se_tpg;
	u32 i;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		lun = deve->se_lun;
		if (!lun) {
			pr_err("%s device entry's se_lun pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		port = lun->lun_sep;
		if (!port) {
			pr_err("%s device entry's se_port pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		if (port->sep_rtpi != rtpi)
			continue;

		atomic_inc(&deve->pr_ref_count);
		smp_mb__after_atomic_inc();
		spin_unlock_irq(&nacl->device_list_lock);

		return deve;
	}
	spin_unlock_irq(&nacl->device_list_lock);

	return NULL;
}

int core_free_device_list_for_node(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_dev_entry *deve;
	struct se_lun *lun;
	u32 i;

	if (!nacl->device_list)
		return 0;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entry's se_lun pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}
		lun = deve->se_lun;

		spin_unlock_irq(&nacl->device_list_lock);
		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);

	array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
	nacl->device_list = NULL;

	return 0;
}

void core_update_device_list_access(
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);
	deve = nacl->device_list[mapped_lun];
	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/* core_enable_device_list_for_node():
 *
 *
 */
int core_enable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve;

	spin_lock_irq(&nacl->device_list_lock);

	deve = nacl->device_list[mapped_lun];

	/*
	 * Check if the call is handling demo mode -> explicit LUN ACL
	 * transition.  This transition must be for the same struct se_lun
	 * + mapped_lun that was setup in demo mode.
	 */
	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
		if (deve->se_lun_acl != NULL) {
			pr_err("struct se_dev_entry->se_lun_acl"
				" already set for demo mode -> explicit"
				" LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		if (deve->se_lun != lun) {
			pr_err("struct se_dev_entry->se_lun does"
				" not match passed struct se_lun for demo mode"
				" -> explicit LUN ACL transition\n");
			spin_unlock_irq(&nacl->device_list_lock);
			return -EINVAL;
		}
		deve->se_lun_acl = lun_acl;

		if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
			deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
		}

		spin_unlock_irq(&nacl->device_list_lock);
		return 0;
	}

	deve->se_lun = lun;
	deve->se_lun_acl = lun_acl;
	deve->mapped_lun = mapped_lun;
	deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;

	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
	} else {
		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
	}

	deve->creation_time = get_jiffies_64();
	deve->attach_count++;
	spin_unlock_irq(&nacl->device_list_lock);

	spin_lock_bh(&port->sep_alua_lock);
	list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
	spin_unlock_bh(&port->sep_alua_lock);

	return 0;
}

/* core_disable_device_list_for_node():
 *
 *
 */
int core_disable_device_list_for_node(
	struct se_lun *lun,
	struct se_lun_acl *lun_acl,
	u32 mapped_lun,
	u32 lun_access,
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	struct se_port *port = lun->lun_sep;
	struct se_dev_entry *deve = nacl->device_list[mapped_lun];

	/*
	 * If the MappedLUN entry is being disabled, the entry in
	 * port->sep_alua_list must be removed now before clearing the
	 * struct se_dev_entry pointers below as logic in
	 * core_alua_do_transition_tg_pt() depends on these being present.
	 *
	 * deve->se_lun_acl will be NULL for demo-mode created LUNs
	 * that have not been explicitly converted to MappedLUNs ->
	 * struct se_lun_acl, but we remove deve->alua_port_list from
	 * port->sep_alua_list.  This also means that active UAs and
	 * NodeACL context specific PR metadata for demo-mode
	 * MappedLUN *deve will be released below.
	 */
	spin_lock_bh(&port->sep_alua_lock);
	list_del(&deve->alua_port_list);
	spin_unlock_bh(&port->sep_alua_lock);
	/*
	 * Wait for any in-flight SPEC_I_PT=1 or REGISTER_AND_MOVE
	 * PR operation to complete.
	 */
	while (atomic_read(&deve->pr_ref_count) != 0)
		cpu_relax();

	spin_lock_irq(&nacl->device_list_lock);
	/*
	 * Disable struct se_dev_entry LUN ACL mapping
	 */
	core_scsi3_ua_release_all(deve);
	deve->se_lun = NULL;
	deve->se_lun_acl = NULL;
	deve->lun_flags = 0;
	deve->creation_time = 0;
	deve->attach_count--;
	spin_unlock_irq(&nacl->device_list_lock);

	core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
	return 0;
}

/* core_clear_lun_from_tpg():
 *
 *
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
	struct se_node_acl *nacl;
	struct se_dev_entry *deve;
	u32 i;

	spin_lock_irq(&tpg->acl_node_lock);
	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
		spin_unlock_irq(&tpg->acl_node_lock);

		spin_lock_irq(&nacl->device_list_lock);
		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
			deve = nacl->device_list[i];
			if (lun != deve->se_lun)
				continue;
			spin_unlock_irq(&nacl->device_list_lock);

			core_disable_device_list_for_node(lun, NULL,
				deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
				nacl, tpg);

			spin_lock_irq(&nacl->device_list_lock);
		}
		spin_unlock_irq(&nacl->device_list_lock);

		spin_lock_irq(&tpg->acl_node_lock);
	}
	spin_unlock_irq(&tpg->acl_node_lock);
}

static struct se_port *core_alloc_port(struct se_device *dev)
{
	struct se_port *port, *port_tmp;

	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
	if (!port) {
		pr_err("Unable to allocate struct se_port\n");
		return ERR_PTR(-ENOMEM);
	}
	INIT_LIST_HEAD(&port->sep_alua_list);
	INIT_LIST_HEAD(&port->sep_list);
	atomic_set(&port->sep_tg_pt_secondary_offline, 0);
	spin_lock_init(&port->sep_alua_lock);
	mutex_init(&port->sep_tg_pt_md_mutex);

	spin_lock(&dev->se_port_lock);
	if (dev->dev_port_count == 0x0000ffff) {
		pr_warn("Reached dev->dev_port_count =="
				" 0x0000ffff\n");
		spin_unlock(&dev->se_port_lock);
		kfree(port);
		return ERR_PTR(-ENOSPC);
	}
again:
	/*
	 * Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
	 * Here is the table from spc4r17 section 7.7.3.8.
	 *
	 * Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
	 *
	 * Code			Description
	 * 0h			Reserved
	 * 1h			Relative port 1, historically known as port A
	 * 2h			Relative port 2, historically known as port B
	 * 3h to FFFFh		Relative port 3 through 65535
	 */
	port->sep_rtpi = dev->dev_rpti_counter++;
	if (!port->sep_rtpi)
		goto again;

	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
		/*
		 * Make sure RELATIVE TARGET PORT IDENTIFIER is unique
		 * for 16-bit wrap.
		 */
		if (port->sep_rtpi == port_tmp->sep_rtpi)
			goto again;
	}
	spin_unlock(&dev->se_port_lock);

	return port;
}
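
/*
 * Worked example (illustrative, not in the original file): once
 * dev->dev_rpti_counter wraps past 0xffff, the post-increment above
 * hands out 0, which spc4r17 reserves, so the !port->sep_rtpi check
 * jumps back to "again" and the next candidate is 1.  The list walk
 * then keeps retrying until a candidate not already claimed by an
 * existing se_port on dev->dev_sep_list is found.
 */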

static void core_export_port(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_port *port,
	struct se_lun *lun)
{
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;

	spin_lock(&dev->se_port_lock);
	spin_lock(&lun->lun_sep_lock);
	port->sep_tpg = tpg;
	port->sep_lun = lun;
	lun->lun_sep = port;
	spin_unlock(&lun->lun_sep_lock);

	list_add_tail(&port->sep_list, &dev->dev_sep_list);
	spin_unlock(&dev->se_port_lock);

	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
	    !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
			pr_err("Unable to allocate t10_alua_tg_pt"
					"_gp_member_t\n");
			return;
		}
		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
			dev->t10_alua.default_tg_pt_gp);
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		pr_debug("%s/%s: Adding to default ALUA Target Port"
			" Group: alua/default_tg_pt_gp\n",
			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
	}

	dev->dev_port_count++;
	port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}

/*
 * Called with struct se_device->se_port_lock spinlock held.
 */
static void core_release_port(struct se_device *dev, struct se_port *port)
	__releases(&dev->se_port_lock) __acquires(&dev->se_port_lock)
{
	/*
	 * Wait for any port reference for PR ALL_TG_PT=1 operation
	 * to complete in __core_scsi3_alloc_registration()
	 */
	spin_unlock(&dev->se_port_lock);
	while (atomic_read(&port->sep_tg_pt_ref_cnt))
		cpu_relax();
	spin_lock(&dev->se_port_lock);

	core_alua_free_tg_pt_gp_mem(port);

	list_del(&port->sep_list);
	dev->dev_port_count--;
	kfree(port);
}

int core_dev_export(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port;

	port = core_alloc_port(dev);
	if (IS_ERR(port))
		return PTR_ERR(port);

	lun->lun_se_dev = dev;

	spin_lock(&hba->device_lock);
	dev->export_count++;
	spin_unlock(&hba->device_lock);

	core_export_port(dev, tpg, port, lun);
	return 0;
}

void core_dev_unexport(
	struct se_device *dev,
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	struct se_hba *hba = dev->se_hba;
	struct se_port *port = lun->lun_sep;

	spin_lock(&lun->lun_sep_lock);
	if (lun->lun_se_dev == NULL) {
		spin_unlock(&lun->lun_sep_lock);
		return;
	}
	spin_unlock(&lun->lun_sep_lock);

	spin_lock(&dev->se_port_lock);
	core_release_port(dev, port);
	spin_unlock(&dev->se_port_lock);

	spin_lock(&hba->device_lock);
	dev->export_count--;
	spin_unlock(&hba->device_lock);

	lun->lun_se_dev = NULL;
}

static void se_release_vpd_for_dev(struct se_device *dev)
{
	struct t10_vpd *vpd, *vpd_tmp;

	spin_lock(&dev->t10_wwn.t10_vpd_lock);
	list_for_each_entry_safe(vpd, vpd_tmp,
			&dev->t10_wwn.t10_vpd_list, vpd_list) {
		list_del(&vpd->vpd_list);
		kfree(vpd);
	}
	spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}

static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
	u32 aligned_max_sectors;
	u32 alignment;
	/*
	 * Limit max_sectors to a PAGE_SIZE aligned value for modern
	 * transport_allocate_data_tasks() operation.
	 */
	alignment = max(1ul, PAGE_SIZE / block_size);
	aligned_max_sectors = rounddown(max_sectors, alignment);

	if (max_sectors != aligned_max_sectors)
		pr_info("Rounding down aligned max_sectors from %u to %u\n",
			max_sectors, aligned_max_sectors);

	return aligned_max_sectors;
}
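
/*
 * Worked example (illustrative, not in the original file): with
 * PAGE_SIZE = 4096 and block_size = 512, alignment = 4096 / 512 = 8,
 * so max_sectors = 1023 rounds down to 1016.  With block_size = 4096
 * the division yields 1 and max(1ul, ...) keeps the alignment at 1,
 * so any max_sectors value passes through unchanged.
 */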

int se_dev_set_max_unmap_lba_count(
	struct se_device *dev,
	u32 max_unmap_lba_count)
{
	dev->dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
			dev, dev->dev_attrib.max_unmap_lba_count);
	return 0;
}

int se_dev_set_max_unmap_block_desc_count(
	struct se_device *dev,
	u32 max_unmap_block_desc_count)
{
	dev->dev_attrib.max_unmap_block_desc_count =
		max_unmap_block_desc_count;
	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
			dev, dev->dev_attrib.max_unmap_block_desc_count);
	return 0;
}

int se_dev_set_unmap_granularity(
	struct se_device *dev,
	u32 unmap_granularity)
{
	dev->dev_attrib.unmap_granularity = unmap_granularity;
	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
			dev, dev->dev_attrib.unmap_granularity);
	return 0;
}

int se_dev_set_unmap_granularity_alignment(
	struct se_device *dev,
	u32 unmap_granularity_alignment)
{
	dev->dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
			dev, dev->dev_attrib.unmap_granularity_alignment);
	return 0;
}

int se_dev_set_max_write_same_len(
	struct se_device *dev,
	u32 max_write_same_len)
{
	dev->dev_attrib.max_write_same_len = max_write_same_len;
	pr_debug("dev[%p]: Set max_write_same_len: %u\n",
			dev, dev->dev_attrib.max_write_same_len);
	return 0;
}

static void dev_set_t10_wwn_model_alias(struct se_device *dev)
{
	const char *configname;

	configname = config_item_name(&dev->dev_group.cg_item);
	if (strlen(configname) >= 16) {
		pr_warn("dev[%p]: Backstore name '%s' is too long for "
			"INQUIRY_MODEL, truncating to 16 bytes\n", dev,
			configname);
	}
	snprintf(&dev->t10_wwn.model[0], 16, "%s", configname);
}

int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change model alias"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		dev_set_t10_wwn_model_alias(dev);
	} else {
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
	}
	dev->dev_attrib.emulate_model_alias = flag;

	return 0;
}

int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("dpo emulation not supported\n");
		return -EINVAL;
	}

	return 0;
}

int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_fua_write not supported for pSCSI\n");
		return -EINVAL;
	}
	dev->dev_attrib.emulate_fua_write = flag;
	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
			dev, dev->dev_attrib.emulate_fua_write);
	return 0;
}

int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (flag) {
		pr_err("fua_read emulation not supported\n");
		return -EINVAL;
	}

	return 0;
}

int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (flag &&
	    dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("emulate_write_cache not supported for pSCSI\n");
		return -EINVAL;
	}
	if (dev->transport->get_write_cache) {
		pr_warn("emulate_write_cache cannot be changed when underlying"
			" HW reports WriteCacheEnabled, ignoring request\n");
		return 0;
	}

	dev->dev_attrib.emulate_write_cache = flag;
	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
			dev, dev->dev_attrib.emulate_write_cache);
	return 0;
}

int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1) && (flag != 2)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" UA_INTRLCK_CTRL while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_ua_intlck_ctrl = flag;
	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
		dev, dev->dev_attrib.emulate_ua_intlck_ctrl);

	return 0;
}

int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TAS while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_tas = flag;
	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
		dev, (dev->dev_attrib.emulate_tas) ? "Enabled" : "Disabled");

	return 0;
}

int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpu = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
				dev, flag);
	return 0;
}

int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	/*
	 * We expect this value to be non-zero when generic Block Layer
	 * Discard support is detected in iblock_create_virtdevice().
	 */
	if (flag && !dev->dev_attrib.max_unmap_block_desc_count) {
		pr_err("Generic Block Discard not supported\n");
		return -ENOSYS;
	}

	dev->dev_attrib.emulate_tpws = flag;
	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
				dev, flag);
	return 0;
}

int se_dev_set_emulate_caw(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_caw = flag;
	pr_debug("dev[%p]: SE Device CompareAndWrite (AtomicTestandSet): %d\n",
		 dev, flag);

	return 0;
}

int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.emulate_3pc = flag;
	pr_debug("dev[%p]: SE Device 3rd Party Copy (EXTENDED_COPY): %d\n",
		dev, flag);

	return 0;
}

int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
{
	int rc, old_prot = dev->dev_attrib.pi_prot_type;

	if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
		pr_err("Illegal value %d for pi_prot_type\n", flag);
		return -EINVAL;
	}
	if (flag == 2) {
		pr_err("DIF TYPE2 protection currently not supported\n");
		return -ENOSYS;
	}
	if (dev->dev_attrib.hw_pi_prot_type) {
		pr_warn("DIF protection enabled on underlying hardware,"
			" ignoring\n");
		return 0;
	}
	if (!dev->transport->init_prot || !dev->transport->free_prot) {
		pr_err("DIF protection not supported by backend: %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!(dev->dev_flags & DF_CONFIGURED)) {
		pr_err("DIF protection requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	dev->dev_attrib.pi_prot_type = flag;

	if (flag && !old_prot) {
		rc = dev->transport->init_prot(dev);
		if (rc) {
			dev->dev_attrib.pi_prot_type = old_prot;
			return rc;
		}
	} else if (!flag && old_prot) {
		dev->transport->free_prot(dev);
	}
	pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);

	return 0;
}

int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
{
	int rc;

	if (!flag)
		return 0;

	if (flag != 1) {
		pr_err("Illegal value %d for pi_prot_format\n", flag);
		return -EINVAL;
	}
	if (!dev->transport->format_prot) {
		pr_err("DIF protection format not supported by backend %s\n",
		       dev->transport->name);
		return -ENOSYS;
	}
	if (!(dev->dev_flags & DF_CONFIGURED)) {
		pr_err("DIF protection format requires device to be configured\n");
		return -ENODEV;
	}
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to format SE Device PROT type while"
		       " export_count is %d\n", dev, dev->export_count);
		return -EINVAL;
	}

	rc = dev->transport->format_prot(dev);
	if (rc)
		return rc;

	pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);

	return 0;
}

int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.enforce_pr_isids = flag;
	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
		(dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
	return 0;
}

int se_dev_set_is_nonrot(struct se_device *dev, int flag)
{
	if ((flag != 0) && (flag != 1)) {
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	dev->dev_attrib.is_nonrot = flag;
	pr_debug("dev[%p]: SE Device is_nonrot bit: %d\n",
	       dev, flag);
	return 0;
}

int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
{
	if (flag != 0) {
		pr_err("dev[%p]: SE Device emulation of restricted"
			" reordering not implemented\n", dev);
		return -ENOSYS;
	}
	dev->dev_attrib.emulate_rest_reord = flag;
	pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
	return 0;
}

/*
 * Note, this can only be called on unexported SE Device Object.
 */
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device TCQ while"
			" export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!queue_depth) {
		pr_err("dev[%p]: Illegal ZERO value for queue"
			"_depth\n", dev);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (queue_depth > dev->dev_attrib.hw_queue_depth) {
			pr_err("dev[%p]: Passed queue_depth: %u"
				" exceeds TCM/SE_Device TCQ: %u\n",
				dev, queue_depth,
				dev->dev_attrib.hw_queue_depth);
			return -EINVAL;
		}
	} else {
		if (queue_depth > dev->dev_attrib.queue_depth) {
			if (queue_depth > dev->dev_attrib.hw_queue_depth) {
				pr_err("dev[%p]: Passed queue_depth:"
					" %u exceeds TCM/SE_Device MAX"
					" TCQ: %u\n", dev, queue_depth,
					dev->dev_attrib.hw_queue_depth);
				return -EINVAL;
			}
		}
	}

	dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
			dev, queue_depth);
	return 0;
}

int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
{
	int block_size = dev->dev_attrib.block_size;

	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" fabric_max_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (!fabric_max_sectors) {
		pr_err("dev[%p]: Illegal ZERO value for"
			" fabric_max_sectors\n", dev);
		return -EINVAL;
	}
	if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
		pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
			DA_STATUS_MAX_SECTORS_MIN);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than TCM/SE_Device max_sectors:"
				" %u\n", dev, fabric_max_sectors,
				dev->dev_attrib.hw_max_sectors);
			return -EINVAL;
		}
	} else {
		if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
			pr_err("dev[%p]: Passed fabric_max_sectors: %u"
				" greater than DA_STATUS_MAX_SECTORS_MAX:"
				" %u\n", dev, fabric_max_sectors,
				DA_STATUS_MAX_SECTORS_MAX);
			return -EINVAL;
		}
	}
	/*
	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
	 */
	if (!block_size) {
		block_size = 512;
		pr_warn("Defaulting to 512 for zero block_size\n");
	}
	fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
						      block_size);

	dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
			dev, fabric_max_sectors);
	return 0;
}

int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device"
			" optimal_sectors while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}
	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Passed optimal_sectors cannot be"
				" changed for TCM/pSCSI\n", dev);
		return -EINVAL;
	}
	if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
			" greater than fabric_max_sectors: %u\n", dev,
			optimal_sectors, dev->dev_attrib.fabric_max_sectors);
		return -EINVAL;
	}

	dev->dev_attrib.optimal_sectors = optimal_sectors;
	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
			dev, optimal_sectors);
	return 0;
}

int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
	if (dev->export_count) {
		pr_err("dev[%p]: Unable to change SE Device block_size"
			" while export_count is %d\n",
			dev, dev->export_count);
		return -EINVAL;
	}

	if ((block_size != 512) &&
	    (block_size != 1024) &&
	    (block_size != 2048) &&
	    (block_size != 4096)) {
		pr_err("dev[%p]: Illegal value for block_size: %u"
			" for SE device, must be 512, 1024, 2048 or 4096\n",
			dev, block_size);
		return -EINVAL;
	}

	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
		pr_err("dev[%p]: Not allowed to change block_size for"
			" Physical Device, use for Linux/SCSI to change"
			" block_size for underlying hardware\n", dev);
		return -EINVAL;
	}

	dev->dev_attrib.block_size = block_size;
	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
			dev, block_size);

	if (dev->dev_attrib.max_bytes_per_io)
		dev->dev_attrib.hw_max_sectors =
			dev->dev_attrib.max_bytes_per_io / block_size;

	return 0;
}
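
/*
 * Worked example (illustrative, not in the original file): the
 * recalculation above keeps the per-I/O byte limit constant across
 * block size changes for backends that report max_bytes_per_io.  With
 * a hypothetical max_bytes_per_io of 4 MiB, hw_max_sectors would be
 * 4194304 / 512 = 8192 at a 512-byte block size, and 4194304 / 4096 =
 * 1024 after switching to 4096-byte blocks.
 */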

struct se_lun *core_dev_add_lun(
	struct se_portal_group *tpg,
	struct se_device *dev,
	u32 unpacked_lun)
{
	struct se_lun *lun;
	int rc;

	lun = core_tpg_alloc_lun(tpg, unpacked_lun);
	if (IS_ERR(lun))
		return lun;

	rc = core_tpg_add_lun(tpg, lun,
				TRANSPORT_LUNFLAGS_READ_WRITE, dev);
	if (rc < 0)
		return ERR_PTR(rc);

	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
	/*
	 * Update LUN maps for dynamically added initiators when
	 * generate_node_acl is enabled.
	 */
	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
		struct se_node_acl *acl;
		spin_lock_irq(&tpg->acl_node_lock);
		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
			if (acl->dynamic_node_acl &&
			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
				spin_unlock_irq(&tpg->acl_node_lock);
				core_tpg_add_node_to_devs(acl, tpg);
				spin_lock_irq(&tpg->acl_node_lock);
			}
		}
		spin_unlock_irq(&tpg->acl_node_lock);
	}

	return lun;
}

/* core_dev_del_lun():
 *
 *
 */
int core_dev_del_lun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	lun = core_tpg_pre_dellun(tpg, unpacked_lun);
	if (IS_ERR(lun))
		return PTR_ERR(lun);

	core_tpg_post_dellun(tpg, lun);

	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
		tpg->se_tpg_tfo->get_fabric_name());

	return 0;
}

struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
		pr_err("%s Logical Unit Number: %u is not free on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

/* core_dev_get_lun():
 *
 *
 */
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
			"_TPG-1: %u for Target Portal Group: %hu\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	lun = tpg->tpg_lun_list[unpacked_lun];

	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return NULL;
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *nacl,
	u32 mapped_lun,
	int *ret)
{
	struct se_lun_acl *lacl;

	if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
		pr_err("%s InitiatorName exceeds maximum size.\n",
			tpg->se_tpg_tfo->get_fabric_name());
		*ret = -EOVERFLOW;
		return NULL;
	}
	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
	if (!lacl) {
		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
		*ret = -ENOMEM;
		return NULL;
	}

	INIT_LIST_HEAD(&lacl->lacl_list);
	lacl->mapped_lun = mapped_lun;
	lacl->se_lun_nacl = nacl;
	snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s",
		 nacl->initiatorname);

	return lacl;
}

int core_dev_add_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl,
	u32 unpacked_lun,
	u32 lun_access)
{
	struct se_lun *lun;
	struct se_node_acl *nacl;

	lun = core_dev_get_lun(tpg, unpacked_lun);
	if (!lun) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %hu, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return -EINVAL;
	}

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
	    (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
		lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;

	lacl->se_lun = lun;

	if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
			lun_access, nacl, tpg) < 0)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
	atomic_inc(&lun->lun_acl_count);
	smp_mb__after_atomic_inc();
	spin_unlock(&lun->lun_acl_lock);

	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for"
		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
		lacl->initiatorname);
	/*
	 * Check to see if there are any existing persistent reservation APTPL
	 * pre-registrations that need to be enabled for this LUN ACL.
	 */
	core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);

	return 0;
}

/* core_dev_del_initiator_node_lun_acl():
 *
 *
 */
int core_dev_del_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	struct se_lun_acl *lacl)
{
	struct se_node_acl *nacl;

	nacl = lacl->se_lun_nacl;
	if (!nacl)
		return -EINVAL;

	spin_lock(&lun->lun_acl_lock);
	list_del(&lacl->lacl_list);
	atomic_dec(&lun->lun_acl_count);
	smp_mb__after_atomic_dec();
	spin_unlock(&lun->lun_acl_lock);

	core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
		TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

	lacl->se_lun = NULL;

	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
		" InitiatorNode: %s Mapped LUN: %u\n",
		tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
		lacl->initiatorname, lacl->mapped_lun);

	return 0;
}

void core_dev_free_initiator_node_lun_acl(
	struct se_portal_group *tpg,
	struct se_lun_acl *lacl)
{
	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg),
		tpg->se_tpg_tfo->get_fabric_name(),
		lacl->initiatorname, lacl->mapped_lun);

	kfree(lacl);
}

static void scsi_dump_inquiry(struct se_device *dev)
{
	struct t10_wwn *wwn = &dev->t10_wwn;
	char buf[17];
	int i, device_type;
	/*
	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
	 */
	for (i = 0; i < 8; i++)
		if (wwn->vendor[i] >= 0x20)
			buf[i] = wwn->vendor[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Vendor: %s\n", buf);

	for (i = 0; i < 16; i++)
		if (wwn->model[i] >= 0x20)
			buf[i] = wwn->model[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Model: %s\n", buf);

	for (i = 0; i < 4; i++)
		if (wwn->revision[i] >= 0x20)
			buf[i] = wwn->revision[i];
		else
			buf[i] = ' ';
	buf[i] = '\0';
	pr_debug("  Revision: %s\n", buf);

	device_type = dev->transport->get_device_type(dev);
	pr_debug("  Type:   %s ", scsi_device_type(device_type));
}
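
/*
 * Illustrative output (hypothetical backend, not in the original file):
 * for a virtual backend whose INQUIRY values are preloaded below in
 * target_configure_device(), the ring buffer would show lines such as
 *
 *	  Vendor: LIO-ORG
 *	  Model: IBLOCK
 *	  Revision: 4.0
 *	  Type:   Direct-Access
 *
 * where Model/Revision come from the backend's inquiry_prod and
 * inquiry_rev strings.
 */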

struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
	struct se_device *dev;
	struct se_lun *xcopy_lun;

	dev = hba->transport->alloc_device(hba, name);
	if (!dev)
		return NULL;

	dev->dev_link_magic = SE_DEV_LINK_MAGIC;
	dev->se_hba = hba;
	dev->transport = hba->transport;
	dev->prot_length = sizeof(struct se_dif_v1_tuple);

	INIT_LIST_HEAD(&dev->dev_list);
	INIT_LIST_HEAD(&dev->dev_sep_list);
	INIT_LIST_HEAD(&dev->dev_tmr_list);
	INIT_LIST_HEAD(&dev->delayed_cmd_list);
	INIT_LIST_HEAD(&dev->state_list);
	INIT_LIST_HEAD(&dev->qf_cmd_list);
	INIT_LIST_HEAD(&dev->g_dev_node);
	spin_lock_init(&dev->execute_task_lock);
	spin_lock_init(&dev->delayed_cmd_lock);
	spin_lock_init(&dev->dev_reservation_lock);
	spin_lock_init(&dev->se_port_lock);
	spin_lock_init(&dev->se_tmr_lock);
	spin_lock_init(&dev->qf_cmd_lock);
	sema_init(&dev->caw_sem, 1);
	atomic_set(&dev->dev_ordered_id, 0);
	INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
	spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
	INIT_LIST_HEAD(&dev->t10_pr.registration_list);
	INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
	spin_lock_init(&dev->t10_pr.registration_lock);
	spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
	INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
	spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
	INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
	spin_lock_init(&dev->t10_alua.lba_map_lock);

	dev->t10_wwn.t10_dev = dev;
	dev->t10_alua.t10_dev = dev;

	dev->dev_attrib.da_dev = dev;
	dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
	dev->dev_attrib.emulate_dpo = DA_EMULATE_DPO;
	dev->dev_attrib.emulate_fua_write = DA_EMULATE_FUA_WRITE;
	dev->dev_attrib.emulate_fua_read = DA_EMULATE_FUA_READ;
	dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
	dev->dev_attrib.emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
	dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
	dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
	dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
	dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
	dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
	dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
	dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
	dev->dev_attrib.is_nonrot = DA_IS_NONROT;
	dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
	dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
	dev->dev_attrib.max_unmap_block_desc_count =
		DA_MAX_UNMAP_BLOCK_DESC_COUNT;
	dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
	dev->dev_attrib.unmap_granularity_alignment =
				DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
	dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
	dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
	dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;

	xcopy_lun = &dev->xcopy_lun;
	xcopy_lun->lun_se_dev = dev;
	init_completion(&xcopy_lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&xcopy_lun->lun_acl_list);
	spin_lock_init(&xcopy_lun->lun_acl_lock);
	spin_lock_init(&xcopy_lun->lun_sep_lock);
	init_completion(&xcopy_lun->lun_ref_comp);

	return dev;
}

int target_configure_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;
	int ret;

	if (dev->dev_flags & DF_CONFIGURED) {
		pr_err("se_dev->se_dev_ptr already set for storage"
				" object\n");
		return -EEXIST;
	}

	ret = dev->transport->configure_device(dev);
	if (ret)
		goto out;
	dev->dev_flags |= DF_CONFIGURED;

	/*
	 * XXX: there is not much point to have two different values here..
	 */
	dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
	dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

	/*
	 * Align max_hw_sectors down to PAGE_SIZE I/O transfers
	 */
	dev->dev_attrib.hw_max_sectors =
		se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
					 dev->dev_attrib.hw_block_size);

	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
	dev->creation_time = get_jiffies_64();

	ret = core_setup_alua(dev);
	if (ret)
		goto out;

	/*
	 * Startup the struct se_device processing thread
	 */
	dev->tmr_wq = alloc_workqueue("tmr-%s", WQ_MEM_RECLAIM | WQ_UNBOUND, 1,
				      dev->transport->name);
	if (!dev->tmr_wq) {
		pr_err("Unable to create tmr workqueue for %s\n",
			dev->transport->name);
		ret = -ENOMEM;
		goto out_free_alua;
	}

	/*
	 * Setup work_queue for QUEUE_FULL
	 */
	INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

	/*
	 * Preload the initial INQUIRY const values if we are doing
	 * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
	 * passthrough because this is being provided by the backend LLD.
	 */
	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
		strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
		strncpy(&dev->t10_wwn.model[0],
			dev->transport->inquiry_prod, 16);
		strncpy(&dev->t10_wwn.revision[0],
			dev->transport->inquiry_rev, 4);
	}

	scsi_dump_inquiry(dev);

	spin_lock(&hba->device_lock);
	hba->dev_count++;
	spin_unlock(&hba->device_lock);

	mutex_lock(&g_device_mutex);
	list_add_tail(&dev->g_dev_node, &g_device_list);
	mutex_unlock(&g_device_mutex);

	return 0;

out_free_alua:
	core_alua_free_lu_gp_mem(dev);
out:
	se_release_vpd_for_dev(dev);
	return ret;
}

void target_free_device(struct se_device *dev)
{
	struct se_hba *hba = dev->se_hba;

	WARN_ON(!list_empty(&dev->dev_sep_list));

	if (dev->dev_flags & DF_CONFIGURED) {
		destroy_workqueue(dev->tmr_wq);

		mutex_lock(&g_device_mutex);
		list_del(&dev->g_dev_node);
		mutex_unlock(&g_device_mutex);

		spin_lock(&hba->device_lock);
		hba->dev_count--;
		spin_unlock(&hba->device_lock);
	}

	core_alua_free_lu_gp_mem(dev);
	core_alua_set_lba_map(dev, NULL, 0, 0);
	core_scsi3_free_all_registrations(dev);
	se_release_vpd_for_dev(dev);

	if (dev->transport->free_prot)
		dev->transport->free_prot(dev);

	dev->transport->free_device(dev);
}

int core_dev_setup_virtual_lun0(void)
{
	struct se_hba *hba;
	struct se_device *dev;
	char buf[] = "rd_pages=8,rd_nullio=1";
	int ret;

	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
	if (IS_ERR(hba))
		return PTR_ERR(hba);

	dev = target_alloc_device(hba, "virt_lun0");
	if (!dev) {
		ret = -ENOMEM;
		goto out_free_hba;
	}

	hba->transport->set_configfs_dev_params(dev, buf, sizeof(buf));

	ret = target_configure_device(dev);
	if (ret)
		goto out_free_se_dev;

	lun0_hba = hba;
	g_lun0_dev = dev;

	return 0;

out_free_se_dev:
	target_free_device(dev);
out_free_hba:
	core_delete_hba(hba);
	return ret;
}
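
/*
 * Note (illustrative, not in the original file): the parameter string
 * above configures the internal rd_mcp ramdisk backing virtual LUN 0
 * with rd_pages=8, i.e. 8 * PAGE_SIZE = 32 KiB of backing store on a
 * 4 KiB-page system, and rd_nullio=1 so I/O completes without touching
 * the pages.  This device only needs to answer INQUIRY/REPORT LUNS
 * style commands for initiators that have no MappedLUN=0, per the
 * tpg_virt_lun0 fallback in transport_lookup_cmd_lun().
 */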

void core_dev_release_virtual_lun0(void)
{
	struct se_hba *hba = lun0_hba;

	if (!hba)
		return;

	if (g_lun0_dev)
		target_free_device(g_lun0_dev);

	core_delete_hba(hba);
}