/*******************************************************************************
 * Filename: target_core_tpg.c
 *
 * This file contains generic Target Portal Group related functions.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/export.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"

extern struct se_device *g_lun0_dev;

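/*
 * tpg_lock protects tpg_list, the global list of all registered
 * struct se_portal_group instances; see core_tpg_register() and
 * core_tpg_deregister() below.
 */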
static DEFINE_SPINLOCK(tpg_lock);
static LIST_HEAD(tpg_list);

/* core_clear_initiator_node_from_tpg():
 *
 * Drop all active MappedLUN access for a node ACL before it is
 * released from its TPG.
 */
static void core_clear_initiator_node_from_tpg(
	struct se_node_acl *nacl,
	struct se_portal_group *tpg)
{
	int i;
	struct se_dev_entry *deve;
	struct se_lun *lun;

	spin_lock_irq(&nacl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;

		if (!deve->se_lun) {
			pr_err("%s device entries device pointer is"
				" NULL, but Initiator has access.\n",
				tpg->se_tpg_tfo->get_fabric_name());
			continue;
		}

		lun = deve->se_lun;
		spin_unlock_irq(&nacl->device_list_lock);
		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);

		spin_lock_irq(&nacl->device_list_lock);
	}
	spin_unlock_irq(&nacl->device_list_lock);
}

/* __core_tpg_get_initiator_node_acl():
 *
 * tpg->acl_node_lock must be held when calling.
 */
struct se_node_acl *__core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	const char *initiatorname)
{
	struct se_node_acl *acl;

	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname))
			return acl;
	}

	return NULL;
}

/* core_tpg_get_initiator_node_acl():
 *
 * Locked lookup of a node ACL by initiator name.
 */
struct se_node_acl *core_tpg_get_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	spin_unlock_irq(&tpg->acl_node_lock);

	return acl;
}
EXPORT_SYMBOL(core_tpg_get_initiator_node_acl);

/* core_tpg_add_node_to_devs():
 *
 * Create demo-mode MappedLUNs for a node ACL from every active LUN
 * in the TPG.
 */
void core_tpg_add_node_to_devs(
	struct se_node_acl *acl,
	struct se_portal_group *tpg)
{
	int i = 0;
	u32 lun_access = 0;
	struct se_lun *lun;
	struct se_device *dev;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = tpg->tpg_lun_list[i];
		if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
			continue;

		spin_unlock(&tpg->tpg_lun_lock);

		dev = lun->lun_se_dev;
		/*
		 * By default in LIO-Target $FABRIC_MOD,
		 * demo_mode_write_protect is ON, or READ_ONLY;
		 */
		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
			lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		} else {
			/*
			 * Allow only optical drives to issue R/W in default RO
			 * demo mode.
			 */
			if (dev->transport->get_device_type(dev) == TYPE_DISK)
				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
			else
				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
		}

		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
			" access for LUN in Demo Mode\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
			(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
			"READ-WRITE" : "READ-ONLY");

		core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
				lun_access, acl, tpg);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}

/* core_set_queue_depth_for_node():
 *
 * Sanity-check the node ACL queue depth, defaulting to 1 if unset.
 */
static int core_set_queue_depth_for_node(
	struct se_portal_group *tpg,
	struct se_node_acl *acl)
{
	if (!acl->queue_depth) {
		pr_err("Queue depth for %s Initiator Node: %s is 0,"
			" defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
			acl->initiatorname);
		acl->queue_depth = 1;
	}

	return 0;
}

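/*
 * array_zalloc()/array_free() manage a fixed-size array of individually
 * kzalloc'd elements, used below for se_node_acl->device_list and
 * se_portal_group->tpg_lun_list.
 */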
void array_free(void *array, int n)
{
	void **a = array;
	int i;

	for (i = 0; i < n; i++)
		kfree(a[i]);
	kfree(a);
}

static void *array_zalloc(int n, size_t size, gfp_t flags)
{
	void **a;
	int i;

	a = kzalloc(n * sizeof(void *), flags);
	if (!a)
		return NULL;
	for (i = 0; i < n; i++) {
		a[i] = kzalloc(size, flags);
		if (!a[i]) {
			array_free(a, n);
			return NULL;
		}
	}
	return a;
}

/* core_create_device_list_for_node():
 *
 * Allocate and initialize the per-node device_list of
 * struct se_dev_entry.
 */
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
	struct se_dev_entry *deve;
	int i;

	nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
			sizeof(struct se_dev_entry), GFP_KERNEL);
	if (!nacl->device_list) {
		pr_err("Unable to allocate memory for"
			" struct se_node_acl->device_list\n");
		return -ENOMEM;
	}
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = nacl->device_list[i];

		atomic_set(&deve->ua_count, 0);
		atomic_set(&deve->pr_ref_count, 0);
		spin_lock_init(&deve->ua_lock);
		INIT_LIST_HEAD(&deve->alua_port_list);
		INIT_LIST_HEAD(&deve->ua_list);
	}

	return 0;
}

/* core_tpg_check_initiator_node_acl():
 *
 * Return an existing node ACL for initiatorname, or create a dynamic
 * demo-mode ACL when the fabric allows it.
 */
struct se_node_acl *core_tpg_check_initiator_node_acl(
	struct se_portal_group *tpg,
	unsigned char *initiatorname)
{
	struct se_node_acl *acl;

	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl)
		return acl;

	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
		return NULL;

	acl = tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
	if (!acl)
		return NULL;

	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);
	acl->dynamic_node_acl = 1;

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return NULL;
	}
	/*
	 * Here we only create demo-mode MappedLUNs from the active
	 * TPG LUNs if the fabric is not explicitly asking for
	 * tpg_check_demo_mode_login_only() == 1.
	 */
	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only == NULL) ||
	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) != 1))
		core_tpg_add_node_to_devs(acl, tpg);

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);

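/*
 * A minimal sketch (not part of this file) of how a fabric module's
 * login path might resolve the initiator's ACL before building a
 * session; se_tpg, se_sess and initiator_name are hypothetical locals:
 *
 *	struct se_node_acl *nacl;
 *
 *	nacl = core_tpg_check_initiator_node_acl(se_tpg, initiator_name);
 *	if (!nacl)
 *		return -EACCES;	// no explicit ACL and demo mode disabled
 *	se_sess->se_node_acl = nacl;
 */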
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
	while (atomic_read(&nacl->acl_pr_ref_count) != 0)
		cpu_relax();
}

void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
	int i;
	struct se_lun *lun;

	spin_lock(&tpg->tpg_lun_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = tpg->tpg_lun_list[i];

		if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
		    (lun->lun_se_dev == NULL))
			continue;

		spin_unlock(&tpg->tpg_lun_lock);
		core_dev_del_lun(tpg, lun->unpacked_lun);
		spin_lock(&tpg->tpg_lun_lock);
	}
	spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);

/* core_tpg_add_initiator_node_acl():
 *
 * Add an explicit (configfs-created) node ACL to a TPG, replacing an
 * existing dynamic ACL of the same name if one is present.
 */
struct se_node_acl *core_tpg_add_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *se_nacl,
	const char *initiatorname,
	u32 queue_depth)
{
	struct se_node_acl *acl = NULL;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (acl) {
		if (acl->dynamic_node_acl) {
			acl->dynamic_node_acl = 0;
			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
			spin_unlock_irq(&tpg->acl_node_lock);
			/*
			 * Release the locally allocated struct se_node_acl
			 * because core_tpg_add_initiator_node_acl() returned
			 * a pointer to an existing demo mode node ACL.
			 */
			if (se_nacl)
				tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg,
						se_nacl);
			goto done;
		}

		pr_err("ACL entry for %s Initiator"
			" Node %s already exists for TPG %u, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return ERR_PTR(-EEXIST);
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	if (!se_nacl) {
		pr_err("struct se_node_acl pointer is NULL\n");
		return ERR_PTR(-EINVAL);
	}
	/*
	 * For v4.x logic the se_node_acl_s is hanging off a fabric
	 * dependent structure allocated via
	 * struct target_core_fabric_ops->fabric_make_nodeacl()
	 */
	acl = se_nacl;
	INIT_LIST_HEAD(&acl->acl_list);
	INIT_LIST_HEAD(&acl->acl_sess_list);
	kref_init(&acl->acl_kref);
	init_completion(&acl->acl_free_comp);
	spin_lock_init(&acl->device_list_lock);
	spin_lock_init(&acl->nacl_sess_lock);
	atomic_set(&acl->acl_pr_ref_count, 0);
	acl->queue_depth = queue_depth;
	snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
	acl->se_tpg = tpg;
	acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
	spin_lock_init(&acl->stats_lock);

	tpg->se_tpg_tfo->set_default_node_attributes(acl);

	if (core_create_device_list_for_node(acl) < 0) {
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-ENOMEM);
	}

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		core_free_device_list_for_node(acl, tpg);
		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_irq(&tpg->acl_node_lock);
	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
	tpg->num_node_acls++;
	spin_unlock_irq(&tpg->acl_node_lock);

done:
	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);

	return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);

/* core_tpg_del_initiator_node_acl():
 *
 * Remove a node ACL from a TPG, shutting down any active sessions that
 * still reference it.
 */
int core_tpg_del_initiator_node_acl(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	int force)
{
	LIST_HEAD(sess_list);
	struct se_session *sess, *sess_tmp;
	unsigned long flags;
	int rc;

	spin_lock_irq(&tpg->acl_node_lock);
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
	}
	list_del(&acl->acl_list);
	tpg->num_node_acls--;
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
	acl->acl_stop = 1;

	list_for_each_entry_safe(sess, sess_tmp, &acl->acl_sess_list,
				sess_acl_list) {
		if (sess->sess_tearing_down != 0)
			continue;

		target_get_session(sess);
		list_move(&sess->sess_acl_list, &sess_list);
	}
	spin_unlock_irqrestore(&acl->nacl_sess_lock, flags);

	list_for_each_entry_safe(sess, sess_tmp, &sess_list, sess_acl_list) {
		list_del(&sess->sess_acl_list);

		rc = tpg->se_tpg_tfo->shutdown_session(sess);
		target_put_session(sess);
		if (!rc)
			continue;
		/*
		 * Drop a second reference when shutdown_session()
		 * indicates this context should release the session.
		 */
		target_put_session(sess);
	}
	target_put_nacl(acl);
	/*
	 * Wait for last target_put_nacl() to complete in target_complete_nacl()
	 * for active fabric session transport_deregister_session() callbacks.
	 */
	wait_for_completion(&acl->acl_free_comp);

	core_tpg_wait_for_nacl_pr_ref(acl);
	core_clear_initiator_node_from_tpg(acl, tpg);
	core_free_device_list_for_node(acl, tpg);

	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);

	return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);

/* core_tpg_set_initiator_node_queue_depth():
 *
 * Change the queue depth for an existing node ACL, forcing session
 * reinstatement if requested.
 */
int core_tpg_set_initiator_node_queue_depth(
	struct se_portal_group *tpg,
	unsigned char *initiatorname,
	u32 queue_depth,
	int force)
{
	struct se_session *sess, *init_sess = NULL;
	struct se_node_acl *acl;
	unsigned long flags;
	int dynamic_acl = 0;

	spin_lock_irq(&tpg->acl_node_lock);
	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
	if (!acl) {
		pr_err("Access Control List entry for %s Initiator"
			" Node %s does not exist for TPG %hu, ignoring"
			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock_irq(&tpg->acl_node_lock);
		return -ENODEV;
	}
	if (acl->dynamic_node_acl) {
		acl->dynamic_node_acl = 0;
		dynamic_acl = 1;
	}
	spin_unlock_irq(&tpg->acl_node_lock);

	spin_lock_irqsave(&tpg->session_lock, flags);
	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
		if (sess->se_node_acl != acl)
			continue;

		if (!force) {
			pr_err("Unable to change queue depth for %s"
				" Initiator Node: %s while session is"
				" operational. To forcefully change the queue"
				" depth and force session reinstatement"
				" use the \"force=1\" parameter.\n",
				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
			spin_unlock_irqrestore(&tpg->session_lock, flags);

			spin_lock_irq(&tpg->acl_node_lock);
			if (dynamic_acl)
				acl->dynamic_node_acl = 1;
			spin_unlock_irq(&tpg->acl_node_lock);
			return -EEXIST;
		}
		/*
		 * Determine if the session needs to be closed by our context.
		 */
		if (!tpg->se_tpg_tfo->shutdown_session(sess))
			continue;

		init_sess = sess;
		break;
	}

	/*
	 * User has requested to change the queue depth for an Initiator Node.
	 * Change the value in the Node's struct se_node_acl, and call
	 * core_set_queue_depth_for_node() to set the requested queue depth.
	 *
	 * Finally call tpg->se_tpg_tfo->close_session() to force session
	 * reinstatement to occur if there is an active session for the
	 * $FABRIC_MOD Initiator Node in question.
	 */
	acl->queue_depth = queue_depth;

	if (core_set_queue_depth_for_node(tpg, acl) < 0) {
		spin_unlock_irqrestore(&tpg->session_lock, flags);
		/*
		 * Force session reinstatement if
		 * core_set_queue_depth_for_node() failed, because we assume
		 * the $FABRIC_MOD has already set the session reinstatement
		 * bit from tpg->se_tpg_tfo->shutdown_session() called above.
		 */
		if (init_sess)
			tpg->se_tpg_tfo->close_session(init_sess);

		spin_lock_irq(&tpg->acl_node_lock);
		if (dynamic_acl)
			acl->dynamic_node_acl = 1;
		spin_unlock_irq(&tpg->acl_node_lock);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&tpg->session_lock, flags);
	/*
	 * If the $FABRIC_MOD session for the Initiator Node ACL exists,
	 * forcefully shutdown the $FABRIC_MOD session/nexus.
	 */
	if (init_sess)
		tpg->se_tpg_tfo->close_session(init_sess);

	pr_debug("Successfully changed queue depth to: %d for Initiator"
		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
		tpg->se_tpg_tfo->tpg_get_tag(tpg));

	spin_lock_irq(&tpg->acl_node_lock);
	if (dynamic_acl)
		acl->dynamic_node_acl = 1;
	spin_unlock_irq(&tpg->acl_node_lock);

	return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);

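/*
 * A minimal sketch (not part of this file) of how a fabric module's
 * configfs attribute store might drive the call above; the se_tpg and
 * nacl variables and the surrounding store handler are hypothetical:
 *
 *	u32 depth;
 *	int ret;
 *
 *	ret = kstrtou32(page, 0, &depth);
 *	if (ret < 0)
 *		return ret;
 *	ret = core_tpg_set_initiator_node_queue_depth(se_tpg,
 *			nacl->initiatorname, depth, 1);
 *	return ret < 0 ? ret : count;
 */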
/* core_tpg_set_initiator_node_tag():
 *
 * Initiator nodeacl tags are not used internally, but may be used by
 * userspace to emulate aliases or groups.
 * Returns length of newly-set tag or -EINVAL.
 */
int core_tpg_set_initiator_node_tag(
	struct se_portal_group *tpg,
	struct se_node_acl *acl,
	const char *new_tag)
{
	if (strlen(new_tag) >= MAX_ACL_TAG_SIZE)
		return -EINVAL;

	if (!strncmp("NULL", new_tag, 4)) {
		acl->acl_tag[0] = '\0';
		return 0;
	}

	return snprintf(acl->acl_tag, MAX_ACL_TAG_SIZE, "%s", new_tag);
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_tag);

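/*
 * percpu_ref release callback for se_lun->lun_ref; completes
 * lun_ref_comp so that a pending transport_clear_lun_ref() in
 * core_tpg_post_dellun() can finish LUN shutdown.
 */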
static void core_tpg_lun_ref_release(struct percpu_ref *ref)
{
	struct se_lun *lun = container_of(ref, struct se_lun, lun_ref);

	complete(&lun->lun_ref_comp);
}

static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
	/* Set in core_dev_setup_virtual_lun0() */
	struct se_device *dev = g_lun0_dev;
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;
	u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
	int ret;

	lun->unpacked_lun = 0;
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	atomic_set(&lun->lun_acl_count, 0);
	init_completion(&lun->lun_shutdown_comp);
	INIT_LIST_HEAD(&lun->lun_acl_list);
	spin_lock_init(&lun->lun_acl_lock);
	spin_lock_init(&lun->lun_sep_lock);
	init_completion(&lun->lun_ref_comp);

	/*
	 * core_tpg_post_addlun() performs the percpu_ref_init() of
	 * lun->lun_ref, so it must not be initialized a second time here.
	 */
	ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
	if (ret < 0)
		return ret;

	return 0;
}

static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
	struct se_lun *lun = &se_tpg->tpg_virt_lun0;

	core_tpg_post_dellun(se_tpg, lun);
}

int core_tpg_register(
	struct target_core_fabric_ops *tfo,
	struct se_wwn *se_wwn,
	struct se_portal_group *se_tpg,
	void *tpg_fabric_ptr,
	int se_tpg_type)
{
	struct se_lun *lun;
	u32 i;

	se_tpg->tpg_lun_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
			sizeof(struct se_lun), GFP_KERNEL);
	if (!se_tpg->tpg_lun_list) {
		pr_err("Unable to allocate struct se_portal_group->"
				"tpg_lun_list\n");
		return -ENOMEM;
	}

	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		lun = se_tpg->tpg_lun_list[i];
		lun->unpacked_lun = i;
		lun->lun_link_magic = SE_LUN_LINK_MAGIC;
		lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
		atomic_set(&lun->lun_acl_count, 0);
		init_completion(&lun->lun_shutdown_comp);
		INIT_LIST_HEAD(&lun->lun_acl_list);
		spin_lock_init(&lun->lun_acl_lock);
		spin_lock_init(&lun->lun_sep_lock);
		init_completion(&lun->lun_ref_comp);
	}

	se_tpg->se_tpg_type = se_tpg_type;
	se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
	se_tpg->se_tpg_tfo = tfo;
	se_tpg->se_tpg_wwn = se_wwn;
	atomic_set(&se_tpg->tpg_pr_ref_count, 0);
	INIT_LIST_HEAD(&se_tpg->acl_node_list);
	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
	spin_lock_init(&se_tpg->acl_node_lock);
	spin_lock_init(&se_tpg->session_lock);
	spin_lock_init(&se_tpg->tpg_lun_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
		if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
			array_free(se_tpg->tpg_lun_list,
				   TRANSPORT_MAX_LUNS_PER_TPG);
			return -ENOMEM;
		}
	}

	spin_lock_bh(&tpg_lock);
	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
	spin_unlock_bh(&tpg_lock);

	pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
		"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));

	return 0;
}
EXPORT_SYMBOL(core_tpg_register);

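/*
 * A minimal sketch (not part of this file) of how a fabric module's
 * configfs ->fabric_make_tpg() callback might register a new TPG; the
 * my_tpg and my_fabric_ops names are hypothetical:
 *
 *	struct my_tpg *tpg;
 *	int ret;
 *
 *	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
 *	if (!tpg)
 *		return ERR_PTR(-ENOMEM);
 *	ret = core_tpg_register(&my_fabric_ops, wwn, &tpg->se_tpg,
 *			tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *	if (ret < 0) {
 *		kfree(tpg);
 *		return ERR_PTR(ret);
 *	}
 *	return &tpg->se_tpg;
 */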
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
	struct se_node_acl *nacl, *nacl_tmp;

	pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
		" for endpoint: %s Portal Tag %u\n",
		se_tpg->se_tpg_tfo->get_fabric_name(),
		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
		"Normal" : "Discovery",
		se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
		se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));

	spin_lock_bh(&tpg_lock);
	list_del(&se_tpg->se_tpg_node);
	spin_unlock_bh(&tpg_lock);

	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
		cpu_relax();
	/*
	 * Release any remaining demo-mode generated se_node_acl that have
	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
	 * in transport_deregister_session().
	 */
	spin_lock_irq(&se_tpg->acl_node_lock);
	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
			acl_list) {
		list_del(&nacl->acl_list);
		se_tpg->num_node_acls--;
		spin_unlock_irq(&se_tpg->acl_node_lock);

		core_tpg_wait_for_nacl_pr_ref(nacl);
		core_free_device_list_for_node(nacl, se_tpg);
		se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl);

		spin_lock_irq(&se_tpg->acl_node_lock);
	}
	spin_unlock_irq(&se_tpg->acl_node_lock);

	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
		core_tpg_release_virtual_lun0(se_tpg);

	se_tpg->se_tpg_fabric_ptr = NULL;
	array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
	return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);

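/*
 * The pre_addlun/post_addlun and pre_dellun/post_dellun pairs below are
 * called by core_dev_add_lun() and core_dev_del_lun() in
 * target_core_device.c to validate a LUN slot first, and then to
 * activate or deactivate it once configfs setup is complete.
 */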
struct se_lun *core_tpg_pre_addlun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(),
			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("TPG Logical Unit Number: %u is already active"
			" on %s Target Portal Group: %u, ignoring request.\n",
			unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-EINVAL);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

int core_tpg_post_addlun(
	struct se_portal_group *tpg,
	struct se_lun *lun,
	u32 lun_access,
	void *lun_ptr)
{
	int ret;

	ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);
	if (ret < 0)
		return ret;

	ret = core_dev_export(lun_ptr, tpg, lun);
	if (ret < 0) {
		percpu_ref_cancel_init(&lun->lun_ref);
		return ret;
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_access = lun_access;
	lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}

struct se_lun *core_tpg_pre_dellun(
	struct se_portal_group *tpg,
	u32 unpacked_lun)
{
	struct se_lun *lun;

	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
			"-1: %u for Target Portal Group: %u\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			TRANSPORT_MAX_LUNS_PER_TPG-1,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		return ERR_PTR(-EOVERFLOW);
	}

	spin_lock(&tpg->tpg_lun_lock);
	lun = tpg->tpg_lun_list[unpacked_lun];
	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
		pr_err("%s Logical Unit Number: %u is not active on"
			" Target Portal Group: %u, ignoring request.\n",
			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
			tpg->se_tpg_tfo->tpg_get_tag(tpg));
		spin_unlock(&tpg->tpg_lun_lock);
		return ERR_PTR(-ENODEV);
	}
	spin_unlock(&tpg->tpg_lun_lock);

	return lun;
}

int core_tpg_post_dellun(
	struct se_portal_group *tpg,
	struct se_lun *lun)
{
	core_clear_lun_from_tpg(lun, tpg);
	transport_clear_lun_ref(lun);

	core_dev_unexport(lun->lun_se_dev, tpg, lun);

	spin_lock(&tpg->tpg_lun_lock);
	lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
	spin_unlock(&tpg->tpg_lun_lock);

	return 0;
}