@@ -49,7 +49,7 @@ static LIST_HEAD(tpg_list);
 
 /* __core_tpg_get_initiator_node_acl():
  *
- * spin_lock_bh(&tpg->acl_node_lock); must be held when calling
+ * mutex_lock(&tpg->acl_node_mutex); must be held when calling
  */
 struct se_node_acl *__core_tpg_get_initiator_node_acl(
 	struct se_portal_group *tpg,
@@ -75,9 +75,9 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(
 {
 	struct se_node_acl *acl;
 
-	spin_lock_irq(&tpg->acl_node_lock);
+	mutex_lock(&tpg->acl_node_mutex);
 	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
-	spin_unlock_irq(&tpg->acl_node_lock);
+	mutex_unlock(&tpg->acl_node_mutex);
 
 	return acl;
 }
@@ -198,10 +198,10 @@ static void target_add_node_acl(struct se_node_acl *acl)
 {
 	struct se_portal_group *tpg = acl->se_tpg;
 
-	spin_lock_irq(&tpg->acl_node_lock);
+	mutex_lock(&tpg->acl_node_mutex);
 	list_add_tail(&acl->acl_list, &tpg->acl_node_list);
 	tpg->num_node_acls++;
-	spin_unlock_irq(&tpg->acl_node_lock);
+	mutex_unlock(&tpg->acl_node_mutex);
 
 	pr_debug("%s_TPG[%hu] - Added %s ACL with TCQ Depth: %d for %s"
 		" Initiator Node: %s\n",
@@ -257,7 +257,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
 {
 	struct se_node_acl *acl;
 
-	spin_lock_irq(&tpg->acl_node_lock);
+	mutex_lock(&tpg->acl_node_mutex);
 	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
 	if (acl) {
 		if (acl->dynamic_node_acl) {
@@ -265,7 +265,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
 			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
 				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
 				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
-			spin_unlock_irq(&tpg->acl_node_lock);
+			mutex_unlock(&tpg->acl_node_mutex);
 			return acl;
 		}
 
@@ -273,10 +273,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
 			" Node %s already exists for TPG %u, ignoring"
 			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
 			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
-		spin_unlock_irq(&tpg->acl_node_lock);
+		mutex_unlock(&tpg->acl_node_mutex);
 		return ERR_PTR(-EEXIST);
 	}
-	spin_unlock_irq(&tpg->acl_node_lock);
+	mutex_unlock(&tpg->acl_node_mutex);
 
 	acl = target_alloc_node_acl(tpg, initiatorname);
 	if (!acl)
@@ -294,13 +294,13 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
 	unsigned long flags;
 	int rc;
 
-	spin_lock_irq(&tpg->acl_node_lock);
+	mutex_lock(&tpg->acl_node_mutex);
 	if (acl->dynamic_node_acl) {
 		acl->dynamic_node_acl = 0;
 	}
 	list_del(&acl->acl_list);
 	tpg->num_node_acls--;
-	spin_unlock_irq(&tpg->acl_node_lock);
+	mutex_unlock(&tpg->acl_node_mutex);
 
 	spin_lock_irqsave(&acl->nacl_sess_lock, flags);
 	acl->acl_stop = 1;
@@ -357,21 +357,21 @@ int core_tpg_set_initiator_node_queue_depth(
 	unsigned long flags;
 	int dynamic_acl = 0;
 
-	spin_lock_irq(&tpg->acl_node_lock);
+	mutex_lock(&tpg->acl_node_mutex);
 	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
 	if (!acl) {
 		pr_err("Access Control List entry for %s Initiator"
 			" Node %s does not exists for TPG %hu, ignoring"
 			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
 			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
-		spin_unlock_irq(&tpg->acl_node_lock);
+		mutex_unlock(&tpg->acl_node_mutex);
 		return -ENODEV;
 	}
 	if (acl->dynamic_node_acl) {
 		acl->dynamic_node_acl = 0;
 		dynamic_acl = 1;
 	}
-	spin_unlock_irq(&tpg->acl_node_lock);
+	mutex_unlock(&tpg->acl_node_mutex);
 
 	spin_lock_irqsave(&tpg->session_lock, flags);
 	list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
@@ -387,10 +387,10 @@ int core_tpg_set_initiator_node_queue_depth(
 				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
 			spin_unlock_irqrestore(&tpg->session_lock, flags);
 
-			spin_lock_irq(&tpg->acl_node_lock);
+			mutex_lock(&tpg->acl_node_mutex);
 			if (dynamic_acl)
 				acl->dynamic_node_acl = 1;
-			spin_unlock_irq(&tpg->acl_node_lock);
+			mutex_unlock(&tpg->acl_node_mutex);
 			return -EEXIST;
 		}
 		/*
@@ -425,10 +425,10 @@ int core_tpg_set_initiator_node_queue_depth(
 		if (init_sess)
 			tpg->se_tpg_tfo->close_session(init_sess);
 
-		spin_lock_irq(&tpg->acl_node_lock);
+		mutex_lock(&tpg->acl_node_mutex);
 		if (dynamic_acl)
 			acl->dynamic_node_acl = 1;
-		spin_unlock_irq(&tpg->acl_node_lock);
+		mutex_unlock(&tpg->acl_node_mutex);
 		return -EINVAL;
 	}
 	spin_unlock_irqrestore(&tpg->session_lock, flags);
@@ -444,10 +444,10 @@ int core_tpg_set_initiator_node_queue_depth(
 		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
 		tpg->se_tpg_tfo->tpg_get_tag(tpg));
 
-	spin_lock_irq(&tpg->acl_node_lock);
+	mutex_lock(&tpg->acl_node_mutex);
 	if (dynamic_acl)
 		acl->dynamic_node_acl = 1;
-	spin_unlock_irq(&tpg->acl_node_lock);
+	mutex_unlock(&tpg->acl_node_mutex);
 
 	return 0;
 }
@@ -521,9 +521,9 @@ int core_tpg_register(
 	INIT_LIST_HEAD(&se_tpg->acl_node_list);
 	INIT_LIST_HEAD(&se_tpg->se_tpg_node);
 	INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
-	spin_lock_init(&se_tpg->acl_node_lock);
 	spin_lock_init(&se_tpg->session_lock);
 	mutex_init(&se_tpg->tpg_lun_mutex);
+	mutex_init(&se_tpg->acl_node_mutex);
 
 	if (se_tpg->proto_id >= 0) {
 		if (core_tpg_setup_virtual_lun0(se_tpg) < 0)
@@ -559,25 +559,26 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
 
 	while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
 		cpu_relax();
+
 	/*
 	 * Release any remaining demo-mode generated se_node_acl that have
 	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1
 	 * in transport_deregister_session().
 	 */
-	spin_lock_irq(&se_tpg->acl_node_lock);
+	mutex_lock(&se_tpg->acl_node_mutex);
 	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,
 			acl_list) {
 		list_del(&nacl->acl_list);
 		se_tpg->num_node_acls--;
-		spin_unlock_irq(&se_tpg->acl_node_lock);
+		mutex_unlock(&se_tpg->acl_node_mutex);
 
 		core_tpg_wait_for_nacl_pr_ref(nacl);
 		core_free_device_list_for_node(nacl, se_tpg);
 		kfree(nacl);
 
-		spin_lock_irq(&se_tpg->acl_node_lock);
+		mutex_lock(&se_tpg->acl_node_mutex);
 	}
-	spin_unlock_irq(&se_tpg->acl_node_lock);
+	mutex_unlock(&se_tpg->acl_node_mutex);
 
 	if (se_tpg->proto_id >= 0)
 		core_tpg_remove_lun(se_tpg, &se_tpg->tpg_virt_lun0);
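
For reference, a minimal sketch of the locking pattern this conversion establishes: walking tpg->acl_node_list while holding the new acl_node_mutex rather than the irq-disabling spinlock. This is illustrative only and not part of the patch; the helper name sketch_lookup_acl and the strcmp()-based match are assumptions, while the struct fields mirror the hunks above.

/*
 * Illustrative sketch, not patch code: look up an ACL by name under
 * tpg->acl_node_mutex.  Unlike the old spin_lock_irq() pairing,
 * mutex_lock() may sleep and leaves interrupts enabled.
 */
static struct se_node_acl *sketch_lookup_acl(struct se_portal_group *tpg,
					     const char *initiatorname)
{
	struct se_node_acl *acl, *found = NULL;

	mutex_lock(&tpg->acl_node_mutex);
	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
		if (!strcmp(acl->initiatorname, initiatorname)) {
			found = acl;
			break;
		}
	}
	mutex_unlock(&tpg->acl_node_mutex);

	return found;
}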