@@ -113,6 +113,11 @@ static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
 static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
 	struct atio_from_isp *atio, uint16_t status, int qfull);
 static void qlt_disable_vha(struct scsi_qla_host *vha);
+static void qlt_clear_tgt_db(struct qla_tgt *tgt);
+static void qlt_send_notify_ack(struct scsi_qla_host *vha,
+	struct imm_ntfy_from_isp *ntfy,
+	uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
+	uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
 /*
  * Global Variables
  */
@@ -122,6 +127,16 @@ static struct workqueue_struct *qla_tgt_wq;
 static DEFINE_MUTEX(qla_tgt_mutex);
 static LIST_HEAD(qla_tgt_glist);
 
+/* This API intentionally takes dest as a parameter, rather than returning
+ * int value to avoid caller forgetting to issue wmb() after the store */
+void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
+{
+	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
+	*dest = atomic_inc_return(&base_vha->generation_tick);
+	/* memory barrier */
+	wmb();
+}
+
 /* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
 static struct qla_tgt_sess *qlt_find_sess_by_port_name(
 	struct qla_tgt *tgt,
@@ -381,14 +396,73 @@ static void qlt_free_session_done(struct work_struct *work)
 	struct qla_tgt *tgt = sess->tgt;
 	struct scsi_qla_host *vha = sess->vha;
 	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags;
+	bool logout_started = false;
+	fc_port_t fcport;
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
+	    "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
+	    " s_id %02x:%02x:%02x logout %d keep %d plogi %d\n",
+	    __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
+	    sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
+	    sess->logout_on_delete, sess->keep_nport_handle,
+	    sess->plogi_ack_needed);
 
 	BUG_ON(!tgt);
+
+	if (sess->logout_on_delete) {
+		int rc;
+
+		memset(&fcport, 0, sizeof(fcport));
+		fcport.loop_id = sess->loop_id;
+		fcport.d_id = sess->s_id;
+		memcpy(fcport.port_name, sess->port_name, WWN_SIZE);
+		fcport.vha = vha;
+		fcport.tgt_session = sess;
+
+		rc = qla2x00_post_async_logout_work(vha, &fcport, NULL);
+		if (rc != QLA_SUCCESS)
+			ql_log(ql_log_warn, vha, 0xf085,
+			    "Schedule logo failed sess %p rc %d\n",
+			    sess, rc);
+		else
+			logout_started = true;
+	}
+
 	/*
 	 * Release the target session for FC Nexus from fabric module code.
 	 */
 	if (sess->se_sess != NULL)
 		ha->tgt.tgt_ops->free_session(sess);
 
+	if (logout_started) {
+		bool traced = false;
+
+		while (!ACCESS_ONCE(sess->logout_completed)) {
+			if (!traced) {
+				ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
+				    "%s: waiting for sess %p logout\n",
+				    __func__, sess);
+				traced = true;
+			}
+			msleep(100);
+		}
+
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087,
+		    "%s: sess %p logout completed\n",
+		    __func__, sess);
+	}
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	if (sess->plogi_ack_needed)
+		qlt_send_notify_ack(vha, &sess->tm_iocb,
+		    0, 0, 0, 0, 0, 0);
+
+	list_del(&sess->sess_list_entry);
+
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
 	    "Unregistration of sess %p finished\n", sess);
 
@@ -409,9 +483,9 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess)
 
 	vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
 
-	list_del(&sess->sess_list_entry);
-	if (sess->deleted)
-		list_del(&sess->del_list_entry);
+	if (!list_empty(&sess->del_list_entry))
+		list_del_init(&sess->del_list_entry);
+	sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
 
 	INIT_WORK(&sess->free_work, qlt_free_session_done);
 	schedule_work(&sess->free_work);
@@ -431,10 +505,10 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
 
 	loop_id = le16_to_cpu(n->u.isp24.nport_handle);
 	if (loop_id == 0xFFFF) {
-#if 0 /* FIXME: Re-enable Global event handling.. */
 		/* Global event */
-		atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
-		qlt_clear_tgt_db(ha->tgt.qla_tgt);
+		atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
+		qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
+#if 0 /* FIXME: do we need to choose a session here? */
 		if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
 			sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
 			    typeof(*sess), sess_list_entry);
@@ -489,27 +563,38 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
 	struct qla_tgt *tgt = sess->tgt;
 	uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
 
-	if (sess->deleted)
-		return;
+	if (sess->deleted) {
+		/* Upgrade to unconditional deletion in case it was temporary */
+		if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING)
+			list_del(&sess->del_list_entry);
+		else
+			return;
+	}
 
 	ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
 	    "Scheduling sess %p for deletion\n", sess);
-	list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
-	sess->deleted = 1;
 
-	if (immediate)
+	if (immediate) {
 		dev_loss_tmo = 0;
+		sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
+		list_add(&sess->del_list_entry, &tgt->del_sess_list);
+	} else {
+		sess->deleted = QLA_SESS_DELETION_PENDING;
+		list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
+	}
 
 	sess->expires = jiffies + dev_loss_tmo * HZ;
 
 	ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
-	    "qla_target(%d): session for port %8phC (loop ID %d) scheduled for "
-	    "deletion in %u secs (expires: %lu) immed: %d\n",
-	    sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo,
-	    sess->expires, immediate);
+	    "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)"
+	    " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n",
+	    sess->vha->vp_idx, sess->port_name, sess->loop_id,
+	    sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
+	    dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete,
+	    sess->generation);
 
 	if (immediate)
-		schedule_delayed_work(&tgt->sess_del_work, 0);
+		mod_delayed_work(system_wq, &tgt->sess_del_work, 0);
 	else
 		schedule_delayed_work(&tgt->sess_del_work,
 		    sess->expires - jiffies);
@@ -578,9 +663,9 @@ out_free_id_list:
 
 /* ha->hardware_lock supposed to be held on entry */
 static void qlt_undelete_sess(struct qla_tgt_sess *sess)
 {
-	BUG_ON(!sess->deleted);
+	BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING);
 
-	list_del(&sess->del_list_entry);
+	list_del_init(&sess->del_list_entry);
 	sess->deleted = 0;
 }
@@ -599,7 +684,9 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
 		    del_list_entry);
 		elapsed = jiffies;
 		if (time_after_eq(elapsed, sess->expires)) {
-			qlt_undelete_sess(sess);
+			/* No turning back */
+			list_del_init(&sess->del_list_entry);
+			sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
 
 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
 			    "Timeout: sess %p about to be deleted\n",
@@ -643,6 +730,13 @@ static struct qla_tgt_sess *qlt_create_sess(
 		    fcport->d_id.b.al_pa, fcport->d_id.b.area,
 		    fcport->loop_id);
 
+		/* Cannot undelete at this point */
+		if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+			spin_unlock_irqrestore(&ha->hardware_lock,
+			    flags);
+			return NULL;
+		}
+
 		if (sess->deleted)
 			qlt_undelete_sess(sess);
 
@@ -652,6 +746,9 @@ static struct qla_tgt_sess *qlt_create_sess(
 
 		if (sess->local && !local)
 			sess->local = 0;
+
+		qlt_do_generation_tick(vha, &sess->generation);
+
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 		return sess;
@@ -673,6 +770,14 @@ static struct qla_tgt_sess *qlt_create_sess(
 	sess->s_id = fcport->d_id;
 	sess->loop_id = fcport->loop_id;
 	sess->local = local;
+	INIT_LIST_HEAD(&sess->del_list_entry);
+
+	/* Under normal circumstances we want to logout from firmware when
+	 * session eventually ends and release corresponding nport handle.
+	 * In the exception cases (e.g. when new PLOGI is waiting) corresponding
+	 * code will adjust these flags as necessary. */
+	sess->logout_on_delete = 1;
+	sess->keep_nport_handle = 0;
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
 	    "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
@@ -705,6 +810,7 @@ static struct qla_tgt_sess *qlt_create_sess(
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
 	vha->vha_tgt.qla_tgt->sess_count++;
+	qlt_do_generation_tick(vha, &sess->generation);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
@@ -718,7 +824,7 @@ static struct qla_tgt_sess *qlt_create_sess(
 }
 
 /*
- * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
+ * Called from qla2x00_reg_remote_port()
  */
 void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
 {
@@ -750,6 +856,10 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
 		mutex_unlock(&vha->vha_tgt.tgt_mutex);
 
 		spin_lock_irqsave(&ha->hardware_lock, flags);
+	} else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+		/* Point of no return */
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		return;
 	} else {
 		kref_get(&sess->se_sess->sess_kref);
 
@@ -780,27 +890,36 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
 
-void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
+/*
+ * max_gen - specifies maximum session generation
+ * at which this deletion requestion is still valid
+ */
+void
+qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
 {
-	struct qla_hw_data *ha = vha->hw;
 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	struct qla_tgt_sess *sess;
-	unsigned long flags;
 
 	if (!vha->hw->tgt.tgt_ops)
 		return;
 
-	if (!tgt || (fcport->port_type != FCT_INITIATOR))
+	if (!tgt)
 		return;
 
-	spin_lock_irqsave(&ha->hardware_lock, flags);
 	if (tgt->tgt_stop) {
-		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 		return;
 	}
 	sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
 	if (!sess) {
-		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		return;
+	}
+
+	if (max_gen - sess->generation < 0) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
+		    "Ignoring stale deletion request for se_sess %p / sess %p"
+		    " for port %8phC, req_gen %d, sess_gen %d\n",
+		    sess->se_sess, sess, sess->port_name, max_gen,
+		    sess->generation);
 		return;
 	}
 
@@ -808,7 +927,6 @@ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
 
 	sess->local = 1;
 	qlt_schedule_sess_for_deletion(sess, false);
-	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
 
 static inline int test_tgt_sess_count(struct qla_tgt *tgt)
@@ -1175,6 +1293,70 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
 	    FCP_TMF_CMPL, true);
 }
 
+static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
+{
+	struct qla_tgt_sess_op *op;
+	struct qla_tgt_cmd *cmd;
+
+	spin_lock(&vha->cmd_list_lock);
+
+	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
+		if (tag == op->atio.u.isp24.exchange_addr) {
+			op->aborted = true;
+			spin_unlock(&vha->cmd_list_lock);
+			return 1;
+		}
+	}
+
+	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
+		if (tag == cmd->atio.u.isp24.exchange_addr) {
+			cmd->state = QLA_TGT_STATE_ABORTED;
+			spin_unlock(&vha->cmd_list_lock);
+			return 1;
+		}
+	}
+
+	spin_unlock(&vha->cmd_list_lock);
+	return 0;
+}
+
+/* drop cmds for the given lun
+ * XXX only looks for cmds on the port through which lun reset was recieved
+ * XXX does not go through the list of other port (which may have cmds
+ *     for the same lun)
+ */
+static void abort_cmds_for_lun(struct scsi_qla_host *vha,
+				uint32_t lun, uint8_t *s_id)
+{
+	struct qla_tgt_sess_op *op;
+	struct qla_tgt_cmd *cmd;
+	uint32_t key;
+
+	key = sid_to_key(s_id);
+	spin_lock(&vha->cmd_list_lock);
+	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
+		uint32_t op_key;
+		uint32_t op_lun;
+
+		op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
+		op_lun = scsilun_to_int(
+			(struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
+		if (op_key == key && op_lun == lun)
+			op->aborted = true;
+	}
+	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
+		uint32_t cmd_key;
+		uint32_t cmd_lun;
+
+		cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
+		cmd_lun = scsilun_to_int(
+			(struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
+		if (cmd_key == key && cmd_lun == lun)
+			cmd->state = QLA_TGT_STATE_ABORTED;
+	}
+	spin_unlock(&vha->cmd_list_lock);
+}
+
 /* ha->hardware_lock supposed to be held on entry */
 static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
 	struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
@@ -1199,8 +1381,19 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
 	}
 	spin_unlock(&se_sess->sess_cmd_lock);
 
-	if (!found_lun)
-		return -ENOENT;
+	/* cmd not in LIO lists, look in qla list */
+	if (!found_lun) {
+		if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
+			/* send TASK_ABORT response immediately */
+			qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false);
+			return 0;
+		} else {
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
+			    "unable to find cmd in driver or LIO for tag 0x%x\n",
+			    abts->exchange_addr_to_abort);
+			return -ENOENT;
+		}
+	}
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
 	    "qla_target(%d): task abort (tag=%d)\n",
@@ -1284,6 +1477,11 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
 		return;
 	}
 
+	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+		qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
+		return;
+	}
+
 	rc = __qlt_24xx_handle_abts(vha, abts, sess);
 	if (rc != 0) {
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
@@ -1726,20 +1924,6 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
 	struct qla_hw_data *ha = vha->hw;
 	struct se_cmd *se_cmd = &cmd->se_cmd;
 
-	if (unlikely(cmd->aborted)) {
-		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
-		    "qla_target(%d): terminating exchange for aborted cmd=%p (se_cmd=%p, tag=%lld)",
-		    vha->vp_idx, cmd, se_cmd, se_cmd->tag);
-
-		cmd->state = QLA_TGT_STATE_ABORTED;
-		cmd->cmd_flags |= BIT_6;
-
-		qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
-
-		/* !! At this point cmd could be already freed !! */
-		return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
-	}
-
 	prm->cmd = cmd;
 	prm->tgt = tgt;
 	prm->rq_result = scsi_status;
@@ -2301,6 +2485,19 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
 	unsigned long flags = 0;
 	int res;
 
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+		cmd->state = QLA_TGT_STATE_PROCESSED;
+		if (cmd->sess->logout_completed)
+			/* no need to terminate. FW already freed exchange. */
+			qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+		else
+			qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		return 0;
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
 	memset(&prm, 0, sizeof(prm));
 	qlt_check_srr_debug(cmd, &xmit_type);
 
@@ -2313,9 +2510,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
 	res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
 	    &full_req_cnt);
 	if (unlikely(res != 0)) {
-		if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
-			return 0;
-
 		return res;
 	}
 
@@ -2345,9 +2539,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
 		res = qlt_build_ctio_crc2_pkt(&prm, vha);
 	else
 		res = qlt_24xx_build_ctio_pkt(&prm, vha);
-	if (unlikely(res != 0))
+	if (unlikely(res != 0)) {
+		vha->req->cnt += full_req_cnt;
 		goto out_unmap_unlock;
-
+	}
 
 	pkt = (struct ctio7_to_24xx *)prm.pkt;
 
@@ -2461,7 +2656,8 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
-	if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
+	if (qla2x00_reset_active(vha) || (cmd->reset_count != ha->chip_reset) ||
+	    (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) {
 		/*
 		 * Either a chip reset is active or this request was from
 		 * previous life, just abort the processing.
@@ -2485,8 +2681,11 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
 	else
 		res = qlt_24xx_build_ctio_pkt(&prm, vha);
 
-	if (unlikely(res != 0))
+	if (unlikely(res != 0)) {
+		vha->req->cnt += prm.req_cnt;
 		goto out_unlock_free_unmap;
+	}
+
 	pkt = (struct ctio7_to_24xx *)prm.pkt;
 	pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
 	    CTIO7_FLAGS_STATUS_MODE_0);
@@ -2649,6 +2848,89 @@ out:
 }
 
+/* If hardware_lock held on entry, might drop it, then reaquire */
+/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
+static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
+	struct imm_ntfy_from_isp *ntfy)
+{
+	struct nack_to_isp *nack;
+	struct qla_hw_data *ha = vha->hw;
+	request_t *pkt;
+	int ret = 0;
+
+	ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
+	    "Sending TERM ELS CTIO (ha=%p)\n", ha);
+
+	pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
+	if (pkt == NULL) {
+		ql_dbg(ql_dbg_tgt, vha, 0xe080,
+		    "qla_target(%d): %s failed: unable to allocate "
+		    "request packet\n", vha->vp_idx, __func__);
+		return -ENOMEM;
+	}
+
+	pkt->entry_type = NOTIFY_ACK_TYPE;
+	pkt->entry_count = 1;
+	pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+
+	nack = (struct nack_to_isp *)pkt;
+	nack->ox_id = ntfy->ox_id;
+
+	nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
+	if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
+		nack->u.isp24.flags = ntfy->u.isp24.flags &
+			__constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
+	}
+
+	/* terminate */
+	nack->u.isp24.flags |=
+		__constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
+
+	nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
+	nack->u.isp24.status = ntfy->u.isp24.status;
+	nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
+	nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
+	nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
+	nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
+	nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
+	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
+
+	qla2x00_start_iocbs(vha, vha->req);
+	return ret;
+}
+
+static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
+	struct imm_ntfy_from_isp *imm, int ha_locked)
+{
+	unsigned long flags = 0;
+	int rc;
+
+	if (qlt_issue_marker(vha, ha_locked) < 0)
+		return;
+
+	if (ha_locked) {
+		rc = __qlt_send_term_imm_notif(vha, imm);
+
+#if 0	/* Todo */
+		if (rc == -ENOMEM)
+			qlt_alloc_qfull_cmd(vha, imm, 0, 0);
+#endif
+		goto done;
+	}
+
+	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+	rc = __qlt_send_term_imm_notif(vha, imm);
+
+#if 0	/* Todo */
+	if (rc == -ENOMEM)
+		qlt_alloc_qfull_cmd(vha, imm, 0, 0);
+#endif
+
+done:
+	if (!ha_locked)
+		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+}
+
 /* If hardware_lock held on entry, might drop it, then reaquire */
 /* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
 static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
@@ -2715,7 +2997,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
 static void qlt_send_term_exchange(struct scsi_qla_host *vha,
 	struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
 {
-	unsigned long flags;
+	unsigned long flags = 0;
 	int rc;
 
 	if (qlt_issue_marker(vha, ha_locked) < 0)
@@ -2731,17 +3013,18 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
 	rc = __qlt_send_term_exchange(vha, cmd, atio);
 	if (rc == -ENOMEM)
 		qlt_alloc_qfull_cmd(vha, atio, 0, 0);
-	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
 
 done:
 	if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
 	    !cmd->cmd_sent_to_fw)) {
-		if (!ha_locked && !in_interrupt())
-			msleep(250); /* just in case */
-
-		qlt_unmap_sg(vha, cmd);
+		if (cmd->sg_mapped)
+			qlt_unmap_sg(vha, cmd);
 		vha->hw->tgt.tgt_ops->free_cmd(cmd);
 	}
+
+	if (!ha_locked)
+		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
 	return;
 }
 
@@ -2792,6 +3075,24 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
 
 }
 
+void qlt_abort_cmd(struct qla_tgt_cmd *cmd)
+{
+	struct qla_tgt *tgt = cmd->tgt;
+	struct scsi_qla_host *vha = tgt->vha;
+	struct se_cmd *se_cmd = &cmd->se_cmd;
+
+	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
+	    "qla_target(%d): terminating exchange for aborted cmd=%p "
+	    "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
+	    se_cmd->tag);
+
+	cmd->state = QLA_TGT_STATE_ABORTED;
+	cmd->cmd_flags |= BIT_6;
+
+	qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
+}
+EXPORT_SYMBOL(qlt_abort_cmd);
+
 void qlt_free_cmd(struct qla_tgt_cmd *cmd)
 {
 	struct qla_tgt_sess *sess = cmd->sess;
@@ -3015,7 +3316,7 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
 		dump_stack();
 	}
 
-	cmd->cmd_flags |= BIT_12;
+	cmd->cmd_flags |= BIT_17;
 	ha->tgt.tgt_ops->free_cmd(cmd);
 }
 
@@ -3177,7 +3478,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
 skip_term:
 
 	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
-		;
+		cmd->cmd_flags |= BIT_12;
 	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
 		int rx_status = 0;
 
@@ -3191,9 +3492,11 @@ skip_term:
 		ha->tgt.tgt_ops->handle_data(cmd);
 		return;
 	} else if (cmd->state == QLA_TGT_STATE_ABORTED) {
+		cmd->cmd_flags |= BIT_18;
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
 		    "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
 	} else {
+		cmd->cmd_flags |= BIT_19;
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
 		    "qla_target(%d): A command in state (%d) should "
 		    "not return a CTIO complete\n", vha->vp_idx, cmd->state);
@@ -3205,7 +3508,6 @@ skip_term:
 		dump_stack();
 	}
 
-
 	ha->tgt.tgt_ops->free_cmd(cmd);
 }
 
@@ -3263,6 +3565,13 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
 	if (tgt->tgt_stop)
 		goto out_term;
 
+	if (cmd->state == QLA_TGT_STATE_ABORTED) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
+		    "cmd with tag %u is aborted\n",
+		    cmd->atio.u.isp24.exchange_addr);
+		goto out_term;
+	}
+
 	cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
 	cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
 	cmd->unpacked_lun = scsilun_to_int(
@@ -3316,6 +3625,12 @@ out_term:
 static void qlt_do_work(struct work_struct *work)
 {
 	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+	scsi_qla_host_t *vha = cmd->vha;
+	unsigned long flags;
+
+	spin_lock_irqsave(&vha->cmd_list_lock, flags);
+	list_del(&cmd->cmd_list);
+	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
 
 	__qlt_do_work(cmd);
 }
@@ -3345,6 +3660,11 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
 	cmd->loop_id = sess->loop_id;
 	cmd->conf_compl_supported = sess->conf_compl_supported;
 
+	cmd->cmd_flags = 0;
+	cmd->jiffies_at_alloc = get_jiffies_64();
+
+	cmd->reset_count = vha->hw->chip_reset;
+
 	return cmd;
 }
 
@@ -3362,14 +3682,25 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
 	unsigned long flags;
 	uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
 
+	spin_lock_irqsave(&vha->cmd_list_lock, flags);
+	list_del(&op->cmd_list);
+	spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
+
+	if (op->aborted) {
+		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
+		    "sess_op with tag %u is aborted\n",
+		    op->atio.u.isp24.exchange_addr);
+		goto out_term;
+	}
+
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
-		"qla_target(%d): Unable to find wwn login"
-		" (s_id %x:%x:%x), trying to create it manually\n",
-		vha->vp_idx, s_id[0], s_id[1], s_id[2]);
+	    "qla_target(%d): Unable to find wwn login"
+	    " (s_id %x:%x:%x), trying to create it manually\n",
+	    vha->vp_idx, s_id[0], s_id[1], s_id[2]);
 
 	if (op->atio.u.raw.entry_count > 1) {
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
-			"Dropping multy entry atio %p\n", &op->atio);
+		    "Dropping multy entry atio %p\n", &op->atio);
 		goto out_term;
 	}
 
@@ -3434,10 +3765,25 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
 
 		memcpy(&op->atio, atio, sizeof(*atio));
 		op->vha = vha;
+
+		spin_lock(&vha->cmd_list_lock);
+		list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
+		spin_unlock(&vha->cmd_list_lock);
+
 		INIT_WORK(&op->work, qlt_create_sess_from_atio);
 		queue_work(qla_tgt_wq, &op->work);
 		return 0;
 	}
+
+	/* Another WWN used to have our s_id. Our PLOGI scheduled its
+	 * session deletion, but it's still in sess_del_work wq */
+	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+		ql_dbg(ql_dbg_io, vha, 0x3061,
+		    "New command while old session %p is being deleted\n",
+		    sess);
+		return -EFAULT;
+	}
+
 	/*
 	 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
 	 */
@@ -3451,13 +3797,13 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
 		return -ENOMEM;
 	}
 
-	cmd->cmd_flags = 0;
-	cmd->jiffies_at_alloc = get_jiffies_64();
-
-	cmd->reset_count = vha->hw->chip_reset;
-
 	cmd->cmd_in_wq = 1;
 	cmd->cmd_flags |= BIT_0;
+
+	spin_lock(&vha->cmd_list_lock);
+	list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
+	spin_unlock(&vha->cmd_list_lock);
+
 	INIT_WORK(&cmd->work, qlt_do_work);
 	queue_work(qla_tgt_wq, &cmd->work);
 	return 0;
@@ -3471,6 +3817,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
 	struct scsi_qla_host *vha = sess->vha;
 	struct qla_hw_data *ha = vha->hw;
 	struct qla_tgt_mgmt_cmd *mcmd;
+	struct atio_from_isp *a = (struct atio_from_isp *)iocb;
 	int res;
 	uint8_t tmr_func;
 
@@ -3511,6 +3858,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
 		ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
 		    "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
 		tmr_func = TMR_LUN_RESET;
+		abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
 		break;
 
 	case QLA_TGT_CLEAR_TS:
@@ -3599,6 +3947,9 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
 		    sizeof(struct atio_from_isp));
 	}
 
+	if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)
+		return -EFAULT;
+
 	return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
 }
 
@@ -3664,22 +4015,280 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
 	return __qlt_abort_task(vha, iocb, sess);
 }
 
+void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
+{
+	if (fcport->tgt_session) {
+		if (rc != MBS_COMMAND_COMPLETE) {
+			ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
+				"%s: se_sess %p / sess %p from"
+				" port %8phC loop_id %#04x s_id %02x:%02x:%02x"
+				" LOGO failed: %#x\n",
+				__func__,
+				fcport->tgt_session->se_sess,
+				fcport->tgt_session,
+				fcport->port_name, fcport->loop_id,
+				fcport->d_id.b.domain, fcport->d_id.b.area,
+				fcport->d_id.b.al_pa, rc);
+		}
+
+		fcport->tgt_session->logout_completed = 1;
+	}
+}
+
+static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a,
+    struct imm_ntfy_from_isp *b)
+{
+	struct imm_ntfy_from_isp tmp;
+	memcpy(&tmp, a, sizeof(struct imm_ntfy_from_isp));
+	memcpy(a, b, sizeof(struct imm_ntfy_from_isp));
+	memcpy(b, &tmp, sizeof(struct imm_ntfy_from_isp));
+}
+
+/*
+* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
+*
+* Schedules sessions with matching port_id/loop_id but different wwn for
+* deletion. Returns existing session with matching wwn if present.
+* Null otherwise.
+*/
+static struct qla_tgt_sess *
+qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
+    port_id_t port_id, uint16_t loop_id)
+{
+	struct qla_tgt_sess *sess = NULL, *other_sess;
+	uint64_t other_wwn;
+
+	list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) {
+
+		other_wwn = wwn_to_u64(other_sess->port_name);
+
+		if (wwn == other_wwn) {
+			WARN_ON(sess);
+			sess = other_sess;
+			continue;
+		}
+
+		/* find other sess with nport_id collision */
+		if (port_id.b24 == other_sess->s_id.b24) {
+			if (loop_id != other_sess->loop_id) {
+				ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c,
+				    "Invalidating sess %p loop_id %d wwn %llx.\n",
+				    other_sess, other_sess->loop_id, other_wwn);
+
+				/*
+				 * logout_on_delete is set by default, but another
+				 * session that has the same s_id/loop_id combo
+				 * might have cleared it when requested this session
+				 * deletion, so don't touch it
+				 */
+				qlt_schedule_sess_for_deletion(other_sess, true);
+			} else {
+				/*
+				 * Another wwn used to have our s_id/loop_id
+				 * combo - kill the session, but don't log out
+				 */
+				sess->logout_on_delete = 0;
+				qlt_schedule_sess_for_deletion(other_sess,
+				    true);
+			}
+			continue;
+		}
+
+		/* find other sess with nport handle collision */
+		if (loop_id == other_sess->loop_id) {
+			ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d,
+			    "Invalidating sess %p loop_id %d wwn %llx.\n",
+			    other_sess, other_sess->loop_id, other_wwn);
+
+			/* Same loop_id but different s_id
+			 * Ok to kill and logout */
+			qlt_schedule_sess_for_deletion(other_sess, true);
+		}
+	}
+
+	return sess;
+}
+
+/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
+static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
+{
+	struct qla_tgt_sess_op *op;
+	struct qla_tgt_cmd *cmd;
+	uint32_t key;
+	int count = 0;
+
+	key = (((u32)s_id->b.domain << 16) |
+	       ((u32)s_id->b.area << 8) |
+	       ((u32)s_id->b.al_pa));
+
+	spin_lock(&vha->cmd_list_lock);
+	list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
+		uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
+		if (op_key == key) {
+			op->aborted = true;
+			count++;
+		}
+	}
+	list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
+		uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
+		if (cmd_key == key) {
+			cmd->state = QLA_TGT_STATE_ABORTED;
+			count++;
+		}
+	}
+	spin_unlock(&vha->cmd_list_lock);
+
+	return count;
+}
+
 /*
  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
  */
 static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
 	struct imm_ntfy_from_isp *iocb)
 {
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt_sess *sess = NULL;
+	uint64_t wwn;
+	port_id_t port_id;
+	uint16_t loop_id;
+	uint16_t wd3_lo;
 	int res = 0;
 
+	wwn = wwn_to_u64(iocb->u.isp24.port_name);
+
+	port_id.b.domain = iocb->u.isp24.port_id[2];
+	port_id.b.area   = iocb->u.isp24.port_id[1];
+	port_id.b.al_pa  = iocb->u.isp24.port_id[0];
+	port_id.b.rsvd_1 = 0;
+
+	loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
+
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
 	    "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
 	    vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
 
+	/* res = 1 means ack at the end of thread
+	 * res = 0 means ack async/later.
+	 */
 	switch (iocb->u.isp24.status_subcode) {
 	case ELS_PLOGI:
-	case ELS_FLOGI:
+
+		/* Mark all stale commands in qla_tgt_wq for deletion */
+		abort_cmds_for_s_id(vha, &port_id);
+
+		if (wwn)
+			sess = qlt_find_sess_invalidate_other(tgt, wwn,
+			    port_id, loop_id);
+
+		if (!sess || IS_SW_RESV_ADDR(sess->s_id)) {
+			res = 1;
+			break;
+		}
+
+		if (sess->plogi_ack_needed) {
+			/*
+			 * Initiator sent another PLOGI before last PLOGI could
+			 * finish. Swap plogi iocbs and terminate old one
+			 * without acking, new one will get acked when session
+			 * deletion completes.
+			 */
+			ql_log(ql_log_warn, sess->vha, 0xf094,
+			    "sess %p received double plogi.\n", sess);
+
+			qlt_swap_imm_ntfy_iocb(iocb, &sess->tm_iocb);
+
+			qlt_send_term_imm_notif(vha, iocb, 1);
+
+			res = 0;
+			break;
+		}
+
+		res = 0;
+
+		/*
+		 * Save immediate Notif IOCB for Ack when sess is done
+		 * and being deleted.
+		 */
+		memcpy(&sess->tm_iocb, iocb, sizeof(sess->tm_iocb));
+		sess->plogi_ack_needed = 1;
+
+		/*
+		 * Under normal circumstances we want to release nport handle
+		 * during LOGO process to avoid nport handle leaks inside FW.
+		 * The exception is when LOGO is done while another PLOGI with
+		 * the same nport handle is waiting as might be the case here.
+		 * Note: there is always a possibily of a race where session
+		 * deletion has already started for other reasons (e.g. ACL
+		 * removal) and now PLOGI arrives:
+		 * 1. if PLOGI arrived in FW after nport handle has been freed,
+		 *    FW must have assigned this PLOGI a new/same handle and we
+		 *    can proceed ACK'ing it as usual when session deletion
+		 *    completes.
+		 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
+		 *    bit reached it, the handle has now been released. We'll
+		 *    get an error when we ACK this PLOGI. Nothing will be sent
+		 *    back to initiator. Initiator should eventually retry
+		 *    PLOGI and situation will correct itself.
+		 */
+		sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
+		    (sess->s_id.b24 == port_id.b24));
+		qlt_schedule_sess_for_deletion(sess, true);
+		break;
+
 	case ELS_PRLI:
+		wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
+
+		if (wwn)
+			sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id,
+			    loop_id);
+
+		if (sess != NULL) {
+			if (sess->deleted) {
+				/*
+				 * Impatient initiator sent PRLI before last
+				 * PLOGI could finish. Will force him to re-try,
+				 * while last one finishes.
+				 */
+				ql_log(ql_log_warn, sess->vha, 0xf095,
+				    "sess %p PRLI received, before plogi ack.\n",
+				    sess);
+				qlt_send_term_imm_notif(vha, iocb, 1);
+				res = 0;
+				break;
+			}
+
+			/*
+			 * This shouldn't happen under normal circumstances,
+			 * since we have deleted the old session during PLOGI
+			 */
+			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
+			    "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
+			    sess->loop_id, sess, iocb->u.isp24.nport_handle);
+
+			sess->local = 0;
+			sess->loop_id = loop_id;
+			sess->s_id = port_id;
+
+			if (wd3_lo & BIT_7)
+				sess->conf_compl_supported = 1;
+
+		}
+		res = 1; /* send notify ack */
+
+		/* Make session global (not used in fabric mode) */
+		if (ha->current_topology != ISP_CFG_F) {
+			set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
+			set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
+			qla2xxx_wake_dpc(vha);
+		} else {
+			/* todo: else - create sess here. */
+			res = 1; /* send notify ack */
+		}
+
+		break;
+
 	case ELS_LOGO:
 	case ELS_PRLO:
 		res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
@@ -3697,6 +4306,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
 		break;
 	}
 
+	case ELS_FLOGI:	/* should never happen */
 	default:
 		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
 		    "qla_target(%d): Unsupported ELS command %x "
@@ -5012,6 +5622,11 @@ static void qlt_abort_work(struct qla_tgt *tgt,
 		if (!sess)
 			goto out_term;
 	} else {
+		if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+			sess = NULL;
+			goto out_term;
+		}
+
 		kref_get(&sess->se_sess->sess_kref);
 	}
 
@@ -5066,6 +5681,11 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
 		if (!sess)
 			goto out_term;
 	} else {
+		if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
+			sess = NULL;
+			goto out_term;
+		}
+
 		kref_get(&sess->se_sess->sess_kref);
 	}
 
@@ -5552,6 +6172,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
 
 	/* Adjust ring index */
 	WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
+	RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha));
 }
 
 void
@@ -5793,7 +6414,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
 	if (!QLA_TGT_MODE_ENABLED())
 		return;
 
-	if (ha->mqenable || IS_QLA83XX(ha)) {
+	if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
 		ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
 		ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
 	} else {