@@ -100,7 +100,7 @@ enum fcp_resp_rsp_codes {
*/
/* Predefs for callbacks handed to qla2xxx LLD */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
- struct atio_from_isp *pkt);
+ struct atio_from_isp *pkt, uint8_t);
static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
int fn, void *iocb, int flags);
@@ -118,10 +118,13 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
struct imm_ntfy_from_isp *ntfy,
uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
+static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
+ struct imm_ntfy_from_isp *imm, int ha_locked);
/*
* Global Variables
*/
static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
+static struct kmem_cache *qla_tgt_plogi_cachep;
static mempool_t *qla_tgt_mgmt_cmd_mempool;
static struct workqueue_struct *qla_tgt_wq;
static DEFINE_MUTEX(qla_tgt_mutex);
@@ -226,8 +229,8 @@ static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
}

-static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
- struct atio_from_isp *atio)
+static bool qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
+ struct atio_from_isp *atio, uint8_t ha_locked)
{
ql_dbg(ql_dbg_tgt, vha, 0xe072,
"%s: qla_target(%d): type %x ox_id %04x\n",
@@ -248,7 +251,7 @@ static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
atio->u.isp24.fcp_hdr.d_id[2]);
break;
}
- qlt_24xx_atio_pkt(host, atio);
+ qlt_24xx_atio_pkt(host, atio, ha_locked);
break;
}

@@ -271,7 +274,7 @@ static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
break;
}
}
- qlt_24xx_atio_pkt(host, atio);
+ qlt_24xx_atio_pkt(host, atio, ha_locked);
break;
}

@@ -282,7 +285,7 @@ static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
break;
}

- return;
+ return false;
}

void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
@@ -389,6 +392,131 @@ void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)

}

+/*
+ * All qlt_plogi_ack_t operations are protected by hardware_lock
+ */
+
+/*
+ * This is a zero-base ref-counting solution, since hardware_lock
+ * guarantees that ref_count is not modified concurrently.
+ * Upon successful return content of iocb is undefined
+ */
+static qlt_plogi_ack_t *
+qlt_plogi_ack_find_add(struct scsi_qla_host *vha, port_id_t *id,
+ struct imm_ntfy_from_isp *iocb)
+{
+ qlt_plogi_ack_t *pla;
+
+ list_for_each_entry(pla, &vha->plogi_ack_list, list) {
+ if (pla->id.b24 == id->b24) {
+ qlt_send_term_imm_notif(vha, &pla->iocb, 1);
+ pla->iocb = *iocb;
+ return pla;
+ }
+ }
+
+ pla = kmem_cache_zalloc(qla_tgt_plogi_cachep, GFP_ATOMIC);
+ if (!pla) {
+ ql_dbg(ql_dbg_async, vha, 0x5088,
+ "qla_target(%d): Allocation of plogi_ack failed\n",
+ vha->vp_idx);
+ return NULL;
+ }
+
+ pla->iocb = *iocb;
+ pla->id = *id;
+ list_add_tail(&pla->list, &vha->plogi_ack_list);
+
+ return pla;
+}
+
+static void qlt_plogi_ack_unref(struct scsi_qla_host *vha, qlt_plogi_ack_t *pla)
+{
+ BUG_ON(!pla->ref_count);
+ pla->ref_count--;
+
+ if (pla->ref_count)
+ return;
+
+ ql_dbg(ql_dbg_async, vha, 0x5089,
+ "Sending PLOGI ACK to wwn %8phC s_id %02x:%02x:%02x loop_id %#04x"
+ " exch %#x ox_id %#x\n", pla->iocb.u.isp24.port_name,
+ pla->iocb.u.isp24.port_id[2], pla->iocb.u.isp24.port_id[1],
+ pla->iocb.u.isp24.port_id[0],
+ le16_to_cpu(pla->iocb.u.isp24.nport_handle),
+ pla->iocb.u.isp24.exchange_address, pla->iocb.ox_id);
+ qlt_send_notify_ack(vha, &pla->iocb, 0, 0, 0, 0, 0, 0);
+
+ list_del(&pla->list);
+ kmem_cache_free(qla_tgt_plogi_cachep, pla);
+}
+
+static void
+qlt_plogi_ack_link(struct scsi_qla_host *vha, qlt_plogi_ack_t *pla,
+ struct qla_tgt_sess *sess, qlt_plogi_link_t link)
+{
+ /* Inc ref_count first because link might already be pointing at pla */
+ pla->ref_count++;
+
+ if (sess->plogi_link[link])
+ qlt_plogi_ack_unref(vha, sess->plogi_link[link]);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf097,
+ "Linking sess %p [%d] wwn %8phC with PLOGI ACK to wwn %8phC"
+ " s_id %02x:%02x:%02x, ref=%d\n", sess, link, sess->port_name,
+ pla->iocb.u.isp24.port_name, pla->iocb.u.isp24.port_id[2],
+ pla->iocb.u.isp24.port_id[1], pla->iocb.u.isp24.port_id[0],
+ pla->ref_count);
+
+ sess->plogi_link[link] = pla;
+}
+
+typedef struct {
+ /* These fields must be initialized by the caller */
+ port_id_t id;
+ /*
+ * number of cmds dropped while we were waiting for
+ * initiator to ack LOGO initialize to 1 if LOGO is
+ * triggered by a command, otherwise, to 0
+ */
+ int cmd_count;
+
+ /* These fields are used by callee */
+ struct list_head list;
+} qlt_port_logo_t;
+
+static void
+qlt_send_first_logo(struct scsi_qla_host *vha, qlt_port_logo_t *logo)
+{
+ qlt_port_logo_t *tmp;
+ int res;
+
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
+
+ list_for_each_entry(tmp, &vha->logo_list, list) {
+ if (tmp->id.b24 == logo->id.b24) {
+ tmp->cmd_count += logo->cmd_count;
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
+ return;
+ }
+ }
+
+ list_add_tail(&logo->list, &vha->logo_list);
+
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
+
+ res = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, logo->id);
+
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
+ list_del(&logo->list);
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
+
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf098,
+ "Finished LOGO to %02x:%02x:%02x, dropped %d cmds, res = %#x\n",
+ logo->id.b.domain, logo->id.b.area, logo->id.b.al_pa,
+ logo->cmd_count, res);
+}
+
static void qlt_free_session_done(struct work_struct *work)
{
struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
@@ -402,14 +530,21 @@ static void qlt_free_session_done(struct work_struct *work)

ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
"%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
- " s_id %02x:%02x:%02x logout %d keep %d plogi %d\n",
+ " s_id %02x:%02x:%02x logout %d keep %d els_logo %d\n",
__func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
sess->logout_on_delete, sess->keep_nport_handle,
- sess->plogi_ack_needed);
+ sess->send_els_logo);

BUG_ON(!tgt);

+ if (sess->send_els_logo) {
+ qlt_port_logo_t logo;
+ logo.id = sess->s_id;
+ logo.cmd_count = 0;
+ qlt_send_first_logo(vha, &logo);
+ }
+
if (sess->logout_on_delete) {
int rc;
@@ -455,9 +590,34 @@ static void qlt_free_session_done(struct work_struct *work)

spin_lock_irqsave(&ha->hardware_lock, flags);

- if (sess->plogi_ack_needed)
- qlt_send_notify_ack(vha, &sess->tm_iocb,
- 0, 0, 0, 0, 0, 0);
+ {
+ qlt_plogi_ack_t *own =
+ sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN];
+ qlt_plogi_ack_t *con =
+ sess->plogi_link[QLT_PLOGI_LINK_CONFLICT];
+
+ if (con) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf099,
+ "se_sess %p / sess %p port %8phC is gone,"
+ " %s (ref=%d), releasing PLOGI for %8phC (ref=%d)\n",
+ sess->se_sess, sess, sess->port_name,
+ own ? "releasing own PLOGI" :
+ "no own PLOGI pending",
+ own ? own->ref_count : -1,
+ con->iocb.u.isp24.port_name, con->ref_count);
+ qlt_plogi_ack_unref(vha, con);
+ } else {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09a,
+ "se_sess %p / sess %p port %8phC is gone, %s (ref=%d)\n",
+ sess->se_sess, sess, sess->port_name,
+ own ? "releasing own PLOGI" :
+ "no own PLOGI pending",
+ own ? own->ref_count : -1);
+ }
+
+ if (own)
+ qlt_plogi_ack_unref(vha, own);
+ }

list_del(&sess->sess_list_entry);
@@ -476,7 +636,7 @@ static void qlt_free_session_done(struct work_struct *work)
wake_up_all(&tgt->waitQ);
}

-/* ha->hardware_lock supposed to be held on entry */
+/* ha->tgt.sess_lock supposed to be held on entry */
void qlt_unreg_sess(struct qla_tgt_sess *sess)
{
struct scsi_qla_host *vha = sess->vha;
@@ -492,7 +652,7 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess)
}
EXPORT_SYMBOL(qlt_unreg_sess);

-/* ha->hardware_lock supposed to be held on entry */
+
static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
struct qla_hw_data *ha = vha->hw;
@@ -502,12 +662,15 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
int res = 0;
struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
struct atio_from_isp *a = (struct atio_from_isp *)iocb;
+ unsigned long flags;

loop_id = le16_to_cpu(n->u.isp24.nport_handle);
if (loop_id == 0xFFFF) {
/* Global event */
atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
#if 0 /* FIXME: do we need to choose a session here? */
if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
@@ -534,7 +697,9 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
sess = NULL;
#endif
} else {
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}

ql_dbg(ql_dbg_tgt, vha, 0xe000,
@@ -556,7 +721,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
iocb, QLA24XX_MGMT_SEND_NACK);
}

-/* ha->hardware_lock supposed to be held on entry */
+/* ha->tgt.sess_lock supposed to be held on entry */
static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
bool immediate)
{
@@ -600,7 +765,7 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
sess->expires - jiffies);
}

-/* ha->hardware_lock supposed to be held on entry */
+/* ha->tgt.sess_lock supposed to be held on entry */
static void qlt_clear_tgt_db(struct qla_tgt *tgt)
{
struct qla_tgt_sess *sess;
@@ -636,12 +801,12 @@ static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
"qla_target(%d): get_id_list() failed: %x\n",
vha->vp_idx, rc);
- res = -1;
+ res = -EBUSY;
goto out_free_id_list;
}

id_iter = (char *)gid_list;
- res = -1;
+ res = -ENOENT;
for (i = 0; i < entries; i++) {
struct gid_list_info *gid = (struct gid_list_info *)id_iter;
if ((gid->al_pa == s_id[2]) &&
@@ -660,7 +825,7 @@ out_free_id_list:
return res;
}

-/* ha->hardware_lock supposed to be held on entry */
+/* ha->tgt.sess_lock supposed to be held on entry */
static void qlt_undelete_sess(struct qla_tgt_sess *sess)
{
BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING);
@@ -678,7 +843,7 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
struct qla_tgt_sess *sess;
unsigned long flags, elapsed;

- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
while (!list_empty(&tgt->del_sess_list)) {
sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
del_list_entry);
@@ -699,7 +864,7 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
break;
}
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}

/*
@@ -717,7 +882,7 @@ static struct qla_tgt_sess *qlt_create_sess(
unsigned char be_sid[3];

/* Check to avoid double sessions */
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
sess_list_entry) {
if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
@@ -732,7 +897,7 @@ static struct qla_tgt_sess *qlt_create_sess(

/* Cannot undelete at this point */
if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
- spin_unlock_irqrestore(&ha->hardware_lock,
+ spin_unlock_irqrestore(&ha->tgt.sess_lock,
flags);
return NULL;
}
@@ -749,12 +914,12 @@ static struct qla_tgt_sess *qlt_create_sess(

qlt_do_generation_tick(vha, &sess->generation);

- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

return sess;
}
}
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

sess = kzalloc(sizeof(*sess), GFP_KERNEL);
if (!sess) {
@@ -799,7 +964,7 @@ static struct qla_tgt_sess *qlt_create_sess(
}
/*
* Take an extra reference to ->sess_kref here to handle qla_tgt_sess
- * access across ->hardware_lock reaquire.
+ * access across ->tgt.sess_lock reaquire.
*/
kref_get(&sess->se_sess->sess_kref);

@@ -807,11 +972,11 @@ static struct qla_tgt_sess *qlt_create_sess(
BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));

- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
vha->vha_tgt.qla_tgt->sess_count++;
qlt_do_generation_tick(vha, &sess->generation);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
"qla_target(%d): %ssession for wwn %8phC (loop_id %d, "
@@ -842,23 +1007,23 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
if (qla_ini_mode_enabled(vha))
return;

- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
if (tgt->tgt_stop) {
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return;
}
sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
if (!sess) {
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);

mutex_lock(&vha->vha_tgt.tgt_mutex);
sess = qlt_create_sess(vha, fcport, false);
mutex_unlock(&vha->vha_tgt.tgt_mutex);

- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
} else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
/* Point of no return */
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return;
} else {
kref_get(&sess->se_sess->sess_kref);
@@ -887,7 +1052,7 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
sess->local = 0;
}
ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}

/*
@@ -899,6 +1064,7 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
{
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_tgt_sess *sess;
+ unsigned long flags;

if (!vha->hw->tgt.tgt_ops)
return;
@@ -906,15 +1072,19 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
if (!tgt)
return;

+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
if (tgt->tgt_stop) {
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
return;
}
sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
if (!sess) {
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
return;
}

if (max_gen - sess->generation < 0) {
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
"Ignoring stale deletion request for se_sess %p / sess %p"
" for port %8phC, req_gen %d, sess_gen %d\n",
@@ -927,6 +1097,7 @@ qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)

sess->local = 1;
qlt_schedule_sess_for_deletion(sess, false);
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
}

static inline int test_tgt_sess_count(struct qla_tgt *tgt)
@@ -984,10 +1155,10 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
* Lock is needed, because we still can get an incoming packet.
*/
mutex_lock(&vha->vha_tgt.tgt_mutex);
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
tgt->tgt_stop = 1;
qlt_clear_tgt_db(tgt);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
mutex_unlock(&vha->vha_tgt.tgt_mutex);
mutex_unlock(&qla_tgt_mutex);

@@ -1040,7 +1211,7 @@ void qlt_stop_phase2(struct qla_tgt *tgt)

mutex_lock(&vha->vha_tgt.tgt_mutex);
spin_lock_irqsave(&ha->hardware_lock, flags);
- while (tgt->irq_cmd_count != 0) {
+ while ((tgt->irq_cmd_count != 0) || (tgt->atio_irq_cmd_count != 0)) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
udelay(2);
spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -1309,7 +1480,7 @@ static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)

list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
if (tag == cmd->atio.u.isp24.exchange_addr) {
- cmd->state = QLA_TGT_STATE_ABORTED;
+ cmd->aborted = 1;
spin_unlock(&vha->cmd_list_lock);
return 1;
}
@@ -1351,7 +1522,7 @@ static void abort_cmds_for_lun(struct scsi_qla_host *vha,
cmd_lun = scsilun_to_int(
(struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
if (cmd_key == key && cmd_lun == lun)
- cmd->state = QLA_TGT_STATE_ABORTED;
+ cmd->aborted = 1;
}
spin_unlock(&vha->cmd_list_lock);
}
@@ -1435,6 +1606,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
uint32_t tag = abts->exchange_addr_to_abort;
uint8_t s_id[3];
int rc;
+ unsigned long flags;

if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
@@ -1462,6 +1634,7 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
s_id[1] = abts->fcp_hdr_le.s_id[1];
s_id[2] = abts->fcp_hdr_le.s_id[0];

+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
if (!sess) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
@@ -1469,12 +1642,17 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
vha->vp_idx);
rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
+
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
if (rc != 0) {
qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
false);
}
return;
}
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+

if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
@@ -1560,15 +1738,15 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)

spin_lock_irqsave(&ha->hardware_lock, flags);

- if (qla2x00_reset_active(vha) || mcmd->reset_count != ha->chip_reset) {
+ if (!vha->flags.online || mcmd->reset_count != ha->chip_reset) {
/*
- * Either a chip reset is active or this request was from
+ * Either the port is not online or this request was from
* previous life, just abort the processing.
*/
ql_dbg(ql_dbg_async, vha, 0xe100,
- "RESET-TMR active/old-count/new-count = %d/%d/%d.\n",
- qla2x00_reset_active(vha), mcmd->reset_count,
- ha->chip_reset);
+ "RESET-TMR online/active/old-count/new-count = %d/%d/%d/%d.\n",
+ vha->flags.online, qla2x00_reset_active(vha),
+ mcmd->reset_count, ha->chip_reset);
ha->tgt.tgt_ops->free_mcmd(mcmd);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return;
@@ -2510,17 +2688,22 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,

spin_lock_irqsave(&ha->hardware_lock, flags);

- if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
+ if (xmit_type == QLA_TGT_XMIT_STATUS)
+ vha->tgt_counters.core_qla_snd_status++;
+ else
+ vha->tgt_counters.core_qla_que_buf++;
+
+ if (!vha->flags.online || cmd->reset_count != ha->chip_reset) {
/*
- * Either a chip reset is active or this request was from
+ * Either the port is not online or this request was from
* previous life, just abort the processing.
*/
cmd->state = QLA_TGT_STATE_PROCESSED;
qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
ql_dbg(ql_dbg_async, vha, 0xe101,
- "RESET-RSP active/old-count/new-count = %d/%d/%d.\n",
- qla2x00_reset_active(vha), cmd->reset_count,
- ha->chip_reset);
+ "RESET-RSP online/active/old-count/new-count = %d/%d/%d/%d.\n",
+ vha->flags.online, qla2x00_reset_active(vha),
+ cmd->reset_count, ha->chip_reset);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return 0;
}
@@ -2651,18 +2834,18 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)

spin_lock_irqsave(&ha->hardware_lock, flags);

- if (qla2x00_reset_active(vha) || (cmd->reset_count != ha->chip_reset) ||
+ if (!vha->flags.online || (cmd->reset_count != ha->chip_reset) ||
(cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) {
/*
- * Either a chip reset is active or this request was from
+ * Either the port is not online or this request was from
* previous life, just abort the processing.
*/
cmd->state = QLA_TGT_STATE_NEED_DATA;
qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
ql_dbg(ql_dbg_async, vha, 0xe102,
- "RESET-XFR active/old-count/new-count = %d/%d/%d.\n",
- qla2x00_reset_active(vha), cmd->reset_count,
- ha->chip_reset);
+ "RESET-XFR online/active/old-count/new-count = %d/%d/%d/%d.\n",
+ vha->flags.online, qla2x00_reset_active(vha),
+ cmd->reset_count, ha->chip_reset);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return 0;
}
@@ -2957,12 +3140,13 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
ret = 1;
}

+ vha->tgt_counters.num_term_xchg_sent++;
pkt->entry_count = 1;
pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

ctio24 = (struct ctio7_to_24xx *)pkt;
ctio24->entry_type = CTIO_TYPE7;
- ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
+ ctio24->nport_handle = CTIO7_NHANDLE_UNRECOGNIZED;
ctio24->timeout = cpu_to_le16(QLA_TGT_TIMEOUT);
ctio24->vp_index = vha->vp_idx;
ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
@@ -3009,7 +3193,7 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
qlt_alloc_qfull_cmd(vha, atio, 0, 0);

done:
- if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
+ if (cmd && (!cmd->aborted ||
!cmd->cmd_sent_to_fw)) {
if (cmd->sg_mapped)
qlt_unmap_sg(vha, cmd);
@@ -3028,7 +3212,7 @@ static void qlt_init_term_exchange(struct scsi_qla_host *vha)
struct qla_tgt_cmd *cmd, *tcmd;

vha->hw->tgt.leak_exchg_thresh_hold =
- (vha->hw->fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
+ (vha->hw->cur_fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;

cmd = tcmd = NULL;
if (!list_empty(&vha->hw->tgt.q_full_list)) {
@@ -3058,7 +3242,7 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)

ql_dbg(ql_dbg_tgt, vha, 0xe079,
"Chip reset due to exchange starvation: %d/%d.\n",
- total_leaked, vha->hw->fw_xcb_count);
+ total_leaked, vha->hw->cur_fw_xcb_count);

if (IS_P3P_TYPE(vha->hw))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
@@ -3080,7 +3264,7 @@ void qlt_abort_cmd(struct qla_tgt_cmd *cmd)
"(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
se_cmd->tag);

- cmd->state = QLA_TGT_STATE_ABORTED;
+ cmd->aborted = 1;
cmd->cmd_flags |= BIT_6;

qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
@@ -3300,9 +3484,6 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)

ha->tgt.tgt_ops->handle_data(cmd);
return;
- } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
- ql_dbg(ql_dbg_io, vha, 0xff02,
- "HOST-ABORT: handle=%d, state=ABORTED.\n", handle);
} else {
ql_dbg(ql_dbg_io, vha, 0xff03,
"HOST-ABORT: handle=%d, state=BAD(%d).\n", handle,
@@ -3398,13 +3579,26 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,

case CTIO_PORT_LOGGED_OUT:
case CTIO_PORT_UNAVAILABLE:
+ {
+ int logged_out = (status & 0xFFFF);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
- "qla_target(%d): CTIO with PORT LOGGED "
- "OUT (29) or PORT UNAVAILABLE (28) status %x "
+ "qla_target(%d): CTIO with %s status %x "
"received (state %x, se_cmd %p)\n", vha->vp_idx,
+ (logged_out == CTIO_PORT_LOGGED_OUT) ?
+ "PORT LOGGED OUT" : "PORT UNAVAILABLE",
status, cmd->state, se_cmd);
- break;
+
+ if (logged_out && cmd->sess) {
+ /*
+ * Session is already logged out, but we need
+ * to notify initiator, who's not aware of this
+ */
+ cmd->sess->logout_on_delete = 0;
+ cmd->sess->send_els_logo = 1;
+ qlt_schedule_sess_for_deletion(cmd->sess, true);
+ }
+ break;
+ }
case CTIO_SRR_RECEIVED:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
"qla_target(%d): CTIO with SRR_RECEIVED"
@@ -3454,14 +3648,14 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
}

- /* "cmd->state == QLA_TGT_STATE_ABORTED" means
+ /* "cmd->aborted" means
* cmd is already aborted/terminated, we don't
* need to terminate again. The exchange is already
* cleaned up/freed at FW level. Just cleanup at driver
* level.
*/
if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
- (cmd->state != QLA_TGT_STATE_ABORTED)) {
+ (!cmd->aborted)) {
cmd->cmd_flags |= BIT_13;
if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
return;
@@ -3479,7 +3673,7 @@ skip_term:

ha->tgt.tgt_ops->handle_data(cmd);
return;
- } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
+ } else if (cmd->aborted) {
cmd->cmd_flags |= BIT_18;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
"Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
@@ -3491,7 +3685,7 @@ skip_term:
}

if (unlikely(status != CTIO_SUCCESS) &&
- (cmd->state != QLA_TGT_STATE_ABORTED)) {
+ !cmd->aborted) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
dump_stack();
}
@@ -3553,7 +3747,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
if (tgt->tgt_stop)
goto out_term;

- if (cmd->state == QLA_TGT_STATE_ABORTED) {
+ if (cmd->aborted) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
"cmd with tag %u is aborted\n",
cmd->atio.u.isp24.exchange_addr);
@@ -3589,9 +3783,9 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
/*
* Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
*/
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
ha->tgt.tgt_ops->put_sess(sess);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
return;

out_term:
@@ -3606,8 +3800,11 @@ out_term:

qlt_decr_num_pend_cmds(vha);
percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
- ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
+ ha->tgt.tgt_ops->put_sess(sess);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
}

static void qlt_do_work(struct work_struct *work)
@@ -3692,10 +3889,8 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
goto out_term;
}

- mutex_lock(&vha->vha_tgt.tgt_mutex);
sess = qlt_make_local_sess(vha, s_id);
/* sess has an extra creation ref. */
- mutex_unlock(&vha->vha_tgt.tgt_mutex);

if (!sess)
goto out_term;
@@ -3787,13 +3982,24 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,

cmd->cmd_in_wq = 1;
cmd->cmd_flags |= BIT_0;
+ cmd->se_cmd.cpuid = -1;

spin_lock(&vha->cmd_list_lock);
list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
spin_unlock(&vha->cmd_list_lock);

INIT_WORK(&cmd->work, qlt_do_work);
- queue_work(qla_tgt_wq, &cmd->work);
+ if (ha->msix_count) {
+ cmd->se_cmd.cpuid = ha->tgt.rspq_vector_cpuid;
+ if (cmd->atio.u.isp24.fcp_cmnd.rddata)
+ queue_work_on(smp_processor_id(), qla_tgt_wq,
+ &cmd->work);
+ else
+ queue_work_on(cmd->se_cmd.cpuid, qla_tgt_wq,
+ &cmd->work);
+ } else {
+ queue_work(qla_tgt_wq, &cmd->work);
+ }
return 0;

}
@@ -3917,13 +4123,18 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
struct qla_tgt_sess *sess;
uint32_t lun, unpacked_lun;
int fn;
+ unsigned long flags;

tgt = vha->vha_tgt.qla_tgt;

lun = a->u.isp24.fcp_cmnd.lun;
fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
+
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
a->u.isp24.fcp_hdr.s_id);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);

if (!sess) {
@@ -3987,10 +4198,14 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
struct qla_tgt_sess *sess;
int loop_id;
+ unsigned long flags;

loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);

+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+
if (sess == NULL) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
"qla_target(%d): task abort for unexisting "
@@ -4022,15 +4237,6 @@ void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
}
}

-static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a,
- struct imm_ntfy_from_isp *b)
-{
- struct imm_ntfy_from_isp tmp;
- memcpy(&tmp, a, sizeof(struct imm_ntfy_from_isp));
- memcpy(a, b, sizeof(struct imm_ntfy_from_isp));
- memcpy(b, &tmp, sizeof(struct imm_ntfy_from_isp));
-}
-
/*
* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
*
@@ -4040,11 +4246,13 @@ static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a,
*/
static struct qla_tgt_sess *
qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
- port_id_t port_id, uint16_t loop_id)
+ port_id_t port_id, uint16_t loop_id, struct qla_tgt_sess **conflict_sess)
{
struct qla_tgt_sess *sess = NULL, *other_sess;
uint64_t other_wwn;

+ *conflict_sess = NULL;
+
list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) {

other_wwn = wwn_to_u64(other_sess->port_name);
@@ -4072,9 +4280,10 @@ qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
} else {
/*
* Another wwn used to have our s_id/loop_id
- * combo - kill the session, but don't log out
+ * kill the session, but don't free the loop_id
*/
- sess->logout_on_delete = 0;
+ other_sess->keep_nport_handle = 1;
+ *conflict_sess = other_sess;
qlt_schedule_sess_for_deletion(other_sess,
true);
}
@@ -4119,7 +4328,7 @@ static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
if (cmd_key == key) {
- cmd->state = QLA_TGT_STATE_ABORTED;
+ cmd->aborted = 1;
count++;
}
}
@@ -4136,12 +4345,14 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
{
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
struct qla_hw_data *ha = vha->hw;
- struct qla_tgt_sess *sess = NULL;
+ struct qla_tgt_sess *sess = NULL, *conflict_sess = NULL;
uint64_t wwn;
port_id_t port_id;
uint16_t loop_id;
uint16_t wd3_lo;
int res = 0;
+ qlt_plogi_ack_t *pla;
+ unsigned long flags;

wwn = wwn_to_u64(iocb->u.isp24.port_name);
@@ -4165,27 +4376,20 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
/* Mark all stale commands in qla_tgt_wq for deletion */
abort_cmds_for_s_id(vha, &port_id);

- if (wwn)
+ if (wwn) {
+ spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
sess = qlt_find_sess_invalidate_other(tgt, wwn,
- port_id, loop_id);
+ port_id, loop_id, &conflict_sess);
+ spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
+ }

- if (!sess || IS_SW_RESV_ADDR(sess->s_id)) {
+ if (IS_SW_RESV_ADDR(port_id) || (!sess && !conflict_sess)) {
res = 1;
break;
}

- if (sess->plogi_ack_needed) {
- /*
- * Initiator sent another PLOGI before last PLOGI could
- * finish. Swap plogi iocbs and terminate old one
- * without acking, new one will get acked when session
- * deletion completes.
- */
- ql_log(ql_log_warn, sess->vha, 0xf094,
- "sess %p received double plogi.\n", sess);
-
- qlt_swap_imm_ntfy_iocb(iocb, &sess->tm_iocb);
-
+ pla = qlt_plogi_ack_find_add(vha, &port_id, iocb);
+ if (!pla) {
qlt_send_term_imm_notif(vha, iocb, 1);

res = 0;
@@ -4194,13 +4398,14 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,

res = 0;

- /*
- * Save immediate Notif IOCB for Ack when sess is done
- * and being deleted.
- */
- memcpy(&sess->tm_iocb, iocb, sizeof(sess->tm_iocb));
- sess->plogi_ack_needed = 1;
+ if (conflict_sess)
+ qlt_plogi_ack_link(vha, pla, conflict_sess,
+ QLT_PLOGI_LINK_CONFLICT);
+
+ if (!sess)
+ break;

+ qlt_plogi_ack_link(vha, pla, sess, QLT_PLOGI_LINK_SAME_WWN);
/*
* Under normal circumstances we want to release nport handle
* during LOGO process to avoid nport handle leaks inside FW.
@@ -4227,9 +4432,21 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
case ELS_PRLI:
wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);

- if (wwn)
+ if (wwn) {
+ spin_lock_irqsave(&tgt->ha->tgt.sess_lock, flags);
sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id,
- loop_id);
+ loop_id, &conflict_sess);
+ spin_unlock_irqrestore(&tgt->ha->tgt.sess_lock, flags);
+ }
+
+ if (conflict_sess) {
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xf09b,
+ "PRLI with conflicting sess %p port %8phC\n",
+ conflict_sess, conflict_sess->port_name);
+ qlt_send_term_imm_notif(vha, iocb, 1);
+ res = 0;
+ break;
+ }

if (sess != NULL) {
if (sess->deleted) {
@@ -4899,9 +5116,12 @@ static int __qlt_send_busy(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
request_t *pkt;
struct qla_tgt_sess *sess = NULL;
+ unsigned long flags;

+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
atio->u.isp24.fcp_hdr.s_id);
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
if (!sess) {
qlt_send_term_exchange(vha, NULL, atio, 1);
return 0;
@@ -4916,6 +5136,7 @@ static int __qlt_send_busy(struct scsi_qla_host *vha,
return -ENOMEM;
}

+ vha->tgt_counters.num_q_full_sent++;
pkt->entry_count = 1;
pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;

@@ -5129,11 +5350,12 @@ qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
/* ha->hardware_lock supposed to be held on entry */
/* called via callback from qla2xxx */
static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
- struct atio_from_isp *atio)
+ struct atio_from_isp *atio, uint8_t ha_locked)
{
struct qla_hw_data *ha = vha->hw;
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
int rc;
+ unsigned long flags;

if (unlikely(tgt == NULL)) {
ql_dbg(ql_dbg_io, vha, 0x3064,
@@ -5145,7 +5367,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
* Otherwise, some commands can stuck.
*/

- tgt->irq_cmd_count++;
+ tgt->atio_irq_cmd_count++;

switch (atio->u.raw.entry_type) {
case ATIO_TYPE7:
@@ -5155,7 +5377,11 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
"qla_target(%d): ATIO_TYPE7 "
"received with UNKNOWN exchange address, "
"sending QUEUE_FULL\n", vha->vp_idx);
+ if (!ha_locked)
+ spin_lock_irqsave(&ha->hardware_lock, flags);
qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
+ if (!ha_locked)
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
break;
}

@@ -5164,7 +5390,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
rc = qlt_chk_qfull_thresh_hold(vha, atio);
if (rc != 0) {
- tgt->irq_cmd_count--;
+ tgt->atio_irq_cmd_count--;
return;
}
rc = qlt_handle_cmd_for_atio(vha, atio);
@@ -5173,11 +5399,20 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
}
if (unlikely(rc != 0)) {
if (rc == -ESRCH) {
+ if (!ha_locked)
+ spin_lock_irqsave
+ (&ha->hardware_lock, flags);
+
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
qlt_send_busy(vha, atio, SAM_STAT_BUSY);
#else
qlt_send_term_exchange(vha, NULL, atio, 1);
#endif
+
+ if (!ha_locked)
+ spin_unlock_irqrestore
+ (&ha->hardware_lock, flags);
+
} else {
if (tgt->tgt_stop) {
ql_dbg(ql_dbg_tgt, vha, 0xe059,
@@ -5189,7 +5424,13 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
"qla_target(%d): Unable to send "
"command to target, sending BUSY "
"status.\n", vha->vp_idx);
+ if (!ha_locked)
+ spin_lock_irqsave(
+ &ha->hardware_lock, flags);
qlt_send_busy(vha, atio, SAM_STAT_BUSY);
+ if (!ha_locked)
+ spin_unlock_irqrestore(
+ &ha->hardware_lock, flags);
}
}
}
@@ -5206,7 +5447,12 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
|
|
break;
|
|
break;
|
|
}
|
|
}
|
|
ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
|
|
ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
|
|
|
|
+
|
|
|
|
+ if (!ha_locked)
|
|
|
|
+ spin_lock_irqsave(&ha->hardware_lock, flags);
|
|
qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
|
|
qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
|
|
|
|
+ if (!ha_locked)
|
|
|
|
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
|
break;
|
|
break;
|
|
}
|
|
}
|
|
|
|
|
|
@@ -5217,7 +5463,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
|
|
break;
|
|
break;
|
|
}
|
|
}
|
|
|
|
|
|
- tgt->irq_cmd_count--;
|
|
|
|
|
|
+ tgt->atio_irq_cmd_count--;
|
|
}
|
|
}
|
|
|
|
|
|
/* ha->hardware_lock supposed to be held on entry */
|
|
/* ha->hardware_lock supposed to be held on entry */
|
|
@@ -5534,12 +5780,16 @@ static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
|
|
int rc, global_resets;
|
|
int rc, global_resets;
|
|
uint16_t loop_id = 0;
|
|
uint16_t loop_id = 0;
|
|
|
|
|
|
|
|
+ mutex_lock(&vha->vha_tgt.tgt_mutex);
|
|
|
|
+
|
|
retry:
|
|
retry:
|
|
global_resets =
|
|
global_resets =
|
|
atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
|
|
atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
|
|
|
|
|
|
rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
|
|
rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
|
|
if (rc != 0) {
|
|
if (rc != 0) {
|
|
|
|
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
|
|
|
|
+
|
|
if ((s_id[0] == 0xFF) &&
|
|
if ((s_id[0] == 0xFF) &&
|
|
(s_id[1] == 0xFC)) {
|
|
(s_id[1] == 0xFC)) {
|
|
/*
|
|
/*
|
|
@@ -5550,17 +5800,27 @@ retry:
|
|
"Unable to find initiator with S_ID %x:%x:%x",
|
|
"Unable to find initiator with S_ID %x:%x:%x",
|
|
s_id[0], s_id[1], s_id[2]);
|
|
s_id[0], s_id[1], s_id[2]);
|
|
} else
|
|
} else
|
|
- ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
|
|
|
|
|
|
+ ql_log(ql_log_info, vha, 0xf071,
|
|
"qla_target(%d): Unable to find "
|
|
"qla_target(%d): Unable to find "
|
|
"initiator with S_ID %x:%x:%x",
|
|
"initiator with S_ID %x:%x:%x",
|
|
vha->vp_idx, s_id[0], s_id[1],
|
|
vha->vp_idx, s_id[0], s_id[1],
|
|
s_id[2]);
|
|
s_id[2]);
|
|
|
|
+
|
|
|
|
+ if (rc == -ENOENT) {
|
|
|
|
+ qlt_port_logo_t logo;
|
|
|
|
+ sid_to_portid(s_id, &logo.id);
|
|
|
|
+ logo.cmd_count = 1;
|
|
|
|
+ qlt_send_first_logo(vha, &logo);
|
|
|
|
+ }
|
|
|
|
+
|
|
return NULL;
|
|
return NULL;
|
|
}
|
|
}
|
|
|
|
|
|
fcport = qlt_get_port_database(vha, loop_id);
|
|
fcport = qlt_get_port_database(vha, loop_id);
|
|
- if (!fcport)
|
|
|
|
|
|
+ if (!fcport) {
|
|
|
|
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
|
|
return NULL;
|
|
return NULL;
|
|
|
|
+ }
|
|
|
|
|
|
if (global_resets !=
|
|
if (global_resets !=
|
|
atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
|
|
atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
|
|
@@ -5575,6 +5835,8 @@ retry:
|
|
|
|
|
|
sess = qlt_create_sess(vha, fcport, true);
|
|
sess = qlt_create_sess(vha, fcport, true);
|
|
|
|
|
|
|
|
+ mutex_unlock(&vha->vha_tgt.tgt_mutex);
|
|
|
|
+
|
|
kfree(fcport);
|
|
kfree(fcport);
|
|
return sess;
|
|
return sess;
|
|
}
|
|
}
|
|
@@ -5585,15 +5847,15 @@ static void qlt_abort_work(struct qla_tgt *tgt,
|
|
struct scsi_qla_host *vha = tgt->vha;
|
|
struct scsi_qla_host *vha = tgt->vha;
|
|
struct qla_hw_data *ha = vha->hw;
|
|
struct qla_hw_data *ha = vha->hw;
|
|
struct qla_tgt_sess *sess = NULL;
|
|
struct qla_tgt_sess *sess = NULL;
|
|
- unsigned long flags;
|
|
|
|
|
|
+ unsigned long flags = 0, flags2 = 0;
|
|
uint32_t be_s_id;
|
|
uint32_t be_s_id;
|
|
uint8_t s_id[3];
|
|
uint8_t s_id[3];
|
|
int rc;
|
|
int rc;
|
|
|
|
|
|
- spin_lock_irqsave(&ha->hardware_lock, flags);
|
|
|
|
|
|
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
|
|
|
|
|
|
if (tgt->tgt_stop)
|
|
if (tgt->tgt_stop)
|
|
- goto out_term;
|
|
|
|
|
|
+ goto out_term2;
|
|
|
|
|
|
s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
|
|
s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
|
|
s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
|
|
s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
|
|
@@ -5602,41 +5864,47 @@ static void qlt_abort_work(struct qla_tgt *tgt,
|
|
sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
|
|
sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
|
|
(unsigned char *)&be_s_id);
|
|
(unsigned char *)&be_s_id);
|
|
if (!sess) {
|
|
if (!sess) {
|
|
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
|
|
|
|
|
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
|
|
|
|
|
|
- mutex_lock(&vha->vha_tgt.tgt_mutex);
|
|
|
|
sess = qlt_make_local_sess(vha, s_id);
|
|
sess = qlt_make_local_sess(vha, s_id);
|
|
/* sess has got an extra creation ref */
|
|
/* sess has got an extra creation ref */
|
|
- mutex_unlock(&vha->vha_tgt.tgt_mutex);
|
|
|
|
|
|
|
|
- spin_lock_irqsave(&ha->hardware_lock, flags);
|
|
|
|
|
|
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags2);
|
|
if (!sess)
|
|
if (!sess)
|
|
- goto out_term;
|
|
|
|
|
|
+ goto out_term2;
|
|
} else {
|
|
} else {
|
|
if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
|
|
if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
|
|
sess = NULL;
|
|
sess = NULL;
|
|
- goto out_term;
|
|
|
|
|
|
+ goto out_term2;
|
|
}
|
|
}
|
|
|
|
|
|
kref_get(&sess->se_sess->sess_kref);
|
|
kref_get(&sess->se_sess->sess_kref);
|
|
}
|
|
}
|
|
|
|
|
|
|
|
+ spin_lock_irqsave(&ha->hardware_lock, flags);
|
|
|
|
+
|
|
if (tgt->tgt_stop)
|
|
if (tgt->tgt_stop)
|
|
goto out_term;
|
|
goto out_term;
|
|
|
|
|
|
rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
|
|
rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
|
|
if (rc != 0)
|
|
if (rc != 0)
|
|
goto out_term;
|
|
goto out_term;
|
|
|
|
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
|
|
|
|
|
ha->tgt.tgt_ops->put_sess(sess);
|
|
ha->tgt.tgt_ops->put_sess(sess);
|
|
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
|
|
|
|
|
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
|
|
return;
|
|
return;
|
|
|
|
|
|
|
|
+out_term2:
|
|
|
|
+ spin_lock_irqsave(&ha->hardware_lock, flags);
|
|
|
|
+
|
|
out_term:
|
|
out_term:
|
|
qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
|
|
qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
|
|
|
|
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
|
|
|
+
|
|
if (sess)
|
|
if (sess)
|
|
ha->tgt.tgt_ops->put_sess(sess);
|
|
ha->tgt.tgt_ops->put_sess(sess);
|
|
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
|
|
|
|
|
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
|
|
}
|
|
}
|
|
|
|
|
|
static void qlt_tmr_work(struct qla_tgt *tgt,
|
|
static void qlt_tmr_work(struct qla_tgt *tgt,
|
|
@@ -5653,7 +5921,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
|
|
int fn;
|
|
int fn;
|
|
void *iocb;
|
|
void *iocb;
|
|
|
|
|
|
- spin_lock_irqsave(&ha->hardware_lock, flags);
|
|
|
|
|
|
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
|
|
|
|
|
|
if (tgt->tgt_stop)
|
|
if (tgt->tgt_stop)
|
|
goto out_term;
|
|
goto out_term;
|
|
@@ -5661,14 +5929,12 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
|
|
s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
|
|
s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
|
|
sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
|
|
sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
|
|
if (!sess) {
|
|
if (!sess) {
|
|
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
|
|
|
|
|
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
|
|
|
|
|
|
- mutex_lock(&vha->vha_tgt.tgt_mutex);
|
|
|
|
sess = qlt_make_local_sess(vha, s_id);
|
|
sess = qlt_make_local_sess(vha, s_id);
|
|
/* sess has got an extra creation ref */
|
|
/* sess has got an extra creation ref */
|
|
- mutex_unlock(&vha->vha_tgt.tgt_mutex);
|
|
|
|
|
|
|
|
- spin_lock_irqsave(&ha->hardware_lock, flags);
|
|
|
|
|
|
+ spin_lock_irqsave(&ha->tgt.sess_lock, flags);
|
|
if (!sess)
|
|
if (!sess)
|
|
goto out_term;
|
|
goto out_term;
|
|
} else {
|
|
} else {
|
|
@@ -5690,14 +5956,14 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
|
|
goto out_term;
|
|
goto out_term;
|
|
|
|
|
|
ha->tgt.tgt_ops->put_sess(sess);
|
|
ha->tgt.tgt_ops->put_sess(sess);
|
|
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
|
|
|
|
|
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
|
|
return;
|
|
return;
|
|
|
|
|
|
out_term:
|
|
out_term:
|
|
- qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
|
|
|
|
|
|
+ qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 0);
|
|
if (sess)
|
|
if (sess)
|
|
ha->tgt.tgt_ops->put_sess(sess);
|
|
ha->tgt.tgt_ops->put_sess(sess);
|
|
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
|
|
|
|
|
+ spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
|
|
}
|
|
}
|
|
|
|
|
|
static void qlt_sess_work_fn(struct work_struct *work)
|
|
static void qlt_sess_work_fn(struct work_struct *work)
|
|
@@ -6002,6 +6268,7 @@ qlt_enable_vha(struct scsi_qla_host *vha)
|
|
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
|
|
struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
|
|
unsigned long flags;
|
|
unsigned long flags;
|
|
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
|
|
scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
|
|
|
|
+ int rspq_ent = QLA83XX_RSPQ_MSIX_ENTRY_NUMBER;
|
|
|
|
|
|
if (!tgt) {
|
|
if (!tgt) {
|
|
ql_dbg(ql_dbg_tgt, vha, 0xe069,
|
|
ql_dbg(ql_dbg_tgt, vha, 0xe069,
|
|
@@ -6020,6 +6287,17 @@ qlt_enable_vha(struct scsi_qla_host *vha)
|
|
qla24xx_disable_vp(vha);
|
|
qla24xx_disable_vp(vha);
|
|
qla24xx_enable_vp(vha);
|
|
qla24xx_enable_vp(vha);
|
|
} else {
|
|
} else {
|
|
|
|
+ if (ha->msix_entries) {
|
|
|
|
+ ql_dbg(ql_dbg_tgt, vha, 0xffff,
|
|
|
|
+ "%s: host%ld : vector %d cpu %d\n",
|
|
|
|
+ __func__, vha->host_no,
|
|
|
|
+ ha->msix_entries[rspq_ent].vector,
|
|
|
|
+ ha->msix_entries[rspq_ent].cpuid);
|
|
|
|
+
|
|
|
|
+ ha->tgt.rspq_vector_cpuid =
|
|
|
|
+ ha->msix_entries[rspq_ent].cpuid;
|
|
|
|
+ }
|
|
|
|
+
|
|
set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
|
|
set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
|
|
qla2xxx_wake_dpc(base_vha);
|
|
qla2xxx_wake_dpc(base_vha);
|
|
qla2x00_wait_for_hba_online(base_vha);
|
|
qla2x00_wait_for_hba_online(base_vha);
|
|
@@ -6131,7 +6409,7 @@ qlt_init_atio_q_entries(struct scsi_qla_host *vha)
|
|
* @ha: SCSI driver HA context
|
|
* @ha: SCSI driver HA context
|
|
*/
|
|
*/
|
|
void
|
|
void
|
|
-qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
|
|
|
|
|
|
+qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
|
|
{
|
|
{
|
|
struct qla_hw_data *ha = vha->hw;
|
|
struct qla_hw_data *ha = vha->hw;
|
|
struct atio_from_isp *pkt;
|
|
struct atio_from_isp *pkt;
|
|
@@ -6144,7 +6422,8 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
|
|
pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
|
|
pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
|
|
cnt = pkt->u.raw.entry_count;
|
|
cnt = pkt->u.raw.entry_count;
|
|
|
|
|
|
- qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);
|
|
|
|
|
|
+ qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt,
|
|
|
|
+ ha_locked);
|
|
|
|
|
|
for (i = 0; i < cnt; i++) {
|
|
for (i = 0; i < cnt; i++) {
|
|
ha->tgt.atio_ring_index++;
|
|
ha->tgt.atio_ring_index++;
|
|
@@ -6265,10 +6544,21 @@ qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
|
|
{
|
|
{
|
|
struct qla_hw_data *ha = vha->hw;
|
|
struct qla_hw_data *ha = vha->hw;
|
|
|
|
|
|
|
|
+ if (!QLA_TGT_MODE_ENABLED())
|
|
|
|
+ return;
|
|
|
|
+
|
|
if (ha->tgt.node_name_set) {
|
|
if (ha->tgt.node_name_set) {
|
|
memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
|
|
memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
|
|
icb->firmware_options_1 |= cpu_to_le32(BIT_14);
|
|
icb->firmware_options_1 |= cpu_to_le32(BIT_14);
|
|
}
|
|
}
|
|
|
|
+
|
|
|
|
+ /* disable ZIO at start time. */
|
|
|
|
+ if (!vha->flags.init_done) {
|
|
|
|
+ uint32_t tmp;
|
|
|
|
+ tmp = le32_to_cpu(icb->firmware_options_2);
|
|
|
|
+ tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
|
|
|
|
+ icb->firmware_options_2 = cpu_to_le32(tmp);
|
|
|
|
+ }
|
|
}
|
|
}
|
|
|
|
|
|
void
|
|
void
|
|
@@ -6359,6 +6649,15 @@ qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
|
|
memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
|
|
memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
|
|
icb->firmware_options_1 |= cpu_to_le32(BIT_14);
|
|
icb->firmware_options_1 |= cpu_to_le32(BIT_14);
|
|
}
|
|
}
|
|
|
|
+
|
|
|
|
+ /* disable ZIO at start time. */
|
|
|
|
+ if (!vha->flags.init_done) {
|
|
|
|
+ uint32_t tmp;
|
|
|
|
+ tmp = le32_to_cpu(icb->firmware_options_2);
|
|
|
|
+ tmp &= ~(BIT_3 | BIT_2 | BIT_1 | BIT_0);
|
|
|
|
+ icb->firmware_options_2 = cpu_to_le32(tmp);
|
|
|
|
+ }
|
|
|
|
+
|
|
}
|
|
}
|
|
|
|
|
|
void
|
|
void
|
|
@@ -6428,16 +6727,59 @@ qla83xx_msix_atio_q(int irq, void *dev_id)
|
|
ha = rsp->hw;
|
|
ha = rsp->hw;
|
|
vha = pci_get_drvdata(ha->pdev);
|
|
vha = pci_get_drvdata(ha->pdev);
|
|
|
|
|
|
- spin_lock_irqsave(&ha->hardware_lock, flags);
|
|
|
|
|
|
+ spin_lock_irqsave(&ha->tgt.atio_lock, flags);
|
|
|
|
|
|
- qlt_24xx_process_atio_queue(vha);
|
|
|
|
- qla24xx_process_response_queue(vha, rsp);
|
|
|
|
|
|
+ qlt_24xx_process_atio_queue(vha, 0);
|
|
|
|
|
|
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
|
|
|
|
|
+ spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
|
|
|
|
|
|
return IRQ_HANDLED;
|
|
return IRQ_HANDLED;
|
|
}
|
|
}
|
|
|
|
|
|
|
|
+static void
|
|
|
|
+qlt_handle_abts_recv_work(struct work_struct *work)
|
|
|
|
+{
|
|
|
|
+ struct qla_tgt_sess_op *op = container_of(work,
|
|
|
|
+ struct qla_tgt_sess_op, work);
|
|
|
|
+ scsi_qla_host_t *vha = op->vha;
|
|
|
|
+ struct qla_hw_data *ha = vha->hw;
|
|
|
|
+ unsigned long flags;
|
|
|
|
+
|
|
|
|
+ if (qla2x00_reset_active(vha) || (op->chip_reset != ha->chip_reset))
|
|
|
|
+ return;
|
|
|
|
+
|
|
|
|
+ spin_lock_irqsave(&ha->tgt.atio_lock, flags);
|
|
|
|
+ qlt_24xx_process_atio_queue(vha, 0);
|
|
|
|
+ spin_unlock_irqrestore(&ha->tgt.atio_lock, flags);
|
|
|
|
+
|
|
|
|
+ spin_lock_irqsave(&ha->hardware_lock, flags);
|
|
|
|
+ qlt_response_pkt_all_vps(vha, (response_t *)&op->atio);
|
|
|
|
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
|
|
|
|
+}
|
|
|
|
+
|
|
|
|
+void
|
|
|
|
+qlt_handle_abts_recv(struct scsi_qla_host *vha, response_t *pkt)
|
|
|
|
+{
|
|
|
|
+ struct qla_tgt_sess_op *op;
|
|
|
|
+
|
|
|
|
+ op = kzalloc(sizeof(*op), GFP_ATOMIC);
|
|
|
|
+
|
|
|
|
+ if (!op) {
|
|
|
|
+ /* do not reach for ATIO queue here. This is best effort err
|
|
|
|
+ * recovery at this point.
|
|
|
|
+ */
|
|
|
|
+ qlt_response_pkt_all_vps(vha, pkt);
|
|
|
|
+ return;
|
|
|
|
+ }
|
|
|
|
+
|
|
|
|
+ memcpy(&op->atio, pkt, sizeof(*pkt));
|
|
|
|
+ op->vha = vha;
|
|
|
|
+ op->chip_reset = vha->hw->chip_reset;
|
|
|
|
+ INIT_WORK(&op->work, qlt_handle_abts_recv_work);
|
|
|
|
+ queue_work(qla_tgt_wq, &op->work);
|
|
|
|
+ return;
|
|
|
|
+}
|
|
|
|
+
|
|
int
|
|
int
|
|
qlt_mem_alloc(struct qla_hw_data *ha)
|
|
qlt_mem_alloc(struct qla_hw_data *ha)
|
|
{
|
|
{
|
|
@@ -6532,13 +6874,25 @@ int __init qlt_init(void)
|
|
return -ENOMEM;
|
|
return -ENOMEM;
|
|
}
|
|
}
|
|
|
|
|
|
|
|
+ qla_tgt_plogi_cachep = kmem_cache_create("qla_tgt_plogi_cachep",
|
|
|
|
+ sizeof(qlt_plogi_ack_t),
|
|
|
|
+ __alignof__(qlt_plogi_ack_t),
|
|
|
|
+ 0, NULL);
|
|
|
|
+
|
|
|
|
+ if (!qla_tgt_plogi_cachep) {
|
|
|
|
+ ql_log(ql_log_fatal, NULL, 0xe06d,
|
|
|
|
+ "kmem_cache_create for qla_tgt_plogi_cachep failed\n");
|
|
|
|
+ ret = -ENOMEM;
|
|
|
|
+ goto out_mgmt_cmd_cachep;
|
|
|
|
+ }
|
|
|
|
+
|
|
qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
|
|
qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
|
|
mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
|
|
mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
|
|
if (!qla_tgt_mgmt_cmd_mempool) {
|
|
if (!qla_tgt_mgmt_cmd_mempool) {
|
|
ql_log(ql_log_fatal, NULL, 0xe06e,
|
|
ql_log(ql_log_fatal, NULL, 0xe06e,
|
|
"mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
|
|
"mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
|
|
ret = -ENOMEM;
|
|
ret = -ENOMEM;
|
|
- goto out_mgmt_cmd_cachep;
|
|
|
|
|
|
+ goto out_plogi_cachep;
|
|
}
|
|
}
|
|
|
|
|
|
qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
|
|
qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
|
|
@@ -6555,6 +6909,8 @@ int __init qlt_init(void)
|
|
|
|
|
|
out_cmd_mempool:
|
|
out_cmd_mempool:
|
|
mempool_destroy(qla_tgt_mgmt_cmd_mempool);
|
|
mempool_destroy(qla_tgt_mgmt_cmd_mempool);
|
|
|
|
+out_plogi_cachep:
|
|
|
|
+ kmem_cache_destroy(qla_tgt_plogi_cachep);
|
|
out_mgmt_cmd_cachep:
|
|
out_mgmt_cmd_cachep:
|
|
kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
|
|
kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
|
|
return ret;
|
|
return ret;
|
|
@@ -6567,5 +6923,6 @@ void qlt_exit(void)
|
|
|
|
|
|
destroy_workqueue(qla_tgt_wq);
|
|
destroy_workqueue(qla_tgt_wq);
|
|
mempool_destroy(qla_tgt_mgmt_cmd_mempool);
|
|
mempool_destroy(qla_tgt_mgmt_cmd_mempool);
|
|
|
|
+ kmem_cache_destroy(qla_tgt_plogi_cachep);
|
|
kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
|
|
kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
|
|
}
|
|
}
|