@@ -3532,20 +3532,63 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
 	/* Error everything on txq and txcmplq
 	 * First do the txq.
 	 */
-	spin_lock_irq(&phba->hbalock);
-	list_splice_init(&pring->txq, &completions);
+	if (phba->sli_rev >= LPFC_SLI_REV4) {
+		spin_lock_irq(&pring->ring_lock);
+		list_splice_init(&pring->txq, &completions);
+		pring->txq_cnt = 0;
+		spin_unlock_irq(&pring->ring_lock);
 
-	/* Next issue ABTS for everything on the txcmplq */
-	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
-		lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+		spin_lock_irq(&phba->hbalock);
+		/* Next issue ABTS for everything on the txcmplq */
+		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+		spin_unlock_irq(&phba->hbalock);
+	} else {
+		spin_lock_irq(&phba->hbalock);
+		list_splice_init(&pring->txq, &completions);
+		pring->txq_cnt = 0;
 
-	spin_unlock_irq(&phba->hbalock);
+		/* Next issue ABTS for everything on the txcmplq */
+		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
+		spin_unlock_irq(&phba->hbalock);
+	}
 
 	/* Cancel all the IOCBs from the completions list */
 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
 			      IOERR_SLI_ABORTED);
 }
 
+/**
+ * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function aborts all iocbs in FCP rings and frees all the iocb
+ * objects in txq. This function issues an abort iocb for all the iocb commands
+ * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
+ * the return of this function. The caller is not required to hold any locks.
+ **/
+void
+lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring;
+	uint32_t i;
+
+	/* Look on all the FCP Rings for the iotag */
+	if (phba->sli_rev >= LPFC_SLI_REV4) {
+		for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
+			pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
+			lpfc_sli_abort_iocb_ring(phba, pring);
+		}
+	} else {
+		pring = &psli->ring[psli->fcp_ring];
+		lpfc_sli_abort_iocb_ring(phba, pring);
+	}
+}
+
+
 /**
  * lpfc_sli_flush_fcp_rings - flush all iocbs in the fcp ring
  * @phba: Pointer to HBA context object.
@@ -3563,28 +3606,55 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
 	LIST_HEAD(txcmplq);
 	struct lpfc_sli *psli = &phba->sli;
 	struct lpfc_sli_ring *pring;
-
-	/* Currently, only one fcp ring */
-	pring = &psli->ring[psli->fcp_ring];
+	uint32_t i;
 
 	spin_lock_irq(&phba->hbalock);
-	/* Retrieve everything on txq */
-	list_splice_init(&pring->txq, &txq);
-
-	/* Retrieve everything on the txcmplq */
-	list_splice_init(&pring->txcmplq, &txcmplq);
-
 	/* Indicate the I/O queues are flushed */
 	phba->hba_flag |= HBA_FCP_IOQ_FLUSH;
 	spin_unlock_irq(&phba->hbalock);
 
-	/* Flush the txq */
-	lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
-			      IOERR_SLI_DOWN);
+	/* Look on all the FCP Rings for the iotag */
+	if (phba->sli_rev >= LPFC_SLI_REV4) {
+		for (i = 0; i < phba->cfg_fcp_io_channel; i++) {
+			pring = &psli->ring[i + MAX_SLI3_CONFIGURED_RINGS];
+
+			spin_lock_irq(&pring->ring_lock);
+			/* Retrieve everything on txq */
+			list_splice_init(&pring->txq, &txq);
+			/* Retrieve everything on the txcmplq */
+			list_splice_init(&pring->txcmplq, &txcmplq);
+			pring->txq_cnt = 0;
+			pring->txcmplq_cnt = 0;
+			spin_unlock_irq(&pring->ring_lock);
+
+			/* Flush the txq */
+			lpfc_sli_cancel_iocbs(phba, &txq,
+					      IOSTAT_LOCAL_REJECT,
+					      IOERR_SLI_DOWN);
+			/* Flush the txcmpq */
+			lpfc_sli_cancel_iocbs(phba, &txcmplq,
+					      IOSTAT_LOCAL_REJECT,
+					      IOERR_SLI_DOWN);
+		}
+	} else {
+		pring = &psli->ring[psli->fcp_ring];
 
-	/* Flush the txcmpq */
-	lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
-			      IOERR_SLI_DOWN);
+		spin_lock_irq(&phba->hbalock);
+		/* Retrieve everything on txq */
+		list_splice_init(&pring->txq, &txq);
+		/* Retrieve everything on the txcmplq */
+		list_splice_init(&pring->txcmplq, &txcmplq);
+		pring->txq_cnt = 0;
+		pring->txcmplq_cnt = 0;
+		spin_unlock_irq(&phba->hbalock);
+
+		/* Flush the txq */
+		lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
+				      IOERR_SLI_DOWN);
+		/* Flush the txcmpq */
+		lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
+				      IOERR_SLI_DOWN);
+	}
 }
 
 /**
@@ -6722,7 +6792,6 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
 	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
 	MAILBOX_t *mb = &pmbox->u.mb;
 	struct lpfc_sli *psli = &phba->sli;
-	struct lpfc_sli_ring *pring;
 
 	/* If the mailbox completed, process the completion and return */
 	if (lpfc_sli4_process_missed_mbox_completions(phba))
@@ -6764,8 +6833,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
 	spin_unlock_irq(&phba->hbalock);
 
-	pring = &psli->ring[psli->fcp_ring];
-	lpfc_sli_abort_iocb_ring(phba, pring);
+	lpfc_sli_abort_fcp_rings(phba);
 
 	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
 			"0345 Resetting board due to mailbox timeout\n");
@@ -9804,43 +9872,6 @@ abort_iotag_exit:
 	return retval;
 }
 
-/**
- * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
- * @phba: Pointer to HBA context object.
- * @pring: Pointer to driver SLI ring object.
- *
- * This function aborts all iocbs in the given ring and frees all the iocb
- * objects in txq. This function issues abort iocbs unconditionally for all
- * the iocb commands in txcmplq. The iocbs in the txcmplq is not guaranteed
- * to complete before the return of this function. The caller is not required
- * to hold any locks.
- **/
-static void
-lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
-{
-	LIST_HEAD(completions);
-	struct lpfc_iocbq *iocb, *next_iocb;
-
-	if (pring->ringno == LPFC_ELS_RING)
-		lpfc_fabric_abort_hba(phba);
-
-	spin_lock_irq(&phba->hbalock);
-
-	/* Take off all the iocbs on txq for cancelling */
-	list_splice_init(&pring->txq, &completions);
-	pring->txq_cnt = 0;
-
-	/* Next issue ABTS for everything on the txcmplq */
-	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
-		lpfc_sli_abort_iotag_issue(phba, pring, iocb);
-
-	spin_unlock_irq(&phba->hbalock);
-
-	/* Cancel all the IOCBs from the completions list */
-	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
-			      IOERR_SLI_ABORTED);
-}
-
 /**
  * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
  * @phba: pointer to lpfc HBA data structure.
@@ -9856,7 +9887,7 @@ lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
 
 	for (i = 0; i < psli->num_rings; i++) {
 		pring = &psli->ring[i];
-		lpfc_sli_iocb_ring_abort(phba, pring);
+		lpfc_sli_abort_iocb_ring(phba, pring);
 	}
 }
 