@@ -1,9 +1,11 @@
/*******************************************************************
* This file is part of the Emulex Linux Device Driver for *
* Fibre Channel Host Bus Adapters. *
+ * Copyright (C) 2017 Broadcom. All Rights Reserved. The term *
+ * "Broadcom" refers to Broadcom Limited and/or its subsidiaries. *
* Copyright (C) 2004-2016 Emulex.  All rights reserved. *
* EMULEX and SLI are trademarks of Emulex. *
- * www.emulex.com *
+ * www.broadcom.com *
* Portions Copyright (C) 2004-2005 Christoph Hellwig *
* *
* This program is free software; you can redistribute it and/or *
@@ -34,6 +36,7 @@
#include <linux/firmware.h>
#include <linux/miscdevice.h>
#include <linux/percpu.h>
+#include <linux/msi.h>

#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
@@ -46,8 +49,9 @@
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
-#include "lpfc_scsi.h"
#include "lpfc.h"
+#include "lpfc_scsi.h"
+#include "lpfc_nvme.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
@@ -71,6 +75,7 @@ static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
static int lpfc_setup_endian_order(struct lpfc_hba *);
static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
static void lpfc_free_els_sgl_list(struct lpfc_hba *);
+static void lpfc_free_nvmet_sgl_list(struct lpfc_hba *);
static void lpfc_init_sgl_list(struct lpfc_hba *);
static int lpfc_init_active_sgl_array(struct lpfc_hba *);
static void lpfc_free_active_sgl(struct lpfc_hba *);
@@ -86,6 +91,7 @@ static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
static DEFINE_IDR(lpfc_hba_index);
+#define LPFC_NVMET_BUF_POST 254

/**
* lpfc_config_port_prep - Perform lpfc initialization prior to config port
@@ -499,12 +505,10 @@ lpfc_config_port_post(struct lpfc_hba *phba)
phba->link_state = LPFC_LINK_DOWN;

/* Only process IOCBs on ELS ring till hba_state is READY */
- if (psli->ring[psli->extra_ring].sli.sli3.cmdringaddr)
- psli->ring[psli->extra_ring].flag |= LPFC_STOP_IOCB_EVENT;
- if (psli->ring[psli->fcp_ring].sli.sli3.cmdringaddr)
- psli->ring[psli->fcp_ring].flag |= LPFC_STOP_IOCB_EVENT;
- if (psli->ring[psli->next_ring].sli.sli3.cmdringaddr)
- psli->ring[psli->next_ring].flag |= LPFC_STOP_IOCB_EVENT;
+ if (psli->sli3_ring[LPFC_EXTRA_RING].sli.sli3.cmdringaddr)
+ psli->sli3_ring[LPFC_EXTRA_RING].flag |= LPFC_STOP_IOCB_EVENT;
+ if (psli->sli3_ring[LPFC_FCP_RING].sli.sli3.cmdringaddr)
+ psli->sli3_ring[LPFC_FCP_RING].flag |= LPFC_STOP_IOCB_EVENT;

/* Post receive buffers for desired rings */
if (phba->sli_rev != 3)
@@ -892,7 +896,7 @@ lpfc_hba_free_post_buf(struct lpfc_hba *phba)
lpfc_sli_hbqbuf_free_all(phba);
else {
/* Cleanup preposted buffers on the ELS ring */
- pring = &psli->ring[LPFC_ELS_RING];
+ pring = &psli->sli3_ring[LPFC_ELS_RING];
spin_lock_irq(&phba->hbalock);
list_splice_init(&pring->postbufq, &buflist);
spin_unlock_irq(&phba->hbalock);
@@ -925,32 +929,43 @@ static void
lpfc_hba_clean_txcmplq(struct lpfc_hba *phba)
{
struct lpfc_sli *psli = &phba->sli;
+ struct lpfc_queue *qp = NULL;
struct lpfc_sli_ring *pring;
LIST_HEAD(completions);
int i;

- for (i = 0; i < psli->num_rings; i++) {
- pring = &psli->ring[i];
- if (phba->sli_rev >= LPFC_SLI_REV4)
- spin_lock_irq(&pring->ring_lock);
- else
+ if (phba->sli_rev != LPFC_SLI_REV4) {
+ for (i = 0; i < psli->num_rings; i++) {
+ pring = &psli->sli3_ring[i];
spin_lock_irq(&phba->hbalock);
- /* At this point in time the HBA is either reset or DOA. Either
- * way, nothing should be on txcmplq as it will NEVER complete.
- */
- list_splice_init(&pring->txcmplq, &completions);
- pring->txcmplq_cnt = 0;
-
- if (phba->sli_rev >= LPFC_SLI_REV4)
- spin_unlock_irq(&pring->ring_lock);
- else
+ /* At this point in time the HBA is either reset or DOA
+ * Nothing should be on txcmplq as it will
+ * NEVER complete.
+ */
+ list_splice_init(&pring->txcmplq, &completions);
+ pring->txcmplq_cnt = 0;
spin_unlock_irq(&phba->hbalock);

+ lpfc_sli_abort_iocb_ring(phba, pring);
+ }
/* Cancel all the IOCBs from the completions list */
- lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
- IOERR_SLI_ABORTED);
+ lpfc_sli_cancel_iocbs(phba, &completions,
+ IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
+ return;
+ }
+ list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
+ pring = qp->pring;
+ if (!pring)
+ continue;
+ spin_lock_irq(&pring->ring_lock);
+ list_splice_init(&pring->txcmplq, &completions);
+ pring->txcmplq_cnt = 0;
+ spin_unlock_irq(&pring->ring_lock);
lpfc_sli_abort_iocb_ring(phba, pring);
}
+ /* Cancel all the IOCBs from the completions list */
+ lpfc_sli_cancel_iocbs(phba, &completions,
+ IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
}

/**
@@ -989,43 +1004,58 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
{
struct lpfc_scsi_buf *psb, *psb_next;
LIST_HEAD(aborts);
+ LIST_HEAD(nvme_aborts);
unsigned long iflag = 0;
struct lpfc_sglq *sglq_entry = NULL;
- struct lpfc_sli *psli = &phba->sli;
- struct lpfc_sli_ring *pring;

- lpfc_hba_free_post_buf(phba);
+
+ lpfc_sli_hbqbuf_free_all(phba);
lpfc_hba_clean_txcmplq(phba);
- pring = &psli->ring[LPFC_ELS_RING];

/* At this point in time the HBA is either reset or DOA. Either
* way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
- * on the lpfc_sgl_list so that it can either be freed if the
+ * on the lpfc_els_sgl_list so that it can either be freed if the
* driver is unloading or reposted if the driver is restarting
* the port.
*/
- spin_lock_irq(&phba->hbalock); /* required for lpfc_sgl_list and */
+ spin_lock_irq(&phba->hbalock); /* required for lpfc_els_sgl_list and */
/* scsl_buf_list */
- /* abts_sgl_list_lock required because worker thread uses this
+ /* sgl_list_lock required because worker thread uses this
* list.
*/
- spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
list_for_each_entry(sglq_entry,
&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
sglq_entry->state = SGL_FREED;
+ list_for_each_entry(sglq_entry,
+ &phba->sli4_hba.lpfc_abts_nvmet_sgl_list, list)
+ sglq_entry->state = SGL_FREED;

- spin_lock(&pring->ring_lock);
list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
- &phba->sli4_hba.lpfc_sgl_list);
- spin_unlock(&pring->ring_lock);
- spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
+ &phba->sli4_hba.lpfc_els_sgl_list);
+
+ if (phba->sli4_hba.nvme_wq)
+ list_splice_init(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list,
+ &phba->sli4_hba.lpfc_nvmet_sgl_list);
+
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
/* abts_scsi_buf_list_lock required because worker thread uses this
* list.
*/
- spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
- list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
- &aborts);
- spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
+ spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
+ &aborts);
+ spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ }
+
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ list_splice_init(&phba->sli4_hba.lpfc_abts_nvme_buf_list,
+ &nvme_aborts);
+ spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ }
+
spin_unlock_irq(&phba->hbalock);

list_for_each_entry_safe(psb, psb_next, &aborts, list) {
@@ -1036,6 +1066,14 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
list_splice(&aborts, &phba->lpfc_scsi_buf_list_put);
spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, iflag);

+ list_for_each_entry_safe(psb, psb_next, &nvme_aborts, list) {
+ psb->pCmd = NULL;
+ psb->status = IOSTAT_SUCCESS;
+ }
+ spin_lock_irqsave(&phba->nvme_buf_list_put_lock, iflag);
+ list_splice(&nvme_aborts, &phba->lpfc_nvme_buf_list_put);
+ spin_unlock_irqrestore(&phba->nvme_buf_list_put_lock, iflag);
+
lpfc_sli4_free_sp_events(phba);
return 0;
}
@@ -1829,7 +1867,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
* @phba: pointer to lpfc hba data structure.
*
* This routine is invoked from the worker thread to handle a HBA host
- * attention link event.
+ * attention link event. SLI3 only.
**/
void
lpfc_handle_latt(struct lpfc_hba *phba)
@@ -1867,7 +1905,7 @@ lpfc_handle_latt(struct lpfc_hba *phba)
pmb->mbox_cmpl = lpfc_mbx_cmpl_read_topology;
pmb->vport = vport;
/* Block ELS IOCBs until we have processed this mbox command */
- phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
+ phba->sli.sli3_ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
rc = lpfc_sli_issue_mbox (phba, pmb, MBX_NOWAIT);
if (rc == MBX_NOT_FINISHED) {
rc = 4;
@@ -1883,7 +1921,7 @@ lpfc_handle_latt(struct lpfc_hba *phba)
return;

lpfc_handle_latt_free_mbuf:
- phba->sli.ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
+ phba->sli.sli3_ring[LPFC_ELS_RING].flag &= ~LPFC_STOP_IOCB_EVENT;
lpfc_mbuf_free(phba, mp->virt, mp->phys);
lpfc_handle_latt_free_mp:
kfree(mp);
@@ -2441,7 +2479,7 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
*
* This routine posts initial receive IOCB buffers to the ELS ring. The
* current number of initial IOCB buffers specified by LPFC_BUF_RING0 is
- * set to 64 IOCBs.
+ * set to 64 IOCBs. SLI3 only.
*
* Return codes
* 0 - success (currently always success)
@@ -2452,7 +2490,7 @@ lpfc_post_rcv_buf(struct lpfc_hba *phba)
struct lpfc_sli *psli = &phba->sli;

/* Ring 0, ELS / CT buffers */
- lpfc_post_buffer(phba, &psli->ring[LPFC_ELS_RING], LPFC_BUF_RING0);
+ lpfc_post_buffer(phba, &psli->sli3_ring[LPFC_ELS_RING], LPFC_BUF_RING0);
/* Ring 2 - FCP no buffers needed */

return 0;
@@ -2640,6 +2678,13 @@ lpfc_cleanup(struct lpfc_vport *vport)
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RECOVERY);

+ if (ndlp->nlp_fc4_type & NLP_FC4_NVME) {
+ /* Remove the NVME transport reference now and
+ * continue to remove the node.
+ */
+ lpfc_nlp_put(ndlp);
+ }
+
lpfc_disc_state_machine(vport, ndlp, NULL,
NLP_EVT_DEVICE_RM);
}
@@ -2894,11 +2939,6 @@ lpfc_online(struct lpfc_hba *phba)

lpfc_block_mgmt_io(phba, LPFC_MBX_WAIT);

- if (!lpfc_sli_queue_setup(phba)) {
- lpfc_unblock_mgmt_io(phba);
- return 1;
- }
-
if (phba->sli_rev == LPFC_SLI_REV4) {
if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
lpfc_unblock_mgmt_io(phba);
@@ -2909,6 +2949,7 @@ lpfc_online(struct lpfc_hba *phba)
vpis_cleared = true;
spin_unlock_irq(&phba->hbalock);
} else {
+ lpfc_sli_queue_init(phba);
if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
lpfc_unblock_mgmt_io(phba);
return 1;
@@ -3098,7 +3139,9 @@ static void
lpfc_scsi_free(struct lpfc_hba *phba)
{
struct lpfc_scsi_buf *sb, *sb_next;
- struct lpfc_iocbq *io, *io_next;
+
+ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
+ return;

spin_lock_irq(&phba->hbalock);

@@ -3108,7 +3151,7 @@ lpfc_scsi_free(struct lpfc_hba *phba)
list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_put,
list) {
list_del(&sb->list);
- pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
+ pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
sb->dma_handle);
kfree(sb);
phba->total_scsi_bufs--;
@@ -3119,25 +3162,58 @@ lpfc_scsi_free(struct lpfc_hba *phba)
list_for_each_entry_safe(sb, sb_next, &phba->lpfc_scsi_buf_list_get,
list) {
list_del(&sb->list);
- pci_pool_free(phba->lpfc_scsi_dma_buf_pool, sb->data,
+ pci_pool_free(phba->lpfc_sg_dma_buf_pool, sb->data,
sb->dma_handle);
kfree(sb);
phba->total_scsi_bufs--;
}
spin_unlock(&phba->scsi_buf_list_get_lock);
+ spin_unlock_irq(&phba->hbalock);
+}
+/**
+ * lpfc_nvme_free - Free all the NVME buffers and IOCBs from driver lists
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to free all the NVME buffers and IOCBs from the driver
+ * list back to kernel. It is called from lpfc_pci_remove_one to free
+ * the internal resources before the device is removed from the system.
+ **/
+static void
+lpfc_nvme_free(struct lpfc_hba *phba)
+{
+ struct lpfc_nvme_buf *lpfc_ncmd, *lpfc_ncmd_next;

- /* Release all the lpfc_iocbq entries maintained by this host. */
- list_for_each_entry_safe(io, io_next, &phba->lpfc_iocb_list, list) {
- list_del(&io->list);
- kfree(io);
- phba->total_iocbq_bufs--;
- }
+ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
+ return;
+
+ spin_lock_irq(&phba->hbalock);

+ /* Release all the lpfc_nvme_bufs maintained by this host. */
+ spin_lock(&phba->nvme_buf_list_put_lock);
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ &phba->lpfc_nvme_buf_list_put, list) {
+ list_del(&lpfc_ncmd->list);
+ pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
+ lpfc_ncmd->dma_handle);
+ kfree(lpfc_ncmd);
+ phba->total_nvme_bufs--;
+ }
+ spin_unlock(&phba->nvme_buf_list_put_lock);
+
+ spin_lock(&phba->nvme_buf_list_get_lock);
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ &phba->lpfc_nvme_buf_list_get, list) {
+ list_del(&lpfc_ncmd->list);
+ pci_pool_free(phba->lpfc_sg_dma_buf_pool, lpfc_ncmd->data,
+ lpfc_ncmd->dma_handle);
+ kfree(lpfc_ncmd);
+ phba->total_nvme_bufs--;
+ }
+ spin_unlock(&phba->nvme_buf_list_get_lock);
spin_unlock_irq(&phba->hbalock);
}
-
/**
- * lpfc_sli4_xri_sgl_update - update xri-sgl sizing and mapping
+ * lpfc_sli4_els_sgl_update - update ELS xri-sgl sizing and mapping
* @phba: pointer to lpfc hba data structure.
*
* This routine first calculates the sizes of the current els and allocated
@@ -3149,20 +3225,18 @@ lpfc_scsi_free(struct lpfc_hba *phba)
* 0 - successful (for now, it always returns 0)
**/
int
-lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
+lpfc_sli4_els_sgl_update(struct lpfc_hba *phba)
{
struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
- struct lpfc_scsi_buf *psb = NULL, *psb_next = NULL;
- uint16_t i, lxri, xri_cnt, els_xri_cnt, scsi_xri_cnt;
+ uint16_t i, lxri, xri_cnt, els_xri_cnt;
LIST_HEAD(els_sgl_list);
- LIST_HEAD(scsi_sgl_list);
int rc;
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];

/*
* update on pci function's els xri-sgl list
*/
els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+
if (els_xri_cnt > phba->sli4_hba.els_xri_cnt) {
/* els xri-sgl expanded */
xri_cnt = els_xri_cnt - phba->sli4_hba.els_xri_cnt;
@@ -3198,9 +3272,10 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
list_add_tail(&sglq_entry->list, &els_sgl_list);
}
spin_lock_irq(&phba->hbalock);
- spin_lock(&pring->ring_lock);
- list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
- spin_unlock(&pring->ring_lock);
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
+ list_splice_init(&els_sgl_list,
+ &phba->sli4_hba.lpfc_els_sgl_list);
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irq(&phba->hbalock);
} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
/* els xri-sgl shrinked */
@@ -3210,24 +3285,22 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
"%d to %d\n", phba->sli4_hba.els_xri_cnt,
els_xri_cnt);
spin_lock_irq(&phba->hbalock);
- spin_lock(&pring->ring_lock);
- list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list);
- spin_unlock(&pring->ring_lock);
- spin_unlock_irq(&phba->hbalock);
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
+ list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list,
+ &els_sgl_list);
/* release extra els sgls from list */
for (i = 0; i < xri_cnt; i++) {
list_remove_head(&els_sgl_list,
sglq_entry, struct lpfc_sglq, list);
if (sglq_entry) {
- lpfc_mbuf_free(phba, sglq_entry->virt,
- sglq_entry->phys);
+ __lpfc_mbuf_free(phba, sglq_entry->virt,
+ sglq_entry->phys);
kfree(sglq_entry);
}
}
- spin_lock_irq(&phba->hbalock);
- spin_lock(&pring->ring_lock);
- list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
- spin_unlock(&pring->ring_lock);
+ list_splice_init(&els_sgl_list,
+ &phba->sli4_hba.lpfc_els_sgl_list);
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
spin_unlock_irq(&phba->hbalock);
} else
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -3239,7 +3312,7 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
sglq_entry = NULL;
sglq_entry_next = NULL;
list_for_each_entry_safe(sglq_entry, sglq_entry_next,
- &phba->sli4_hba.lpfc_sgl_list, list) {
+ &phba->sli4_hba.lpfc_els_sgl_list, list) {
lxri = lpfc_sli4_next_xritag(phba);
if (lxri == NO_XRI) {
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -3251,21 +3324,182 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
sglq_entry->sli4_lxritag = lxri;
sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
}
+ return 0;
+
+out_free_mem:
+ lpfc_free_els_sgl_list(phba);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_nvmet_sgl_update - update xri-sgl sizing and mapping
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine first calculates the sizes of the current els and allocated
+ * scsi sgl lists, and then goes through all sgls to updates the physical
+ * XRIs assigned due to port function reset. During port initialization, the
+ * current els and allocated scsi sgl lists are 0s.
+ *
+ * Return codes
+ * 0 - successful (for now, it always returns 0)
+ **/
+int
+lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba)
+{
+ struct lpfc_sglq *sglq_entry = NULL, *sglq_entry_next = NULL;
+ uint16_t i, lxri, xri_cnt, els_xri_cnt;
+ uint16_t nvmet_xri_cnt, tot_cnt;
+ LIST_HEAD(nvmet_sgl_list);
+ int rc;

/*
- * update on pci function's allocated scsi xri-sgl list
+ * update on pci function's nvmet xri-sgl list
+ */
+ els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+ nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post;
+ tot_cnt = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
+ if (nvmet_xri_cnt > tot_cnt) {
+ phba->cfg_nvmet_mrq_post = tot_cnt / phba->cfg_nvmet_mrq;
+ nvmet_xri_cnt = phba->cfg_nvmet_mrq * phba->cfg_nvmet_mrq_post;
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "6301 NVMET post-sgl count changed to %d\n",
+ phba->cfg_nvmet_mrq_post);
+ }
+
+ if (nvmet_xri_cnt > phba->sli4_hba.nvmet_xri_cnt) {
+ /* els xri-sgl expanded */
+ xri_cnt = nvmet_xri_cnt - phba->sli4_hba.nvmet_xri_cnt;
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "6302 NVMET xri-sgl cnt grew from %d to %d\n",
+ phba->sli4_hba.nvmet_xri_cnt, nvmet_xri_cnt);
+ /* allocate the additional nvmet sgls */
+ for (i = 0; i < xri_cnt; i++) {
+ sglq_entry = kzalloc(sizeof(struct lpfc_sglq),
+ GFP_KERNEL);
+ if (sglq_entry == NULL) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "6303 Failure to allocate an "
+ "NVMET sgl entry:%d\n", i);
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+ sglq_entry->buff_type = NVMET_BUFF_TYPE;
+ sglq_entry->virt = lpfc_nvmet_buf_alloc(phba, 0,
+ &sglq_entry->phys);
+ if (sglq_entry->virt == NULL) {
+ kfree(sglq_entry);
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "6304 Failure to allocate an "
+ "NVMET buf:%d\n", i);
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+ sglq_entry->sgl = sglq_entry->virt;
+ memset(sglq_entry->sgl, 0,
+ phba->cfg_sg_dma_buf_size);
+ sglq_entry->state = SGL_FREED;
+ list_add_tail(&sglq_entry->list, &nvmet_sgl_list);
+ }
+ spin_lock_irq(&phba->hbalock);
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
+ list_splice_init(&nvmet_sgl_list,
+ &phba->sli4_hba.lpfc_nvmet_sgl_list);
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
+ spin_unlock_irq(&phba->hbalock);
+ } else if (nvmet_xri_cnt < phba->sli4_hba.nvmet_xri_cnt) {
+ /* nvmet xri-sgl shrunk */
+ xri_cnt = phba->sli4_hba.nvmet_xri_cnt - nvmet_xri_cnt;
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "6305 NVMET xri-sgl count decreased from "
+ "%d to %d\n", phba->sli4_hba.nvmet_xri_cnt,
+ nvmet_xri_cnt);
+ spin_lock_irq(&phba->hbalock);
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
+ list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list,
+ &nvmet_sgl_list);
+ /* release extra nvmet sgls from list */
+ for (i = 0; i < xri_cnt; i++) {
+ list_remove_head(&nvmet_sgl_list,
+ sglq_entry, struct lpfc_sglq, list);
+ if (sglq_entry) {
+ lpfc_nvmet_buf_free(phba, sglq_entry->virt,
+ sglq_entry->phys);
+ kfree(sglq_entry);
+ }
+ }
+ list_splice_init(&nvmet_sgl_list,
+ &phba->sli4_hba.lpfc_nvmet_sgl_list);
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
+ spin_unlock_irq(&phba->hbalock);
+ } else
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "6306 NVMET xri-sgl count unchanged: %d\n",
+ nvmet_xri_cnt);
+ phba->sli4_hba.nvmet_xri_cnt = nvmet_xri_cnt;
+
+ /* update xris to nvmet sgls on the list */
+ sglq_entry = NULL;
+ sglq_entry_next = NULL;
+ list_for_each_entry_safe(sglq_entry, sglq_entry_next,
+ &phba->sli4_hba.lpfc_nvmet_sgl_list, list) {
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "6307 Failed to allocate xri for "
+ "NVMET sgl\n");
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+ sglq_entry->sli4_lxritag = lxri;
+ sglq_entry->sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+ }
+ return 0;
+
+out_free_mem:
+ lpfc_free_nvmet_sgl_list(phba);
+ return rc;
+}
+
+/**
+ * lpfc_sli4_scsi_sgl_update - update xri-sgl sizing and mapping
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine first calculates the sizes of the current els and allocated
+ * scsi sgl lists, and then goes through all sgls to updates the physical
+ * XRIs assigned due to port function reset. During port initialization, the
+ * current els and allocated scsi sgl lists are 0s.
+ *
+ * Return codes
+ * 0 - successful (for now, it always returns 0)
+ **/
+int
+lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba)
+{
+ struct lpfc_scsi_buf *psb, *psb_next;
+ uint16_t i, lxri, els_xri_cnt, scsi_xri_cnt;
+ LIST_HEAD(scsi_sgl_list);
+ int rc;
+
+ /*
+ * update on pci function's els xri-sgl list
*/
+ els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
phba->total_scsi_bufs = 0;

+ /*
+ * update on pci function's allocated scsi xri-sgl list
+ */
/* maximum number of xris available for scsi buffers */
phba->sli4_hba.scsi_xri_max = phba->sli4_hba.max_cfg_param.max_xri -
els_xri_cnt;

- lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
- "2401 Current allocated SCSI xri-sgl count:%d, "
- "maximum SCSI xri count:%d\n",
- phba->sli4_hba.scsi_xri_cnt,
- phba->sli4_hba.scsi_xri_max);
+ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
+ return 0;
+
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
+ phba->sli4_hba.scsi_xri_max = /* Split them up */
+ (phba->sli4_hba.scsi_xri_max *
+ phba->cfg_xri_split) / 100;

spin_lock_irq(&phba->scsi_buf_list_get_lock);
spin_lock(&phba->scsi_buf_list_put_lock);
@@ -3283,7 +3517,7 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
list_remove_head(&scsi_sgl_list, psb,
struct lpfc_scsi_buf, list);
if (psb) {
- pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+ pci_pool_free(phba->lpfc_sg_dma_buf_pool,
psb->data, psb->dma_handle);
kfree(psb);
}
@@ -3314,15 +3548,112 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba)
INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
spin_unlock(&phba->scsi_buf_list_put_lock);
spin_unlock_irq(&phba->scsi_buf_list_get_lock);
-
return 0;

out_free_mem:
- lpfc_free_els_sgl_list(phba);
lpfc_scsi_free(phba);
return rc;
}

+/**
+ * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine first calculates the sizes of the current els and allocated
+ * scsi sgl lists, and then goes through all sgls to updates the physical
+ * XRIs assigned due to port function reset. During port initialization, the
+ * current els and allocated scsi sgl lists are 0s.
+ *
+ * Return codes
+ * 0 - successful (for now, it always returns 0)
+ **/
+int
+lpfc_sli4_nvme_sgl_update(struct lpfc_hba *phba)
+{
+ struct lpfc_nvme_buf *lpfc_ncmd = NULL, *lpfc_ncmd_next = NULL;
+ uint16_t i, lxri, els_xri_cnt;
+ uint16_t nvme_xri_cnt, nvme_xri_max;
+ LIST_HEAD(nvme_sgl_list);
+ int rc;
+
+ phba->total_nvme_bufs = 0;
+
+ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
+ return 0;
+ /*
+ * update on pci function's allocated nvme xri-sgl list
+ */
+
+ /* maximum number of xris available for nvme buffers */
+ els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+ nvme_xri_max = phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
+ phba->sli4_hba.nvme_xri_max = nvme_xri_max;
+ phba->sli4_hba.nvme_xri_max -= phba->sli4_hba.scsi_xri_max;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+ "6074 Current allocated NVME xri-sgl count:%d, "
+ "maximum NVME xri count:%d\n",
+ phba->sli4_hba.nvme_xri_cnt,
+ phba->sli4_hba.nvme_xri_max);
+
+ spin_lock_irq(&phba->nvme_buf_list_get_lock);
+ spin_lock(&phba->nvme_buf_list_put_lock);
+ list_splice_init(&phba->lpfc_nvme_buf_list_get, &nvme_sgl_list);
+ list_splice(&phba->lpfc_nvme_buf_list_put, &nvme_sgl_list);
+ spin_unlock(&phba->nvme_buf_list_put_lock);
+ spin_unlock_irq(&phba->nvme_buf_list_get_lock);
+
+ if (phba->sli4_hba.nvme_xri_cnt > phba->sli4_hba.nvme_xri_max) {
+ /* max nvme xri shrunk below the allocated nvme buffers */
+ spin_lock_irq(&phba->nvme_buf_list_get_lock);
+ nvme_xri_cnt = phba->sli4_hba.nvme_xri_cnt -
+ phba->sli4_hba.nvme_xri_max;
+ spin_unlock_irq(&phba->nvme_buf_list_get_lock);
+ /* release the extra allocated nvme buffers */
+ for (i = 0; i < nvme_xri_cnt; i++) {
+ list_remove_head(&nvme_sgl_list, lpfc_ncmd,
+ struct lpfc_nvme_buf, list);
+ if (lpfc_ncmd) {
+ pci_pool_free(phba->lpfc_sg_dma_buf_pool,
+ lpfc_ncmd->data,
+ lpfc_ncmd->dma_handle);
+ kfree(lpfc_ncmd);
+ }
+ }
+ spin_lock_irq(&phba->nvme_buf_list_get_lock);
+ phba->sli4_hba.nvme_xri_cnt -= nvme_xri_cnt;
+ spin_unlock_irq(&phba->nvme_buf_list_get_lock);
+ }
+
+ /* update xris associated to remaining allocated nvme buffers */
+ lpfc_ncmd = NULL;
+ lpfc_ncmd_next = NULL;
+ list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
+ &nvme_sgl_list, list) {
+ lxri = lpfc_sli4_next_xritag(phba);
+ if (lxri == NO_XRI) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "6075 Failed to allocate xri for "
+ "nvme buffer\n");
+ rc = -ENOMEM;
+ goto out_free_mem;
+ }
+ lpfc_ncmd->cur_iocbq.sli4_lxritag = lxri;
+ lpfc_ncmd->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri];
+ }
+ spin_lock_irq(&phba->nvme_buf_list_get_lock);
+ spin_lock(&phba->nvme_buf_list_put_lock);
+ list_splice_init(&nvme_sgl_list, &phba->lpfc_nvme_buf_list_get);
+ INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
+ spin_unlock(&phba->nvme_buf_list_put_lock);
+ spin_unlock_irq(&phba->nvme_buf_list_get_lock);
+ return 0;
+
+out_free_mem:
+ lpfc_nvme_free(phba);
+ return rc;
+}
+
/**
* lpfc_create_port - Create an FC port
* @phba: pointer to lpfc hba data structure.
@@ -3343,18 +3674,23 @@ struct lpfc_vport *
lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
{
struct lpfc_vport *vport;
- struct Scsi_Host *shost;
+ struct Scsi_Host *shost = NULL;
int error = 0;

- if (dev != &phba->pcidev->dev) {
- shost = scsi_host_alloc(&lpfc_vport_template,
- sizeof(struct lpfc_vport));
- } else {
- if (phba->sli_rev == LPFC_SLI_REV4)
- shost = scsi_host_alloc(&lpfc_template,
- sizeof(struct lpfc_vport));
- else
- shost = scsi_host_alloc(&lpfc_template_s3,
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
+ if (dev != &phba->pcidev->dev) {
+ shost = scsi_host_alloc(&lpfc_vport_template,
+ sizeof(struct lpfc_vport));
+ } else {
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ shost = scsi_host_alloc(&lpfc_template,
+ sizeof(struct lpfc_vport));
+ else
+ shost = scsi_host_alloc(&lpfc_template_s3,
+ sizeof(struct lpfc_vport));
+ }
+ } else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ shost = scsi_host_alloc(&lpfc_template_nvme,
sizeof(struct lpfc_vport));
}
if (!shost)
@@ -3365,8 +3701,8 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
vport->load_flag |= FC_LOADING;
vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
vport->fc_rscn_flush = 0;
-
lpfc_get_vport_cfgparam(vport);
+
shost->unique_id = instance;
shost->max_id = LPFC_MAX_TARGET;
shost->max_lun = vport->cfg_max_luns;
@@ -3944,7 +4280,7 @@ lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
lpfc_els_flush_all_cmd(phba);

/* Block ELS IOCBs until we have done process link event */
- phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
+ phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;

/* Update link event statistics */
phba->sli.slistat.link_event++;
@@ -4103,7 +4439,7 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
lpfc_els_flush_all_cmd(phba);

/* Block ELS IOCBs until we have done process link event */
- phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
+ phba->sli4_hba.els_wq->pring->flag |= LPFC_STOP_IOCB_EVENT;

/* Update link event statistics */
phba->sli.slistat.link_event++;
@@ -4272,13 +4608,13 @@ lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
sprintf(message, "Unqualified optics - Replace with "
"Avago optics for Warranty and Technical "
"Support - Link is%s operational",
- (operational) ? "" : " not");
+ (operational) ? " not" : "");
break;
case LPFC_SLI_EVENT_STATUS_UNCERTIFIED:
sprintf(message, "Uncertified optics - Replace with "
"Avago-certified optics to enable link "
"operation - Link is%s operational",
- (operational) ? "" : " not");
+ (operational) ? " not" : "");
break;
default:
/* firmware is reporting a status we don't know about */
@@ -5000,48 +5336,119 @@ lpfc_sli_probe_sriov_nr_virtfn(struct lpfc_hba *phba, int nr_vfn)
}

/**
- * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
+ * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources.
* @phba: pointer to lpfc hba data structure.
*
- * This routine is invoked to set up the driver internal resources specific to
- * support the SLI-3 HBA device it attached to.
+ * This routine is invoked to set up the driver internal resources before the
+ * device specific resource setup to support the HBA device it attached to.
*
* Return codes
- * 0 - successful
- * other values - error
+ * 0 - successful
+ * other values - error
**/
static int
-lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
+lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
{
- struct lpfc_sli *psli;
- int rc;
+ struct lpfc_sli *psli = &phba->sli;

/*
- * Initialize timers used by driver
+ * Driver resources common to all SLI revisions
*/
+ atomic_set(&phba->fast_event_count, 0);
+ spin_lock_init(&phba->hbalock);

- /* Heartbeat timer */
- init_timer(&phba->hb_tmofunc);
- phba->hb_tmofunc.function = lpfc_hb_timeout;
- phba->hb_tmofunc.data = (unsigned long)phba;
+ /* Initialize ndlp management spinlock */
+ spin_lock_init(&phba->ndlp_lock);

- psli = &phba->sli;
- /* MBOX heartbeat timer */
- init_timer(&psli->mbox_tmo);
- psli->mbox_tmo.function = lpfc_mbox_timeout;
- psli->mbox_tmo.data = (unsigned long) phba;
- /* FCP polling mode timer */
- init_timer(&phba->fcp_poll_timer);
- phba->fcp_poll_timer.function = lpfc_poll_timeout;
- phba->fcp_poll_timer.data = (unsigned long) phba;
- /* Fabric block timer */
- init_timer(&phba->fabric_block_timer);
- phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
- phba->fabric_block_timer.data = (unsigned long) phba;
- /* EA polling mode timer */
+ INIT_LIST_HEAD(&phba->port_list);
+ INIT_LIST_HEAD(&phba->work_list);
+ init_waitqueue_head(&phba->wait_4_mlo_m_q);
+
+ /* Initialize the wait queue head for the kernel thread */
+ init_waitqueue_head(&phba->work_waitq);
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "1403 Protocols supported %s %s %s\n",
+ ((phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) ?
+ "SCSI" : " "),
+ ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) ?
+ "NVME" : " "),
+ (phba->nvmet_support ? "NVMET" : " "));
+
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
+ /* Initialize the scsi buffer list used by driver for scsi IO */
+ spin_lock_init(&phba->scsi_buf_list_get_lock);
+ INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
+ spin_lock_init(&phba->scsi_buf_list_put_lock);
+ INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
+ }
+
+ if ((phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) &&
+ (phba->nvmet_support == 0)) {
+ /* Initialize the NVME buffer list used by driver for NVME IO */
+ spin_lock_init(&phba->nvme_buf_list_get_lock);
+ INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_get);
+ spin_lock_init(&phba->nvme_buf_list_put_lock);
+ INIT_LIST_HEAD(&phba->lpfc_nvme_buf_list_put);
+ }
+
+ /* Initialize the fabric iocb list */
+ INIT_LIST_HEAD(&phba->fabric_iocb_list);
+
+ /* Initialize list to save ELS buffers */
+ INIT_LIST_HEAD(&phba->elsbuf);
+
+ /* Initialize FCF connection rec list */
+ INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
+
+ /* Initialize OAS configuration list */
+ spin_lock_init(&phba->devicelock);
+ INIT_LIST_HEAD(&phba->luns);
+
+ /* MBOX heartbeat timer */
+ init_timer(&psli->mbox_tmo);
+ psli->mbox_tmo.function = lpfc_mbox_timeout;
+ psli->mbox_tmo.data = (unsigned long) phba;
+ /* Fabric block timer */
+ init_timer(&phba->fabric_block_timer);
+ phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
+ phba->fabric_block_timer.data = (unsigned long) phba;
+ /* EA polling mode timer */
init_timer(&phba->eratt_poll);
phba->eratt_poll.function = lpfc_poll_eratt;
phba->eratt_poll.data = (unsigned long) phba;
+ /* Heartbeat timer */
+ init_timer(&phba->hb_tmofunc);
+ phba->hb_tmofunc.function = lpfc_hb_timeout;
+ phba->hb_tmofunc.data = (unsigned long)phba;
+
+ return 0;
+}
+
+/**
+ * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources specific to
+ * support the SLI-3 HBA device it attached to.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
+{
+ int rc;
+
+ /*
+ * Initialize timers used by driver
+ */
+
+ /* FCP polling mode timer */
+ init_timer(&phba->fcp_poll_timer);
+ phba->fcp_poll_timer.function = lpfc_poll_timeout;
+ phba->fcp_poll_timer.data = (unsigned long) phba;

/* Host attention work mask setup */
phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
@@ -5049,6 +5456,12 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)

/* Get all the module params for configuring this host */
lpfc_get_cfgparam(phba);
+ /* Set up phase-1 common device driver resources */
+
+ rc = lpfc_setup_driver_resource_phase1(phba);
+ if (rc)
+ return -ENODEV;
+
if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
phba->menlo_flag |= HBA_MENLO_SUPPORT;
/* check for menlo minimum sg count */
@@ -5056,10 +5469,10 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
}

- if (!phba->sli.ring)
- phba->sli.ring = kzalloc(LPFC_SLI3_MAX_RING *
+ if (!phba->sli.sli3_ring)
+ phba->sli.sli3_ring = kzalloc(LPFC_SLI3_MAX_RING *
sizeof(struct lpfc_sli_ring), GFP_KERNEL);
- if (!phba->sli.ring)
+ if (!phba->sli.sli3_ring)
return -ENOMEM;

/*
@@ -5118,7 +5531,7 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
* Initialize the SLI Layer to run with lpfc HBAs.
*/
lpfc_sli_setup(phba);
- lpfc_sli_queue_setup(phba);
+ lpfc_sli_queue_init(phba);

/* Allocate device driver memory */
if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
@@ -5174,18 +5587,27 @@ lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
static int
lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
{
- struct lpfc_vector_map_info *cpup;
- struct lpfc_sli *psli;
LPFC_MBOXQ_t *mboxq;
- int rc, i, hbq_count, max_buf_size;
+ MAILBOX_t *mb;
+ int rc, i, max_buf_size;
uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
struct lpfc_mqe *mqe;
int longs;
int fof_vectors = 0;
+ uint64_t wwn;
+
+ phba->sli4_hba.num_online_cpu = num_online_cpus();
+ phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
+ phba->sli4_hba.curr_disp_cpu = 0;

/* Get all the module params for configuring this host */
lpfc_get_cfgparam(phba);

+ /* Set up phase-1 common device driver resources */
+ rc = lpfc_setup_driver_resource_phase1(phba);
+ if (rc)
+ return -ENODEV;
+
/* Before proceed, wait for POST done and device ready */
rc = lpfc_sli4_post_status_check(phba);
if (rc)
@@ -5195,27 +5617,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
* Initialize timers used by driver
*/

- /* Heartbeat timer */
- init_timer(&phba->hb_tmofunc);
- phba->hb_tmofunc.function = lpfc_hb_timeout;
- phba->hb_tmofunc.data = (unsigned long)phba;
init_timer(&phba->rrq_tmr);
phba->rrq_tmr.function = lpfc_rrq_timeout;
phba->rrq_tmr.data = (unsigned long)phba;

- psli = &phba->sli;
- /* MBOX heartbeat timer */
- init_timer(&psli->mbox_tmo);
- psli->mbox_tmo.function = lpfc_mbox_timeout;
- psli->mbox_tmo.data = (unsigned long) phba;
- /* Fabric block timer */
- init_timer(&phba->fabric_block_timer);
- phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
- phba->fabric_block_timer.data = (unsigned long) phba;
- /* EA polling mode timer */
- init_timer(&phba->eratt_poll);
- phba->eratt_poll.function = lpfc_poll_eratt;
- phba->eratt_poll.data = (unsigned long) phba;
/* FCF rediscover timer */
init_timer(&phba->fcf.redisc_wait);
phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
@@ -5242,14 +5647,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)

/*
* For SLI4, instead of using ring 0 (LPFC_FCP_RING) for FCP commands
- * we will associate a new ring, for each FCP fastpath EQ/CQ/WQ tuple.
+ * we will associate a new ring, for each EQ/CQ/WQ tuple.
+ * The WQ create will allocate the ring.
*/
- if (!phba->sli.ring)
- phba->sli.ring = kzalloc(
- (LPFC_SLI3_MAX_RING + phba->cfg_fcp_io_channel) *
- sizeof(struct lpfc_sli_ring), GFP_KERNEL);
- if (!phba->sli.ring)
- return -ENOMEM;

/*
* It doesn't matter what family our adapter is in, we are
@@ -5261,43 +5661,45 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->cfg_sg_seg_cnt = LPFC_MAX_SGL_SEG_CNT - 2;

/*
- * Since lpfc_sg_seg_cnt is module parameter, the sg_dma_buf_size
- * used to create the sg_dma_buf_pool must be dynamically calculated.
+ * Since lpfc_sg_seg_cnt is module param, the sg_dma_buf_size
+ * used to create the sg_dma_buf_pool must be calculated.
*/
-
if (phba->cfg_enable_bg) {
/*
- * The scsi_buf for a T10-DIF I/O will hold the FCP cmnd,
- * the FCP rsp, and a SGE for each. Sice we have no control
- * over how many protection data segments the SCSI Layer
+ * The scsi_buf for a T10-DIF I/O holds the FCP cmnd,
+ * the FCP rsp, and a SGE. Sice we have no control
+ * over how many protection segments the SCSI Layer
* will hand us (ie: there could be one for every block
- * in the IO), we just allocate enough SGEs to accomidate
- * our max amount and we need to limit lpfc_sg_seg_cnt to
- * minimize the risk of running out.
+ * in the IO), just allocate enough SGEs to accomidate
+ * our max amount and we need to limit lpfc_sg_seg_cnt
+ * to minimize the risk of running out.
*/
phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
- sizeof(struct fcp_rsp) + max_buf_size;
+ sizeof(struct fcp_rsp) + max_buf_size;

/* Total SGEs for scsi_sg_list and scsi_sg_prot_list */
phba->cfg_total_seg_cnt = LPFC_MAX_SGL_SEG_CNT;

if (phba->cfg_sg_seg_cnt > LPFC_MAX_SG_SLI4_SEG_CNT_DIF)
- phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
+ phba->cfg_sg_seg_cnt =
+ LPFC_MAX_SG_SLI4_SEG_CNT_DIF;
} else {
/*
- * The scsi_buf for a regular I/O will hold the FCP cmnd,
+ * The scsi_buf for a regular I/O holds the FCP cmnd,
* the FCP rsp, a SGE for each, and a SGE for up to
* cfg_sg_seg_cnt data segments.
*/
phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
- sizeof(struct fcp_rsp) +
- ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
+ sizeof(struct fcp_rsp) +
+ ((phba->cfg_sg_seg_cnt + 2) *
+ sizeof(struct sli4_sge));

/* Total SGEs for scsi_sg_list */
phba->cfg_total_seg_cnt = phba->cfg_sg_seg_cnt + 2;
+
/*
- * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only need
- * to post 1 page for the SGL.
+ * NOTE: if (phba->cfg_sg_seg_cnt + 2) <= 256 we only
+ * need to post 1 page for the SGL.
*/
}

@@ -5317,21 +5719,28 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
phba->cfg_total_seg_cnt);

/* Initialize buffer queue management fields */
- hbq_count = lpfc_sli_hbq_count();
- for (i = 0; i < hbq_count; ++i)
- INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
- INIT_LIST_HEAD(&phba->rb_pend_list);
+ INIT_LIST_HEAD(&phba->hbqs[LPFC_ELS_HBQ].hbq_buffer_list);
phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;

/*
* Initialize the SLI Layer to run with lpfc SLI4 HBAs.
*/
- /* Initialize the Abort scsi buffer list used by driver */
- spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
- INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
+ /* Initialize the Abort scsi buffer list used by driver */
+ spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+ }
+
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ /* Initialize the Abort nvme buffer list used by driver */
+ spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
+ }
+
/* This abort list used by worker thread */
- spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
+ spin_lock_init(&phba->sli4_hba.sgl_list_lock);
+ spin_lock_init(&phba->sli4_hba.nvmet_io_lock);

/*
* Initialize driver internal slow-path work queues
@@ -5359,10 +5768,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
/* initialize optic_state to 0xFF */
phba->sli4_hba.lnk_info.optic_state = 0xff;

- /* Initialize the driver internal SLI layer lists. */
- lpfc_sli_setup(phba);
- lpfc_sli_queue_setup(phba);
-
/* Allocate device driver memory */
rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
if (rc)
@@ -5372,8 +5777,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
LPFC_SLI_INTF_IF_TYPE_2) {
rc = lpfc_pci_function_reset(phba);
- if (unlikely(rc))
- return -ENODEV;
+ if (unlikely(rc)) {
+ rc = -ENODEV;
+ goto out_free_mem;
+ }
phba->temp_sensor_support = 1;
}

@@ -5410,6 +5817,46 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
goto out_free_bsmbx;
}

+ /* Check for NVMET being configured */
+ phba->nvmet_support = 0;
+ if (lpfc_enable_nvmet_cnt) {
+
+ /* First get WWN of HBA instance */
+ lpfc_read_nv(phba, mboxq);
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+ if (rc != MBX_SUCCESS) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "6016 Mailbox failed , mbxCmd x%x "
+ "READ_NV, mbxStatus x%x\n",
+ bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+ bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+ rc = -EIO;
+ goto out_free_bsmbx;
+ }
+ mb = &mboxq->u.mb;
+ memcpy(&wwn, (char *)mb->un.varRDnvp.nodename,
+ sizeof(uint64_t));
+ wwn = cpu_to_be64(wwn);
+ phba->sli4_hba.wwnn.u.name = wwn;
+ memcpy(&wwn, (char *)mb->un.varRDnvp.portname,
+ sizeof(uint64_t));
+ /* wwn is WWPN of HBA instance */
+ wwn = cpu_to_be64(wwn);
+ phba->sli4_hba.wwpn.u.name = wwn;
+
+ /* Check to see if it matches any module parameter */
+ for (i = 0; i < lpfc_enable_nvmet_cnt; i++) {
+ if (wwn == lpfc_enable_nvmet[i]) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "6017 NVME Target %016llx\n",
+ wwn);
+ phba->nvmet_support = 1; /* a match */
+ }
+ }
+ }
+
+ lpfc_nvme_mod_param_dep(phba);
+
/* Get the Supported Pages if PORT_CAPABILITIES is supported by port. */
lpfc_supported_pages(mboxq);
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
@@ -5448,9 +5895,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2999 Unsupported SLI4 Parameters "
"Extents and RPI headers enabled.\n");
- goto out_free_bsmbx;
}
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ goto out_free_bsmbx;
}
+
mempool_free(mboxq, phba->mbox_mem_pool);

/* Verify OAS is supported */
@@ -5497,11 +5946,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
goto out_remove_rpi_hdrs;
}

- phba->sli4_hba.fcp_eq_hdl =
- kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
- (fof_vectors + phba->cfg_fcp_io_channel)),
- GFP_KERNEL);
- if (!phba->sli4_hba.fcp_eq_hdl) {
+ phba->sli4_hba.hba_eq_hdl = kcalloc(fof_vectors + phba->io_channel_irqs,
+ sizeof(struct lpfc_hba_eq_hdl),
+ GFP_KERNEL);
+ if (!phba->sli4_hba.hba_eq_hdl) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"2572 Failed allocate memory for "
"fast-path per-EQ handle array\n");
@@ -5509,52 +5957,31 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
goto out_free_fcf_rr_bmask;
}

- phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
- (fof_vectors +
- phba->cfg_fcp_io_channel)), GFP_KERNEL);
- if (!phba->sli4_hba.msix_entries) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2573 Failed allocate memory for msi-x "
- "interrupt vector entries\n");
- rc = -ENOMEM;
- goto out_free_fcp_eq_hdl;
- }
-
- phba->sli4_hba.cpu_map = kzalloc((sizeof(struct lpfc_vector_map_info) *
- phba->sli4_hba.num_present_cpu),
- GFP_KERNEL);
+ phba->sli4_hba.cpu_map = kcalloc(phba->sli4_hba.num_present_cpu,
+ sizeof(struct lpfc_vector_map_info),
+ GFP_KERNEL);
if (!phba->sli4_hba.cpu_map) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3327 Failed allocate memory for msi-x "
"interrupt vector mapping\n");
rc = -ENOMEM;
- goto out_free_msix;
+ goto out_free_hba_eq_hdl;
}
if (lpfc_used_cpu == NULL) {
- lpfc_used_cpu = kzalloc((sizeof(uint16_t) * lpfc_present_cpu),
- GFP_KERNEL);
+ lpfc_used_cpu = kcalloc(lpfc_present_cpu, sizeof(uint16_t),
+ GFP_KERNEL);
if (!lpfc_used_cpu) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
"3335 Failed allocate memory for msi-x "
"interrupt vector mapping\n");
kfree(phba->sli4_hba.cpu_map);
rc = -ENOMEM;
- goto out_free_msix;
+ goto out_free_hba_eq_hdl;
}
for (i = 0; i < lpfc_present_cpu; i++)
lpfc_used_cpu[i] = LPFC_VECTOR_MAP_EMPTY;
}

- /* Initialize io channels for round robin */
- cpup = phba->sli4_hba.cpu_map;
- rc = 0;
- for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
- cpup->channel_id = rc;
- rc++;
- if (rc >= phba->cfg_fcp_io_channel)
- rc = 0;
- }
-
/*
* Enable sr-iov virtual functions if supported and configured
* through the module parameter.
@@ -5574,10 +6001,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)

return 0;

-out_free_msix:
- kfree(phba->sli4_hba.msix_entries);
-out_free_fcp_eq_hdl:
- kfree(phba->sli4_hba.fcp_eq_hdl);
+out_free_hba_eq_hdl:
+ kfree(phba->sli4_hba.hba_eq_hdl);
out_free_fcf_rr_bmask:
kfree(phba->fcf.fcf_rr_bmask);
out_remove_rpi_hdrs:
@@ -5611,11 +6036,8 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
phba->sli4_hba.num_online_cpu = 0;
phba->sli4_hba.curr_disp_cpu = 0;

- /* Free memory allocated for msi-x interrupt vector entries */
- kfree(phba->sli4_hba.msix_entries);
-
/* Free memory allocated for fast-path work queue handles */
- kfree(phba->sli4_hba.fcp_eq_hdl);
+ kfree(phba->sli4_hba.hba_eq_hdl);

/* Free the allocated rpi headers. */
lpfc_sli4_remove_rpi_hdrs(phba);
@@ -5627,6 +6049,7 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
/* Free the ELS sgl list */
lpfc_free_active_sgl(phba);
lpfc_free_els_sgl_list(phba);
+ lpfc_free_nvmet_sgl_list(phba);

/* Free the completion queue EQ event pool */
lpfc_sli4_cq_event_release_all(phba);
@@ -5688,58 +6111,6 @@ lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
return 0;
}

-/**
- * lpfc_setup_driver_resource_phase1 - Phase1 etup driver internal resources.
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to set up the driver internal resources before the
- * device specific resource setup to support the HBA device it attached to.
- *
- * Return codes
- * 0 - successful
- * other values - error
- **/
-static int
-lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
-{
- /*
- * Driver resources common to all SLI revisions
- */
- atomic_set(&phba->fast_event_count, 0);
- spin_lock_init(&phba->hbalock);
-
- /* Initialize ndlp management spinlock */
- spin_lock_init(&phba->ndlp_lock);
-
- INIT_LIST_HEAD(&phba->port_list);
- INIT_LIST_HEAD(&phba->work_list);
- init_waitqueue_head(&phba->wait_4_mlo_m_q);
-
- /* Initialize the wait queue head for the kernel thread */
- init_waitqueue_head(&phba->work_waitq);
-
- /* Initialize the scsi buffer list used by driver for scsi IO */
- spin_lock_init(&phba->scsi_buf_list_get_lock);
- INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_get);
- spin_lock_init(&phba->scsi_buf_list_put_lock);
- INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put);
-
- /* Initialize the fabric iocb list */
- INIT_LIST_HEAD(&phba->fabric_iocb_list);
-
- /* Initialize list to save ELS buffers */
- INIT_LIST_HEAD(&phba->elsbuf);
-
- /* Initialize FCF connection rec list */
- INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
-
- /* Initialize OAS configuration list */
- spin_lock_init(&phba->devicelock);
- INIT_LIST_HEAD(&phba->luns);
-
- return 0;
-}
-
/**
* lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
* @phba: pointer to lpfc hba data structure.
@@ -5887,19 +6258,45 @@ static void
|
|
|
lpfc_free_els_sgl_list(struct lpfc_hba *phba)
|
|
|
{
|
|
|
LIST_HEAD(sglq_list);
|
|
|
- struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
|
|
|
|
|
|
/* Retrieve all els sgls from driver list */
|
|
|
spin_lock_irq(&phba->hbalock);
|
|
|
- spin_lock(&pring->ring_lock);
|
|
|
- list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
|
|
|
- spin_unlock(&pring->ring_lock);
|
|
|
+ spin_lock(&phba->sli4_hba.sgl_list_lock);
|
|
|
+ list_splice_init(&phba->sli4_hba.lpfc_els_sgl_list, &sglq_list);
|
|
|
+ spin_unlock(&phba->sli4_hba.sgl_list_lock);
|
|
|
spin_unlock_irq(&phba->hbalock);
|
|
|
|
|
|
/* Now free the sgl list */
|
|
|
lpfc_free_sgl_list(phba, &sglq_list);
|
|
|
}
|
|
|
|
|
|
+/**
+ * lpfc_free_nvmet_sgl_list - Free nvmet sgl list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the driver's nvmet sgl list and memory.
+ **/
+static void
+lpfc_free_nvmet_sgl_list(struct lpfc_hba *phba)
+{
+	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+	LIST_HEAD(sglq_list);
+
+	/* Retrieve all nvmet sgls from driver list */
+	spin_lock_irq(&phba->hbalock);
+	spin_lock(&phba->sli4_hba.sgl_list_lock);
+	list_splice_init(&phba->sli4_hba.lpfc_nvmet_sgl_list, &sglq_list);
+	spin_unlock(&phba->sli4_hba.sgl_list_lock);
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Now free the sgl list */
+	list_for_each_entry_safe(sglq_entry, sglq_next, &sglq_list, list) {
+		list_del(&sglq_entry->list);
+		lpfc_nvmet_buf_free(phba, sglq_entry->virt, sglq_entry->phys);
+		kfree(sglq_entry);
+	}
+}
+
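lpfc_free_nvmet_sgl_list() uses the usual teardown idiom: detach the whole list while the locks are held, then free the detached entries after dropping them. A standalone sketch of that idiom, with a single pthread mutex standing in for hbalock/sgl_list_lock and illustrative names:

#include <pthread.h>
#include <stdlib.h>

struct sglq {
	struct sglq *next;
	void *virt;				/* payload buffer */
};

static pthread_mutex_t sgl_lock = PTHREAD_MUTEX_INITIALIZER;
static struct sglq *nvmet_sgl_list;		/* list owned by the "driver" side */

/* Detach the whole list while holding the lock (the list_splice_init()
 * step), then free the detached entries with the lock already dropped. */
static void free_nvmet_sgl_list(void)
{
	struct sglq *head, *next;

	pthread_mutex_lock(&sgl_lock);
	head = nvmet_sgl_list;
	nvmet_sgl_list = NULL;
	pthread_mutex_unlock(&sgl_lock);

	for (; head; head = next) {
		next = head->next;
		free(head->virt);		/* lpfc_nvmet_buf_free() stand-in */
		free(head);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {		/* populate a few entries */
		struct sglq *e = calloc(1, sizeof(*e));

		e->virt = malloc(64);
		e->next = nvmet_sgl_list;
		nvmet_sgl_list = e;
	}
	free_nvmet_sgl_list();
	return 0;
}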
/**
|
|
|
* lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
|
|
|
* @phba: pointer to lpfc hba data structure.
|
|
|
@@ -5947,14 +6344,19 @@ static void
|
|
|
lpfc_init_sgl_list(struct lpfc_hba *phba)
|
|
|
{
|
|
|
/* Initialize and populate the sglq list per host/VF. */
|
|
|
- INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
|
|
|
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_els_sgl_list);
|
|
|
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
|
|
|
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_sgl_list);
|
|
|
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list);
|
|
|
|
|
|
/* els xri-sgl book keeping */
|
|
|
phba->sli4_hba.els_xri_cnt = 0;
|
|
|
|
|
|
/* scsi xri-buffer book keeping */
|
|
|
phba->sli4_hba.scsi_xri_cnt = 0;
|
|
|
+
|
|
|
+ /* nvme xri-buffer book keeping */
|
|
|
+ phba->sli4_hba.nvme_xri_cnt = 0;
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
@@ -6185,9 +6587,9 @@ lpfc_hba_free(struct lpfc_hba *phba)
|
|
|
/* Release the driver assigned board number */
|
|
|
idr_remove(&lpfc_hba_index, phba->brd_no);
|
|
|
|
|
|
- /* Free memory allocated with sli rings */
|
|
|
- kfree(phba->sli.ring);
|
|
|
- phba->sli.ring = NULL;
|
|
|
+ /* Free memory allocated with sli3 rings */
|
|
|
+ kfree(phba->sli.sli3_ring);
|
|
|
+ phba->sli.sli3_ring = NULL;
|
|
|
|
|
|
kfree(phba);
|
|
|
return;
|
|
|
@@ -6223,6 +6625,23 @@ lpfc_create_shost(struct lpfc_hba *phba)
|
|
|
|
|
|
shost = lpfc_shost_from_vport(vport);
|
|
|
phba->pport = vport;
|
|
|
+
|
|
|
+ if (phba->nvmet_support) {
|
|
|
+ /* Only 1 vport (pport) will support NVME target */
|
|
|
+ if (phba->txrdy_payload_pool == NULL) {
|
|
|
+ phba->txrdy_payload_pool = pci_pool_create(
|
|
|
+ "txrdy_pool", phba->pcidev,
|
|
|
+ TXRDY_PAYLOAD_LEN, 16, 0);
|
|
|
+ if (phba->txrdy_payload_pool) {
|
|
|
+ phba->targetport = NULL;
|
|
|
+ phba->cfg_enable_fc4_type = LPFC_ENABLE_NVME;
|
|
|
+ lpfc_printf_log(phba, KERN_INFO,
|
|
|
+ LOG_INIT | LOG_NVME_DISC,
|
|
|
+ "6076 NVME Target Found\n");
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
lpfc_debugfs_initialize(vport);
|
|
|
/* Put reference to SCSI host to driver's device private data */
|
|
|
pci_set_drvdata(phba->pcidev, shost);
|
|
|
@@ -6504,8 +6923,6 @@ lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
|
|
|
|
|
|
memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
|
|
|
|
|
|
- INIT_LIST_HEAD(&phba->rb_pend_list);
|
|
|
-
|
|
|
phba->MBslimaddr = phba->slim_memmap_p;
|
|
|
phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
|
|
|
phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
|
|
|
@@ -7009,7 +7426,7 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
|
|
|
"VPI(B:%d M:%d) "
|
|
|
"VFI(B:%d M:%d) "
|
|
|
"RPI(B:%d M:%d) "
|
|
|
- "FCFI(Count:%d)\n",
|
|
|
+ "FCFI:%d EQ:%d CQ:%d WQ:%d RQ:%d\n",
|
|
|
phba->sli4_hba.extents_in_use,
|
|
|
phba->sli4_hba.max_cfg_param.xri_base,
|
|
|
phba->sli4_hba.max_cfg_param.max_xri,
|
|
|
@@ -7019,7 +7436,12 @@ lpfc_sli4_read_config(struct lpfc_hba *phba)
|
|
|
phba->sli4_hba.max_cfg_param.max_vfi,
|
|
|
phba->sli4_hba.max_cfg_param.rpi_base,
|
|
|
phba->sli4_hba.max_cfg_param.max_rpi,
|
|
|
- phba->sli4_hba.max_cfg_param.max_fcfi);
|
|
|
+ phba->sli4_hba.max_cfg_param.max_fcfi,
|
|
|
+ phba->sli4_hba.max_cfg_param.max_eq,
|
|
|
+ phba->sli4_hba.max_cfg_param.max_cq,
|
|
|
+ phba->sli4_hba.max_cfg_param.max_wq,
|
|
|
+ phba->sli4_hba.max_cfg_param.max_rq);
|
|
|
+
|
|
|
}
|
|
|
|
|
|
if (rc)
|
|
|
@@ -7210,11 +7632,11 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
- * lpfc_sli4_queue_verify - Verify and update EQ and CQ counts
|
|
|
+ * lpfc_sli4_queue_verify - Verify and update EQ counts
|
|
|
* @phba: pointer to lpfc hba data structure.
|
|
|
*
|
|
|
- * This routine is invoked to check the user settable queue counts for EQs and
|
|
|
- * CQs. after this routine is called the counts will be set to valid values that
|
|
|
+ * This routine is invoked to check the user settable queue counts for EQs.
|
|
|
+ * After this routine is called the counts will be set to valid values that
|
|
|
* adhere to the constraints of the system's interrupt vectors and the port's
|
|
|
* queue resources.
|
|
|
*
|
|
|
@@ -7225,9 +7647,7 @@ lpfc_setup_endian_order(struct lpfc_hba *phba)
|
|
|
static int
|
|
|
lpfc_sli4_queue_verify(struct lpfc_hba *phba)
|
|
|
{
|
|
|
- int cfg_fcp_io_channel;
|
|
|
- uint32_t cpu;
|
|
|
- uint32_t i = 0;
|
|
|
+ int io_channel;
|
|
|
int fof_vectors = phba->cfg_fof ? 1 : 0;
|
|
|
|
|
|
/*
|
|
|
@@ -7236,49 +7656,40 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
|
|
|
*/
|
|
|
|
|
|
/* Sanity check on HBA EQ parameters */
|
|
|
- cfg_fcp_io_channel = phba->cfg_fcp_io_channel;
|
|
|
-
|
|
|
- /* It doesn't make sense to have more io channels then online CPUs */
|
|
|
- for_each_present_cpu(cpu) {
|
|
|
- if (cpu_online(cpu))
|
|
|
- i++;
|
|
|
- }
|
|
|
- phba->sli4_hba.num_online_cpu = i;
|
|
|
- phba->sli4_hba.num_present_cpu = lpfc_present_cpu;
|
|
|
- phba->sli4_hba.curr_disp_cpu = 0;
|
|
|
+ io_channel = phba->io_channel_irqs;
|
|
|
|
|
|
- if (i < cfg_fcp_io_channel) {
|
|
|
+ if (phba->sli4_hba.num_online_cpu < io_channel) {
|
|
|
lpfc_printf_log(phba,
|
|
|
KERN_ERR, LOG_INIT,
|
|
|
"3188 Reducing IO channels to match number of "
|
|
|
"online CPUs: from %d to %d\n",
|
|
|
- cfg_fcp_io_channel, i);
|
|
|
- cfg_fcp_io_channel = i;
|
|
|
+ io_channel, phba->sli4_hba.num_online_cpu);
|
|
|
+ io_channel = phba->sli4_hba.num_online_cpu;
|
|
|
}
|
|
|
|
|
|
- if (cfg_fcp_io_channel + fof_vectors >
|
|
|
- phba->sli4_hba.max_cfg_param.max_eq) {
|
|
|
- if (phba->sli4_hba.max_cfg_param.max_eq <
|
|
|
- LPFC_FCP_IO_CHAN_MIN) {
|
|
|
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
- "2574 Not enough EQs (%d) from the "
|
|
|
- "pci function for supporting FCP "
|
|
|
- "EQs (%d)\n",
|
|
|
- phba->sli4_hba.max_cfg_param.max_eq,
|
|
|
- phba->cfg_fcp_io_channel);
|
|
|
- goto out_error;
|
|
|
- }
|
|
|
+ if (io_channel + fof_vectors > phba->sli4_hba.max_cfg_param.max_eq) {
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
"2575 Reducing IO channels to match number of "
|
|
|
"available EQs: from %d to %d\n",
|
|
|
- cfg_fcp_io_channel,
|
|
|
+ io_channel,
|
|
|
phba->sli4_hba.max_cfg_param.max_eq);
|
|
|
- cfg_fcp_io_channel = phba->sli4_hba.max_cfg_param.max_eq -
|
|
|
- fof_vectors;
|
|
|
+ io_channel = phba->sli4_hba.max_cfg_param.max_eq - fof_vectors;
|
|
|
}
|
|
|
|
|
|
- /* The actual number of FCP event queues adopted */
|
|
|
- phba->cfg_fcp_io_channel = cfg_fcp_io_channel;
|
|
|
+ /* The actual number of FCP / NVME event queues adopted */
|
|
|
+ if (io_channel != phba->io_channel_irqs)
|
|
|
+ phba->io_channel_irqs = io_channel;
|
|
|
+ if (phba->cfg_fcp_io_channel > io_channel)
|
|
|
+ phba->cfg_fcp_io_channel = io_channel;
|
|
|
+ if (phba->cfg_nvme_io_channel > io_channel)
|
|
|
+ phba->cfg_nvme_io_channel = io_channel;
|
|
|
+ if (phba->cfg_nvme_io_channel < phba->cfg_nvmet_mrq)
|
|
|
+ phba->cfg_nvmet_mrq = phba->cfg_nvme_io_channel;
|
|
|
+
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "2574 IO channels: irqs %d fcp %d nvme %d MRQ: %d\n",
|
|
|
+ phba->io_channel_irqs, phba->cfg_fcp_io_channel,
|
|
|
+ phba->cfg_nvme_io_channel, phba->cfg_nvmet_mrq);
|
|
|
|
|
|
/* Get EQ depth from module parameter, fake the default for now */
|
|
|
phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
|
|
|
@@ -7287,37 +7698,95 @@ lpfc_sli4_queue_verify(struct lpfc_hba *phba)
|
|
|
/* Get CQ depth from module parameter, fake the default for now */
|
|
|
phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
|
|
|
phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
|
|
|
-
|
|
|
return 0;
|
|
|
-out_error:
|
|
|
- return -ENOMEM;
|
|
|
}
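The checks above boil down to clamping the IRQ count to the online CPUs and to the EQs the port exposes (minus any FOF vector), then capping the per-protocol channel counts at that value. A compact sketch of the same arithmetic (plain C, illustrative field names):

#include <stdio.h>

struct qcfg {
	int io_channel_irqs;	/* requested interrupt vectors */
	int fcp_io_channel;	/* requested FCP channels */
	int nvme_io_channel;	/* requested NVME channels */
	int nvmet_mrq;		/* requested NVMET MRQ pairs */
};

/* Mirror of the lpfc_sli4_queue_verify() bounds checking. */
static void clamp_channels(struct qcfg *c, int online_cpus, int max_eq, int fof_vectors)
{
	int io = c->io_channel_irqs;

	if (io > online_cpus)
		io = online_cpus;
	if (io + fof_vectors > max_eq)
		io = max_eq - fof_vectors;

	c->io_channel_irqs = io;
	if (c->fcp_io_channel > io)
		c->fcp_io_channel = io;
	if (c->nvme_io_channel > io)
		c->nvme_io_channel = io;
	if (c->nvmet_mrq > c->nvme_io_channel)
		c->nvmet_mrq = c->nvme_io_channel;
}

int main(void)
{
	struct qcfg c = { 16, 16, 16, 16 };

	clamp_channels(&c, 8, 6, 1);	/* 8 CPUs online, 6 EQs, 1 FOF vector */
	printf("irqs %d fcp %d nvme %d mrq %d\n",
	       c.io_channel_irqs, c.fcp_io_channel, c.nvme_io_channel, c.nvmet_mrq);
	return 0;
}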
|
|
|
|
|
|
-/**
|
|
|
- * lpfc_sli4_queue_create - Create all the SLI4 queues
|
|
|
- * @phba: pointer to lpfc hba data structure.
|
|
|
- *
|
|
|
- * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
|
|
|
- * operation. For each SLI4 queue type, the parameters such as queue entry
|
|
|
- * count (queue depth) shall be taken from the module parameter. For now,
|
|
|
- * we just use some constant number as place holder.
|
|
|
- *
|
|
|
- * Return codes
|
|
|
- * 0 - successful
|
|
|
- * -ENOMEM - No availble memory
|
|
|
- * -EIO - The mailbox failed to complete successfully.
|
|
|
- **/
|
|
|
-int
|
|
|
-lpfc_sli4_queue_create(struct lpfc_hba *phba)
|
|
|
+static int
|
|
|
+lpfc_alloc_nvme_wq_cq(struct lpfc_hba *phba, int wqidx)
|
|
|
{
|
|
|
struct lpfc_queue *qdesc;
|
|
|
- uint32_t wqesize;
|
|
|
- int idx;
|
|
|
+ int cnt;
|
|
|
|
|
|
- /*
|
|
|
- * Create HBA Record arrays.
|
|
|
- */
|
|
|
- if (!phba->cfg_fcp_io_channel)
|
|
|
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
|
|
|
+ phba->sli4_hba.cq_ecount);
|
|
|
+ if (!qdesc) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "0508 Failed allocate fast-path NVME CQ (%d)\n",
|
|
|
+ wqidx);
|
|
|
+ return 1;
|
|
|
+ }
|
|
|
+ phba->sli4_hba.nvme_cq[wqidx] = qdesc;
|
|
|
+
|
|
|
+ cnt = LPFC_NVME_WQSIZE;
|
|
|
+ qdesc = lpfc_sli4_queue_alloc(phba, LPFC_WQE128_SIZE, cnt);
|
|
|
+ if (!qdesc) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "0509 Failed allocate fast-path NVME WQ (%d)\n",
|
|
|
+ wqidx);
|
|
|
+ return 1;
|
|
|
+ }
|
|
|
+ phba->sli4_hba.nvme_wq[wqidx] = qdesc;
|
|
|
+ list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
+static int
|
|
|
+lpfc_alloc_fcp_wq_cq(struct lpfc_hba *phba, int wqidx)
|
|
|
+{
|
|
|
+ struct lpfc_queue *qdesc;
|
|
|
+ uint32_t wqesize;
|
|
|
+
|
|
|
+ /* Create Fast Path FCP CQs */
|
|
|
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
|
|
|
+ phba->sli4_hba.cq_ecount);
|
|
|
+ if (!qdesc) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "0499 Failed allocate fast-path FCP CQ (%d)\n", wqidx);
|
|
|
+ return 1;
|
|
|
+ }
|
|
|
+ phba->sli4_hba.fcp_cq[wqidx] = qdesc;
|
|
|
+
|
|
|
+ /* Create Fast Path FCP WQs */
|
|
|
+ wqesize = (phba->fcp_embed_io) ?
|
|
|
+ LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
|
|
|
+ qdesc = lpfc_sli4_queue_alloc(phba, wqesize, phba->sli4_hba.wq_ecount);
|
|
|
+ if (!qdesc) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "0503 Failed allocate fast-path FCP WQ (%d)\n",
|
|
|
+ wqidx);
|
|
|
+ return 1;
|
|
|
+ }
|
|
|
+ phba->sli4_hba.fcp_wq[wqidx] = qdesc;
|
|
|
+ list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
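lpfc_alloc_fcp_wq_cq() above picks 128-byte work-queue entries when fcp_embed_io is set and the default entry size otherwise; the ring footprint is just entry size times entry count. A small sketch of that sizing (plain C, illustrative constants):

#include <stdio.h>
#include <stddef.h>

#define WQE_DEF_SIZE	64	/* default WQE size, illustrative */
#define WQE128_SIZE	128	/* large WQE used when the IO is embedded */
#define WQ_DEF_COUNT	256	/* queue depth, illustrative */

static size_t wq_ring_bytes(int embed_io)
{
	size_t esize = embed_io ? WQE128_SIZE : WQE_DEF_SIZE;

	return esize * WQ_DEF_COUNT;
}

int main(void)
{
	printf("wq ring: %zu bytes (embedded), %zu bytes (default)\n",
	       wq_ring_bytes(1), wq_ring_bytes(0));
	return 0;
}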
|
|
|
+/**
|
|
|
+ * lpfc_sli4_queue_create - Create all the SLI4 queues
|
|
|
+ * @phba: pointer to lpfc hba data structure.
|
|
|
+ *
|
|
|
+ * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
|
|
|
+ * operation. For each SLI4 queue type, the parameters such as queue entry
|
|
|
+ * count (queue depth) shall be taken from the module parameter. For now,
|
|
|
+ * we just use some constant number as place holder.
|
|
|
+ *
+ * Return codes
+ *	0 - successful
+ *	-ENOMEM - No available memory
+ *	-EIO - The mailbox failed to complete successfully.
|
|
|
+ **/
|
|
|
+int
|
|
|
+lpfc_sli4_queue_create(struct lpfc_hba *phba)
|
|
|
+{
|
|
|
+ struct lpfc_queue *qdesc;
|
|
|
+ int idx, io_channel, max;
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Create HBA Record arrays.
|
|
|
+ * Both NVME and FCP will share that same vectors / EQs
|
|
|
+ */
|
|
|
+ io_channel = phba->io_channel_irqs;
|
|
|
+ if (!io_channel)
|
|
|
return -ERANGE;
|
|
|
|
|
|
phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
|
|
|
@@ -7326,9 +7795,14 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
|
|
|
phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
|
|
|
phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
|
|
|
phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
|
|
|
+ phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
|
|
|
+ phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
|
|
|
+ phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
|
|
|
+ phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
|
|
|
|
|
|
- phba->sli4_hba.hba_eq = kzalloc((sizeof(struct lpfc_queue *) *
|
|
|
- phba->cfg_fcp_io_channel), GFP_KERNEL);
|
|
|
+ phba->sli4_hba.hba_eq = kcalloc(io_channel,
|
|
|
+ sizeof(struct lpfc_queue *),
|
|
|
+ GFP_KERNEL);
|
|
|
if (!phba->sli4_hba.hba_eq) {
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
"2576 Failed allocate memory for "
|
|
|
@@ -7336,44 +7810,115 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
|
|
|
goto out_error;
|
|
|
}
|
|
|
|
|
|
- phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
|
|
|
- phba->cfg_fcp_io_channel), GFP_KERNEL);
|
|
|
- if (!phba->sli4_hba.fcp_cq) {
|
|
|
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
- "2577 Failed allocate memory for fast-path "
|
|
|
- "CQ record array\n");
|
|
|
- goto out_error;
|
|
|
+ if (phba->cfg_fcp_io_channel) {
|
|
|
+ phba->sli4_hba.fcp_cq = kcalloc(phba->cfg_fcp_io_channel,
|
|
|
+ sizeof(struct lpfc_queue *),
|
|
|
+ GFP_KERNEL);
|
|
|
+ if (!phba->sli4_hba.fcp_cq) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "2577 Failed allocate memory for "
|
|
|
+ "fast-path CQ record array\n");
|
|
|
+ goto out_error;
|
|
|
+ }
|
|
|
+ phba->sli4_hba.fcp_wq = kcalloc(phba->cfg_fcp_io_channel,
|
|
|
+ sizeof(struct lpfc_queue *),
|
|
|
+ GFP_KERNEL);
|
|
|
+ if (!phba->sli4_hba.fcp_wq) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "2578 Failed allocate memory for "
|
|
|
+ "fast-path FCP WQ record array\n");
|
|
|
+ goto out_error;
|
|
|
+ }
|
|
|
+ /*
|
|
|
+ * Since the first EQ can have multiple CQs associated with it,
|
|
|
+ * this array is used to quickly see if we have a FCP fast-path
|
|
|
+ * CQ match.
|
|
|
+ */
|
|
|
+ phba->sli4_hba.fcp_cq_map = kcalloc(phba->cfg_fcp_io_channel,
|
|
|
+ sizeof(uint16_t),
|
|
|
+ GFP_KERNEL);
|
|
|
+ if (!phba->sli4_hba.fcp_cq_map) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "2545 Failed allocate memory for "
|
|
|
+ "fast-path CQ map\n");
|
|
|
+ goto out_error;
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
- phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
|
|
|
- phba->cfg_fcp_io_channel), GFP_KERNEL);
|
|
|
- if (!phba->sli4_hba.fcp_wq) {
|
|
|
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
- "2578 Failed allocate memory for fast-path "
|
|
|
- "WQ record array\n");
|
|
|
- goto out_error;
|
|
|
- }
|
|
|
+ if (phba->cfg_nvme_io_channel) {
|
|
|
+ phba->sli4_hba.nvme_cq = kcalloc(phba->cfg_nvme_io_channel,
|
|
|
+ sizeof(struct lpfc_queue *),
|
|
|
+ GFP_KERNEL);
|
|
|
+ if (!phba->sli4_hba.nvme_cq) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "6077 Failed allocate memory for "
|
|
|
+ "fast-path CQ record array\n");
|
|
|
+ goto out_error;
|
|
|
+ }
|
|
|
|
|
|
- /*
|
|
|
- * Since the first EQ can have multiple CQs associated with it,
|
|
|
- * this array is used to quickly see if we have a FCP fast-path
|
|
|
- * CQ match.
|
|
|
- */
|
|
|
- phba->sli4_hba.fcp_cq_map = kzalloc((sizeof(uint16_t) *
|
|
|
- phba->cfg_fcp_io_channel), GFP_KERNEL);
|
|
|
- if (!phba->sli4_hba.fcp_cq_map) {
|
|
|
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
- "2545 Failed allocate memory for fast-path "
|
|
|
- "CQ map\n");
|
|
|
- goto out_error;
|
|
|
+ phba->sli4_hba.nvme_wq = kcalloc(phba->cfg_nvme_io_channel,
|
|
|
+ sizeof(struct lpfc_queue *),
|
|
|
+ GFP_KERNEL);
|
|
|
+ if (!phba->sli4_hba.nvme_wq) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "2581 Failed allocate memory for "
|
|
|
+ "fast-path NVME WQ record array\n");
|
|
|
+ goto out_error;
|
|
|
+ }
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Since the first EQ can have multiple CQs associated with it,
|
|
|
+ * this array is used to quickly see if we have a NVME fast-path
|
|
|
+ * CQ match.
|
|
|
+ */
|
|
|
+ phba->sli4_hba.nvme_cq_map = kcalloc(phba->cfg_nvme_io_channel,
|
|
|
+ sizeof(uint16_t),
|
|
|
+ GFP_KERNEL);
|
|
|
+ if (!phba->sli4_hba.nvme_cq_map) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "6078 Failed allocate memory for "
|
|
|
+ "fast-path CQ map\n");
|
|
|
+ goto out_error;
|
|
|
+ }
|
|
|
+
|
|
|
+ if (phba->nvmet_support) {
|
|
|
+ phba->sli4_hba.nvmet_cqset = kcalloc(
|
|
|
+ phba->cfg_nvmet_mrq,
|
|
|
+ sizeof(struct lpfc_queue *),
|
|
|
+ GFP_KERNEL);
|
|
|
+ if (!phba->sli4_hba.nvmet_cqset) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "3121 Fail allocate memory for "
|
|
|
+ "fast-path CQ set array\n");
|
|
|
+ goto out_error;
|
|
|
+ }
|
|
|
+ phba->sli4_hba.nvmet_mrq_hdr = kcalloc(
|
|
|
+ phba->cfg_nvmet_mrq,
|
|
|
+ sizeof(struct lpfc_queue *),
|
|
|
+ GFP_KERNEL);
|
|
|
+ if (!phba->sli4_hba.nvmet_mrq_hdr) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "3122 Fail allocate memory for "
|
|
|
+ "fast-path RQ set hdr array\n");
|
|
|
+ goto out_error;
|
|
|
+ }
|
|
|
+ phba->sli4_hba.nvmet_mrq_data = kcalloc(
|
|
|
+ phba->cfg_nvmet_mrq,
|
|
|
+ sizeof(struct lpfc_queue *),
|
|
|
+ GFP_KERNEL);
|
|
|
+ if (!phba->sli4_hba.nvmet_mrq_data) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "3124 Fail allocate memory for "
|
|
|
+ "fast-path RQ set data array\n");
|
|
|
+ goto out_error;
|
|
|
+ }
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
- /*
|
|
|
- * Create HBA Event Queues (EQs). The cfg_fcp_io_channel specifies
|
|
|
- * how many EQs to create.
|
|
|
- */
|
|
|
- for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
|
|
|
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
|
|
|
|
|
|
+ /* Create HBA Event Queues (EQs) */
|
|
|
+ for (idx = 0; idx < io_channel; idx++) {
|
|
|
/* Create EQs */
|
|
|
qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
|
|
|
phba->sli4_hba.eq_ecount);
|
|
|
@@ -7383,33 +7928,42 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
|
|
|
goto out_error;
|
|
|
}
|
|
|
phba->sli4_hba.hba_eq[idx] = qdesc;
|
|
|
+ }
|
|
|
|
|
|
- /* Create Fast Path FCP CQs */
|
|
|
- qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
|
|
|
- phba->sli4_hba.cq_ecount);
|
|
|
- if (!qdesc) {
|
|
|
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
- "0499 Failed allocate fast-path FCP "
|
|
|
- "CQ (%d)\n", idx);
|
|
|
+ /* FCP and NVME io channels are not required to be balanced */
|
|
|
+
|
|
|
+ for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
|
|
|
+ if (lpfc_alloc_fcp_wq_cq(phba, idx))
|
|
|
goto out_error;
|
|
|
- }
|
|
|
- phba->sli4_hba.fcp_cq[idx] = qdesc;
|
|
|
|
|
|
- /* Create Fast Path FCP WQs */
|
|
|
- wqesize = (phba->fcp_embed_io) ?
|
|
|
- LPFC_WQE128_SIZE : phba->sli4_hba.wq_esize;
|
|
|
- qdesc = lpfc_sli4_queue_alloc(phba, wqesize,
|
|
|
- phba->sli4_hba.wq_ecount);
|
|
|
- if (!qdesc) {
|
|
|
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
- "0503 Failed allocate fast-path FCP "
|
|
|
- "WQ (%d)\n", idx);
|
|
|
+ for (idx = 0; idx < phba->cfg_nvme_io_channel; idx++)
|
|
|
+ if (lpfc_alloc_nvme_wq_cq(phba, idx))
|
|
|
+ goto out_error;
|
|
|
+
|
|
|
+ /* allocate MRQ CQs */
|
|
|
+ max = phba->cfg_nvme_io_channel;
|
|
|
+ if (max < phba->cfg_nvmet_mrq)
|
|
|
+ max = phba->cfg_nvmet_mrq;
|
|
|
+
|
|
|
+ for (idx = 0; idx < max; idx++)
|
|
|
+ if (lpfc_alloc_nvme_wq_cq(phba, idx))
|
|
|
goto out_error;
|
|
|
+
|
|
|
+ if (phba->nvmet_support) {
|
|
|
+ for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
|
|
|
+ qdesc = lpfc_sli4_queue_alloc(phba,
|
|
|
+ phba->sli4_hba.cq_esize,
|
|
|
+ phba->sli4_hba.cq_ecount);
|
|
|
+ if (!qdesc) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "3142 Failed allocate NVME "
|
|
|
+ "CQ Set (%d)\n", idx);
|
|
|
+ goto out_error;
|
|
|
+ }
|
|
|
+ phba->sli4_hba.nvmet_cqset[idx] = qdesc;
|
|
|
}
|
|
|
- phba->sli4_hba.fcp_wq[idx] = qdesc;
|
|
|
}
|
|
|
|
|
|
-
|
|
|
/*
|
|
|
* Create Slow Path Completion Queues (CQs)
|
|
|
*/
|
|
|
@@ -7463,6 +8017,30 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
|
|
|
goto out_error;
|
|
|
}
|
|
|
phba->sli4_hba.els_wq = qdesc;
|
|
|
+ list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
|
|
|
+
|
|
|
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
|
|
|
+ /* Create NVME LS Complete Queue */
|
|
|
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
|
|
|
+ phba->sli4_hba.cq_ecount);
|
|
|
+ if (!qdesc) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "6079 Failed allocate NVME LS CQ\n");
|
|
|
+ goto out_error;
|
|
|
+ }
|
|
|
+ phba->sli4_hba.nvmels_cq = qdesc;
|
|
|
+
|
|
|
+ /* Create NVME LS Work Queue */
|
|
|
+ qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
|
|
|
+ phba->sli4_hba.wq_ecount);
|
|
|
+ if (!qdesc) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "6080 Failed allocate NVME LS WQ\n");
|
|
|
+ goto out_error;
|
|
|
+ }
|
|
|
+ phba->sli4_hba.nvmels_wq = qdesc;
|
|
|
+ list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
|
|
|
+ }
|
|
|
|
|
|
/*
|
|
|
* Create Receive Queue (RQ)
|
|
|
@@ -7488,6 +8066,44 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
|
|
|
}
|
|
|
phba->sli4_hba.dat_rq = qdesc;
|
|
|
|
|
|
+ if (phba->nvmet_support) {
|
|
|
+ for (idx = 0; idx < phba->cfg_nvmet_mrq; idx++) {
|
|
|
+ /* Create NVMET Receive Queue for header */
|
|
|
+ qdesc = lpfc_sli4_queue_alloc(phba,
|
|
|
+ phba->sli4_hba.rq_esize,
|
|
|
+ phba->sli4_hba.rq_ecount);
|
|
|
+ if (!qdesc) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "3146 Failed allocate "
|
|
|
+ "receive HRQ\n");
|
|
|
+ goto out_error;
|
|
|
+ }
|
|
|
+ phba->sli4_hba.nvmet_mrq_hdr[idx] = qdesc;
|
|
|
+
|
|
|
+ /* Only needed for header of RQ pair */
|
|
|
+ qdesc->rqbp = kzalloc(sizeof(struct lpfc_rqb),
|
|
|
+ GFP_KERNEL);
|
|
|
+ if (qdesc->rqbp == NULL) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "6131 Failed allocate "
|
|
|
+ "Header RQBP\n");
|
|
|
+ goto out_error;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* Create NVMET Receive Queue for data */
|
|
|
+ qdesc = lpfc_sli4_queue_alloc(phba,
|
|
|
+ phba->sli4_hba.rq_esize,
|
|
|
+ phba->sli4_hba.rq_ecount);
|
|
|
+ if (!qdesc) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "3156 Failed allocate "
|
|
|
+ "receive DRQ\n");
|
|
|
+ goto out_error;
|
|
|
+ }
|
|
|
+ phba->sli4_hba.nvmet_mrq_data[idx] = qdesc;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
/* Create the Queues needed for Flash Optimized Fabric operations */
|
|
|
if (phba->cfg_fof)
|
|
|
lpfc_fof_queue_create(phba);
|
|
|
@@ -7498,6 +8114,39 @@ out_error:
|
|
|
return -ENOMEM;
|
|
|
}
|
|
|
|
|
|
+static inline void
|
|
|
+__lpfc_sli4_release_queue(struct lpfc_queue **qp)
|
|
|
+{
|
|
|
+ if (*qp != NULL) {
|
|
|
+ lpfc_sli4_queue_free(*qp);
|
|
|
+ *qp = NULL;
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+static inline void
|
|
|
+lpfc_sli4_release_queues(struct lpfc_queue ***qs, int max)
|
|
|
+{
|
|
|
+ int idx;
|
|
|
+
|
|
|
+ if (*qs == NULL)
|
|
|
+ return;
|
|
|
+
|
|
|
+ for (idx = 0; idx < max; idx++)
|
|
|
+ __lpfc_sli4_release_queue(&(*qs)[idx]);
|
|
|
+
|
|
|
+ kfree(*qs);
|
|
|
+ *qs = NULL;
|
|
|
+}
|
|
|
+
|
|
|
+static inline void
|
|
|
+lpfc_sli4_release_queue_map(uint16_t **qmap)
|
|
|
+{
|
|
|
+ if (*qmap != NULL) {
|
|
|
+ kfree(*qmap);
|
|
|
+ *qmap = NULL;
|
|
|
+ }
|
|
|
+}
|
|
|
+
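The three helpers above centralize a free-and-NULL-through-a-double-pointer idiom so every queue release is safe to repeat and tolerates slots that were never allocated. A standalone sketch of the same shape (plain C, illustrative types):

#include <stdlib.h>

struct queue { int id; };

/* Free one queue through a double pointer and clear the caller's slot,
 * so calling it twice (or on a never-allocated slot) is harmless. */
static void release_queue(struct queue **qp)
{
	if (*qp != NULL) {
		free(*qp);
		*qp = NULL;
	}
}

/* Free an array of queue pointers, then the array itself. */
static void release_queues(struct queue ***qs, int max)
{
	if (*qs == NULL)
		return;
	for (int idx = 0; idx < max; idx++)
		release_queue(&(*qs)[idx]);
	free(*qs);
	*qs = NULL;
}

int main(void)
{
	struct queue **ring = calloc(4, sizeof(*ring));

	for (int i = 0; i < 2; i++)		/* only some slots populated */
		ring[i] = calloc(1, sizeof(**ring));
	release_queues(&ring, 4);		/* tolerates the NULL slots */
	release_queues(&ring, 4);		/* second call is a no-op */
	return 0;
}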
|
|
|
/**
|
|
|
* lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
|
|
|
* @phba: pointer to lpfc hba data structure.
|
|
|
@@ -7513,91 +8162,196 @@ out_error:
|
|
|
void
|
|
|
lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
|
|
|
{
|
|
|
- int idx;
|
|
|
-
|
|
|
if (phba->cfg_fof)
|
|
|
lpfc_fof_queue_destroy(phba);
|
|
|
|
|
|
- if (phba->sli4_hba.hba_eq != NULL) {
|
|
|
- /* Release HBA event queue */
|
|
|
- for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
|
|
|
- if (phba->sli4_hba.hba_eq[idx] != NULL) {
|
|
|
- lpfc_sli4_queue_free(
|
|
|
- phba->sli4_hba.hba_eq[idx]);
|
|
|
- phba->sli4_hba.hba_eq[idx] = NULL;
|
|
|
- }
|
|
|
- }
|
|
|
- kfree(phba->sli4_hba.hba_eq);
|
|
|
- phba->sli4_hba.hba_eq = NULL;
|
|
|
- }
|
|
|
+ /* Release HBA eqs */
|
|
|
+ lpfc_sli4_release_queues(&phba->sli4_hba.hba_eq, phba->io_channel_irqs);
|
|
|
|
|
|
- if (phba->sli4_hba.fcp_cq != NULL) {
|
|
|
- /* Release FCP completion queue */
|
|
|
- for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
|
|
|
- if (phba->sli4_hba.fcp_cq[idx] != NULL) {
|
|
|
- lpfc_sli4_queue_free(
|
|
|
- phba->sli4_hba.fcp_cq[idx]);
|
|
|
- phba->sli4_hba.fcp_cq[idx] = NULL;
|
|
|
- }
|
|
|
- }
|
|
|
- kfree(phba->sli4_hba.fcp_cq);
|
|
|
- phba->sli4_hba.fcp_cq = NULL;
|
|
|
- }
|
|
|
+ /* Release FCP cqs */
|
|
|
+ lpfc_sli4_release_queues(&phba->sli4_hba.fcp_cq,
|
|
|
+ phba->cfg_fcp_io_channel);
|
|
|
|
|
|
- if (phba->sli4_hba.fcp_wq != NULL) {
|
|
|
- /* Release FCP work queue */
|
|
|
- for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++) {
|
|
|
- if (phba->sli4_hba.fcp_wq[idx] != NULL) {
|
|
|
- lpfc_sli4_queue_free(
|
|
|
- phba->sli4_hba.fcp_wq[idx]);
|
|
|
- phba->sli4_hba.fcp_wq[idx] = NULL;
|
|
|
- }
|
|
|
- }
|
|
|
- kfree(phba->sli4_hba.fcp_wq);
|
|
|
- phba->sli4_hba.fcp_wq = NULL;
|
|
|
- }
|
|
|
+ /* Release FCP wqs */
|
|
|
+ lpfc_sli4_release_queues(&phba->sli4_hba.fcp_wq,
|
|
|
+ phba->cfg_fcp_io_channel);
|
|
|
|
|
|
/* Release FCP CQ mapping array */
|
|
|
- if (phba->sli4_hba.fcp_cq_map != NULL) {
|
|
|
- kfree(phba->sli4_hba.fcp_cq_map);
|
|
|
- phba->sli4_hba.fcp_cq_map = NULL;
|
|
|
- }
|
|
|
+ lpfc_sli4_release_queue_map(&phba->sli4_hba.fcp_cq_map);
|
|
|
+
|
|
|
+ /* Release NVME cqs */
|
|
|
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvme_cq,
|
|
|
+ phba->cfg_nvme_io_channel);
|
|
|
+
|
|
|
+ /* Release NVME wqs */
|
|
|
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvme_wq,
|
|
|
+ phba->cfg_nvme_io_channel);
|
|
|
+
|
|
|
+ /* Release NVME CQ mapping array */
|
|
|
+ lpfc_sli4_release_queue_map(&phba->sli4_hba.nvme_cq_map);
|
|
|
+
|
|
|
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_cqset,
|
|
|
+ phba->cfg_nvmet_mrq);
|
|
|
+
|
|
|
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_hdr,
|
|
|
+ phba->cfg_nvmet_mrq);
|
|
|
+ lpfc_sli4_release_queues(&phba->sli4_hba.nvmet_mrq_data,
|
|
|
+ phba->cfg_nvmet_mrq);
|
|
|
|
|
|
/* Release mailbox command work queue */
|
|
|
- if (phba->sli4_hba.mbx_wq != NULL) {
|
|
|
- lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
|
|
|
- phba->sli4_hba.mbx_wq = NULL;
|
|
|
- }
|
|
|
+ __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_wq);
|
|
|
|
|
|
/* Release ELS work queue */
|
|
|
- if (phba->sli4_hba.els_wq != NULL) {
|
|
|
- lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
|
|
|
- phba->sli4_hba.els_wq = NULL;
|
|
|
- }
|
|
|
+ __lpfc_sli4_release_queue(&phba->sli4_hba.els_wq);
|
|
|
+
|
|
|
+	/* Release NVME LS work queue */
|
|
|
+ __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_wq);
|
|
|
|
|
|
/* Release unsolicited receive queue */
|
|
|
- if (phba->sli4_hba.hdr_rq != NULL) {
|
|
|
- lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
|
|
|
- phba->sli4_hba.hdr_rq = NULL;
|
|
|
+ __lpfc_sli4_release_queue(&phba->sli4_hba.hdr_rq);
|
|
|
+ __lpfc_sli4_release_queue(&phba->sli4_hba.dat_rq);
|
|
|
+
|
|
|
+ /* Release ELS complete queue */
|
|
|
+ __lpfc_sli4_release_queue(&phba->sli4_hba.els_cq);
|
|
|
+
|
|
|
+ /* Release NVME LS complete queue */
|
|
|
+ __lpfc_sli4_release_queue(&phba->sli4_hba.nvmels_cq);
|
|
|
+
|
|
|
+ /* Release mailbox command complete queue */
|
|
|
+ __lpfc_sli4_release_queue(&phba->sli4_hba.mbx_cq);
|
|
|
+
|
|
|
+ /* Everything on this list has been freed */
|
|
|
+ INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
|
|
|
+}
|
|
|
+
|
|
|
+int
|
|
|
+lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
|
|
|
+ struct lpfc_queue *drq, int count)
|
|
|
+{
|
|
|
+ int rc, i;
|
|
|
+ struct lpfc_rqe hrqe;
|
|
|
+ struct lpfc_rqe drqe;
|
|
|
+ struct lpfc_rqb *rqbp;
|
|
|
+ struct rqb_dmabuf *rqb_buffer;
|
|
|
+ LIST_HEAD(rqb_buf_list);
|
|
|
+
|
|
|
+ rqbp = hrq->rqbp;
|
|
|
+ for (i = 0; i < count; i++) {
|
|
|
+ rqb_buffer = (rqbp->rqb_alloc_buffer)(phba);
|
|
|
+ if (!rqb_buffer)
|
|
|
+ break;
|
|
|
+ rqb_buffer->hrq = hrq;
|
|
|
+ rqb_buffer->drq = drq;
|
|
|
+ list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
|
|
|
+ }
|
|
|
+ while (!list_empty(&rqb_buf_list)) {
|
|
|
+ list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
|
|
|
+ hbuf.list);
|
|
|
+
|
|
|
+ hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
|
|
|
+ hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
|
|
|
+ drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
|
|
|
+ drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
|
|
|
+ rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
|
|
|
+ if (rc < 0) {
|
|
|
+ (rqbp->rqb_free_buffer)(phba, rqb_buffer);
|
|
|
+ } else {
|
|
|
+ list_add_tail(&rqb_buffer->hbuf.list,
|
|
|
+ &rqbp->rqb_buffer_list);
|
|
|
+ rqbp->buffer_count++;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ return 1;
|
|
|
+}
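Each receive queue entry posted above carries its buffer's DMA address split into low and high 32-bit words via putPaddrLow()/putPaddrHigh(). A tiny standalone sketch of that split (plain C):

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

struct rqe {			/* receive queue entry: address words only */
	uint32_t address_lo;
	uint32_t address_hi;
};

static struct rqe make_rqe(uint64_t dma_addr)
{
	struct rqe e = {
		.address_lo = (uint32_t)(dma_addr & 0xffffffffu),	/* putPaddrLow()  */
		.address_hi = (uint32_t)(dma_addr >> 32),		/* putPaddrHigh() */
	};
	return e;
}

int main(void)
{
	struct rqe e = make_rqe(0x0000004512345678ull);

	printf("lo=0x%08" PRIx32 " hi=0x%08" PRIx32 "\n", e.address_lo, e.address_hi);
	return 0;
}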
|
|
|
+
|
|
|
+int
|
|
|
+lpfc_free_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *rq)
|
|
|
+{
|
|
|
+ struct lpfc_rqb *rqbp;
|
|
|
+ struct lpfc_dmabuf *h_buf;
|
|
|
+ struct rqb_dmabuf *rqb_buffer;
|
|
|
+
|
|
|
+ rqbp = rq->rqbp;
|
|
|
+ while (!list_empty(&rqbp->rqb_buffer_list)) {
|
|
|
+ list_remove_head(&rqbp->rqb_buffer_list, h_buf,
|
|
|
+ struct lpfc_dmabuf, list);
|
|
|
+
|
|
|
+ rqb_buffer = container_of(h_buf, struct rqb_dmabuf, hbuf);
|
|
|
+ (rqbp->rqb_free_buffer)(phba, rqb_buffer);
|
|
|
+ rqbp->buffer_count--;
|
|
|
}
|
|
|
- if (phba->sli4_hba.dat_rq != NULL) {
|
|
|
- lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
|
|
|
- phba->sli4_hba.dat_rq = NULL;
|
|
|
+ return 1;
|
|
|
+}
|
|
|
+
|
|
|
+static int
|
|
|
+lpfc_create_wq_cq(struct lpfc_hba *phba, struct lpfc_queue *eq,
|
|
|
+ struct lpfc_queue *cq, struct lpfc_queue *wq, uint16_t *cq_map,
|
|
|
+ int qidx, uint32_t qtype)
|
|
|
+{
|
|
|
+ struct lpfc_sli_ring *pring;
|
|
|
+ int rc;
|
|
|
+
|
|
|
+ if (!eq || !cq || !wq) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "6085 Fast-path %s (%d) not allocated\n",
|
|
|
+ ((eq) ? ((cq) ? "WQ" : "CQ") : "EQ"), qidx);
|
|
|
+ return -ENOMEM;
|
|
|
}
|
|
|
|
|
|
- /* Release ELS complete queue */
|
|
|
- if (phba->sli4_hba.els_cq != NULL) {
|
|
|
- lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
|
|
|
- phba->sli4_hba.els_cq = NULL;
|
|
|
+ /* create the Cq first */
|
|
|
+ rc = lpfc_cq_create(phba, cq, eq,
|
|
|
+ (qtype == LPFC_MBOX) ? LPFC_MCQ : LPFC_WCQ, qtype);
|
|
|
+ if (rc) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "6086 Failed setup of CQ (%d), rc = 0x%x\n",
|
|
|
+ qidx, (uint32_t)rc);
|
|
|
+ return rc;
|
|
|
}
|
|
|
|
|
|
- /* Release mailbox command complete queue */
|
|
|
- if (phba->sli4_hba.mbx_cq != NULL) {
|
|
|
- lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
|
|
|
- phba->sli4_hba.mbx_cq = NULL;
|
|
|
+ if (qtype != LPFC_MBOX) {
|
|
|
+ /* Setup nvme_cq_map for fast lookup */
|
|
|
+ if (cq_map)
|
|
|
+ *cq_map = cq->queue_id;
|
|
|
+
|
|
|
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
|
|
+ "6087 CQ setup: cq[%d]-id=%d, parent eq[%d]-id=%d\n",
|
|
|
+ qidx, cq->queue_id, qidx, eq->queue_id);
|
|
|
+
|
|
|
+ /* create the wq */
|
|
|
+ rc = lpfc_wq_create(phba, wq, cq, qtype);
|
|
|
+ if (rc) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "6123 Fail setup fastpath WQ (%d), rc = 0x%x\n",
|
|
|
+ qidx, (uint32_t)rc);
|
|
|
+ /* no need to tear down cq - caller will do so */
|
|
|
+ return rc;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* Bind this CQ/WQ to the NVME ring */
|
|
|
+ pring = wq->pring;
|
|
|
+ pring->sli.sli4.wqp = (void *)wq;
|
|
|
+ cq->pring = pring;
|
|
|
+
|
|
|
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
|
|
+ "2593 WQ setup: wq[%d]-id=%d assoc=%d, cq[%d]-id=%d\n",
|
|
|
+ qidx, wq->queue_id, wq->assoc_qid, qidx, cq->queue_id);
|
|
|
+ } else {
|
|
|
+ rc = lpfc_mq_create(phba, wq, cq, LPFC_MBOX);
|
|
|
+ if (rc) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "0539 Failed setup of slow-path MQ: "
|
|
|
+ "rc = 0x%x\n", rc);
|
|
|
+ /* no need to tear down cq - caller will do so */
|
|
|
+ return rc;
|
|
|
+ }
|
|
|
+
|
|
|
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
|
|
+ "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
|
|
|
+ phba->sli4_hba.mbx_wq->queue_id,
|
|
|
+ phba->sli4_hba.mbx_cq->queue_id);
|
|
|
}
|
|
|
|
|
|
- return;
|
|
|
+ return 0;
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
@@ -7615,15 +8369,12 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
|
|
|
int
|
|
|
lpfc_sli4_queue_setup(struct lpfc_hba *phba)
|
|
|
{
|
|
|
- struct lpfc_sli *psli = &phba->sli;
|
|
|
- struct lpfc_sli_ring *pring;
|
|
|
- int rc = -ENOMEM;
|
|
|
- int fcp_eqidx, fcp_cqidx, fcp_wqidx;
|
|
|
- int fcp_cq_index = 0;
|
|
|
uint32_t shdr_status, shdr_add_status;
|
|
|
union lpfc_sli4_cfg_shdr *shdr;
|
|
|
LPFC_MBOXQ_t *mboxq;
|
|
|
- uint32_t length;
|
|
|
+ int qidx;
|
|
|
+ uint32_t length, io_channel;
|
|
|
+ int rc = -ENOMEM;
|
|
|
|
|
|
/* Check for dual-ULP support */
|
|
|
mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
|
|
@@ -7673,220 +8424,263 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
|
|
|
/*
|
|
|
* Set up HBA Event Queues (EQs)
|
|
|
*/
|
|
|
+ io_channel = phba->io_channel_irqs;
|
|
|
|
|
|
/* Set up HBA event queue */
|
|
|
- if (phba->cfg_fcp_io_channel && !phba->sli4_hba.hba_eq) {
|
|
|
+ if (io_channel && !phba->sli4_hba.hba_eq) {
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
"3147 Fast-path EQs not allocated\n");
|
|
|
rc = -ENOMEM;
|
|
|
goto out_error;
|
|
|
}
|
|
|
- for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel; fcp_eqidx++) {
|
|
|
- if (!phba->sli4_hba.hba_eq[fcp_eqidx]) {
|
|
|
+ for (qidx = 0; qidx < io_channel; qidx++) {
|
|
|
+ if (!phba->sli4_hba.hba_eq[qidx]) {
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
"0522 Fast-path EQ (%d) not "
|
|
|
- "allocated\n", fcp_eqidx);
|
|
|
+ "allocated\n", qidx);
|
|
|
rc = -ENOMEM;
|
|
|
- goto out_destroy_hba_eq;
|
|
|
+ goto out_destroy;
|
|
|
}
|
|
|
- rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[fcp_eqidx],
|
|
|
- (phba->cfg_fcp_imax / phba->cfg_fcp_io_channel));
|
|
|
+ rc = lpfc_eq_create(phba, phba->sli4_hba.hba_eq[qidx],
|
|
|
+ phba->cfg_fcp_imax);
|
|
|
if (rc) {
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
"0523 Failed setup of fast-path EQ "
|
|
|
- "(%d), rc = 0x%x\n", fcp_eqidx,
|
|
|
+ "(%d), rc = 0x%x\n", qidx,
|
|
|
(uint32_t)rc);
|
|
|
- goto out_destroy_hba_eq;
|
|
|
+ goto out_destroy;
|
|
|
}
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
|
|
- "2584 HBA EQ setup: "
|
|
|
- "queue[%d]-id=%d\n", fcp_eqidx,
|
|
|
- phba->sli4_hba.hba_eq[fcp_eqidx]->queue_id);
|
|
|
+ "2584 HBA EQ setup: queue[%d]-id=%d\n",
|
|
|
+ qidx, phba->sli4_hba.hba_eq[qidx]->queue_id);
|
|
|
}
|
|
|
|
|
|
- /* Set up fast-path FCP Response Complete Queue */
|
|
|
- if (!phba->sli4_hba.fcp_cq) {
|
|
|
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
- "3148 Fast-path FCP CQ array not "
|
|
|
- "allocated\n");
|
|
|
- rc = -ENOMEM;
|
|
|
- goto out_destroy_hba_eq;
|
|
|
+ if (phba->cfg_nvme_io_channel) {
|
|
|
+ if (!phba->sli4_hba.nvme_cq || !phba->sli4_hba.nvme_wq) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "6084 Fast-path NVME %s array not allocated\n",
|
|
|
+				(phba->sli4_hba.nvme_cq) ? "WQ" : "CQ");
|
|
|
+ rc = -ENOMEM;
|
|
|
+ goto out_destroy;
|
|
|
+ }
|
|
|
+
|
|
|
+ for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++) {
|
|
|
+ rc = lpfc_create_wq_cq(phba,
|
|
|
+ phba->sli4_hba.hba_eq[
|
|
|
+ qidx % io_channel],
|
|
|
+ phba->sli4_hba.nvme_cq[qidx],
|
|
|
+ phba->sli4_hba.nvme_wq[qidx],
|
|
|
+ &phba->sli4_hba.nvme_cq_map[qidx],
|
|
|
+ qidx, LPFC_NVME);
|
|
|
+ if (rc) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "6123 Failed to setup fastpath "
|
|
|
+ "NVME WQ/CQ (%d), rc = 0x%x\n",
|
|
|
+ qidx, (uint32_t)rc);
|
|
|
+ goto out_destroy;
|
|
|
+ }
|
|
|
+ }
|
|
|
}
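Each NVME (and, below, FCP) WQ/CQ pair is attached to hba_eq[qidx % io_channel], so both protocols share the same EQ and interrupt vector pool rather than owning separate ones. A tiny sketch of that fold (plain C, illustrative counts):

#include <stdio.h>

int main(void)
{
	int io_channel = 4;	/* EQs / IRQ vectors actually created */
	int nvme_ch = 4;	/* NVME WQ/CQ pairs */
	int fcp_ch = 2;		/* FCP WQ/CQ pairs */

	/* Same fold as hba_eq[qidx % io_channel] above: FCP and NVME pairs
	 * with the same index end up on the same event queue. */
	for (int qidx = 0; qidx < nvme_ch; qidx++)
		printf("nvme wq/cq %d -> eq %d\n", qidx, qidx % io_channel);
	for (int qidx = 0; qidx < fcp_ch; qidx++)
		printf("fcp  wq/cq %d -> eq %d\n", qidx, qidx % io_channel);
	return 0;
}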
|
|
|
|
|
|
- for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_io_channel; fcp_cqidx++) {
|
|
|
- if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
|
|
|
+ if (phba->cfg_fcp_io_channel) {
|
|
|
+ /* Set up fast-path FCP Response Complete Queue */
|
|
|
+ if (!phba->sli4_hba.fcp_cq || !phba->sli4_hba.fcp_wq) {
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
- "0526 Fast-path FCP CQ (%d) not "
|
|
|
- "allocated\n", fcp_cqidx);
|
|
|
+ "3148 Fast-path FCP %s array not allocated\n",
|
|
|
+ phba->sli4_hba.fcp_cq ? "WQ" : "CQ");
|
|
|
rc = -ENOMEM;
|
|
|
- goto out_destroy_fcp_cq;
|
|
|
+ goto out_destroy;
|
|
|
}
|
|
|
- rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
|
|
|
- phba->sli4_hba.hba_eq[fcp_cqidx], LPFC_WCQ, LPFC_FCP);
|
|
|
- if (rc) {
|
|
|
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
- "0527 Failed setup of fast-path FCP "
|
|
|
- "CQ (%d), rc = 0x%x\n", fcp_cqidx,
|
|
|
- (uint32_t)rc);
|
|
|
- goto out_destroy_fcp_cq;
|
|
|
+
|
|
|
+ for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++) {
|
|
|
+ rc = lpfc_create_wq_cq(phba,
|
|
|
+ phba->sli4_hba.hba_eq[
|
|
|
+ qidx % io_channel],
|
|
|
+ phba->sli4_hba.fcp_cq[qidx],
|
|
|
+ phba->sli4_hba.fcp_wq[qidx],
|
|
|
+ &phba->sli4_hba.fcp_cq_map[qidx],
|
|
|
+ qidx, LPFC_FCP);
|
|
|
+ if (rc) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "0535 Failed to setup fastpath "
|
|
|
+ "FCP WQ/CQ (%d), rc = 0x%x\n",
|
|
|
+ qidx, (uint32_t)rc);
|
|
|
+ goto out_destroy;
|
|
|
+ }
|
|
|
}
|
|
|
+ }
|
|
|
|
|
|
- /* Setup fcp_cq_map for fast lookup */
|
|
|
- phba->sli4_hba.fcp_cq_map[fcp_cqidx] =
|
|
|
- phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id;
|
|
|
+ /*
|
|
|
+ * Set up Slow Path Complete Queues (CQs)
|
|
|
+ */
|
|
|
|
|
|
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
|
|
- "2588 FCP CQ setup: cq[%d]-id=%d, "
|
|
|
- "parent seq[%d]-id=%d\n",
|
|
|
- fcp_cqidx,
|
|
|
- phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
|
|
|
- fcp_cqidx,
|
|
|
- phba->sli4_hba.hba_eq[fcp_cqidx]->queue_id);
|
|
|
- }
|
|
|
+ /* Set up slow-path MBOX CQ/MQ */
|
|
|
|
|
|
- /* Set up fast-path FCP Work Queue */
|
|
|
- if (!phba->sli4_hba.fcp_wq) {
|
|
|
+ if (!phba->sli4_hba.mbx_cq || !phba->sli4_hba.mbx_wq) {
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
- "3149 Fast-path FCP WQ array not "
|
|
|
- "allocated\n");
|
|
|
+ "0528 %s not allocated\n",
|
|
|
+ phba->sli4_hba.mbx_cq ?
|
|
|
+ "Mailbox WQ" : "Mailbox CQ");
|
|
|
rc = -ENOMEM;
|
|
|
- goto out_destroy_fcp_cq;
|
|
|
+ goto out_destroy;
|
|
|
}
|
|
|
|
|
|
- for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_io_channel; fcp_wqidx++) {
|
|
|
- if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
|
|
|
+ rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
|
|
|
+ phba->sli4_hba.mbx_cq,
|
|
|
+ phba->sli4_hba.mbx_wq,
|
|
|
+ NULL, 0, LPFC_MBOX);
|
|
|
+ if (rc) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "0529 Failed setup of mailbox WQ/CQ: rc = 0x%x\n",
|
|
|
+ (uint32_t)rc);
|
|
|
+ goto out_destroy;
|
|
|
+ }
|
|
|
+ if (phba->nvmet_support) {
|
|
|
+ if (!phba->sli4_hba.nvmet_cqset) {
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
- "0534 Fast-path FCP WQ (%d) not "
|
|
|
- "allocated\n", fcp_wqidx);
|
|
|
+ "3165 Fast-path NVME CQ Set "
|
|
|
+ "array not allocated\n");
|
|
|
rc = -ENOMEM;
|
|
|
- goto out_destroy_fcp_wq;
|
|
|
+ goto out_destroy;
|
|
|
}
|
|
|
- rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
|
|
|
- phba->sli4_hba.fcp_cq[fcp_wqidx],
|
|
|
- LPFC_FCP);
|
|
|
- if (rc) {
|
|
|
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
- "0535 Failed setup of fast-path FCP "
|
|
|
- "WQ (%d), rc = 0x%x\n", fcp_wqidx,
|
|
|
- (uint32_t)rc);
|
|
|
- goto out_destroy_fcp_wq;
|
|
|
+ if (phba->cfg_nvmet_mrq > 1) {
|
|
|
+ rc = lpfc_cq_create_set(phba,
|
|
|
+ phba->sli4_hba.nvmet_cqset,
|
|
|
+ phba->sli4_hba.hba_eq,
|
|
|
+ LPFC_WCQ, LPFC_NVMET);
|
|
|
+ if (rc) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "3164 Failed setup of NVME CQ "
|
|
|
+ "Set, rc = 0x%x\n",
|
|
|
+ (uint32_t)rc);
|
|
|
+ goto out_destroy;
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ /* Set up NVMET Receive Complete Queue */
|
|
|
+ rc = lpfc_cq_create(phba, phba->sli4_hba.nvmet_cqset[0],
|
|
|
+ phba->sli4_hba.hba_eq[0],
|
|
|
+ LPFC_WCQ, LPFC_NVMET);
|
|
|
+ if (rc) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "6089 Failed setup NVMET CQ: "
|
|
|
+ "rc = 0x%x\n", (uint32_t)rc);
|
|
|
+ goto out_destroy;
|
|
|
+ }
|
|
|
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
|
|
+ "6090 NVMET CQ setup: cq-id=%d, "
|
|
|
+ "parent eq-id=%d\n",
|
|
|
+ phba->sli4_hba.nvmet_cqset[0]->queue_id,
|
|
|
+ phba->sli4_hba.hba_eq[0]->queue_id);
|
|
|
}
|
|
|
-
|
|
|
- /* Bind this WQ to the next FCP ring */
|
|
|
- pring = &psli->ring[MAX_SLI3_CONFIGURED_RINGS + fcp_wqidx];
|
|
|
- pring->sli.sli4.wqp = (void *)phba->sli4_hba.fcp_wq[fcp_wqidx];
|
|
|
- phba->sli4_hba.fcp_cq[fcp_wqidx]->pring = pring;
|
|
|
-
|
|
|
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
|
|
- "2591 FCP WQ setup: wq[%d]-id=%d, "
|
|
|
- "parent cq[%d]-id=%d\n",
|
|
|
- fcp_wqidx,
|
|
|
- phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
|
|
|
- fcp_cq_index,
|
|
|
- phba->sli4_hba.fcp_cq[fcp_wqidx]->queue_id);
|
|
|
}
|
|
|
- /*
|
|
|
- * Set up Complete Queues (CQs)
|
|
|
- */
|
|
|
|
|
|
- /* Set up slow-path MBOX Complete Queue as the first CQ */
|
|
|
- if (!phba->sli4_hba.mbx_cq) {
|
|
|
+ /* Set up slow-path ELS WQ/CQ */
|
|
|
+ if (!phba->sli4_hba.els_cq || !phba->sli4_hba.els_wq) {
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
- "0528 Mailbox CQ not allocated\n");
|
|
|
+ "0530 ELS %s not allocated\n",
|
|
|
+ phba->sli4_hba.els_cq ? "WQ" : "CQ");
|
|
|
rc = -ENOMEM;
|
|
|
- goto out_destroy_fcp_wq;
|
|
|
+ goto out_destroy;
|
|
|
}
|
|
|
- rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq,
|
|
|
- phba->sli4_hba.hba_eq[0], LPFC_MCQ, LPFC_MBOX);
|
|
|
+ rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
|
|
|
+ phba->sli4_hba.els_cq,
|
|
|
+ phba->sli4_hba.els_wq,
|
|
|
+ NULL, 0, LPFC_ELS);
|
|
|
if (rc) {
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
- "0529 Failed setup of slow-path mailbox CQ: "
|
|
|
- "rc = 0x%x\n", (uint32_t)rc);
|
|
|
- goto out_destroy_fcp_wq;
|
|
|
+ "0529 Failed setup of ELS WQ/CQ: rc = 0x%x\n",
|
|
|
+ (uint32_t)rc);
|
|
|
+ goto out_destroy;
|
|
|
}
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
|
|
- "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
|
|
|
- phba->sli4_hba.mbx_cq->queue_id,
|
|
|
- phba->sli4_hba.hba_eq[0]->queue_id);
|
|
|
+ "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
|
|
|
+ phba->sli4_hba.els_wq->queue_id,
|
|
|
+ phba->sli4_hba.els_cq->queue_id);
|
|
|
|
|
|
- /* Set up slow-path ELS Complete Queue */
|
|
|
- if (!phba->sli4_hba.els_cq) {
|
|
|
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
- "0530 ELS CQ not allocated\n");
|
|
|
- rc = -ENOMEM;
|
|
|
- goto out_destroy_mbx_cq;
|
|
|
- }
|
|
|
- rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq,
|
|
|
- phba->sli4_hba.hba_eq[0], LPFC_WCQ, LPFC_ELS);
|
|
|
- if (rc) {
|
|
|
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
- "0531 Failed setup of slow-path ELS CQ: "
|
|
|
+ if (phba->cfg_nvme_io_channel) {
|
|
|
+ /* Set up NVME LS Complete Queue */
|
|
|
+ if (!phba->sli4_hba.nvmels_cq || !phba->sli4_hba.nvmels_wq) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "6091 LS %s not allocated\n",
|
|
|
+ phba->sli4_hba.nvmels_cq ? "WQ" : "CQ");
|
|
|
+ rc = -ENOMEM;
|
|
|
+ goto out_destroy;
|
|
|
+ }
|
|
|
+ rc = lpfc_create_wq_cq(phba, phba->sli4_hba.hba_eq[0],
|
|
|
+ phba->sli4_hba.nvmels_cq,
|
|
|
+ phba->sli4_hba.nvmels_wq,
|
|
|
+ NULL, 0, LPFC_NVME_LS);
|
|
|
+ if (rc) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"0529 Failed setup of NVME LS WQ/CQ: "
 				"rc = 0x%x\n", (uint32_t)rc);
|
|
|
- goto out_destroy_mbx_cq;
|
|
|
+ goto out_destroy;
|
|
|
+ }
|
|
|
+
|
|
|
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"6096 NVME LS WQ setup: wq-id=%d, "
+			"parent cq-id=%d\n",
+			phba->sli4_hba.nvmels_wq->queue_id,
+			phba->sli4_hba.nvmels_cq->queue_id);
|
|
|
}
|
|
|
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
|
|
- "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
|
|
|
- phba->sli4_hba.els_cq->queue_id,
|
|
|
- phba->sli4_hba.hba_eq[0]->queue_id);
|
|
|
|
|
|
/*
|
|
|
- * Set up all the Work Queues (WQs)
|
|
|
+ * Create NVMET Receive Queue (RQ)
|
|
|
*/
|
|
|
+ if (phba->nvmet_support) {
|
|
|
+ if ((!phba->sli4_hba.nvmet_cqset) ||
|
|
|
+ (!phba->sli4_hba.nvmet_mrq_hdr) ||
|
|
|
+ (!phba->sli4_hba.nvmet_mrq_data)) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "6130 MRQ CQ Queues not "
|
|
|
+ "allocated\n");
|
|
|
+ rc = -ENOMEM;
|
|
|
+ goto out_destroy;
|
|
|
+ }
|
|
|
+ if (phba->cfg_nvmet_mrq > 1) {
|
|
|
+ rc = lpfc_mrq_create(phba,
|
|
|
+ phba->sli4_hba.nvmet_mrq_hdr,
|
|
|
+ phba->sli4_hba.nvmet_mrq_data,
|
|
|
+ phba->sli4_hba.nvmet_cqset,
|
|
|
+ LPFC_NVMET);
|
|
|
+ if (rc) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "6098 Failed setup of NVMET "
|
|
|
+ "MRQ: rc = 0x%x\n",
|
|
|
+ (uint32_t)rc);
|
|
|
+ goto out_destroy;
|
|
|
+ }
|
|
|
|
|
|
- /* Set up Mailbox Command Queue */
|
|
|
- if (!phba->sli4_hba.mbx_wq) {
|
|
|
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
- "0538 Slow-path MQ not allocated\n");
|
|
|
- rc = -ENOMEM;
|
|
|
- goto out_destroy_els_cq;
|
|
|
- }
|
|
|
- rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
|
|
|
- phba->sli4_hba.mbx_cq, LPFC_MBOX);
|
|
|
- if (rc) {
|
|
|
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
- "0539 Failed setup of slow-path MQ: "
|
|
|
- "rc = 0x%x\n", rc);
|
|
|
- goto out_destroy_els_cq;
|
|
|
- }
|
|
|
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
|
|
- "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
|
|
|
- phba->sli4_hba.mbx_wq->queue_id,
|
|
|
- phba->sli4_hba.mbx_cq->queue_id);
|
|
|
-
|
|
|
- /* Set up slow-path ELS Work Queue */
|
|
|
- if (!phba->sli4_hba.els_wq) {
|
|
|
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
- "0536 Slow-path ELS WQ not allocated\n");
|
|
|
- rc = -ENOMEM;
|
|
|
- goto out_destroy_mbx_wq;
|
|
|
- }
|
|
|
- rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
|
|
|
- phba->sli4_hba.els_cq, LPFC_ELS);
|
|
|
- if (rc) {
|
|
|
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
- "0537 Failed setup of slow-path ELS WQ: "
|
|
|
- "rc = 0x%x\n", (uint32_t)rc);
|
|
|
- goto out_destroy_mbx_wq;
|
|
|
- }
|
|
|
+ } else {
|
|
|
+ rc = lpfc_rq_create(phba,
|
|
|
+ phba->sli4_hba.nvmet_mrq_hdr[0],
|
|
|
+ phba->sli4_hba.nvmet_mrq_data[0],
|
|
|
+ phba->sli4_hba.nvmet_cqset[0],
|
|
|
+ LPFC_NVMET);
|
|
|
+ if (rc) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "6057 Failed setup of NVMET "
|
|
|
+ "Receive Queue: rc = 0x%x\n",
|
|
|
+ (uint32_t)rc);
|
|
|
+ goto out_destroy;
|
|
|
+ }
|
|
|
|
|
|
- /* Bind this WQ to the ELS ring */
|
|
|
- pring = &psli->ring[LPFC_ELS_RING];
|
|
|
- pring->sli.sli4.wqp = (void *)phba->sli4_hba.els_wq;
|
|
|
- phba->sli4_hba.els_cq->pring = pring;
|
|
|
+ lpfc_printf_log(
|
|
|
+ phba, KERN_INFO, LOG_INIT,
|
|
|
+ "6099 NVMET RQ setup: hdr-rq-id=%d, "
|
|
|
+ "dat-rq-id=%d parent cq-id=%d\n",
|
|
|
+ phba->sli4_hba.nvmet_mrq_hdr[0]->queue_id,
|
|
|
+ phba->sli4_hba.nvmet_mrq_data[0]->queue_id,
|
|
|
+ phba->sli4_hba.nvmet_cqset[0]->queue_id);
|
|
|
|
|
|
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
|
|
- "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
|
|
|
- phba->sli4_hba.els_wq->queue_id,
|
|
|
- phba->sli4_hba.els_cq->queue_id);
|
|
|
+ }
|
|
|
+ }
|
|
|
|
|
|
- /*
|
|
|
- * Create Receive Queue (RQ)
|
|
|
- */
|
|
|
if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
"0540 Receive Queue not allocated\n");
|
|
|
rc = -ENOMEM;
|
|
|
- goto out_destroy_els_wq;
|
|
|
+ goto out_destroy;
|
|
|
}
|
|
|
|
|
|
lpfc_rq_adjust_repost(phba, phba->sli4_hba.hdr_rq, LPFC_ELS_HBQ);
|
|
|
@@ -7898,7 +8692,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
"0541 Failed setup of Receive Queue: "
|
|
|
"rc = 0x%x\n", (uint32_t)rc);
|
|
|
- goto out_destroy_fcp_wq;
|
|
|
+ goto out_destroy;
|
|
|
}
|
|
|
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
|
|
@@ -7914,7 +8708,7 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
"0549 Failed setup of FOF Queues: "
|
|
|
"rc = 0x%x\n", rc);
|
|
|
- goto out_destroy_els_rq;
|
|
|
+ goto out_destroy;
|
|
|
}
|
|
|
}
|
|
|
|
|
|
@@ -7922,30 +8716,12 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
|
|
|
* Configure EQ delay multipier for interrupt coalescing using
|
|
|
* MODIFY_EQ_DELAY for all EQs created, LPFC_MAX_EQ_DELAY at a time.
|
|
|
*/
|
|
|
- for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_io_channel;
|
|
|
- fcp_eqidx += LPFC_MAX_EQ_DELAY)
|
|
|
- lpfc_modify_fcp_eq_delay(phba, fcp_eqidx);
|
|
|
+ for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY)
|
|
|
+ lpfc_modify_hba_eq_delay(phba, qidx);
|
|
|
return 0;
|
|
|
|
|
|
-out_destroy_els_rq:
|
|
|
- lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
|
|
|
-out_destroy_els_wq:
|
|
|
- lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
|
|
|
-out_destroy_mbx_wq:
|
|
|
- lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
|
|
|
-out_destroy_els_cq:
|
|
|
- lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
|
|
|
-out_destroy_mbx_cq:
|
|
|
- lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
|
|
|
-out_destroy_fcp_wq:
|
|
|
- for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
|
|
|
- lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
|
|
|
-out_destroy_fcp_cq:
|
|
|
- for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
|
|
|
- lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
|
|
|
-out_destroy_hba_eq:
|
|
|
- for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
|
|
|
- lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_eqidx]);
|
|
|
+out_destroy:
|
|
|
+ lpfc_sli4_queue_unset(phba);
|
|
|
out_error:
|
|
|
return rc;
|
|
|
}
|
|
|
@@ -7965,39 +8741,81 @@ out_error:
|
|
|
void
|
|
|
lpfc_sli4_queue_unset(struct lpfc_hba *phba)
|
|
|
{
|
|
|
- int fcp_qidx;
|
|
|
+ int qidx;
|
|
|
|
|
|
/* Unset the queues created for Flash Optimized Fabric operations */
|
|
|
if (phba->cfg_fof)
|
|
|
lpfc_fof_queue_destroy(phba);
|
|
|
+
|
|
|
/* Unset mailbox command work queue */
|
|
|
- lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
|
|
|
+ if (phba->sli4_hba.mbx_wq)
|
|
|
+ lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
|
|
|
+
|
|
|
+ /* Unset NVME LS work queue */
|
|
|
+ if (phba->sli4_hba.nvmels_wq)
|
|
|
+ lpfc_wq_destroy(phba, phba->sli4_hba.nvmels_wq);
|
|
|
+
|
|
|
/* Unset ELS work queue */
|
|
|
- lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
|
|
|
+	if (phba->sli4_hba.els_wq)
|
|
|
+ lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
|
|
|
+
|
|
|
/* Unset unsolicited receive queue */
|
|
|
- lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
|
|
|
+ if (phba->sli4_hba.hdr_rq)
|
|
|
+ lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq,
|
|
|
+ phba->sli4_hba.dat_rq);
|
|
|
+
|
|
|
/* Unset FCP work queue */
|
|
|
- if (phba->sli4_hba.fcp_wq) {
|
|
|
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
|
|
|
- fcp_qidx++)
|
|
|
- lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
|
|
|
+ if (phba->sli4_hba.fcp_wq)
|
|
|
+ for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
|
|
|
+ lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[qidx]);
|
|
|
+
|
|
|
+ /* Unset NVME work queue */
|
|
|
+ if (phba->sli4_hba.nvme_wq) {
|
|
|
+ for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
|
|
|
+ lpfc_wq_destroy(phba, phba->sli4_hba.nvme_wq[qidx]);
|
|
|
}
|
|
|
+
|
|
|
/* Unset mailbox command complete queue */
|
|
|
- lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
|
|
|
+ if (phba->sli4_hba.mbx_cq)
|
|
|
+ lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
|
|
|
+
|
|
|
/* Unset ELS complete queue */
|
|
|
- lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
|
|
|
- /* Unset FCP response complete queue */
|
|
|
- if (phba->sli4_hba.fcp_cq) {
|
|
|
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
|
|
|
- fcp_qidx++)
|
|
|
- lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
|
|
|
+ if (phba->sli4_hba.els_cq)
|
|
|
+ lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
|
|
|
+
|
|
|
+ /* Unset NVME LS complete queue */
|
|
|
+ if (phba->sli4_hba.nvmels_cq)
|
|
|
+ lpfc_cq_destroy(phba, phba->sli4_hba.nvmels_cq);
|
|
|
+
|
|
|
+ /* Unset NVME response complete queue */
|
|
|
+ if (phba->sli4_hba.nvme_cq)
|
|
|
+ for (qidx = 0; qidx < phba->cfg_nvme_io_channel; qidx++)
|
|
|
+ lpfc_cq_destroy(phba, phba->sli4_hba.nvme_cq[qidx]);
|
|
|
+
|
|
|
+ /* Unset NVMET MRQ queue */
|
|
|
+ if (phba->sli4_hba.nvmet_mrq_hdr) {
|
|
|
+ for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
|
|
|
+ lpfc_rq_destroy(phba,
|
|
|
+ phba->sli4_hba.nvmet_mrq_hdr[qidx],
|
|
|
+ phba->sli4_hba.nvmet_mrq_data[qidx]);
|
|
|
}
|
|
|
- /* Unset fast-path event queue */
|
|
|
- if (phba->sli4_hba.hba_eq) {
|
|
|
- for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_io_channel;
|
|
|
- fcp_qidx++)
|
|
|
- lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[fcp_qidx]);
|
|
|
+
|
|
|
+ /* Unset NVMET CQ Set complete queue */
|
|
|
+ if (phba->sli4_hba.nvmet_cqset) {
|
|
|
+ for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++)
|
|
|
+ lpfc_cq_destroy(phba,
|
|
|
+ phba->sli4_hba.nvmet_cqset[qidx]);
|
|
|
}
|
|
|
+
|
|
|
+ /* Unset FCP response complete queue */
|
|
|
+ if (phba->sli4_hba.fcp_cq)
|
|
|
+ for (qidx = 0; qidx < phba->cfg_fcp_io_channel; qidx++)
|
|
|
+ lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[qidx]);
|
|
|
+
|
|
|
+ /* Unset fast-path event queue */
|
|
|
+ if (phba->sli4_hba.hba_eq)
|
|
|
+ for (qidx = 0; qidx < phba->io_channel_irqs; qidx++)
|
|
|
+ lpfc_eq_destroy(phba, phba->sli4_hba.hba_eq[qidx]);
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
@@ -8484,16 +9302,7 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
|
|
|
* @phba: pointer to lpfc hba data structure.
|
|
|
*
|
|
|
* This routine is invoked to enable the MSI-X interrupt vectors to device
|
|
|
- * with SLI-3 interface specs. The kernel function pci_enable_msix_exact()
|
|
|
- * is called to enable the MSI-X vectors. Note that pci_enable_msix_exact(),
|
|
|
- * once invoked, enables either all or nothing, depending on the current
|
|
|
- * availability of PCI vector resources. The device driver is responsible
|
|
|
- * for calling the individual request_irq() to register each MSI-X vector
|
|
|
- * with a interrupt handler, which is done in this function. Note that
|
|
|
- * later when device is unloading, the driver should always call free_irq()
|
|
|
- * on all MSI-X vectors it has done request_irq() on before calling
|
|
|
- * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
|
|
|
- * will be left with MSI-X enabled and leaks its vectors.
|
|
|
+ * with SLI-3 interface specs.
|
|
|
*
|
|
|
* Return codes
|
|
|
* 0 - successful
|
|
|
@@ -8502,33 +9311,24 @@ lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
|
|
|
static int
|
|
|
lpfc_sli_enable_msix(struct lpfc_hba *phba)
|
|
|
{
|
|
|
- int rc, i;
|
|
|
+ int rc;
|
|
|
LPFC_MBOXQ_t *pmb;
|
|
|
|
|
|
/* Set up MSI-X multi-message vectors */
|
|
|
- for (i = 0; i < LPFC_MSIX_VECTORS; i++)
|
|
|
- phba->msix_entries[i].entry = i;
|
|
|
-
|
|
|
- /* Configure MSI-X capability structure */
|
|
|
- rc = pci_enable_msix_exact(phba->pcidev, phba->msix_entries,
|
|
|
- LPFC_MSIX_VECTORS);
|
|
|
- if (rc) {
|
|
|
+ rc = pci_alloc_irq_vectors(phba->pcidev,
|
|
|
+ LPFC_MSIX_VECTORS, LPFC_MSIX_VECTORS, PCI_IRQ_MSIX);
|
|
|
+ if (rc < 0) {
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
|
|
"0420 PCI enable MSI-X failed (%d)\n", rc);
|
|
|
goto vec_fail_out;
|
|
|
}
|
|
|
- for (i = 0; i < LPFC_MSIX_VECTORS; i++)
|
|
|
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
|
|
- "0477 MSI-X entry[%d]: vector=x%x "
|
|
|
- "message=%d\n", i,
|
|
|
- phba->msix_entries[i].vector,
|
|
|
- phba->msix_entries[i].entry);
|
|
|
+
|
|
|
/*
|
|
|
* Assign MSI-X vectors to interrupt handlers
|
|
|
*/
|
|
|
|
|
|
/* vector-0 is associated to slow-path handler */
|
|
|
- rc = request_irq(phba->msix_entries[0].vector,
|
|
|
+ rc = request_irq(pci_irq_vector(phba->pcidev, 0),
|
|
|
&lpfc_sli_sp_intr_handler, 0,
|
|
|
LPFC_SP_DRIVER_HANDLER_NAME, phba);
|
|
|
if (rc) {
|
|
|
@@ -8539,7 +9339,7 @@ lpfc_sli_enable_msix(struct lpfc_hba *phba)
|
|
|
}
|
|
|
|
|
|
/* vector-1 is associated to fast-path handler */
|
|
|
- rc = request_irq(phba->msix_entries[1].vector,
|
|
|
+ rc = request_irq(pci_irq_vector(phba->pcidev, 1),
|
|
|
&lpfc_sli_fp_intr_handler, 0,
|
|
|
LPFC_FP_DRIVER_HANDLER_NAME, phba);
|
|
|
|
|
|
@@ -8584,41 +9384,20 @@ mbx_fail_out:
|
|
|
|
|
|
mem_fail_out:
|
|
|
/* free the irq already requested */
|
|
|
- free_irq(phba->msix_entries[1].vector, phba);
|
|
|
+ free_irq(pci_irq_vector(phba->pcidev, 1), phba);
|
|
|
|
|
|
irq_fail_out:
|
|
|
/* free the irq already requested */
|
|
|
- free_irq(phba->msix_entries[0].vector, phba);
|
|
|
+ free_irq(pci_irq_vector(phba->pcidev, 0), phba);
|
|
|
|
|
|
msi_fail_out:
|
|
|
/* Unconfigure MSI-X capability structure */
|
|
|
- pci_disable_msix(phba->pcidev);
|
|
|
+ pci_free_irq_vectors(phba->pcidev);
|
|
|
|
|
|
vec_fail_out:
|
|
|
return rc;
|
|
|
}
|
|
|
|
|
|
-/**
|
|
|
- * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
|
|
|
- * @phba: pointer to lpfc hba data structure.
|
|
|
- *
|
|
|
- * This routine is invoked to release the MSI-X vectors and then disable the
|
|
|
- * MSI-X interrupt mode to device with SLI-3 interface spec.
|
|
|
- **/
|
|
|
-static void
|
|
|
-lpfc_sli_disable_msix(struct lpfc_hba *phba)
|
|
|
-{
|
|
|
- int i;
|
|
|
-
|
|
|
- /* Free up MSI-X multi-message vectors */
|
|
|
- for (i = 0; i < LPFC_MSIX_VECTORS; i++)
|
|
|
- free_irq(phba->msix_entries[i].vector, phba);
|
|
|
- /* Disable MSI-X */
|
|
|
- pci_disable_msix(phba->pcidev);
|
|
|
-
|
|
|
- return;
|
|
|
-}
|
|
|
-
|
|
|
/**
|
|
|
* lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
|
|
|
* @phba: pointer to lpfc hba data structure.
|
|
|
@@ -8658,24 +9437,6 @@ lpfc_sli_enable_msi(struct lpfc_hba *phba)
|
|
|
return rc;
|
|
|
}
|
|
|
|
|
|
-/**
|
|
|
- * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
|
|
|
- * @phba: pointer to lpfc hba data structure.
|
|
|
- *
|
|
|
- * This routine is invoked to disable the MSI interrupt mode to device with
|
|
|
- * SLI-3 interface spec. The driver calls free_irq() on MSI vector it has
|
|
|
- * done request_irq() on before calling pci_disable_msi(). Failure to do so
|
|
|
- * results in a BUG_ON() and a device will be left with MSI enabled and leaks
|
|
|
- * its vector.
|
|
|
- */
|
|
|
-static void
|
|
|
-lpfc_sli_disable_msi(struct lpfc_hba *phba)
|
|
|
-{
|
|
|
- free_irq(phba->pcidev->irq, phba);
|
|
|
- pci_disable_msi(phba->pcidev);
|
|
|
- return;
|
|
|
-}
|
|
|
-
|
|
|
/**
|
|
|
* lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
|
|
|
* @phba: pointer to lpfc hba data structure.
|
|
|
@@ -8747,107 +9508,50 @@ lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
|
|
|
static void
|
|
|
lpfc_sli_disable_intr(struct lpfc_hba *phba)
|
|
|
{
|
|
|
- /* Disable the currently initialized interrupt mode */
|
|
|
+ int nr_irqs, i;
|
|
|
+
|
|
|
if (phba->intr_type == MSIX)
|
|
|
- lpfc_sli_disable_msix(phba);
|
|
|
- else if (phba->intr_type == MSI)
|
|
|
- lpfc_sli_disable_msi(phba);
|
|
|
- else if (phba->intr_type == INTx)
|
|
|
- free_irq(phba->pcidev->irq, phba);
|
|
|
+ nr_irqs = LPFC_MSIX_VECTORS;
|
|
|
+ else
|
|
|
+ nr_irqs = 1;
|
|
|
+
|
|
|
+ for (i = 0; i < nr_irqs; i++)
|
|
|
+ free_irq(pci_irq_vector(phba->pcidev, i), phba);
|
|
|
+ pci_free_irq_vectors(phba->pcidev);
|
|
|
|
|
|
/* Reset interrupt management states */
|
|
|
phba->intr_type = NONE;
|
|
|
phba->sli.slistat.sli_intr = 0;
|
|
|
-
|
|
|
- return;
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
- * lpfc_find_next_cpu - Find next available CPU that matches the phys_id
|
|
|
+ * lpfc_cpu_affinity_check - Check vector CPU affinity mappings
|
|
|
* @phba: pointer to lpfc hba data structure.
|
|
|
+ * @vectors: number of msix vectors allocated.
|
|
|
*
|
|
|
- * Find next available CPU to use for IRQ to CPU affinity.
|
|
|
+ * The routine will figure out the CPU affinity assignment for every
|
|
|
+ * MSI-X vector allocated for the HBA. The hba_eq_hdl will be updated
|
|
|
+ * with a pointer to the CPU mask that defines ALL the CPUs this vector
|
|
|
+ * can be associated with. If the vector can be unquely associated with
|
|
|
+ * a single CPU, that CPU will be recorded in hba_eq_hdl[index].cpu.
|
|
|
+ * In addition, the CPU to IO channel mapping will be calculated
|
|
|
+ * and the phba->sli4_hba.cpu_map array will reflect this.
|
|
|
*/
|
|
|
-static int
|
|
|
-lpfc_find_next_cpu(struct lpfc_hba *phba, uint32_t phys_id)
|
|
|
+static void
|
|
|
+lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
|
|
|
{
|
|
|
struct lpfc_vector_map_info *cpup;
|
|
|
+ int index = 0;
|
|
|
+ int vec = 0;
|
|
|
int cpu;
|
|
|
-
|
|
|
- cpup = phba->sli4_hba.cpu_map;
|
|
|
- for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
|
|
|
- /* CPU must be online */
|
|
|
- if (cpu_online(cpu)) {
|
|
|
- if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
|
|
|
- (lpfc_used_cpu[cpu] == LPFC_VECTOR_MAP_EMPTY) &&
|
|
|
- (cpup->phys_id == phys_id)) {
|
|
|
- return cpu;
|
|
|
- }
|
|
|
- }
|
|
|
- cpup++;
|
|
|
- }
|
|
|
-
|
|
|
- /*
|
|
|
- * If we get here, we have used ALL CPUs for the specific
|
|
|
- * phys_id. Now we need to clear out lpfc_used_cpu and start
|
|
|
- * reusing CPUs.
|
|
|
- */
|
|
|
-
|
|
|
- for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
|
|
|
- if (lpfc_used_cpu[cpu] == phys_id)
|
|
|
- lpfc_used_cpu[cpu] = LPFC_VECTOR_MAP_EMPTY;
|
|
|
- }
|
|
|
-
|
|
|
- cpup = phba->sli4_hba.cpu_map;
|
|
|
- for (cpu = 0; cpu < phba->sli4_hba.num_present_cpu; cpu++) {
|
|
|
- /* CPU must be online */
|
|
|
- if (cpu_online(cpu)) {
|
|
|
- if ((cpup->irq == LPFC_VECTOR_MAP_EMPTY) &&
|
|
|
- (cpup->phys_id == phys_id)) {
|
|
|
- return cpu;
|
|
|
- }
|
|
|
- }
|
|
|
- cpup++;
|
|
|
- }
|
|
|
- return LPFC_VECTOR_MAP_EMPTY;
|
|
|
-}
|
|
|
-
|
|
|
-/**
|
|
|
- * lpfc_sli4_set_affinity - Set affinity for HBA IRQ vectors
|
|
|
- * @phba: pointer to lpfc hba data structure.
|
|
|
- * @vectors: number of HBA vectors
|
|
|
- *
|
|
|
- * Affinitize MSIX IRQ vectors to CPUs. Try to equally spread vector
|
|
|
- * affinization across multple physical CPUs (numa nodes).
|
|
|
- * In addition, this routine will assign an IO channel for each CPU
|
|
|
- * to use when issuing I/Os.
|
|
|
- */
|
|
|
-static int
|
|
|
-lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
|
|
|
-{
|
|
|
- int i, idx, saved_chann, used_chann, cpu, phys_id;
|
|
|
- int max_phys_id, min_phys_id;
|
|
|
- int num_io_channel, first_cpu, chan;
|
|
|
- struct lpfc_vector_map_info *cpup;
|
|
|
#ifdef CONFIG_X86
|
|
|
struct cpuinfo_x86 *cpuinfo;
|
|
|
#endif
|
|
|
- uint8_t chann[LPFC_FCP_IO_CHAN_MAX+1];
|
|
|
-
|
|
|
- /* If there is no mapping, just return */
|
|
|
- if (!phba->cfg_fcp_cpu_map)
|
|
|
- return 1;
|
|
|
|
|
|
/* Init cpu_map array */
|
|
|
memset(phba->sli4_hba.cpu_map, 0xff,
|
|
|
(sizeof(struct lpfc_vector_map_info) *
|
|
|
- phba->sli4_hba.num_present_cpu));
|
|
|
-
|
|
|
- max_phys_id = 0;
|
|
|
- min_phys_id = 0xff;
|
|
|
- phys_id = 0;
|
|
|
- num_io_channel = 0;
|
|
|
- first_cpu = LPFC_VECTOR_MAP_EMPTY;
|
|
|
+ phba->sli4_hba.num_present_cpu));
|
|
|
|
|
|
/* Update CPU map with physical id and core id of each CPU */
|
|
|
cpup = phba->sli4_hba.cpu_map;
|
|
|
@@ -8861,184 +9565,16 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors)
|
|
|
cpup->phys_id = 0;
|
|
|
cpup->core_id = 0;
|
|
|
#endif
|
|
|
-
|
|
|
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
|
|
- "3328 CPU physid %d coreid %d\n",
|
|
|
- cpup->phys_id, cpup->core_id);
|
|
|
-
|
|
|
- if (cpup->phys_id > max_phys_id)
|
|
|
- max_phys_id = cpup->phys_id;
|
|
|
- if (cpup->phys_id < min_phys_id)
|
|
|
- min_phys_id = cpup->phys_id;
|
|
|
+ cpup->channel_id = index; /* For now round robin */
|
|
|
+ cpup->irq = pci_irq_vector(phba->pcidev, vec);
|
|
|
+ vec++;
|
|
|
+ if (vec >= vectors)
|
|
|
+ vec = 0;
|
|
|
+ index++;
|
|
|
+ if (index >= phba->cfg_fcp_io_channel)
|
|
|
+ index = 0;
|
|
|
cpup++;
|
|
|
}
|
|
|
-
|
|
|
- phys_id = min_phys_id;
|
|
|
- /* Now associate the HBA vectors with specific CPUs */
|
|
|
- for (idx = 0; idx < vectors; idx++) {
|
|
|
- cpup = phba->sli4_hba.cpu_map;
|
|
|
- cpu = lpfc_find_next_cpu(phba, phys_id);
|
|
|
- if (cpu == LPFC_VECTOR_MAP_EMPTY) {
|
|
|
-
|
|
|
- /* Try for all phys_id's */
|
|
|
- for (i = 1; i < max_phys_id; i++) {
|
|
|
- phys_id++;
|
|
|
- if (phys_id > max_phys_id)
|
|
|
- phys_id = min_phys_id;
|
|
|
- cpu = lpfc_find_next_cpu(phba, phys_id);
|
|
|
- if (cpu == LPFC_VECTOR_MAP_EMPTY)
|
|
|
- continue;
|
|
|
- goto found;
|
|
|
- }
|
|
|
-
|
|
|
- /* Use round robin for scheduling */
|
|
|
- phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN;
|
|
|
- chan = 0;
|
|
|
- cpup = phba->sli4_hba.cpu_map;
|
|
|
- for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
|
|
|
- cpup->channel_id = chan;
|
|
|
- cpup++;
|
|
|
- chan++;
|
|
|
- if (chan >= phba->cfg_fcp_io_channel)
|
|
|
- chan = 0;
|
|
|
- }
|
|
|
-
|
|
|
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
- "3329 Cannot set affinity:"
|
|
|
- "Error mapping vector %d (%d)\n",
|
|
|
- idx, vectors);
|
|
|
- return 0;
|
|
|
- }
|
|
|
-found:
|
|
|
- cpup += cpu;
|
|
|
- if (phba->cfg_fcp_cpu_map == LPFC_DRIVER_CPU_MAP)
|
|
|
- lpfc_used_cpu[cpu] = phys_id;
|
|
|
-
|
|
|
- /* Associate vector with selected CPU */
|
|
|
- cpup->irq = phba->sli4_hba.msix_entries[idx].vector;
|
|
|
-
|
|
|
- /* Associate IO channel with selected CPU */
|
|
|
- cpup->channel_id = idx;
|
|
|
- num_io_channel++;
|
|
|
-
|
|
|
- if (first_cpu == LPFC_VECTOR_MAP_EMPTY)
|
|
|
- first_cpu = cpu;
|
|
|
-
|
|
|
- /* Now affinitize to the selected CPU */
|
|
|
- i = irq_set_affinity_hint(phba->sli4_hba.msix_entries[idx].
|
|
|
- vector, get_cpu_mask(cpu));
|
|
|
-
|
|
|
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
|
|
- "3330 Set Affinity: CPU %d channel %d "
|
|
|
- "irq %d (%x)\n",
|
|
|
- cpu, cpup->channel_id,
|
|
|
- phba->sli4_hba.msix_entries[idx].vector, i);
|
|
|
-
|
|
|
- /* Spread vector mapping across multple physical CPU nodes */
|
|
|
- phys_id++;
|
|
|
- if (phys_id > max_phys_id)
|
|
|
- phys_id = min_phys_id;
|
|
|
- }
|
|
|
-
|
|
|
- /*
|
|
|
- * Finally fill in the IO channel for any remaining CPUs.
|
|
|
- * At this point, all IO channels have been assigned to a specific
|
|
|
- * MSIx vector, mapped to a specific CPU.
|
|
|
- * Base the remaining IO channel assigned, to IO channels already
|
|
|
- * assigned to other CPUs on the same phys_id.
|
|
|
- */
|
|
|
- for (i = min_phys_id; i <= max_phys_id; i++) {
|
|
|
- /*
|
|
|
- * If there are no io channels already mapped to
|
|
|
- * this phys_id, just round robin thru the io_channels.
|
|
|
- * Setup chann[] for round robin.
|
|
|
- */
|
|
|
- for (idx = 0; idx < phba->cfg_fcp_io_channel; idx++)
|
|
|
- chann[idx] = idx;
|
|
|
-
|
|
|
- saved_chann = 0;
|
|
|
- used_chann = 0;
|
|
|
-
|
|
|
- /*
|
|
|
- * First build a list of IO channels already assigned
|
|
|
- * to this phys_id before reassigning the same IO
|
|
|
- * channels to the remaining CPUs.
|
|
|
- */
|
|
|
- cpup = phba->sli4_hba.cpu_map;
|
|
|
- cpu = first_cpu;
|
|
|
- cpup += cpu;
|
|
|
- for (idx = 0; idx < phba->sli4_hba.num_present_cpu;
|
|
|
- idx++) {
|
|
|
- if (cpup->phys_id == i) {
|
|
|
- /*
|
|
|
- * Save any IO channels that are
|
|
|
- * already mapped to this phys_id.
|
|
|
- */
|
|
|
- if (cpup->irq != LPFC_VECTOR_MAP_EMPTY) {
|
|
|
- if (saved_chann <=
|
|
|
- LPFC_FCP_IO_CHAN_MAX) {
|
|
|
- chann[saved_chann] =
|
|
|
- cpup->channel_id;
|
|
|
- saved_chann++;
|
|
|
- }
|
|
|
- goto out;
|
|
|
- }
|
|
|
-
|
|
|
- /* See if we are using round-robin */
|
|
|
- if (saved_chann == 0)
|
|
|
- saved_chann =
|
|
|
- phba->cfg_fcp_io_channel;
|
|
|
-
|
|
|
- /* Associate next IO channel with CPU */
|
|
|
- cpup->channel_id = chann[used_chann];
|
|
|
- num_io_channel++;
|
|
|
- used_chann++;
|
|
|
- if (used_chann == saved_chann)
|
|
|
- used_chann = 0;
|
|
|
-
|
|
|
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
|
|
- "3331 Set IO_CHANN "
|
|
|
- "CPU %d channel %d\n",
|
|
|
- idx, cpup->channel_id);
|
|
|
- }
|
|
|
-out:
|
|
|
- cpu++;
|
|
|
- if (cpu >= phba->sli4_hba.num_present_cpu) {
|
|
|
- cpup = phba->sli4_hba.cpu_map;
|
|
|
- cpu = 0;
|
|
|
- } else {
|
|
|
- cpup++;
|
|
|
- }
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- if (phba->sli4_hba.num_online_cpu != phba->sli4_hba.num_present_cpu) {
|
|
|
- cpup = phba->sli4_hba.cpu_map;
|
|
|
- for (idx = 0; idx < phba->sli4_hba.num_present_cpu; idx++) {
|
|
|
- if (cpup->channel_id == LPFC_VECTOR_MAP_EMPTY) {
|
|
|
- cpup->channel_id = 0;
|
|
|
- num_io_channel++;
|
|
|
-
|
|
|
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
|
|
- "3332 Assign IO_CHANN "
|
|
|
- "CPU %d channel %d\n",
|
|
|
- idx, cpup->channel_id);
|
|
|
- }
|
|
|
- cpup++;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- /* Sanity check */
|
|
|
- if (num_io_channel != phba->sli4_hba.num_present_cpu)
|
|
|
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
- "3333 Set affinity mismatch:"
|
|
|
- "%d chann != %d cpus: %d vectors\n",
|
|
|
- num_io_channel, phba->sli4_hba.num_present_cpu,
|
|
|
- vectors);
|
|
|
-
|
|
|
- /* Enable using cpu affinity for scheduling */
|
|
|
- phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU;
|
|
|
- return 1;
|
|
|
}
|
|
|
|
|
|
|
|
|
@@ -9047,14 +9583,7 @@ out:
|
|
|
* @phba: pointer to lpfc hba data structure.
|
|
|
*
|
|
|
* This routine is invoked to enable the MSI-X interrupt vectors to device
|
|
|
- * with SLI-4 interface spec. The kernel function pci_enable_msix_range()
|
|
|
- * is called to enable the MSI-X vectors. The device driver is responsible
|
|
|
- * for calling the individual request_irq() to register each MSI-X vector
|
|
|
- * with a interrupt handler, which is done in this function. Note that
|
|
|
- * later when device is unloading, the driver should always call free_irq()
|
|
|
- * on all MSI-X vectors it has done request_irq() on before calling
|
|
|
- * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
|
|
|
- * will be left with MSI-X enabled and leaks its vectors.
|
|
|
+ * with SLI-4 interface spec.
|
|
|
*
|
|
|
* Return codes
|
|
|
* 0 - successful
|
|
|
@@ -9066,17 +9595,13 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
|
|
|
int vectors, rc, index;
|
|
|
|
|
|
/* Set up MSI-X multi-message vectors */
|
|
|
- for (index = 0; index < phba->cfg_fcp_io_channel; index++)
|
|
|
- phba->sli4_hba.msix_entries[index].entry = index;
|
|
|
-
|
|
|
- /* Configure MSI-X capability structure */
|
|
|
- vectors = phba->cfg_fcp_io_channel;
|
|
|
- if (phba->cfg_fof) {
|
|
|
- phba->sli4_hba.msix_entries[index].entry = index;
|
|
|
+ vectors = phba->io_channel_irqs;
|
|
|
+ if (phba->cfg_fof)
|
|
|
vectors++;
|
|
|
- }
|
|
|
- rc = pci_enable_msix_range(phba->pcidev, phba->sli4_hba.msix_entries,
|
|
|
- 2, vectors);
|
|
|
+
|
|
|
+ rc = pci_alloc_irq_vectors(phba->pcidev,
|
|
|
+ (phba->nvmet_support) ? 1 : 2,
|
|
|
+ vectors, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
|
|
|
if (rc < 0) {
|
|
|
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
|
|
"0484 PCI enable MSI-X failed (%d)\n", rc);
|
|
|
@@ -9084,14 +9609,6 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
|
|
|
}
|
|
|
vectors = rc;
|
|
|
|
|
|
- /* Log MSI-X vector assignment */
|
|
|
- for (index = 0; index < vectors; index++)
|
|
|
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
|
|
- "0489 MSI-X entry[%d]: vector=x%x "
|
|
|
- "message=%d\n", index,
|
|
|
- phba->sli4_hba.msix_entries[index].vector,
|
|
|
- phba->sli4_hba.msix_entries[index].entry);
|
|
|
-
|
|
|
/* Assign MSI-X vectors to interrupt handlers */
|
|
|
for (index = 0; index < vectors; index++) {
|
|
|
memset(&phba->sli4_hba.handler_name[index], 0, 16);
|
|
|
@@ -9099,21 +9616,19 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
|
|
|
LPFC_SLI4_HANDLER_NAME_SZ,
|
|
|
LPFC_DRIVER_HANDLER_NAME"%d", index);
|
|
|
|
|
|
- phba->sli4_hba.fcp_eq_hdl[index].idx = index;
|
|
|
- phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
|
|
|
- atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].fcp_eq_in_use, 1);
|
|
|
+ phba->sli4_hba.hba_eq_hdl[index].idx = index;
|
|
|
+ phba->sli4_hba.hba_eq_hdl[index].phba = phba;
|
|
|
+ atomic_set(&phba->sli4_hba.hba_eq_hdl[index].hba_eq_in_use, 1);
|
|
|
if (phba->cfg_fof && (index == (vectors - 1)))
|
|
|
- rc = request_irq(
|
|
|
- phba->sli4_hba.msix_entries[index].vector,
|
|
|
+ rc = request_irq(pci_irq_vector(phba->pcidev, index),
|
|
|
&lpfc_sli4_fof_intr_handler, 0,
|
|
|
(char *)&phba->sli4_hba.handler_name[index],
|
|
|
- &phba->sli4_hba.fcp_eq_hdl[index]);
|
|
|
+ &phba->sli4_hba.hba_eq_hdl[index]);
|
|
|
else
|
|
|
- rc = request_irq(
|
|
|
- phba->sli4_hba.msix_entries[index].vector,
|
|
|
+ rc = request_irq(pci_irq_vector(phba->pcidev, index),
|
|
|
&lpfc_sli4_hba_intr_handler, 0,
|
|
|
(char *)&phba->sli4_hba.handler_name[index],
|
|
|
- &phba->sli4_hba.fcp_eq_hdl[index]);
|
|
|
+ &phba->sli4_hba.hba_eq_hdl[index]);
|
|
|
if (rc) {
|
|
|
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
|
|
|
"0486 MSI-X fast-path (%d) "
|
|
|
@@ -9125,63 +9640,37 @@ lpfc_sli4_enable_msix(struct lpfc_hba *phba)
|
|
|
if (phba->cfg_fof)
|
|
|
vectors--;
|
|
|
|
|
|
- if (vectors != phba->cfg_fcp_io_channel) {
|
|
|
+ if (vectors != phba->io_channel_irqs) {
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
"3238 Reducing IO channels to match number of "
|
|
|
"MSI-X vectors, requested %d got %d\n",
|
|
|
- phba->cfg_fcp_io_channel, vectors);
|
|
|
- phba->cfg_fcp_io_channel = vectors;
|
|
|
+ phba->io_channel_irqs, vectors);
|
|
|
+ if (phba->cfg_fcp_io_channel > vectors)
|
|
|
+ phba->cfg_fcp_io_channel = vectors;
|
|
|
+ if (phba->cfg_nvme_io_channel > vectors)
|
|
|
+ phba->cfg_nvme_io_channel = vectors;
|
|
|
+ if (phba->cfg_fcp_io_channel > phba->cfg_nvme_io_channel)
|
|
|
+ phba->io_channel_irqs = phba->cfg_fcp_io_channel;
|
|
|
+ else
|
|
|
+ phba->io_channel_irqs = phba->cfg_nvme_io_channel;
|
|
|
}
|
|
|
+ lpfc_cpu_affinity_check(phba, vectors);
|
|
|
|
|
|
- if (!shost_use_blk_mq(lpfc_shost_from_vport(phba->pport)))
|
|
|
- lpfc_sli4_set_affinity(phba, vectors);
|
|
|
return rc;
|
|
|
|
|
|
cfg_fail_out:
|
|
|
/* free the irq already requested */
|
|
|
- for (--index; index >= 0; index--) {
|
|
|
- irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
|
|
|
- vector, NULL);
|
|
|
- free_irq(phba->sli4_hba.msix_entries[index].vector,
|
|
|
- &phba->sli4_hba.fcp_eq_hdl[index]);
|
|
|
- }
|
|
|
+ for (--index; index >= 0; index--)
|
|
|
+ free_irq(pci_irq_vector(phba->pcidev, index),
|
|
|
+ &phba->sli4_hba.hba_eq_hdl[index]);
|
|
|
|
|
|
/* Unconfigure MSI-X capability structure */
|
|
|
- pci_disable_msix(phba->pcidev);
|
|
|
+ pci_free_irq_vectors(phba->pcidev);
|
|
|
|
|
|
vec_fail_out:
|
|
|
return rc;
|
|
|
}
|
|
|
|
|
|
-/**
|
|
|
- * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
|
|
|
- * @phba: pointer to lpfc hba data structure.
|
|
|
- *
|
|
|
- * This routine is invoked to release the MSI-X vectors and then disable the
|
|
|
- * MSI-X interrupt mode to device with SLI-4 interface spec.
|
|
|
- **/
|
|
|
-static void
|
|
|
-lpfc_sli4_disable_msix(struct lpfc_hba *phba)
|
|
|
-{
|
|
|
- int index;
|
|
|
-
|
|
|
- /* Free up MSI-X multi-message vectors */
|
|
|
- for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
|
|
|
- irq_set_affinity_hint(phba->sli4_hba.msix_entries[index].
|
|
|
- vector, NULL);
|
|
|
- free_irq(phba->sli4_hba.msix_entries[index].vector,
|
|
|
- &phba->sli4_hba.fcp_eq_hdl[index]);
|
|
|
- }
|
|
|
- if (phba->cfg_fof) {
|
|
|
- free_irq(phba->sli4_hba.msix_entries[index].vector,
|
|
|
- &phba->sli4_hba.fcp_eq_hdl[index]);
|
|
|
- }
|
|
|
- /* Disable MSI-X */
|
|
|
- pci_disable_msix(phba->pcidev);
|
|
|
-
|
|
|
- return;
|
|
|
-}
|
|
|
-
|
|
|
/**
|
|
|
* lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
|
|
|
* @phba: pointer to lpfc hba data structure.
|
|
|
@@ -9220,36 +9709,18 @@ lpfc_sli4_enable_msi(struct lpfc_hba *phba)
|
|
|
return rc;
|
|
|
}
|
|
|
|
|
|
- for (index = 0; index < phba->cfg_fcp_io_channel; index++) {
|
|
|
- phba->sli4_hba.fcp_eq_hdl[index].idx = index;
|
|
|
- phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
|
|
|
+ for (index = 0; index < phba->io_channel_irqs; index++) {
|
|
|
+ phba->sli4_hba.hba_eq_hdl[index].idx = index;
|
|
|
+ phba->sli4_hba.hba_eq_hdl[index].phba = phba;
|
|
|
}
|
|
|
|
|
|
if (phba->cfg_fof) {
|
|
|
- phba->sli4_hba.fcp_eq_hdl[index].idx = index;
|
|
|
- phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
|
|
|
+ phba->sli4_hba.hba_eq_hdl[index].idx = index;
|
|
|
+ phba->sli4_hba.hba_eq_hdl[index].phba = phba;
|
|
|
}
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
-/**
|
|
|
- * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
|
|
|
- * @phba: pointer to lpfc hba data structure.
|
|
|
- *
|
|
|
- * This routine is invoked to disable the MSI interrupt mode to device with
|
|
|
- * SLI-4 interface spec. The driver calls free_irq() on MSI vector it has
|
|
|
- * done request_irq() on before calling pci_disable_msi(). Failure to do so
|
|
|
- * results in a BUG_ON() and a device will be left with MSI enabled and leaks
|
|
|
- * its vector.
|
|
|
- **/
|
|
|
-static void
|
|
|
-lpfc_sli4_disable_msi(struct lpfc_hba *phba)
|
|
|
-{
|
|
|
- free_irq(phba->pcidev->irq, phba);
|
|
|
- pci_disable_msi(phba->pcidev);
|
|
|
- return;
|
|
|
-}
|
|
|
-
|
|
|
/**
|
|
|
* lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
|
|
|
* @phba: pointer to lpfc hba data structure.
|
|
|
@@ -9270,7 +9741,7 @@ static uint32_t
|
|
|
lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
|
|
|
{
|
|
|
uint32_t intr_mode = LPFC_INTR_ERROR;
|
|
|
- int retval, index;
|
|
|
+ int retval, idx;
|
|
|
|
|
|
if (cfg_mode == 2) {
|
|
|
/* Preparation before conf_msi mbox cmd */
|
|
|
@@ -9301,21 +9772,23 @@ lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
|
|
|
retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
|
|
|
IRQF_SHARED, LPFC_DRIVER_NAME, phba);
|
|
|
if (!retval) {
|
|
|
+ struct lpfc_hba_eq_hdl *eqhdl;
|
|
|
+
|
|
|
/* Indicate initialization to INTx mode */
|
|
|
phba->intr_type = INTx;
|
|
|
intr_mode = 0;
|
|
|
- for (index = 0; index < phba->cfg_fcp_io_channel;
|
|
|
- index++) {
|
|
|
- phba->sli4_hba.fcp_eq_hdl[index].idx = index;
|
|
|
- phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
|
|
|
- atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
|
|
|
- fcp_eq_in_use, 1);
|
|
|
+
|
|
|
+ for (idx = 0; idx < phba->io_channel_irqs; idx++) {
|
|
|
+ eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
|
|
|
+ eqhdl->idx = idx;
|
|
|
+ eqhdl->phba = phba;
|
|
|
+ atomic_set(&eqhdl->hba_eq_in_use, 1);
|
|
|
}
|
|
|
if (phba->cfg_fof) {
|
|
|
- phba->sli4_hba.fcp_eq_hdl[index].idx = index;
|
|
|
- phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
|
|
|
- atomic_set(&phba->sli4_hba.fcp_eq_hdl[index].
|
|
|
- fcp_eq_in_use, 1);
|
|
|
+ eqhdl = &phba->sli4_hba.hba_eq_hdl[idx];
|
|
|
+ eqhdl->idx = idx;
|
|
|
+ eqhdl->phba = phba;
|
|
|
+ atomic_set(&eqhdl->hba_eq_in_use, 1);
|
|
|
}
|
|
|
}
|
|
|
}
|
|
|
@@ -9335,18 +9808,26 @@ static void
|
|
|
lpfc_sli4_disable_intr(struct lpfc_hba *phba)
|
|
|
{
|
|
|
/* Disable the currently initialized interrupt mode */
|
|
|
- if (phba->intr_type == MSIX)
|
|
|
- lpfc_sli4_disable_msix(phba);
|
|
|
- else if (phba->intr_type == MSI)
|
|
|
- lpfc_sli4_disable_msi(phba);
|
|
|
- else if (phba->intr_type == INTx)
|
|
|
+ if (phba->intr_type == MSIX) {
|
|
|
+ int index;
|
|
|
+
|
|
|
+ /* Free up MSI-X multi-message vectors */
|
|
|
+ for (index = 0; index < phba->io_channel_irqs; index++)
|
|
|
+ free_irq(pci_irq_vector(phba->pcidev, index),
|
|
|
+ &phba->sli4_hba.hba_eq_hdl[index]);
|
|
|
+
|
|
|
+ if (phba->cfg_fof)
|
|
|
+ free_irq(pci_irq_vector(phba->pcidev, index),
|
|
|
+ &phba->sli4_hba.hba_eq_hdl[index]);
|
|
|
+ } else {
|
|
|
free_irq(phba->pcidev->irq, phba);
|
|
|
+ }
|
|
|
+
|
|
|
+ pci_free_irq_vectors(phba->pcidev);
|
|
|
|
|
|
/* Reset interrupt management states */
|
|
|
phba->intr_type = NONE;
|
|
|
phba->sli.slistat.sli_intr = 0;
|
|
|
-
|
|
|
- return;
|
|
|
}
|
|
|
|
|
|
/**
|
|
|
@@ -9399,11 +9880,27 @@ static void
|
|
|
lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
|
|
|
{
|
|
|
int wait_time = 0;
|
|
|
- int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
|
|
|
+ int nvme_xri_cmpl = 1;
|
|
|
+ int fcp_xri_cmpl = 1;
|
|
|
int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
|
|
|
+ int nvmet_xri_cmpl =
|
|
|
+ list_empty(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list);
|
|
|
+
|
|
|
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
|
|
|
+ fcp_xri_cmpl =
|
|
|
+ list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
|
|
|
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
|
|
|
+ nvme_xri_cmpl =
|
|
|
+ list_empty(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
|
|
|
|
|
|
- while (!fcp_xri_cmpl || !els_xri_cmpl) {
|
|
|
+ while (!fcp_xri_cmpl || !els_xri_cmpl || !nvme_xri_cmpl ||
|
|
|
+ !nvmet_xri_cmpl) {
|
|
|
if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
|
|
|
+ if (!nvme_xri_cmpl)
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "6100 NVME XRI exchange busy "
|
|
|
+ "wait time: %d seconds.\n",
|
|
|
+ wait_time/1000);
|
|
|
if (!fcp_xri_cmpl)
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
"2877 FCP XRI exchange busy "
|
|
|
@@ -9420,10 +9917,19 @@ lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
|
|
|
msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
|
|
|
wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
|
|
|
}
|
|
|
- fcp_xri_cmpl =
|
|
|
- list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
|
|
|
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
|
|
|
+ nvme_xri_cmpl = list_empty(
|
|
|
+ &phba->sli4_hba.lpfc_abts_nvme_buf_list);
|
|
|
+
|
|
|
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
|
|
|
+ fcp_xri_cmpl = list_empty(
|
|
|
+ &phba->sli4_hba.lpfc_abts_scsi_buf_list);
|
|
|
+
|
|
|
els_xri_cmpl =
|
|
|
list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
|
|
|
+
|
|
|
+ nvmet_xri_cmpl =
|
|
|
+ list_empty(&phba->sli4_hba.lpfc_abts_nvmet_sgl_list);
|
|
|
}
|
|
|
}
|
|
|
|
|
|
@@ -9635,10 +10141,35 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
|
|
|
sli4_params->wqsize = bf_get(cfg_wqsize, mbx_sli4_parameters);
|
|
|
sli4_params->sgl_pages_max = bf_get(cfg_sgl_page_cnt,
|
|
|
mbx_sli4_parameters);
|
|
|
+ sli4_params->wqpcnt = bf_get(cfg_wqpcnt, mbx_sli4_parameters);
|
|
|
sli4_params->sgl_pp_align = bf_get(cfg_sgl_pp_align,
|
|
|
mbx_sli4_parameters);
|
|
|
phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
|
|
|
phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
|
|
|
+ phba->nvme_support = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
|
|
|
+ bf_get(cfg_xib, mbx_sli4_parameters));
|
|
|
+
|
|
|
+ if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) ||
|
|
|
+ !phba->nvme_support) {
|
|
|
+ phba->nvme_support = 0;
|
|
|
+ phba->nvmet_support = 0;
|
|
|
+ phba->cfg_nvmet_mrq = 0;
|
|
|
+ phba->cfg_nvme_io_channel = 0;
|
|
|
+ phba->io_channel_irqs = phba->cfg_fcp_io_channel;
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
|
|
|
+ "6101 Disabling NVME support: "
|
|
|
+ "Not supported by firmware: %d %d\n",
|
|
|
+ bf_get(cfg_nvme, mbx_sli4_parameters),
|
|
|
+ bf_get(cfg_xib, mbx_sli4_parameters));
|
|
|
+
|
|
|
+ /* If firmware doesn't support NVME, just use SCSI support */
|
|
|
+ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
|
|
|
+ return -ENODEV;
|
|
|
+ phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
|
|
|
+ }
|
|
|
+
|
|
|
+ if (bf_get(cfg_xib, mbx_sli4_parameters) && phba->cfg_suppress_rsp)
|
|
|
+ phba->sli.sli_flag |= LPFC_SLI_SUPPRESS_RSP;
|
|
|
|
|
|
/* Make sure that sge_supp_len can be handled by the driver */
|
|
|
if (sli4_params->sge_supp_len > LPFC_MAX_SGE_SIZE)
|
|
|
@@ -9713,14 +10244,6 @@ lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
|
|
|
goto out_disable_pci_dev;
|
|
|
}
|
|
|
|
|
|
- /* Set up phase-1 common device driver resources */
|
|
|
- error = lpfc_setup_driver_resource_phase1(phba);
|
|
|
- if (error) {
|
|
|
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
- "1403 Failed to set up driver resource.\n");
|
|
|
- goto out_unset_pci_mem_s3;
|
|
|
- }
|
|
|
-
|
|
|
/* Set up SLI-3 specific device driver resources */
|
|
|
error = lpfc_sli_driver_resource_setup(phba);
|
|
|
if (error) {
|
|
|
@@ -9876,7 +10399,13 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
|
|
|
/* Remove FC host and then SCSI host with the physical port */
|
|
|
fc_remove_host(shost);
|
|
|
scsi_remove_host(shost);
|
|
|
+
|
|
|
+ /* Perform ndlp cleanup on the physical port. The nvme and nvmet
|
|
|
+ * localports are destroyed after to cleanup all transport memory.
|
|
|
+ */
|
|
|
lpfc_cleanup(vport);
|
|
|
+ lpfc_nvmet_destroy_targetport(phba);
|
|
|
+ lpfc_nvme_destroy_localport(vport);
|
|
|
|
|
|
/*
|
|
|
* Bring down the SLI Layer. This step disable all interrupts,
|
|
|
@@ -10295,6 +10824,23 @@ lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
+/**
|
|
|
+ * lpfc_sli4_get_iocb_cnt - Calculate the # of total IOCBs to reserve
|
|
|
+ * @phba: pointer to lpfc hba data structure.
|
|
|
+ *
|
|
|
+ * returns the number of ELS/CT + NVMET IOCBs to reserve
|
|
|
+ **/
|
|
|
+int
|
|
|
+lpfc_sli4_get_iocb_cnt(struct lpfc_hba *phba)
|
|
|
+{
|
|
|
+ int max_xri = lpfc_sli4_get_els_iocb_cnt(phba);
|
|
|
+
|
|
|
+ if (phba->nvmet_support)
|
|
|
+ max_xri += LPFC_NVMET_BUF_POST;
|
|
|
+ return max_xri;
|
|
|
+}
|
|
|
+
|
|
|
+
|
|
|
/**
|
|
|
* lpfc_write_firmware - attempt to write a firmware image to the port
|
|
|
* @fw: pointer to firmware image returned from request_firmware.
|
|
|
@@ -10459,7 +11005,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
|
|
|
struct Scsi_Host *shost = NULL;
|
|
|
int error;
|
|
|
uint32_t cfg_mode, intr_mode;
|
|
|
- int adjusted_fcp_io_channel;
|
|
|
|
|
|
/* Allocate memory for HBA structure */
|
|
|
phba = lpfc_hba_alloc(pdev);
|
|
|
@@ -10484,14 +11029,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
|
|
|
goto out_disable_pci_dev;
|
|
|
}
|
|
|
|
|
|
- /* Set up phase-1 common device driver resources */
|
|
|
- error = lpfc_setup_driver_resource_phase1(phba);
|
|
|
- if (error) {
|
|
|
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
- "1411 Failed to set up driver resource.\n");
|
|
|
- goto out_unset_pci_mem_s4;
|
|
|
- }
|
|
|
-
|
|
|
/* Set up SLI-4 Specific device driver resources */
|
|
|
error = lpfc_sli4_driver_resource_setup(phba);
|
|
|
if (error) {
|
|
|
@@ -10550,6 +11087,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
|
|
|
|
|
|
/* Put device to a known state before enabling interrupt */
|
|
|
lpfc_stop_port(phba);
|
|
|
+
|
|
|
/* Configure and enable interrupt */
|
|
|
intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
|
|
|
if (intr_mode == LPFC_INTR_ERROR) {
|
|
|
@@ -10559,11 +11097,17 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
|
|
|
goto out_free_sysfs_attr;
|
|
|
}
|
|
|
/* Default to single EQ for non-MSI-X */
|
|
|
- if (phba->intr_type != MSIX)
|
|
|
- adjusted_fcp_io_channel = 1;
|
|
|
- else
|
|
|
- adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
|
|
|
- phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
|
|
|
+ if (phba->intr_type != MSIX) {
|
|
|
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP)
|
|
|
+ phba->cfg_fcp_io_channel = 1;
|
|
|
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
|
|
|
+ phba->cfg_nvme_io_channel = 1;
|
|
|
+ if (phba->nvmet_support)
|
|
|
+ phba->cfg_nvmet_mrq = 1;
|
|
|
+ }
|
|
|
+ phba->io_channel_irqs = 1;
|
|
|
+ }
|
|
|
+
|
|
|
/* Set up SLI-4 HBA */
|
|
|
if (lpfc_sli4_hba_setup(phba)) {
|
|
|
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
@@ -10579,6 +11123,24 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
|
|
|
/* Perform post initialization setup */
|
|
|
lpfc_post_init_setup(phba);
|
|
|
|
|
|
+ /* NVME support in FW earlier in the driver load corrects the
|
|
|
+ * FC4 type making a check for nvme_support unnecessary.
|
|
|
+ */
|
|
|
+ if ((phba->nvmet_support == 0) &&
|
|
|
+ (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
|
|
|
+ /* Create NVME binding with nvme_fc_transport. This
|
|
|
+ * ensures the vport is initialized.
|
|
|
+ */
|
|
|
+ error = lpfc_nvme_create_localport(vport);
|
|
|
+ if (error) {
|
|
|
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
|
|
+ "6004 NVME registration failed, "
|
|
|
+ "error x%x\n",
|
|
|
+ error);
|
|
|
+ goto out_disable_intr;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
/* check for firmware upgrade or downgrade */
|
|
|
if (phba->cfg_request_firmware_upgrade)
|
|
|
lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
|
|
|
@@ -10650,8 +11212,12 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
|
|
|
fc_remove_host(shost);
|
|
|
scsi_remove_host(shost);
|
|
|
|
|
|
- /* Perform cleanup on the physical port */
|
|
|
+ /* Perform ndlp cleanup on the physical port. The nvme and nvmet
|
|
|
+ * localports are destroyed after to cleanup all transport memory.
|
|
|
+ */
|
|
|
lpfc_cleanup(vport);
|
|
|
+ lpfc_nvmet_destroy_targetport(phba);
|
|
|
+ lpfc_nvme_destroy_localport(vport);
|
|
|
|
|
|
/*
|
|
|
* Bring down the SLI Layer. This step disables all interrupts,
|
|
|
@@ -10669,6 +11235,8 @@ lpfc_pci_remove_one_s4(struct pci_dev *pdev)
|
|
|
* buffers are released to their corresponding pools here.
|
|
|
*/
|
|
|
lpfc_scsi_free(phba);
|
|
|
+ lpfc_nvme_free(phba);
|
|
|
+ lpfc_free_iocb_list(phba);
|
|
|
|
|
|
lpfc_sli4_driver_resource_unset(phba);
|
|
|
|
|
|
@@ -11314,7 +11882,7 @@ lpfc_sli4_oas_verify(struct lpfc_hba *phba)
|
|
|
int
|
|
|
lpfc_fof_queue_setup(struct lpfc_hba *phba)
|
|
|
{
|
|
|
- struct lpfc_sli *psli = &phba->sli;
|
|
|
+ struct lpfc_sli_ring *pring;
|
|
|
int rc;
|
|
|
|
|
|
rc = lpfc_eq_create(phba, phba->sli4_hba.fof_eq, LPFC_MAX_IMAX);
|
|
|
@@ -11333,8 +11901,11 @@ lpfc_fof_queue_setup(struct lpfc_hba *phba)
|
|
|
if (rc)
|
|
|
goto out_oas_wq;
|
|
|
|
|
|
- phba->sli4_hba.oas_cq->pring = &psli->ring[LPFC_FCP_OAS_RING];
|
|
|
- phba->sli4_hba.oas_ring = &psli->ring[LPFC_FCP_OAS_RING];
|
|
|
+ /* Bind this CQ/WQ to the NVME ring */
|
|
|
+ pring = phba->sli4_hba.oas_wq->pring;
|
|
|
+ pring->sli.sli4.wqp =
|
|
|
+ (void *)phba->sli4_hba.oas_wq;
|
|
|
+ phba->sli4_hba.oas_cq->pring = pring;
|
|
|
}
|
|
|
|
|
|
return 0;
|
|
|
@@ -11391,6 +11962,7 @@ lpfc_fof_queue_create(struct lpfc_hba *phba)
|
|
|
goto out_error;
|
|
|
|
|
|
phba->sli4_hba.oas_wq = qdesc;
|
|
|
+ list_add_tail(&qdesc->wq_list, &phba->sli4_hba.lpfc_wq_list);
|
|
|
|
|
|
}
|
|
|
return 0;
|
|
|
@@ -11476,7 +12048,6 @@ static struct miscdevice lpfc_mgmt_dev = {
|
|
|
static int __init
|
|
|
lpfc_init(void)
|
|
|
{
|
|
|
- int cpu;
|
|
|
int error = 0;
|
|
|
|
|
|
printk(LPFC_MODULE_DESC "\n");
|
|
|
@@ -11502,9 +12073,7 @@ lpfc_init(void)
|
|
|
|
|
|
/* Initialize in case vector mapping is needed */
|
|
|
lpfc_used_cpu = NULL;
|
|
|
- lpfc_present_cpu = 0;
|
|
|
- for_each_present_cpu(cpu)
|
|
|
- lpfc_present_cpu++;
|
|
|
+ lpfc_present_cpu = num_present_cpus();
|
|
|
|
|
|
error = pci_register_driver(&lpfc_driver);
|
|
|
if (error) {
|
|
|
@@ -11550,5 +12119,5 @@ module_init(lpfc_init);
|
|
|
module_exit(lpfc_exit);
|
|
|
MODULE_LICENSE("GPL");
|
|
|
MODULE_DESCRIPTION(LPFC_MODULE_DESC);
|
|
|
-MODULE_AUTHOR("Emulex Corporation - tech.support@emulex.com");
|
|
|
+MODULE_AUTHOR("Broadcom");
|
|
|
MODULE_VERSION("0:" LPFC_DRIVER_VERSION);
|