@@ -23,7 +23,6 @@
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
-#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
@@ -68,6 +67,7 @@
__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
+#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT (5*HZ)
@@ -77,11 +77,13 @@ static char version[] __devinitdata =
DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
-MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
+MODULE_DESCRIPTION("Broadcom NetXtreme II "
+ "BCM57710/57711/57711E/57712/57712E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
+MODULE_FIRMWARE(FW_FILE_NAME_E2);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
@@ -124,6 +126,8 @@ enum bnx2x_board_type {
BCM57710 = 0,
BCM57711 = 1,
BCM57711E = 2,
+ BCM57712 = 3,
+ BCM57712E = 4
};

/* indexed by board_type, above */
@@ -132,14 +136,24 @@ static struct {
} board_info[] __devinitdata = {
{ "Broadcom NetXtreme II BCM57710 XGb" },
{ "Broadcom NetXtreme II BCM57711 XGb" },
- { "Broadcom NetXtreme II BCM57711E XGb" }
+ { "Broadcom NetXtreme II BCM57711E XGb" },
+ { "Broadcom NetXtreme II BCM57712 XGb" },
+ { "Broadcom NetXtreme II BCM57712E XGb" }
};

+#ifndef PCI_DEVICE_ID_NX2_57712
+#define PCI_DEVICE_ID_NX2_57712 0x1662
+#endif
+#ifndef PCI_DEVICE_ID_NX2_57712E
+#define PCI_DEVICE_ID_NX2_57712E 0x1663
+#endif

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
+ { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
{ 0 }
};

@@ -353,7 +367,8 @@ static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
u8 ticks)
{

- int index_offset =
+ int index_offset = CHIP_IS_E2(bp) ?
+ offsetof(struct hc_status_block_data_e2, index_data) :
offsetof(struct hc_status_block_data_e1x, index_data);
u32 addr = BAR_CSTRORM_INTMEM +
CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
@@ -369,7 +384,8 @@ static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
u8 disable)
{
u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
- int index_offset =
+ int index_offset = CHIP_IS_E2(bp) ?
+ offsetof(struct hc_status_block_data_e2, index_data) :
offsetof(struct hc_status_block_data_e1x, index_data);
u32 addr = BAR_CSTRORM_INTMEM +
CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
@@ -408,6 +424,75 @@ static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
return val;
}

+#define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
+#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
+#define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
+#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
+#define DMAE_DP_DST_NONE "dst_addr [none]"
+
+void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
+{
+ u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
+
+ switch (dmae->opcode & DMAE_COMMAND_DST) {
+ case DMAE_CMD_DST_PCI:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%08x], len [%d*4], dst [%x:%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ case DMAE_CMD_DST_GRC:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->dst_addr_lo >> 2,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ "src [%08x], len [%d*4], dst [%08x]\n"
+ "comp_addr [%x:%08x], comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->dst_addr_lo >> 2,
+ dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ default:
+ if (src_type == DMAE_CMD_SRC_PCI)
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
+ "dst_addr [none]\n"
+ DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
+ dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ else
+ DP(msglvl, "DMAE: opcode 0x%08x\n"
+ DP_LEVEL "src_addr [%08x] len [%d * 4] "
+ "dst_addr [none]\n"
+ DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
+ dmae->opcode, dmae->src_addr_lo >> 2,
+ dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
+ dmae->comp_val);
+ break;
+ }
+
+}
+
const u32 dmae_reg_go_c[] = {
|
|
|
DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
|
|
|
DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
|
|
|
@@ -431,85 +516,137 @@ void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
|
|
|
REG_WR(bp, dmae_reg_go_c[idx], 1);
|
|
|
}
|
|
|
|
|
|
-void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
|
|
|
- u32 len32)
|
|
|
+u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
|
|
|
{
|
|
|
- struct dmae_command dmae;
|
|
|
- u32 *wb_comp = bnx2x_sp(bp, wb_comp);
|
|
|
- int cnt = 200;
|
|
|
+ return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
|
|
|
+ DMAE_CMD_C_ENABLE);
|
|
|
+}
|
|
|
|
|
|
- if (!bp->dmae_ready) {
|
|
|
- u32 *data = bnx2x_sp(bp, wb_data[0]);
|
|
|
+u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
|
|
|
+{
|
|
|
+ return opcode & ~DMAE_CMD_SRC_RESET;
|
|
|
+}
|
|
|
|
|
|
- DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
|
|
|
- " using indirect\n", dst_addr, len32);
|
|
|
- bnx2x_init_ind_wr(bp, dst_addr, data, len32);
|
|
|
- return;
|
|
|
- }
|
|
|
+u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
|
|
|
+ bool with_comp, u8 comp_type)
|
|
|
+{
|
|
|
+ u32 opcode = 0;
|
|
|
+
|
|
|
+ opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
|
|
|
+ (dst_type << DMAE_COMMAND_DST_SHIFT));
|
|
|
|
|
|
- memset(&dmae, 0, sizeof(struct dmae_command));
|
|
|
+ opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
|
|
|
+
|
|
|
+ opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
|
|
|
+ opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
|
|
|
+ (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
|
|
|
+ opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
|
|
|
|
|
|
- dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
|
|
|
- DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
|
|
|
- DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
|
|
|
#ifdef __BIG_ENDIAN
|
|
|
- DMAE_CMD_ENDIANITY_B_DW_SWAP |
|
|
|
+ opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
|
|
|
#else
|
|
|
- DMAE_CMD_ENDIANITY_DW_SWAP |
|
|
|
+ opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
|
|
|
#endif
|
|
|
- (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
|
|
|
- (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
|
|
|
- dmae.src_addr_lo = U64_LO(dma_addr);
|
|
|
- dmae.src_addr_hi = U64_HI(dma_addr);
|
|
|
- dmae.dst_addr_lo = dst_addr >> 2;
|
|
|
- dmae.dst_addr_hi = 0;
|
|
|
- dmae.len = len32;
|
|
|
- dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
|
|
|
- dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
|
|
|
- dmae.comp_val = DMAE_COMP_VAL;
|
|
|
-
|
|
|
- DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
|
|
|
- DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
|
|
|
- "dst_addr [%x:%08x (%08x)]\n"
|
|
|
- DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
|
|
|
- dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
|
|
|
- dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
|
|
|
- dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
|
|
|
- DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
|
|
|
+ if (with_comp)
|
|
|
+ opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
|
|
|
+ return opcode;
|
|
|
+}
|
|
|
+
|
|
|
+void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
|
|
|
+ u8 src_type, u8 dst_type)
|
|
|
+{
|
|
|
+ memset(dmae, 0, sizeof(struct dmae_command));
|
|
|
+
|
|
|
+ /* set the opcode */
|
|
|
+ dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
|
|
|
+ true, DMAE_COMP_PCI);
|
|
|
+
|
|
|
+ /* fill in the completion parameters */
|
|
|
+ dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
|
|
|
+ dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
|
|
|
+ dmae->comp_val = DMAE_COMP_VAL;
|
|
|
+}
|
|
|
+
|
|
|
+/* issue a dmae command over the init-channel and wait for completion */
|
|
|
+int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
|
|
|
+{
|
|
|
+ u32 *wb_comp = bnx2x_sp(bp, wb_comp);
|
|
|
+ int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
|
|
|
+ int rc = 0;
|
|
|
+
|
|
|
+ DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
|
|
|
bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
|
|
|
bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
|
|
|
|
|
|
+ /* lock the dmae channel */
|
|
|
mutex_lock(&bp->dmae_mutex);
|
|
|
|
|
|
+ /* reset completion */
|
|
|
*wb_comp = 0;
|
|
|
|
|
|
- bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
|
|
|
+ /* post the command on the channel used for initializations */
|
|
|
+ bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
|
|
|
|
|
|
+ /* wait for completion */
|
|
|
udelay(5);
|
|
|
-
|
|
|
- while (*wb_comp != DMAE_COMP_VAL) {
|
|
|
+ while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
|
|
|
DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
|
|
|
|
|
|
if (!cnt) {
|
|
|
BNX2X_ERR("DMAE timeout!\n");
|
|
|
- break;
|
|
|
+ rc = DMAE_TIMEOUT;
|
|
|
+ goto unlock;
|
|
|
}
|
|
|
cnt--;
|
|
|
- /* adjust delay for emulation/FPGA */
|
|
|
- if (CHIP_REV_IS_SLOW(bp))
|
|
|
- msleep(100);
|
|
|
- else
|
|
|
- udelay(5);
|
|
|
+ udelay(50);
|
|
|
}
|
|
|
+ if (*wb_comp & DMAE_PCI_ERR_FLAG) {
|
|
|
+ BNX2X_ERR("DMAE PCI error!\n");
|
|
|
+ rc = DMAE_PCI_ERROR;
|
|
|
+ }
|
|
|
+
|
|
|
+ DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
|
|
|
+ bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
|
|
|
+ bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
|
|
|
|
|
|
+unlock:
|
|
|
mutex_unlock(&bp->dmae_mutex);
|
|
|
+ return rc;
|
|
|
+}
|
|
|
+
|
|
|
+void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
|
|
|
+ u32 len32)
|
|
|
+{
|
|
|
+ struct dmae_command dmae;
|
|
|
+
|
|
|
+ if (!bp->dmae_ready) {
|
|
|
+ u32 *data = bnx2x_sp(bp, wb_data[0]);
|
|
|
+
|
|
|
+ DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
|
|
|
+ " using indirect\n", dst_addr, len32);
|
|
|
+ bnx2x_init_ind_wr(bp, dst_addr, data, len32);
|
|
|
+ return;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* set opcode and fixed command fields */
|
|
|
+ bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
|
|
|
+
|
|
|
+ /* fill in addresses and len */
|
|
|
+ dmae.src_addr_lo = U64_LO(dma_addr);
|
|
|
+ dmae.src_addr_hi = U64_HI(dma_addr);
|
|
|
+ dmae.dst_addr_lo = dst_addr >> 2;
|
|
|
+ dmae.dst_addr_hi = 0;
|
|
|
+ dmae.len = len32;
|
|
|
+
|
|
|
+ bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
|
|
|
+
|
|
|
+ /* issue the command and wait for completion */
|
|
|
+ bnx2x_issue_dmae_with_comp(bp, &dmae);
|
|
|
}
|
|
|
|
|
|
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
|
|
|
{
|
|
|
struct dmae_command dmae;
|
|
|
- u32 *wb_comp = bnx2x_sp(bp, wb_comp);
|
|
|
- int cnt = 200;
|
|
|
|
|
|
if (!bp->dmae_ready) {
|
|
|
u32 *data = bnx2x_sp(bp, wb_data[0]);
|
|
|
@@ -522,62 +659,20 @@ void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
|
|
|
return;
|
|
|
}
|
|
|
|
|
|
- memset(&dmae, 0, sizeof(struct dmae_command));
|
|
|
+ /* set opcode and fixed command fields */
|
|
|
+ bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
|
|
|
|
|
|
- dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
|
|
|
- DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
|
|
|
- DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
|
|
|
-#ifdef __BIG_ENDIAN
|
|
|
- DMAE_CMD_ENDIANITY_B_DW_SWAP |
|
|
|
-#else
|
|
|
- DMAE_CMD_ENDIANITY_DW_SWAP |
|
|
|
-#endif
|
|
|
- (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
|
|
|
- (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
|
|
|
+ /* fill in addresses and len */
|
|
|
dmae.src_addr_lo = src_addr >> 2;
|
|
|
dmae.src_addr_hi = 0;
|
|
|
dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
|
|
|
dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
|
|
|
dmae.len = len32;
|
|
|
- dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
|
|
|
- dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
|
|
|
- dmae.comp_val = DMAE_COMP_VAL;
|
|
|
-
|
|
|
- DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
|
|
|
- DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
|
|
|
- "dst_addr [%x:%08x (%08x)]\n"
|
|
|
- DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
|
|
|
- dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
|
|
|
- dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
|
|
|
- dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
|
|
|
-
|
|
|
- mutex_lock(&bp->dmae_mutex);
|
|
|
-
|
|
|
- memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
|
|
|
- *wb_comp = 0;
|
|
|
-
|
|
|
- bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
|
|
|
-
|
|
|
- udelay(5);
|
|
|
|
|
|
- while (*wb_comp != DMAE_COMP_VAL) {
|
|
|
+ bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
|
|
|
|
|
|
- if (!cnt) {
|
|
|
- BNX2X_ERR("DMAE timeout!\n");
|
|
|
- break;
|
|
|
- }
|
|
|
- cnt--;
|
|
|
- /* adjust delay for emulation/FPGA */
|
|
|
- if (CHIP_REV_IS_SLOW(bp))
|
|
|
- msleep(100);
|
|
|
- else
|
|
|
- udelay(5);
|
|
|
- }
|
|
|
- DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
|
|
|
- bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
|
|
|
- bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
|
|
|
-
|
|
|
- mutex_unlock(&bp->dmae_mutex);
|
|
|
+ /* issue the command and wait for completion */
|
|
|
+ bnx2x_issue_dmae_with_comp(bp, &dmae);
|
|
|
}
|
|
|
|
|
|
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
|
|
|
@@ -744,19 +839,24 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
|
|
|
u32 mark, offset;
|
|
|
__be32 data[9];
|
|
|
int word;
|
|
|
-
|
|
|
+ u32 trace_shmem_base;
|
|
|
if (BP_NOMCP(bp)) {
|
|
|
BNX2X_ERR("NO MCP - can not dump\n");
|
|
|
return;
|
|
|
}
|
|
|
|
|
|
- addr = bp->common.shmem_base - 0x0800 + 4;
|
|
|
+ if (BP_PATH(bp) == 0)
|
|
|
+ trace_shmem_base = bp->common.shmem_base;
|
|
|
+ else
|
|
|
+ trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
|
|
|
+ addr = trace_shmem_base - 0x0800 + 4;
|
|
|
mark = REG_RD(bp, addr);
|
|
|
- mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
|
|
|
+ mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
|
|
|
+ + ((mark + 0x3) & ~0x3) - 0x08000000;
|
|
|
pr_err("begin fw dump (mark 0x%x)\n", mark);
|
|
|
|
|
|
pr_err("");
|
|
|
- for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
|
|
|
+ for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
|
|
|
for (word = 0; word < 8; word++)
|
|
|
data[word] = htonl(REG_RD(bp, offset + 4*word));
|
|
|
data[8] = 0x0;
|
|
|
@@ -822,10 +922,15 @@ void bnx2x_panic_dump(struct bnx2x *bp)
|
|
|
for_each_queue(bp, i) {
|
|
|
struct bnx2x_fastpath *fp = &bp->fp[i];
|
|
|
int loop;
|
|
|
+ struct hc_status_block_data_e2 sb_data_e2;
|
|
|
struct hc_status_block_data_e1x sb_data_e1x;
|
|
|
struct hc_status_block_sm *hc_sm_p =
|
|
|
+ CHIP_IS_E2(bp) ?
|
|
|
+ sb_data_e2.common.state_machine :
|
|
|
sb_data_e1x.common.state_machine;
|
|
|
struct hc_index_data *hc_index_p =
|
|
|
+ CHIP_IS_E2(bp) ?
|
|
|
+ sb_data_e2.index_data :
|
|
|
sb_data_e1x.index_data;
|
|
|
int data_size;
|
|
|
u32 *sb_data_p;
|
|
|
@@ -849,7 +954,8 @@ void bnx2x_panic_dump(struct bnx2x *bp)
|
|
|
i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
|
|
|
fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
|
|
|
|
|
|
- loop = HC_SB_MAX_INDICES_E1X;
|
|
|
+ loop = CHIP_IS_E2(bp) ?
|
|
|
+ HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;
|
|
|
|
|
|
/* host sb data */
|
|
|
|
|
|
@@ -865,23 +971,36 @@ void bnx2x_panic_dump(struct bnx2x *bp)
|
|
|
fp->sb_index_values[j],
|
|
|
(j == loop - 1) ? ")" : " ");
|
|
|
/* fw sb data */
|
|
|
- data_size =
|
|
|
+ data_size = CHIP_IS_E2(bp) ?
|
|
|
+ sizeof(struct hc_status_block_data_e2) :
|
|
|
sizeof(struct hc_status_block_data_e1x);
|
|
|
data_size /= sizeof(u32);
|
|
|
- sb_data_p = (u32 *)&sb_data_e1x;
|
|
|
+ sb_data_p = CHIP_IS_E2(bp) ?
|
|
|
+ (u32 *)&sb_data_e2 :
|
|
|
+ (u32 *)&sb_data_e1x;
|
|
|
/* copy sb data in here */
|
|
|
for (j = 0; j < data_size; j++)
|
|
|
*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
|
|
|
CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
|
|
|
j * sizeof(u32));
|
|
|
|
|
|
- pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
|
|
|
- "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
|
|
|
- sb_data_e1x.common.p_func.pf_id,
|
|
|
- sb_data_e1x.common.p_func.vf_id,
|
|
|
- sb_data_e1x.common.p_func.vf_valid,
|
|
|
- sb_data_e1x.common.p_func.vnic_id,
|
|
|
- sb_data_e1x.common.same_igu_sb_1b);
|
|
|
+ if (CHIP_IS_E2(bp)) {
|
|
|
+ pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
|
|
|
+ "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
|
|
|
+ sb_data_e2.common.p_func.pf_id,
|
|
|
+ sb_data_e2.common.p_func.vf_id,
|
|
|
+ sb_data_e2.common.p_func.vf_valid,
|
|
|
+ sb_data_e2.common.p_func.vnic_id,
|
|
|
+ sb_data_e2.common.same_igu_sb_1b);
|
|
|
+ } else {
|
|
|
+ pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
|
|
|
+ "vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
|
|
|
+ sb_data_e1x.common.p_func.pf_id,
|
|
|
+ sb_data_e1x.common.p_func.vf_id,
|
|
|
+ sb_data_e1x.common.p_func.vf_valid,
|
|
|
+ sb_data_e1x.common.p_func.vnic_id,
|
|
|
+ sb_data_e1x.common.same_igu_sb_1b);
|
|
|
+ }
|
|
|
|
|
|
/* SB_SMs data */
|
|
|
for (j = 0; j < HC_SB_MAX_SM; j++) {
|
|
|
@@ -969,7 +1088,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
|
|
|
BNX2X_ERR("end crash dump -----------------\n");
|
|
|
}
|
|
|
|
|
|
-void bnx2x_int_enable(struct bnx2x *bp)
|
|
|
+static void bnx2x_hc_int_enable(struct bnx2x *bp)
|
|
|
{
|
|
|
int port = BP_PORT(bp);
|
|
|
u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
|
|
|
@@ -1011,7 +1130,7 @@ void bnx2x_int_enable(struct bnx2x *bp)
|
|
|
mmiowb();
|
|
|
barrier();
|
|
|
|
|
|
- if (CHIP_IS_E1H(bp)) {
|
|
|
+ if (!CHIP_IS_E1(bp)) {
|
|
|
/* init leading/trailing edge */
|
|
|
if (IS_MF(bp)) {
|
|
|
val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
|
|
|
@@ -1029,7 +1148,66 @@ void bnx2x_int_enable(struct bnx2x *bp)
|
|
|
mmiowb();
|
|
|
}
|
|
|
|
|
|
-void bnx2x_int_disable(struct bnx2x *bp)
|
|
|
+static void bnx2x_igu_int_enable(struct bnx2x *bp)
|
|
|
+{
|
|
|
+ u32 val;
|
|
|
+ int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
|
|
|
+ int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
|
|
|
+
|
|
|
+ val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
|
|
|
+
|
|
|
+ if (msix) {
|
|
|
+ val &= ~(IGU_PF_CONF_INT_LINE_EN |
|
|
|
+ IGU_PF_CONF_SINGLE_ISR_EN);
|
|
|
+ val |= (IGU_PF_CONF_FUNC_EN |
|
|
|
+ IGU_PF_CONF_MSI_MSIX_EN |
|
|
|
+ IGU_PF_CONF_ATTN_BIT_EN);
|
|
|
+ } else if (msi) {
|
|
|
+ val &= ~IGU_PF_CONF_INT_LINE_EN;
|
|
|
+ val |= (IGU_PF_CONF_FUNC_EN |
|
|
|
+ IGU_PF_CONF_MSI_MSIX_EN |
|
|
|
+ IGU_PF_CONF_ATTN_BIT_EN |
|
|
|
+ IGU_PF_CONF_SINGLE_ISR_EN);
|
|
|
+ } else {
|
|
|
+ val &= ~IGU_PF_CONF_MSI_MSIX_EN;
|
|
|
+ val |= (IGU_PF_CONF_FUNC_EN |
|
|
|
+ IGU_PF_CONF_INT_LINE_EN |
|
|
|
+ IGU_PF_CONF_ATTN_BIT_EN |
|
|
|
+ IGU_PF_CONF_SINGLE_ISR_EN);
|
|
|
+ }
|
|
|
+
|
|
|
+ DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
|
|
|
+ val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
|
|
|
+
|
|
|
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
|
|
|
+
|
|
|
+ barrier();
|
|
|
+
|
|
|
+ /* init leading/trailing edge */
|
|
|
+ if (IS_MF(bp)) {
|
|
|
+ val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
|
|
|
+ if (bp->port.pmf)
|
|
|
+ /* enable nig and gpio3 attention */
|
|
|
+ val |= 0x1100;
|
|
|
+ } else
|
|
|
+ val = 0xffff;
|
|
|
+
|
|
|
+ REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
|
|
|
+ REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
|
|
|
+
|
|
|
+ /* Make sure that interrupts are indeed enabled from here on */
|
|
|
+ mmiowb();
|
|
|
+}
|
|
|
+
|
|
|
+void bnx2x_int_enable(struct bnx2x *bp)
|
|
|
+{
|
|
|
+ if (bp->common.int_block == INT_BLOCK_HC)
|
|
|
+ bnx2x_hc_int_enable(bp);
|
|
|
+ else
|
|
|
+ bnx2x_igu_int_enable(bp);
|
|
|
+}
|
|
|
+
|
|
|
+static void bnx2x_hc_int_disable(struct bnx2x *bp)
|
|
|
{
|
|
|
int port = BP_PORT(bp);
|
|
|
u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
|
|
|
@@ -1051,6 +1229,32 @@ void bnx2x_int_disable(struct bnx2x *bp)
|
|
|
BNX2X_ERR("BUG! proper val not read from IGU!\n");
|
|
|
}
|
|
|
|
|
|
+static void bnx2x_igu_int_disable(struct bnx2x *bp)
|
|
|
+{
|
|
|
+ u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
|
|
|
+
|
|
|
+ val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
|
|
|
+ IGU_PF_CONF_INT_LINE_EN |
|
|
|
+ IGU_PF_CONF_ATTN_BIT_EN);
|
|
|
+
|
|
|
+ DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
|
|
|
+
|
|
|
+ /* flush all outstanding writes */
|
|
|
+ mmiowb();
|
|
|
+
|
|
|
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
|
|
|
+ if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
|
|
|
+ BNX2X_ERR("BUG! proper val not read from IGU!\n");
|
|
|
+}
|
|
|
+
|
|
|
+void bnx2x_int_disable(struct bnx2x *bp)
|
|
|
+{
|
|
|
+ if (bp->common.int_block == INT_BLOCK_HC)
|
|
|
+ bnx2x_hc_int_disable(bp);
|
|
|
+ else
|
|
|
+ bnx2x_igu_int_disable(bp);
|
|
|
+}
|
|
|
+
|
|
|
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
|
|
|
{
|
|
|
int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
|
|
|
@@ -1194,7 +1398,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
|
|
|
return IRQ_HANDLED;
|
|
|
#endif
|
|
|
|
|
|
- for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
|
|
|
+ for_each_queue(bp, i) {
|
|
|
struct bnx2x_fastpath *fp = &bp->fp[i];
|
|
|
|
|
|
mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
|
|
|
@@ -1579,7 +1783,7 @@ u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
|
|
|
/* Initialize link parameters structure variables */
|
|
|
/* It is recommended to turn off RX FC for jumbo frames
|
|
|
for better performance */
|
|
|
- if (bp->dev->mtu > 5000)
|
|
|
+ if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
|
|
|
bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
|
|
|
else
|
|
|
bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
|
|
|
@@ -1693,13 +1897,11 @@ static void bnx2x_init_port_minmax(struct bnx2x *bp)
|
|
|
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
|
|
|
{
|
|
|
int all_zero = 1;
|
|
|
- int port = BP_PORT(bp);
|
|
|
int vn;
|
|
|
|
|
|
bp->vn_weight_sum = 0;
|
|
|
for (vn = VN_0; vn < E1HVN_MAX; vn++) {
|
|
|
- int func = 2*vn + port;
|
|
|
- u32 vn_cfg = MF_CFG_RD(bp, func_mf_config[func].config);
|
|
|
+ u32 vn_cfg = bp->mf_config[vn];
|
|
|
u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
|
|
|
FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
|
|
|
|
|
|
@@ -1727,11 +1929,12 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
|
|
|
CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
|
|
|
}
|
|
|
|
|
|
-static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
|
|
|
+static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
|
|
|
{
|
|
|
struct rate_shaping_vars_per_vn m_rs_vn;
|
|
|
struct fairness_vars_per_vn m_fair_vn;
|
|
|
- u32 vn_cfg = MF_CFG_RD(bp, func_mf_config[func].config);
|
|
|
+ u32 vn_cfg = bp->mf_config[vn];
|
|
|
+ int func = 2*vn + BP_PORT(bp);
|
|
|
u16 vn_min_rate, vn_max_rate;
|
|
|
int i;
|
|
|
|
|
|
@@ -1744,7 +1947,7 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
|
|
|
vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
|
|
|
FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
|
|
|
/* If min rate is zero - set it to 1 */
|
|
|
- if (!vn_min_rate)
|
|
|
+ if (bp->vn_weight_sum && (vn_min_rate == 0))
|
|
|
vn_min_rate = DEF_MIN_RATE;
|
|
|
vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
|
|
|
FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
|
|
|
@@ -1807,7 +2010,7 @@ static void bnx2x_read_mf_cfg(struct bnx2x *bp)
|
|
|
|
|
|
for (vn = VN_0; vn < E1HVN_MAX; vn++) {
|
|
|
int /*abs*/func = 2*vn + BP_PORT(bp);
|
|
|
- bp->mf_config =
|
|
|
+ bp->mf_config[vn] =
|
|
|
MF_CFG_RD(bp, func_mf_config[func].config);
|
|
|
}
|
|
|
}
|
|
|
@@ -1878,7 +2081,7 @@ static void bnx2x_link_attn(struct bnx2x *bp)
|
|
|
if (bp->link_vars.link_up) {
|
|
|
|
|
|
/* dropless flow control */
|
|
|
- if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
|
|
|
+ if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
|
|
|
int port = BP_PORT(bp);
|
|
|
u32 pause_enabled = 0;
|
|
|
|
|
|
@@ -1906,37 +2109,19 @@ static void bnx2x_link_attn(struct bnx2x *bp)
|
|
|
if (prev_link_status != bp->link_vars.link_status)
|
|
|
bnx2x_link_report(bp);
|
|
|
|
|
|
- if (IS_MF(bp)) {
|
|
|
- int port = BP_PORT(bp);
|
|
|
- int func;
|
|
|
- int vn;
|
|
|
-
|
|
|
- /* Set the attention towards other drivers on the same port */
|
|
|
- for (vn = VN_0; vn < E1HVN_MAX; vn++) {
|
|
|
- if (vn == BP_E1HVN(bp))
|
|
|
- continue;
|
|
|
-
|
|
|
- func = ((vn << 1) | port);
|
|
|
- REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
|
|
|
- (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
|
|
|
- }
|
|
|
-
|
|
|
- if (bp->link_vars.link_up) {
|
|
|
- int i;
|
|
|
-
|
|
|
- /* Init rate shaping and fairness contexts */
|
|
|
- bnx2x_init_port_minmax(bp);
|
|
|
+ if (IS_MF(bp))
|
|
|
+ bnx2x_link_sync_notify(bp);
|
|
|
|
|
|
- for (vn = VN_0; vn < E1HVN_MAX; vn++)
|
|
|
- bnx2x_init_vn_minmax(bp, 2*vn + port);
|
|
|
+ if (bp->link_vars.link_up && bp->link_vars.line_speed) {
|
|
|
+ int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
|
|
|
|
|
|
- /* Store it to internal memory */
|
|
|
- for (i = 0;
|
|
|
- i < sizeof(struct cmng_struct_per_port) / 4; i++)
|
|
|
- REG_WR(bp, BAR_XSTRORM_INTMEM +
|
|
|
- XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
|
|
|
- ((u32 *)(&bp->cmng))[i]);
|
|
|
- }
|
|
|
+ if (cmng_fns != CMNG_FNS_NONE) {
|
|
|
+ bnx2x_cmng_fns_init(bp, false, cmng_fns);
|
|
|
+ storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
|
|
|
+ } else
|
|
|
+ /* rate shaping and fairness are disabled */
|
|
|
+ DP(NETIF_MSG_IFUP,
|
|
|
+ "single function mode without fairness\n");
|
|
|
}
|
|
|
}
|
|
|
|
|
|
@@ -1952,7 +2137,9 @@ void bnx2x__link_status_update(struct bnx2x *bp)
|
|
|
else
|
|
|
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
|
|
|
|
|
|
- bnx2x_calc_vn_weight_sum(bp);
|
|
|
+ /* the link status update could be the result of a DCC event
|
|
|
+ hence re-read the shmem mf configuration */
|
|
|
+ bnx2x_read_mf_cfg(bp);
|
|
|
|
|
|
/* indicate link status */
|
|
|
bnx2x_link_report(bp);
|
|
|
@@ -1968,8 +2155,13 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
|
|
|
|
|
|
/* enable nig attention */
|
|
|
val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
|
|
|
- REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
|
|
|
- REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
|
|
|
+ if (bp->common.int_block == INT_BLOCK_HC) {
|
|
|
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
|
|
|
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
|
|
|
+ } else if (CHIP_IS_E2(bp)) {
|
|
|
+ REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
|
|
|
+ REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
|
|
|
+ }
|
|
|
|
|
|
bnx2x_stats_handle(bp, STATS_EVENT_PMF);
|
|
|
}
|
|
|
@@ -1985,22 +2177,23 @@ static void bnx2x_pmf_update(struct bnx2x *bp)
|
|
|
/* send the MCP a request, block until there is a reply */
|
|
|
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
|
|
|
{
|
|
|
- int func = BP_FUNC(bp);
|
|
|
+ int mb_idx = BP_FW_MB_IDX(bp);
|
|
|
u32 seq = ++bp->fw_seq;
|
|
|
u32 rc = 0;
|
|
|
u32 cnt = 1;
|
|
|
u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
|
|
|
|
|
|
mutex_lock(&bp->fw_mb_mutex);
|
|
|
- SHMEM_WR(bp, func_mb[func].drv_mb_param, param);
|
|
|
- SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
|
|
|
+ SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
|
|
|
+ SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
|
|
|
+
|
|
|
DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
|
|
|
|
|
|
do {
|
|
|
/* let the FW do it's magic ... */
|
|
|
msleep(delay);
|
|
|
|
|
|
- rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
|
|
|
+ rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
|
|
|
|
|
|
/* Give the FW up to 5 second (500*10ms) */
|
|
|
} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
|
|
|
@@ -2264,10 +2457,28 @@ void bnx2x_pf_init(struct bnx2x *bp)
|
|
|
if (!CHIP_IS_E1(bp))
|
|
|
storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
|
|
|
|
|
|
+ if (CHIP_IS_E2(bp)) {
|
|
|
+ /* reset IGU PF statistics: MSIX + ATTN */
|
|
|
+ /* PF */
|
|
|
+ REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
|
|
|
+ BNX2X_IGU_STAS_MSG_VF_CNT*4 +
|
|
|
+ (CHIP_MODE_IS_4_PORT(bp) ?
|
|
|
+ BP_FUNC(bp) : BP_VN(bp))*4, 0);
|
|
|
+ /* ATTN */
|
|
|
+ REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
|
|
|
+ BNX2X_IGU_STAS_MSG_VF_CNT*4 +
|
|
|
+ BNX2X_IGU_STAS_MSG_PF_CNT*4 +
|
|
|
+ (CHIP_MODE_IS_4_PORT(bp) ?
|
|
|
+ BP_FUNC(bp) : BP_VN(bp))*4, 0);
|
|
|
+ }
|
|
|
+
|
|
|
/* function setup flags */
|
|
|
flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
|
|
|
|
|
|
- flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
|
|
|
+ if (CHIP_IS_E1x(bp))
|
|
|
+ flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
|
|
|
+ else
|
|
|
+ flags |= FUNC_FLG_TPA;
|
|
|
|
|
|
/**
|
|
|
* Although RSS is meaningless when there is a single HW queue we
|
|
|
@@ -2361,7 +2572,7 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
|
|
|
* where the bp->flags can change so it is done without any
|
|
|
* locks
|
|
|
*/
|
|
|
- if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
|
|
|
+ if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
|
|
|
DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
|
|
|
bp->flags |= MF_FUNC_DIS;
|
|
|
|
|
|
@@ -2548,14 +2759,13 @@ static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
|
|
|
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
|
|
|
{
|
|
|
int port = BP_PORT(bp);
|
|
|
- u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
|
|
|
- COMMAND_REG_ATTN_BITS_SET);
|
|
|
u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
|
|
|
MISC_REG_AEU_MASK_ATTN_FUNC_0;
|
|
|
u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
|
|
|
NIG_REG_MASK_INTERRUPT_PORT0;
|
|
|
u32 aeu_mask;
|
|
|
u32 nig_mask = 0;
|
|
|
+ u32 reg_addr;
|
|
|
|
|
|
if (bp->attn_state & asserted)
|
|
|
BNX2X_ERR("IGU ERROR\n");
|
|
|
@@ -2630,9 +2840,15 @@ static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
|
|
|
|
|
|
} /* if hardwired */
|
|
|
|
|
|
- DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
|
|
|
- asserted, hc_addr);
|
|
|
- REG_WR(bp, hc_addr, asserted);
|
|
|
+ if (bp->common.int_block == INT_BLOCK_HC)
|
|
|
+ reg_addr = (HC_REG_COMMAND_REG + port*32 +
|
|
|
+ COMMAND_REG_ATTN_BITS_SET);
|
|
|
+ else
|
|
|
+ reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
|
|
|
+
|
|
|
+ DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
|
|
|
+ (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
|
|
|
+ REG_WR(bp, reg_addr, asserted);
|
|
|
|
|
|
/* now set back the mask */
|
|
|
if (asserted & ATTN_NIG_FOR_FUNC) {
|
|
|
@@ -2753,6 +2969,10 @@ static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
|
|
|
/* RQ_USDMDP_FIFO_OVERFLOW */
|
|
|
if (val & 0x18000)
|
|
|
BNX2X_ERR("FATAL error from PXP\n");
|
|
|
+ if (CHIP_IS_E2(bp)) {
|
|
|
+ val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
|
|
|
+ BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
if (attn & HW_INTERRUT_ASSERT_SET_2) {
|
|
|
@@ -2783,9 +3003,10 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
|
|
|
int func = BP_FUNC(bp);
|
|
|
|
|
|
REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
|
|
|
- bp->mf_config =
|
|
|
- MF_CFG_RD(bp, func_mf_config[func].config);
|
|
|
- val = SHMEM_RD(bp, func_mb[func].drv_status);
|
|
|
+ bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
|
|
|
+ func_mf_config[BP_ABS_FUNC(bp)].config);
|
|
|
+ val = SHMEM_RD(bp,
|
|
|
+ func_mb[BP_FW_MB_IDX(bp)].drv_status);
|
|
|
if (val & DRV_STATUS_DCC_EVENT_MASK)
|
|
|
bnx2x_dcc_event(bp,
|
|
|
(val & DRV_STATUS_DCC_EVENT_MASK));
|
|
|
@@ -2815,13 +3036,13 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
|
|
|
if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
|
|
|
BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
|
|
|
if (attn & BNX2X_GRC_TIMEOUT) {
|
|
|
- val = CHIP_IS_E1H(bp) ?
|
|
|
- REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
|
|
|
+ val = CHIP_IS_E1(bp) ? 0 :
|
|
|
+ REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
|
|
|
BNX2X_ERR("GRC time-out 0x%08x\n", val);
|
|
|
}
|
|
|
if (attn & BNX2X_GRC_RSV) {
|
|
|
- val = CHIP_IS_E1H(bp) ?
|
|
|
- REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
|
|
|
+ val = CHIP_IS_E1(bp) ? 0 :
|
|
|
+ REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
|
|
|
BNX2X_ERR("GRC reserved 0x%08x\n", val);
|
|
|
}
|
|
|
REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
|
|
|
@@ -3126,6 +3347,74 @@ bool bnx2x_chk_parity_attn(struct bnx2x *bp)
|
|
|
attn.sig[3]);
|
|
|
}
|
|
|
|
|
|
+
|
|
|
+static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
|
|
|
+{
|
|
|
+ u32 val;
|
|
|
+ if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
|
|
|
+
|
|
|
+ val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
|
|
|
+ BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
|
|
|
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
|
|
|
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
|
|
|
+ "ADDRESS_ERROR\n");
|
|
|
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
|
|
|
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
|
|
|
+ "INCORRECT_RCV_BEHAVIOR\n");
|
|
|
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
|
|
|
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
|
|
|
+ "WAS_ERROR_ATTN\n");
|
|
|
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
|
|
|
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
|
|
|
+ "VF_LENGTH_VIOLATION_ATTN\n");
|
|
|
+ if (val &
|
|
|
+ PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
|
|
|
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
|
|
|
+ "VF_GRC_SPACE_VIOLATION_ATTN\n");
|
|
|
+ if (val &
|
|
|
+ PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
|
|
|
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
|
|
|
+ "VF_MSIX_BAR_VIOLATION_ATTN\n");
|
|
|
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
|
|
|
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
|
|
|
+ "TCPL_ERROR_ATTN\n");
|
|
|
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
|
|
|
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
|
|
|
+ "TCPL_IN_TWO_RCBS_ATTN\n");
|
|
|
+ if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
|
|
|
+ BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
|
|
|
+ "CSSNOOP_FIFO_OVERFLOW\n");
|
|
|
+ }
|
|
|
+ if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
|
|
|
+ val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
|
|
|
+ BNX2X_ERR("ATC hw attention 0x%x\n", val);
|
|
|
+ if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
|
|
|
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
|
|
|
+ if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
|
|
|
+ BNX2X_ERR("ATC_ATC_INT_STS_REG"
|
|
|
+ "_ATC_TCPL_TO_NOT_PEND\n");
|
|
|
+ if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
|
|
|
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_"
|
|
|
+ "ATC_GPA_MULTIPLE_HITS\n");
|
|
|
+ if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
|
|
|
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_"
|
|
|
+ "ATC_RCPL_TO_EMPTY_CNT\n");
|
|
|
+ if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
|
|
|
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
|
|
|
+ if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
|
|
|
+ BNX2X_ERR("ATC_ATC_INT_STS_REG_"
|
|
|
+ "ATC_IREQ_LESS_THAN_STU\n");
|
|
|
+ }
|
|
|
+
|
|
|
+ if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
|
|
|
+ AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
|
|
|
+ BNX2X_ERR("FATAL parity attention set4 0x%x\n",
|
|
|
+ (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
|
|
|
+ AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
|
|
|
+ }
|
|
|
+
|
|
|
+}
|
|
|
+
|
|
|
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
|
|
|
{
|
|
|
struct attn_route attn, *group_mask;
|
|
|
@@ -3156,17 +3445,28 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
|
|
|
attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
|
|
|
attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
|
|
|
attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
|
|
|
- DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
|
|
|
- attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
|
|
|
+ if (CHIP_IS_E2(bp))
|
|
|
+ attn.sig[4] =
|
|
|
+ REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
|
|
|
+ else
|
|
|
+ attn.sig[4] = 0;
|
|
|
+
|
|
|
+ DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
|
|
|
+ attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
|
|
|
|
|
|
for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
|
|
|
if (deasserted & (1 << index)) {
|
|
|
group_mask = &bp->attn_group[index];
|
|
|
|
|
|
- DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
|
|
|
- index, group_mask->sig[0], group_mask->sig[1],
|
|
|
- group_mask->sig[2], group_mask->sig[3]);
|
|
|
+ DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
|
|
|
+ "%08x %08x %08x\n",
|
|
|
+ index,
|
|
|
+ group_mask->sig[0], group_mask->sig[1],
|
|
|
+ group_mask->sig[2], group_mask->sig[3],
|
|
|
+ group_mask->sig[4]);
|
|
|
|
|
|
+ bnx2x_attn_int_deasserted4(bp,
|
|
|
+ attn.sig[4] & group_mask->sig[4]);
|
|
|
bnx2x_attn_int_deasserted3(bp,
|
|
|
attn.sig[3] & group_mask->sig[3]);
|
|
|
bnx2x_attn_int_deasserted1(bp,
|
|
|
@@ -3180,11 +3480,15 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
|
|
|
|
|
|
bnx2x_release_alr(bp);
|
|
|
|
|
|
- reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
|
|
|
+ if (bp->common.int_block == INT_BLOCK_HC)
|
|
|
+ reg_addr = (HC_REG_COMMAND_REG + port*32 +
|
|
|
+ COMMAND_REG_ATTN_BITS_CLR);
|
|
|
+ else
|
|
|
+ reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
|
|
|
|
|
|
val = ~deasserted;
|
|
|
- DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
|
|
|
- val, reg_addr);
|
|
|
+ DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
|
|
|
+ (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
|
|
|
REG_WR(bp, reg_addr, val);
|
|
|
|
|
|
if (~bp->attn_state & deasserted)
|
|
|
@@ -3471,7 +3775,7 @@ static void bnx2x_timer(unsigned long data)
|
|
|
}
|
|
|
|
|
|
if (!BP_NOMCP(bp)) {
|
|
|
- int func = BP_FUNC(bp);
|
|
|
+ int mb_idx = BP_FW_MB_IDX(bp);
|
|
|
u32 drv_pulse;
|
|
|
u32 mcp_pulse;
|
|
|
|
|
|
@@ -3479,9 +3783,9 @@ static void bnx2x_timer(unsigned long data)
|
|
|
bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
|
|
|
/* TBD - add SYSTEM_TIME */
|
|
|
drv_pulse = bp->fw_drv_pulse_wr_seq;
|
|
|
- SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
|
|
|
+ SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
|
|
|
|
|
|
- mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
|
|
|
+ mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
|
|
|
MCP_PULSE_SEQ_MASK);
|
|
|
/* The delta between driver pulse and mcp response
|
|
|
* should be 1 (before mcp response) or 0 (after mcp response)
|
|
|
@@ -3539,17 +3843,26 @@ static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
|
|
|
{
|
|
|
u32 *sb_data_p;
|
|
|
u32 data_size = 0;
|
|
|
+ struct hc_status_block_data_e2 sb_data_e2;
|
|
|
struct hc_status_block_data_e1x sb_data_e1x;
|
|
|
|
|
|
/* disable the function first */
|
|
|
- memset(&sb_data_e1x, 0,
|
|
|
- sizeof(struct hc_status_block_data_e1x));
|
|
|
- sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
|
|
|
- sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
|
|
|
- sb_data_e1x.common.p_func.vf_valid = false;
|
|
|
- sb_data_p = (u32 *)&sb_data_e1x;
|
|
|
- data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
|
|
|
-
|
|
|
+ if (CHIP_IS_E2(bp)) {
|
|
|
+ memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
|
|
|
+ sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
|
|
|
+ sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
|
|
|
+ sb_data_e2.common.p_func.vf_valid = false;
|
|
|
+ sb_data_p = (u32 *)&sb_data_e2;
|
|
|
+ data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
|
|
|
+ } else {
|
|
|
+ memset(&sb_data_e1x, 0,
|
|
|
+ sizeof(struct hc_status_block_data_e1x));
|
|
|
+ sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
|
|
|
+ sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
|
|
|
+ sb_data_e1x.common.p_func.vf_valid = false;
|
|
|
+ sb_data_p = (u32 *)&sb_data_e1x;
|
|
|
+ data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
|
|
|
+ }
|
|
|
bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
|
|
|
|
|
|
bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
|
|
|
@@ -3610,30 +3923,48 @@ void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
|
|
|
{
|
|
|
int igu_seg_id;
|
|
|
|
|
|
+ struct hc_status_block_data_e2 sb_data_e2;
|
|
|
struct hc_status_block_data_e1x sb_data_e1x;
|
|
|
struct hc_status_block_sm *hc_sm_p;
|
|
|
struct hc_index_data *hc_index_p;
|
|
|
int data_size;
|
|
|
u32 *sb_data_p;
|
|
|
|
|
|
- igu_seg_id = HC_SEG_ACCESS_NORM;
|
|
|
+ if (CHIP_INT_MODE_IS_BC(bp))
|
|
|
+ igu_seg_id = HC_SEG_ACCESS_NORM;
|
|
|
+ else
|
|
|
+ igu_seg_id = IGU_SEG_ACCESS_NORM;
|
|
|
|
|
|
bnx2x_zero_fp_sb(bp, fw_sb_id);
|
|
|
|
|
|
- memset(&sb_data_e1x, 0,
|
|
|
- sizeof(struct hc_status_block_data_e1x));
|
|
|
- sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
|
|
|
- sb_data_e1x.common.p_func.vf_id = 0xff;
|
|
|
- sb_data_e1x.common.p_func.vf_valid = false;
|
|
|
- sb_data_e1x.common.p_func.vnic_id = BP_E1HVN(bp);
|
|
|
- sb_data_e1x.common.same_igu_sb_1b = true;
|
|
|
- sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
|
|
|
- sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
|
|
|
- hc_sm_p = sb_data_e1x.common.state_machine;
|
|
|
- hc_index_p = sb_data_e1x.index_data;
|
|
|
- sb_data_p = (u32 *)&sb_data_e1x;
|
|
|
- data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
|
|
|
-
|
|
|
+ if (CHIP_IS_E2(bp)) {
|
|
|
+ memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
|
|
|
+ sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
|
|
|
+ sb_data_e2.common.p_func.vf_id = vfid;
|
|
|
+ sb_data_e2.common.p_func.vf_valid = vf_valid;
|
|
|
+ sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
|
|
|
+ sb_data_e2.common.same_igu_sb_1b = true;
|
|
|
+ sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
|
|
|
+ sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
|
|
|
+ hc_sm_p = sb_data_e2.common.state_machine;
|
|
|
+ hc_index_p = sb_data_e2.index_data;
|
|
|
+ sb_data_p = (u32 *)&sb_data_e2;
|
|
|
+ data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
|
|
|
+ } else {
|
|
|
+ memset(&sb_data_e1x, 0,
|
|
|
+ sizeof(struct hc_status_block_data_e1x));
|
|
|
+ sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
|
|
|
+ sb_data_e1x.common.p_func.vf_id = 0xff;
|
|
|
+ sb_data_e1x.common.p_func.vf_valid = false;
|
|
|
+ sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
|
|
|
+ sb_data_e1x.common.same_igu_sb_1b = true;
|
|
|
+ sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
|
|
|
+ sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
|
|
|
+ hc_sm_p = sb_data_e1x.common.state_machine;
|
|
|
+ hc_index_p = sb_data_e1x.index_data;
|
|
|
+ sb_data_p = (u32 *)&sb_data_e1x;
|
|
|
+ data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
|
|
|
+ }
|
|
|
|
|
|
bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
|
|
|
igu_sb_id, igu_seg_id);
|
|
|
@@ -3666,6 +3997,7 @@ static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
|
|
|
bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
|
|
|
false, tx_usec);
|
|
|
}
|
|
|
+
|
|
|
static void bnx2x_init_def_sb(struct bnx2x *bp)
|
|
|
{
|
|
|
struct host_sp_status_block *def_sb = bp->def_status_blk;
|
|
|
@@ -3680,8 +4012,13 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
|
|
|
struct hc_sp_status_block_data sp_sb_data;
|
|
|
memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
|
|
|
|
|
|
- igu_sp_sb_index = DEF_SB_IGU_ID;
|
|
|
- igu_seg_id = HC_SEG_ACCESS_DEF;
|
|
|
+ if (CHIP_INT_MODE_IS_BC(bp)) {
|
|
|
+ igu_sp_sb_index = DEF_SB_IGU_ID;
|
|
|
+ igu_seg_id = HC_SEG_ACCESS_DEF;
|
|
|
+ } else {
|
|
|
+ igu_sp_sb_index = bp->igu_dsb_id;
|
|
|
+ igu_seg_id = IGU_SEG_ACCESS_DEF;
|
|
|
+ }
|
|
|
|
|
|
/* ATTN */
|
|
|
section = ((u64)mapping) + offsetof(struct host_sp_status_block,
|
|
|
@@ -3698,12 +4035,29 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
|
|
|
for (sindex = 0; sindex < 4; sindex++)
|
|
|
bp->attn_group[index].sig[sindex] =
|
|
|
REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
|
|
|
+
|
|
|
+ if (CHIP_IS_E2(bp))
|
|
|
+ /*
|
|
|
+ * enable5 is separate from the rest of the registers,
|
|
|
+ * and therefore the address skip is 4
|
|
|
+ * and not 16 between the different groups
|
|
|
+ */
|
|
|
+ bp->attn_group[index].sig[4] = REG_RD(bp,
|
|
|
+ reg_offset + 0x10 + 0x4*index);
|
|
|
+ else
|
|
|
+ bp->attn_group[index].sig[4] = 0;
|
|
|
}
|
|
|
|
|
|
- reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
|
|
|
- HC_REG_ATTN_MSG0_ADDR_L);
|
|
|
- REG_WR(bp, reg_offset, U64_LO(section));
|
|
|
- REG_WR(bp, reg_offset + 4, U64_HI(section));
|
|
|
+ if (bp->common.int_block == INT_BLOCK_HC) {
|
|
|
+ reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
|
|
|
+ HC_REG_ATTN_MSG0_ADDR_L);
|
|
|
+
|
|
|
+ REG_WR(bp, reg_offset, U64_LO(section));
|
|
|
+ REG_WR(bp, reg_offset + 4, U64_HI(section));
|
|
|
+ } else if (CHIP_IS_E2(bp)) {
|
|
|
+ REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
|
|
|
+ REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
|
|
|
+ }
|
|
|
|
|
|
section = ((u64)mapping) + offsetof(struct host_sp_status_block,
|
|
|
sp_sb);
|
|
|
@@ -3715,7 +4069,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp)
|
|
|
sp_sb_data.igu_sb_id = igu_sp_sb_index;
|
|
|
sp_sb_data.igu_seg_id = igu_seg_id;
|
|
|
sp_sb_data.p_func.pf_id = func;
|
|
|
- sp_sb_data.p_func.vnic_id = BP_E1HVN(bp);
|
|
|
+ sp_sb_data.p_func.vnic_id = BP_VN(bp);
|
|
|
sp_sb_data.p_func.vf_id = 0xff;
|
|
|
|
|
|
bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
|
|
|
@@ -3870,6 +4224,11 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
|
|
|
for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
|
|
|
REG_WR(bp, BAR_USTRORM_INTMEM +
|
|
|
USTORM_AGG_DATA_OFFSET + i * 4, 0);
|
|
|
+ if (CHIP_IS_E2(bp)) {
|
|
|
+ REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
|
|
|
+ CHIP_INT_MODE_IS_BC(bp) ?
|
|
|
+ HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
static void bnx2x_init_internal_port(struct bnx2x *bp)
|
|
|
@@ -3881,6 +4240,7 @@ static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
|
|
|
{
|
|
|
switch (load_code) {
|
|
|
case FW_MSG_CODE_DRV_LOAD_COMMON:
|
|
|
+ case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
|
|
|
bnx2x_init_internal_common(bp);
|
|
|
/* no break */
|
|
|
|
|
|
@@ -3911,9 +4271,11 @@ static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
|
|
|
fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
|
|
|
/* qZone id equals to FW (per path) client id */
|
|
|
fp->cl_qzone_id = fp->cl_id +
|
|
|
- BP_PORT(bp)*(ETH_MAX_RX_CLIENTS_E1H);
|
|
|
+ BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
|
|
|
+ ETH_MAX_RX_CLIENTS_E1H);
|
|
|
/* init shortcut */
|
|
|
- fp->ustorm_rx_prods_offset =
|
|
|
+ fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
|
|
|
+ USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
|
|
|
USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
|
|
|
/* Setup SB indicies */
|
|
|
fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
|
|
|
@@ -4248,9 +4610,19 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
|
|
|
static void enable_blocks_attention(struct bnx2x *bp)
|
|
|
{
|
|
|
REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
|
|
|
- REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
|
|
|
+ if (CHIP_IS_E2(bp))
|
|
|
+ REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
|
|
|
+ else
|
|
|
+ REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
|
|
|
REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
|
|
|
REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
|
|
|
+ /*
|
|
|
+ * mask read length error interrupts in brb for parser
|
|
|
+ * (parsing unit and 'checksum and crc' unit)
|
|
|
+ * these errors are legal (PU reads fixed length and CAC can cause
|
|
|
+ * read length error on truncated packets)
|
|
|
+ */
|
|
|
+ REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
|
|
|
REG_WR(bp, QM_REG_QM_INT_MASK, 0);
|
|
|
REG_WR(bp, TM_REG_TM_INT_MASK, 0);
|
|
|
REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
|
|
|
@@ -4271,6 +4643,13 @@ static void enable_blocks_attention(struct bnx2x *bp)
|
|
|
/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
|
|
|
if (CHIP_REV_IS_FPGA(bp))
|
|
|
REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
|
|
|
+ else if (CHIP_IS_E2(bp))
|
|
|
+ REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
|
|
|
+ (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
|
|
|
+ | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
|
|
|
+ | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
|
|
|
+ | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
|
|
|
+ | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
|
|
|
else
|
|
|
REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
|
|
|
REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
|
|
|
@@ -4288,11 +4667,11 @@ static const struct {
|
|
|
u32 addr;
|
|
|
u32 mask;
|
|
|
} bnx2x_parity_mask[] = {
|
|
|
- {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
|
|
|
- {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
|
|
|
- {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
|
|
|
- {HC_REG_HC_PRTY_MASK, 0xffffffff},
|
|
|
- {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
|
|
|
+ {PXP_REG_PXP_PRTY_MASK, 0x3ffffff},
|
|
|
+ {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
|
|
|
+ {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f},
|
|
|
+ {HC_REG_HC_PRTY_MASK, 0x7},
|
|
|
+ {MISC_REG_MISC_PRTY_MASK, 0x1},
|
|
|
{QM_REG_QM_PRTY_MASK, 0x0},
|
|
|
{DORQ_REG_DORQ_PRTY_MASK, 0x0},
|
|
|
{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
|
|
|
@@ -4407,23 +4786,97 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
|
|
|
REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
|
|
|
}
|
|
|
|
|
|
+static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
|
|
|
+{
|
|
|
+ u32 offset = 0;
|
|
|
+
|
|
|
+ if (CHIP_IS_E1(bp))
|
|
|
+ return;
|
|
|
+ if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
|
|
|
+ return;
|
|
|
+
|
|
|
+ switch (BP_ABS_FUNC(bp)) {
|
|
|
+ case 0:
|
|
|
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
|
|
|
+ break;
|
|
|
+ case 1:
|
|
|
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
|
|
|
+ break;
|
|
|
+ case 2:
|
|
|
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
|
|
|
+ break;
|
|
|
+ case 3:
|
|
|
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
|
|
|
+ break;
|
|
|
+ case 4:
|
|
|
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
|
|
|
+ break;
|
|
|
+ case 5:
|
|
|
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
|
|
|
+ break;
|
|
|
+ case 6:
|
|
|
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
|
|
|
+ break;
|
|
|
+ case 7:
|
|
|
+ offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
|
|
|
+ break;
|
|
|
+ default:
|
|
|
+ return;
|
|
|
+ }
|
|
|
+
|
|
|
+ REG_WR(bp, offset, pretend_func_num);
|
|
|
+ REG_RD(bp, offset);
|
|
|
+ DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
|
|
|
+}
|
|
|
+
|
|
|
+static void bnx2x_pf_disable(struct bnx2x *bp)
|
|
|
+{
|
|
|
+ u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
|
|
|
+ val &= ~IGU_PF_CONF_FUNC_EN;
|
|
|
+
|
|
|
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
|
|
|
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
|
|
|
+ REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
|
|
|
+}
|
|
|
+
|
|
|
static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
|
|
|
{
|
|
|
u32 val, i;
|
|
|
|
|
|
- DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
|
|
|
+ DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
|
|
|
|
|
|
bnx2x_reset_common(bp);
|
|
|
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
|
|
|
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
|
|
|
|
|
|
bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
|
|
|
- if (CHIP_IS_E1H(bp))
|
|
|
+ if (!CHIP_IS_E1(bp))
|
|
|
REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
|
|
|
|
|
|
- REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
|
|
|
- msleep(30);
|
|
|
- REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
|
|
|
+ if (CHIP_IS_E2(bp)) {
|
|
|
+ u8 fid;
|
|
|
+
|
|
|
+ /**
|
|
|
+ * 4-port mode or 2-port mode we need to turn off master-enable
|
|
|
+ * for everyone, after that, turn it back on for self.
|
|
|
+ * so, we disregard multi-function or not, and always disable
|
|
|
+ * for all functions on the given path, this means 0,2,4,6 for
|
|
|
+ * path 0 and 1,3,5,7 for path 1
|
|
|
+ */
|
|
|
+ for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
|
|
|
+ if (fid == BP_ABS_FUNC(bp)) {
|
|
|
+ REG_WR(bp,
|
|
|
+ PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
|
|
|
+ 1);
|
|
|
+ continue;
|
|
|
+ }
|
|
|
+
|
|
|
+ bnx2x_pretend_func(bp, fid);
|
|
|
+ /* clear pf enable */
|
|
|
+ bnx2x_pf_disable(bp);
|
|
|
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
|
|
|
+ }
|
|
|
+ }
|
|
|
|
|
|
bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
|
|
|
if (CHIP_IS_E1(bp)) {
|
|
|
@@ -4471,9 +4924,65 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
|
|
|
return -EBUSY;
|
|
|
}
|
|
|
|
|
|
+ /* Timers bug workaround E2 only. We need to set the entire ILT to
|
|
|
+ * have entries with value "0" and valid bit on.
|
|
|
+ * This needs to be done by the first PF that is loaded in a path
|
|
|
+ * (i.e. common phase)
|
|
|
+ */
|
|
|
+ if (CHIP_IS_E2(bp)) {
|
|
|
+ struct ilt_client_info ilt_cli;
|
|
|
+ struct bnx2x_ilt ilt;
|
|
|
+ memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
|
|
|
+ memset(&ilt, 0, sizeof(struct bnx2x_ilt));
|
|
|
+
|
|
|
+ /* initalize dummy TM client */
|
|
|
+ ilt_cli.start = 0;
|
|
|
+ ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
|
|
|
+ ilt_cli.client_num = ILT_CLIENT_TM;
|
|
|
+
|
|
|
+ /* Step 1: set zeroes to all ilt page entries with valid bit on
|
|
|
+ * Step 2: set the timers first/last ilt entry to point
|
|
|
+ * to the entire range to prevent ILT range error for 3rd/4th
|
|
|
+ * vnic (this code assumes existance of the vnic)
|
|
|
+ *
|
|
|
+ * both steps performed by call to bnx2x_ilt_client_init_op()
|
|
|
+ * with dummy TM client
|
|
|
+ *
|
|
|
+ * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
|
|
|
+ * and his brother are split registers
|
|
|
+ */
|
|
|
+ bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
|
|
|
+ bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
|
|
|
+ bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
|
|
|
+
|
|
|
+ REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
|
|
|
+ REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
|
|
|
+ REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
|
|
|
+ }
|
|
|
+
|
|
|
+
|
|
|
REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
|
|
|
REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
|
|
|
|
|
|
+ if (CHIP_IS_E2(bp)) {
|
|
|
+ int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
|
|
|
+ (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
|
|
|
+ bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
|
|
|
+
|
|
|
+ bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
|
|
|
+
|
|
|
+ /* let the HW do it's magic ... */
|
|
|
+ do {
|
|
|
+ msleep(200);
|
|
|
+ val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
|
|
|
+ } while (factor-- && (val != 1));
|
|
|
+
|
|
|
+ if (val != 1) {
|
|
|
+ BNX2X_ERR("ATC_INIT failed\n");
|
|
|
+ return -EBUSY;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
|
|
|
|
|
|
/* clean the DMAE memory */
|
|
|
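Aside: the ATC wait above follows a bounded-poll idiom. A sketch of that pattern, reusing the driver's REG_RD()/msleep() helpers and the BNX2X_ERR macro from this file; the helper name is made up for illustration.

/* Sketch only: poll a "done" register until it reads 1 or the budget runs out.
 * 'factor' is the retry budget (scaled up for emulation/FPGA, 0 for silicon,
 * which still allows the one mandatory read of the do/while loop).
 */
static int bnx2x_poll_done(struct bnx2x *bp, u32 done_reg, int factor)
{
	u32 val;

	do {
		msleep(200);
		val = REG_RD(bp, done_reg);
	} while (factor-- && val != 1);

	if (val != 1) {
		BNX2X_ERR("init polling on reg 0x%x timed out\n", done_reg);
		return -EBUSY;
	}
	return 0;
}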
@@ -4492,6 +5001,8 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
|
|
|
|
|
|
bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
|
|
|
|
|
|
+ if (CHIP_MODE_IS_4_PORT(bp))
|
|
|
+ bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
|
|
|
/* QM queues pointers table */
|
|
|
bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
|
|
|
|
|
|
@@ -4512,14 +5023,26 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
|
|
|
}
|
|
|
|
|
|
bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
|
|
|
+ if (CHIP_MODE_IS_4_PORT(bp)) {
|
|
|
+ REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
|
|
|
+ REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
|
|
|
+ }
|
|
|
+
|
|
|
bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
|
|
|
REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
|
|
|
#ifndef BCM_CNIC
|
|
|
/* set NIC mode */
|
|
|
REG_WR(bp, PRS_REG_NIC_MODE, 1);
|
|
|
#endif
|
|
|
- if (CHIP_IS_E1H(bp))
|
|
|
+ if (!CHIP_IS_E1(bp))
|
|
|
REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));
|
|
|
+ if (CHIP_IS_E2(bp)) {
|
|
|
+ /* Bit-map indicating which L2 hdrs may appear after the
|
|
|
+ basic Ethernet header */
|
|
|
+ int has_ovlan = IS_MF(bp);
|
|
|
+ REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
|
|
|
+ REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
|
|
|
+ }
|
|
|
|
|
|
bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
|
|
|
bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
|
|
|
@@ -4536,6 +5059,9 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
|
|
|
bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
|
|
|
bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
|
|
|
|
|
|
+ if (CHIP_MODE_IS_4_PORT(bp))
|
|
|
+ bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
|
|
|
+
|
|
|
/* sync semi rtc */
|
|
|
REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
|
|
|
0x80000000);
|
|
|
@@ -4546,6 +5072,12 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
|
|
|
bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
|
|
|
bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
|
|
|
|
|
|
+ if (CHIP_IS_E2(bp)) {
|
|
|
+ int has_ovlan = IS_MF(bp);
|
|
|
+ REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
|
|
|
+ REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
|
|
|
+ }
|
|
|
+
|
|
|
REG_WR(bp, SRC_REG_SOFT_RST, 1);
|
|
|
for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
|
|
|
REG_WR(bp, i, random32());
|
|
|
@@ -4583,6 +5115,11 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
|
|
|
REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
|
|
|
|
|
|
bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
|
|
|
+
|
|
|
+ if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
|
|
|
+ REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
|
|
|
+
|
|
|
+ bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
|
|
|
bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
|
|
|
|
|
|
bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
|
|
|
@@ -4590,16 +5127,35 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
|
|
|
REG_WR(bp, 0x2814, 0xffffffff);
|
|
|
REG_WR(bp, 0x3820, 0xffffffff);
|
|
|
|
|
|
+ if (CHIP_IS_E2(bp)) {
|
|
|
+ REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
|
|
|
+ (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
|
|
|
+ PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
|
|
|
+ REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
|
|
|
+ (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
|
|
|
+ PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
|
|
|
+ PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
|
|
|
+ REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
|
|
|
+ (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
|
|
|
+ PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
|
|
|
+ PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
|
|
|
+ }
|
|
|
+
|
|
|
bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
|
|
|
bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
|
|
|
bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
|
|
|
bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
|
|
|
|
|
|
bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
|
|
|
- if (CHIP_IS_E1H(bp)) {
|
|
|
+ if (!CHIP_IS_E1(bp)) {
|
|
|
REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
|
|
|
REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
|
|
|
}
|
|
|
+ if (CHIP_IS_E2(bp)) {
|
|
|
+ /* Bit-map indicating which L2 hdrs may appear after the
|
|
|
+ basic Ethernet header */
|
|
|
+ REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6));
|
|
|
+ }
|
|
|
|
|
|
if (CHIP_REV_IS_SLOW(bp))
|
|
|
msleep(200);
|
|
|
@@ -4622,15 +5178,17 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
|
|
|
}
|
|
|
REG_WR(bp, CFC_REG_DEBUG0, 0);
|
|
|
|
|
|
- /* read NIG statistic
|
|
|
- to see if this is our first up since powerup */
|
|
|
- bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
|
|
|
- val = *bnx2x_sp(bp, wb_data[0]);
|
|
|
+ if (CHIP_IS_E1(bp)) {
|
|
|
+ /* read NIG statistic
|
|
|
+ to see if this is our first up since powerup */
|
|
|
+ bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
|
|
|
+ val = *bnx2x_sp(bp, wb_data[0]);
|
|
|
|
|
|
- /* do internal memory self test */
|
|
|
- if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
|
|
|
- BNX2X_ERR("internal mem self test failed\n");
|
|
|
- return -EBUSY;
|
|
|
+ /* do internal memory self test */
|
|
|
+ if ((val == 0) && bnx2x_int_mem_test(bp)) {
|
|
|
+ BNX2X_ERR("internal mem self test failed\n");
|
|
|
+ return -EBUSY;
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
|
|
|
@@ -4647,10 +5205,23 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
|
|
|
enable_blocks_parity(bp);
|
|
|
|
|
|
if (!BP_NOMCP(bp)) {
|
|
|
- bnx2x_acquire_phy_lock(bp);
|
|
|
- bnx2x_common_init_phy(bp, bp->common.shmem_base,
|
|
|
- bp->common.shmem2_base);
|
|
|
- bnx2x_release_phy_lock(bp);
|
|
|
+ /* In E2 2-PORT mode, same ext phy is used for the two paths */
|
|
|
+ if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
|
|
|
+ CHIP_IS_E1x(bp)) {
|
|
|
+ u32 shmem_base[2], shmem2_base[2];
|
|
|
+ shmem_base[0] = bp->common.shmem_base;
|
|
|
+ shmem2_base[0] = bp->common.shmem2_base;
|
|
|
+ if (CHIP_IS_E2(bp)) {
|
|
|
+ shmem_base[1] =
|
|
|
+ SHMEM2_RD(bp, other_shmem_base_addr);
|
|
|
+ shmem2_base[1] =
|
|
|
+ SHMEM2_RD(bp, other_shmem2_base_addr);
|
|
|
+ }
|
|
|
+ bnx2x_acquire_phy_lock(bp);
|
|
|
+ bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
|
|
|
+ bp->common.chip_id);
|
|
|
+ bnx2x_release_phy_lock(bp);
|
|
|
+ }
|
|
|
} else
|
|
|
BNX2X_ERR("Bootcode is missing - can not initialize link\n");
|
|
|
|
|
|
@@ -4671,6 +5242,14 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
|
|
|
bnx2x_init_block(bp, PXP_BLOCK, init_stage);
|
|
|
bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
|
|
|
|
|
|
+ /* Timers bug workaround: the common phase disables the pf_master bit
+ * in pglue, so we need to re-enable it here before any DMAE access is
+ * attempted. Therefore the enable-master is added manually to the
+ * port phase (it also happens in the function phase).
+ */
|
|
|
+ if (CHIP_IS_E2(bp))
|
|
|
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
|
|
|
+
|
|
|
bnx2x_init_block(bp, TCM_BLOCK, init_stage);
|
|
|
bnx2x_init_block(bp, UCM_BLOCK, init_stage);
|
|
|
bnx2x_init_block(bp, CCM_BLOCK, init_stage);
|
|
|
@@ -4687,29 +5266,41 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
|
|
|
|
|
|
bnx2x_init_block(bp, DQ_BLOCK, init_stage);
|
|
|
|
|
|
- bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
|
|
|
- if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
|
|
|
- /* no pause for emulation and FPGA */
|
|
|
- low = 0;
|
|
|
- high = 513;
|
|
|
- } else {
|
|
|
- if (IS_MF(bp))
|
|
|
- low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
|
|
|
- else if (bp->dev->mtu > 4096) {
|
|
|
- if (bp->flags & ONE_PORT_FLAG)
|
|
|
- low = 160;
|
|
|
- else {
|
|
|
- val = bp->dev->mtu;
|
|
|
- /* (24*1024 + val*4)/256 */
|
|
|
- low = 96 + (val/64) + ((val % 64) ? 1 : 0);
|
|
|
- }
|
|
|
- } else
|
|
|
- low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
|
|
|
- high = low + 56; /* 14*1024/256 */
|
|
|
+ if (CHIP_MODE_IS_4_PORT(bp))
|
|
|
+ bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
|
|
|
+
|
|
|
+ if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
|
|
|
+ bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
|
|
|
+ if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
|
|
|
+ /* no pause for emulation and FPGA */
|
|
|
+ low = 0;
|
|
|
+ high = 513;
|
|
|
+ } else {
|
|
|
+ if (IS_MF(bp))
|
|
|
+ low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
|
|
|
+ else if (bp->dev->mtu > 4096) {
|
|
|
+ if (bp->flags & ONE_PORT_FLAG)
|
|
|
+ low = 160;
|
|
|
+ else {
|
|
|
+ val = bp->dev->mtu;
|
|
|
+ /* (24*1024 + val*4)/256 */
|
|
|
+ low = 96 + (val/64) +
|
|
|
+ ((val % 64) ? 1 : 0);
|
|
|
+ }
|
|
|
+ } else
|
|
|
+ low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
|
|
|
+ high = low + 56; /* 14*1024/256 */
|
|
|
+ }
|
|
|
+ REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
|
|
|
+ REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
|
|
|
}
|
|
|
- REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
|
|
|
- REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
|
|
|
|
|
|
+ if (CHIP_MODE_IS_4_PORT(bp)) {
|
|
|
+ REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
|
|
|
+ REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
|
|
|
+ REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
|
|
|
+ BRB1_REG_MAC_GUARANTIED_0), 40);
|
|
|
+ }
|
|
|
|
|
|
bnx2x_init_block(bp, PRS_BLOCK, init_stage);
|
|
|
|
|
|
@@ -4722,24 +5313,28 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
|
|
|
bnx2x_init_block(bp, USEM_BLOCK, init_stage);
|
|
|
bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
|
|
|
bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
|
|
|
+ if (CHIP_MODE_IS_4_PORT(bp))
|
|
|
+ bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
|
|
|
|
|
|
bnx2x_init_block(bp, UPB_BLOCK, init_stage);
|
|
|
bnx2x_init_block(bp, XPB_BLOCK, init_stage);
|
|
|
|
|
|
bnx2x_init_block(bp, PBF_BLOCK, init_stage);
|
|
|
|
|
|
- /* configure PBF to work without PAUSE mtu 9000 */
|
|
|
- REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
|
|
|
+ if (!CHIP_IS_E2(bp)) {
|
|
|
+ /* configure PBF to work without PAUSE mtu 9000 */
|
|
|
+ REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
|
|
|
|
|
|
- /* update threshold */
|
|
|
- REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
|
|
|
- /* update init credit */
|
|
|
- REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
|
|
|
+ /* update threshold */
|
|
|
+ REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
|
|
|
+ /* update init credit */
|
|
|
+ REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
|
|
|
|
|
|
- /* probe changes */
|
|
|
- REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
|
|
|
- msleep(5);
|
|
|
- REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
|
|
|
+ /* probe changes */
|
|
|
+ REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
|
|
|
+ udelay(50);
|
|
|
+ REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
|
|
|
+ }
|
|
|
|
|
|
#ifdef BCM_CNIC
|
|
|
bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
|
|
|
@@ -4753,6 +5348,8 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
|
|
|
}
|
|
|
bnx2x_init_block(bp, HC_BLOCK, init_stage);
|
|
|
|
|
|
+ bnx2x_init_block(bp, IGU_BLOCK, init_stage);
|
|
|
+
|
|
|
bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
|
|
|
/* init aeu_mask_attn_func_0/1:
|
|
|
* - SF mode: bits 3-7 are masked. only bits 0-2 are in use
|
|
|
@@ -4771,11 +5368,25 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
|
|
|
|
|
|
REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
|
|
|
|
|
|
- if (CHIP_IS_E1H(bp)) {
|
|
|
+ if (!CHIP_IS_E1(bp)) {
|
|
|
/* 0x2 disable mf_ov, 0x1 enable */
|
|
|
REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
|
|
|
(IS_MF(bp) ? 0x1 : 0x2));
|
|
|
|
|
|
+ if (CHIP_IS_E2(bp)) {
|
|
|
+ val = 0;
|
|
|
+ switch (bp->mf_mode) {
|
|
|
+ case MULTI_FUNCTION_SD:
|
|
|
+ val = 1;
|
|
|
+ break;
|
|
|
+ case MULTI_FUNCTION_SI:
|
|
|
+ val = 2;
|
|
|
+ break;
|
|
|
+ }
|
|
|
+
|
|
|
+ REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
|
|
|
+ NIG_REG_LLH0_CLS_TYPE), val);
|
|
|
+ }
|
|
|
{
|
|
|
REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
|
|
|
REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
|
|
|
@@ -4805,14 +5416,26 @@ static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
|
|
|
{
|
|
|
int reg;
|
|
|
|
|
|
- if (CHIP_IS_E1H(bp))
|
|
|
- reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
|
|
|
- else /* E1 */
|
|
|
+ if (CHIP_IS_E1(bp))
|
|
|
reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
|
|
|
+ else
|
|
|
+ reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
|
|
|
|
|
|
bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
|
|
|
}
|
|
|
|
|
|
+static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
|
|
|
+{
|
|
|
+ bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
|
|
|
+}
|
|
|
+
|
|
|
+static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
|
|
|
+{
|
|
|
+ u32 i, base = FUNC_ILT_BASE(func);
|
|
|
+ for (i = base; i < base + ILT_PER_FUNC; i++)
|
|
|
+ bnx2x_ilt_wr(bp, i, 0);
|
|
|
+}
|
|
|
+
|
|
|
static int bnx2x_init_hw_func(struct bnx2x *bp)
|
|
|
{
|
|
|
int port = BP_PORT(bp);
|
|
|
@@ -4825,10 +5448,12 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
|
|
|
DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
|
|
|
|
|
|
/* set MSI reconfigure capability */
|
|
|
- addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
|
|
|
- val = REG_RD(bp, addr);
|
|
|
- val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
|
|
|
- REG_WR(bp, addr, val);
|
|
|
+ if (bp->common.int_block == INT_BLOCK_HC) {
|
|
|
+ addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
|
|
|
+ val = REG_RD(bp, addr);
|
|
|
+ val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
|
|
|
+ REG_WR(bp, addr, val);
|
|
|
+ }
|
|
|
|
|
|
ilt = BP_ILT(bp);
|
|
|
cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
|
|
|
@@ -4854,10 +5479,38 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
|
|
|
REG_WR(bp, PRS_REG_NIC_MODE, 1);
|
|
|
#endif /* BCM_CNIC */
|
|
|
|
|
|
+ if (CHIP_IS_E2(bp)) {
|
|
|
+ u32 pf_conf = IGU_PF_CONF_FUNC_EN;
|
|
|
+
|
|
|
+ /* Turn on single ISR mode in the IGU if the driver is going to use
+ * INT#x or MSI.
+ */
|
|
|
+ if (!(bp->flags & USING_MSIX_FLAG))
|
|
|
+ pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
|
|
|
+ /*
|
|
|
+ * Timers workaround bug: function init part.
|
|
|
+ * Need to wait 20msec after initializing ILT,
|
|
|
+ * needed to make sure there are no requests in
|
|
|
+ * one of the PXP internal queues with "old" ILT addresses
|
|
|
+ */
|
|
|
+ msleep(20);
|
|
|
+ /*
|
|
|
+ * Master enable - Due to WB DMAE writes performed before this
|
|
|
+ * register is re-initialized as part of the regular function
|
|
|
+ * init
|
|
|
+ */
|
|
|
+ REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
|
|
|
+ /* Enable the function in IGU */
|
|
|
+ REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
|
|
|
+ }
|
|
|
+
|
|
|
bp->dmae_ready = 1;
|
|
|
|
|
|
bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
|
|
|
|
|
|
+ if (CHIP_IS_E2(bp))
|
|
|
+ REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
|
|
|
+
|
|
|
bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
|
|
|
bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
|
|
|
bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
|
|
|
@@ -4868,7 +5521,24 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
|
|
|
bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
|
|
|
bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
|
|
|
|
|
|
+ if (CHIP_IS_E2(bp)) {
|
|
|
+ REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
|
|
|
+ BP_PATH(bp));
|
|
|
+ REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
|
|
|
+ BP_PATH(bp));
|
|
|
+ }
|
|
|
+
|
|
|
+ if (CHIP_MODE_IS_4_PORT(bp))
|
|
|
+ bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
|
|
|
+
|
|
|
+ if (CHIP_IS_E2(bp))
|
|
|
+ REG_WR(bp, QM_REG_PF_EN, 1);
|
|
|
+
|
|
|
bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
|
|
|
+
|
|
|
+ if (CHIP_MODE_IS_4_PORT(bp))
|
|
|
+ bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
|
|
|
+
|
|
|
bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
|
|
|
bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
|
|
|
bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
|
|
|
@@ -4880,10 +5550,16 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
|
|
|
bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
|
|
|
bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
|
|
|
bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
|
|
|
+ if (CHIP_IS_E2(bp))
|
|
|
+ REG_WR(bp, PBF_REG_DISABLE_PF, 0);
|
|
|
+
|
|
|
bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
|
|
|
|
|
|
bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
|
|
|
|
|
|
+ if (CHIP_IS_E2(bp))
|
|
|
+ REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
|
|
|
+
|
|
|
if (IS_MF(bp)) {
|
|
|
REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
|
|
|
REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
|
|
|
@@ -4892,13 +5568,117 @@ static int bnx2x_init_hw_func(struct bnx2x *bp)
|
|
|
bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
|
|
|
|
|
|
/* HC init per function */
|
|
|
- if (CHIP_IS_E1H(bp)) {
|
|
|
+ if (bp->common.int_block == INT_BLOCK_HC) {
|
|
|
+ if (CHIP_IS_E1H(bp)) {
|
|
|
+ REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
|
|
|
+
|
|
|
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
|
|
|
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
|
|
|
+ }
|
|
|
+ bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
|
|
|
+
|
|
|
+ } else {
|
|
|
+ int num_segs, sb_idx, prod_offset;
|
|
|
+
|
|
|
REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
|
|
|
|
|
|
- REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
|
|
|
- REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
|
|
|
+ if (CHIP_IS_E2(bp)) {
|
|
|
+ REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
|
|
|
+ REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
|
|
|
+ }
|
|
|
+
|
|
|
+ bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
|
|
|
+
|
|
|
+ if (CHIP_IS_E2(bp)) {
|
|
|
+ int dsb_idx = 0;
|
|
|
+ /*
+ * Producer memory:
+ * E2 mode: addresses 0-135 map to the mapping memory;
+ * 136 - PF0 default prod; 137 - PF1 default prod;
+ * 138 - PF2 default prod; 139 - PF3 default prod;
+ * 140 - PF0 attn prod; 141 - PF1 attn prod;
+ * 142 - PF2 attn prod; 143 - PF3 attn prod;
+ * 144-147 reserved.
+ *
+ * E1.5 mode (backward-compatible mode): for the non-default SBs,
+ * each even line in the memory holds the U producer and each odd
+ * line holds the C producer. The first 128 producers are for
+ * NDSBs (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
+ * producers are for the DSB of each PF.
+ * Each PF has five segments (the order inside each segment is
+ * PF0; PF1; PF2; PF3): 128-131 U prods; 132-135 C prods;
+ * 136-139 X prods; 140-143 T prods; 144-147 attn prods.
+ */
|
|
|
+ /* non-default-status-blocks */
|
|
|
+ num_segs = CHIP_INT_MODE_IS_BC(bp) ?
|
|
|
+ IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
|
|
|
+ for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
|
|
|
+ prod_offset = (bp->igu_base_sb + sb_idx) *
|
|
|
+ num_segs;
|
|
|
+
|
|
|
+ for (i = 0; i < num_segs; i++) {
|
|
|
+ addr = IGU_REG_PROD_CONS_MEMORY +
|
|
|
+ (prod_offset + i) * 4;
|
|
|
+ REG_WR(bp, addr, 0);
|
|
|
+ }
|
|
|
+ /* send consumer update with value 0 */
|
|
|
+ bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
|
|
|
+ USTORM_ID, 0, IGU_INT_NOP, 1);
|
|
|
+ bnx2x_igu_clear_sb(bp,
|
|
|
+ bp->igu_base_sb + sb_idx);
|
|
|
+ }
|
|
|
+
|
|
|
+ /* default-status-blocks */
|
|
|
+ num_segs = CHIP_INT_MODE_IS_BC(bp) ?
|
|
|
+ IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
|
|
|
+
|
|
|
+ if (CHIP_MODE_IS_4_PORT(bp))
|
|
|
+ dsb_idx = BP_FUNC(bp);
|
|
|
+ else
|
|
|
+ dsb_idx = BP_E1HVN(bp);
|
|
|
+
|
|
|
+ prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
|
|
|
+ IGU_BC_BASE_DSB_PROD + dsb_idx :
|
|
|
+ IGU_NORM_BASE_DSB_PROD + dsb_idx);
|
|
|
+
|
|
|
+ for (i = 0; i < (num_segs * E1HVN_MAX);
|
|
|
+ i += E1HVN_MAX) {
|
|
|
+ addr = IGU_REG_PROD_CONS_MEMORY +
|
|
|
+ (prod_offset + i)*4;
|
|
|
+ REG_WR(bp, addr, 0);
|
|
|
+ }
|
|
|
+ /* send consumer update with 0 */
|
|
|
+ if (CHIP_INT_MODE_IS_BC(bp)) {
|
|
|
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
|
|
|
+ USTORM_ID, 0, IGU_INT_NOP, 1);
|
|
|
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
|
|
|
+ CSTORM_ID, 0, IGU_INT_NOP, 1);
|
|
|
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
|
|
|
+ XSTORM_ID, 0, IGU_INT_NOP, 1);
|
|
|
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
|
|
|
+ TSTORM_ID, 0, IGU_INT_NOP, 1);
|
|
|
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
|
|
|
+ ATTENTION_ID, 0, IGU_INT_NOP, 1);
|
|
|
+ } else {
|
|
|
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
|
|
|
+ USTORM_ID, 0, IGU_INT_NOP, 1);
|
|
|
+ bnx2x_ack_sb(bp, bp->igu_dsb_id,
|
|
|
+ ATTENTION_ID, 0, IGU_INT_NOP, 1);
|
|
|
+ }
|
|
|
+ bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
|
|
|
+
|
|
|
+ /* !!! these should become driver const once
|
|
|
+ rf-tool supports split-68 const */
|
|
|
+ REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
|
|
|
+ REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
|
|
|
+ REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
|
|
|
+ REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
|
|
|
+ REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
|
|
|
+ REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
|
|
|
+ }
|
|
|
}
|
|
|
- bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
|
|
|
|
|
|
/* Reset PCIE errors for debug */
|
|
|
REG_WR(bp, 0x2114, 0xffffffff);
|
|
|
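Aside: a small sketch of how the producer offsets computed in the IGU branch above turn into register addresses; it mirrors the addr = IGU_REG_PROD_CONS_MEMORY + (prod_offset + i) * 4 lines and assumes one 32-bit producer per line.

/* Sketch: address of producer line (prod_offset + segment) in the IGU
 * producer/consumer memory, one dword per line, as in the loops above.
 */
static u32 bnx2x_igu_prod_addr(int prod_offset, int segment)
{
	return IGU_REG_PROD_CONS_MEMORY + (prod_offset + segment) * 4;
}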
@@ -4920,7 +5700,7 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
|
|
|
int rc = 0;
|
|
|
|
|
|
DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
|
|
|
- BP_FUNC(bp), load_code);
|
|
|
+ BP_ABS_FUNC(bp), load_code);
|
|
|
|
|
|
bp->dmae_ready = 0;
|
|
|
mutex_init(&bp->dmae_mutex);
|
|
|
@@ -4930,6 +5710,7 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
|
|
|
|
|
|
switch (load_code) {
|
|
|
case FW_MSG_CODE_DRV_LOAD_COMMON:
|
|
|
+ case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
|
|
|
rc = bnx2x_init_hw_common(bp, load_code);
|
|
|
if (rc)
|
|
|
goto init_hw_err;
|
|
|
@@ -4953,10 +5734,10 @@ int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
|
|
|
}
|
|
|
|
|
|
if (!BP_NOMCP(bp)) {
|
|
|
- int func = BP_FUNC(bp);
|
|
|
+ int mb_idx = BP_FW_MB_IDX(bp);
|
|
|
|
|
|
bp->fw_drv_pulse_wr_seq =
|
|
|
- (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
|
|
|
+ (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
|
|
|
DRV_PULSE_SEQ_MASK);
|
|
|
DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
|
|
|
}
|
|
|
@@ -4993,9 +5774,14 @@ void bnx2x_free_mem(struct bnx2x *bp)
|
|
|
/* Common */
|
|
|
for_each_queue(bp, i) {
|
|
|
/* status blocks */
|
|
|
- BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
|
|
|
- bnx2x_fp(bp, i, status_blk_mapping),
|
|
|
- sizeof(struct host_hc_status_block_e1x));
|
|
|
+ if (CHIP_IS_E2(bp))
|
|
|
+ BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
|
|
|
+ bnx2x_fp(bp, i, status_blk_mapping),
|
|
|
+ sizeof(struct host_hc_status_block_e2));
|
|
|
+ else
|
|
|
+ BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
|
|
|
+ bnx2x_fp(bp, i, status_blk_mapping),
|
|
|
+ sizeof(struct host_hc_status_block_e1x));
|
|
|
}
|
|
|
/* Rx */
|
|
|
for_each_queue(bp, i) {
|
|
|
@@ -5041,9 +5827,12 @@ void bnx2x_free_mem(struct bnx2x *bp)
|
|
|
|
|
|
BNX2X_FREE(bp->ilt->lines);
|
|
|
#ifdef BCM_CNIC
|
|
|
-
|
|
|
- BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
|
|
|
- sizeof(struct host_hc_status_block_e1x));
|
|
|
+ if (CHIP_IS_E2(bp))
|
|
|
+ BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
|
|
|
+ sizeof(struct host_hc_status_block_e2));
|
|
|
+ else
|
|
|
+ BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
|
|
|
+ sizeof(struct host_hc_status_block_e1x));
|
|
|
BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
|
|
|
#endif
|
|
|
BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
|
|
|
@@ -5055,6 +5844,22 @@ void bnx2x_free_mem(struct bnx2x *bp)
|
|
|
#undef BNX2X_KFREE
|
|
|
}
|
|
|
|
|
|
+static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
|
|
|
+{
|
|
|
+ union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
|
|
|
+ if (CHIP_IS_E2(bp)) {
|
|
|
+ bnx2x_fp(bp, index, sb_index_values) =
|
|
|
+ (__le16 *)status_blk.e2_sb->sb.index_values;
|
|
|
+ bnx2x_fp(bp, index, sb_running_index) =
|
|
|
+ (__le16 *)status_blk.e2_sb->sb.running_index;
|
|
|
+ } else {
|
|
|
+ bnx2x_fp(bp, index, sb_index_values) =
|
|
|
+ (__le16 *)status_blk.e1x_sb->sb.index_values;
|
|
|
+ bnx2x_fp(bp, index, sb_running_index) =
|
|
|
+ (__le16 *)status_blk.e1x_sb->sb.running_index;
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
int bnx2x_alloc_mem(struct bnx2x *bp)
|
|
|
{
|
|
|
|
|
|
@@ -5074,25 +5879,23 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
|
|
|
} while (0)
|
|
|
|
|
|
int i;
|
|
|
- void *p;
|
|
|
|
|
|
/* fastpath */
|
|
|
/* Common */
|
|
|
for_each_queue(bp, i) {
|
|
|
+ union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
|
|
|
bnx2x_fp(bp, i, bp) = bp;
|
|
|
-
|
|
|
/* status blocks */
|
|
|
- BNX2X_PCI_ALLOC(p,
|
|
|
+ if (CHIP_IS_E2(bp))
|
|
|
+ BNX2X_PCI_ALLOC(sb->e2_sb,
|
|
|
+ &bnx2x_fp(bp, i, status_blk_mapping),
|
|
|
+ sizeof(struct host_hc_status_block_e2));
|
|
|
+ else
|
|
|
+ BNX2X_PCI_ALLOC(sb->e1x_sb,
|
|
|
&bnx2x_fp(bp, i, status_blk_mapping),
|
|
|
sizeof(struct host_hc_status_block_e1x));
|
|
|
|
|
|
- bnx2x_fp(bp, i, status_blk.e1x_sb) =
|
|
|
- (struct host_hc_status_block_e1x *)p;
|
|
|
-
|
|
|
- bnx2x_fp(bp, i, sb_index_values) = (__le16 *)
|
|
|
- (bnx2x_fp(bp, i, status_blk.e1x_sb)->sb.index_values);
|
|
|
- bnx2x_fp(bp, i, sb_running_index) = (__le16 *)
|
|
|
- (bnx2x_fp(bp, i, status_blk.e1x_sb)->sb.running_index);
|
|
|
+ set_sb_shortcuts(bp, i);
|
|
|
}
|
|
|
/* Rx */
|
|
|
for_each_queue(bp, i) {
|
|
|
@@ -5129,8 +5932,12 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
|
|
|
/* end of fastpath */
|
|
|
|
|
|
#ifdef BCM_CNIC
|
|
|
- BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
|
|
|
- sizeof(struct host_hc_status_block_e1x));
|
|
|
+ if (CHIP_IS_E2(bp))
|
|
|
+ BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
|
|
|
+ sizeof(struct host_hc_status_block_e2));
|
|
|
+ else
|
|
|
+ BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
|
|
|
+ sizeof(struct host_hc_status_block_e1x));
|
|
|
|
|
|
/* allocate searcher T2 table */
|
|
|
BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
|
|
|
@@ -5210,11 +6017,6 @@ static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
|
|
|
bp->set_mac_pending = 1;
|
|
|
smp_wmb();
|
|
|
|
|
|
- config->hdr.length = 1 + (is_bcast ? 1 : 0);
|
|
|
- config->hdr.offset = cam_offset;
|
|
|
- config->hdr.client_id = 0xff;
|
|
|
- config->hdr.reserved1 = 0;
|
|
|
-
|
|
|
config->hdr.length = 1;
|
|
|
config->hdr.offset = cam_offset;
|
|
|
config->hdr.client_id = 0xff;
|
|
|
@@ -5312,7 +6114,12 @@ int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
|
|
|
|
|
|
u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
|
|
|
{
|
|
|
- return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
|
|
|
+ if (CHIP_IS_E1H(bp))
|
|
|
+ return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
|
|
|
+ else if (CHIP_MODE_IS_4_PORT(bp))
|
|
|
+ return BP_FUNC(bp) * 32 + rel_offset;
|
|
|
+ else
|
|
|
+ return BP_VN(bp) * 32 + rel_offset;
|
|
|
}
|
|
|
|
|
|
void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
|
|
|
@@ -5804,9 +6611,11 @@ static void bnx2x_reset_func(struct bnx2x *bp)
|
|
|
{
|
|
|
int port = BP_PORT(bp);
|
|
|
int func = BP_FUNC(bp);
|
|
|
- int base, i;
|
|
|
+ int i;
|
|
|
int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
|
|
|
- offsetof(struct hc_status_block_data_e1x, common);
|
|
|
+ (CHIP_IS_E2(bp) ?
|
|
|
+ offsetof(struct hc_status_block_data_e2, common) :
|
|
|
+ offsetof(struct hc_status_block_data_e1x, common));
|
|
|
int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
|
|
|
int pfid_offset = offsetof(struct pci_entity, pf_id);
|
|
|
|
|
|
@@ -5839,8 +6648,13 @@ static void bnx2x_reset_func(struct bnx2x *bp)
|
|
|
0);
|
|
|
|
|
|
/* Configure IGU */
|
|
|
- REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
|
|
|
- REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
|
|
|
+ if (bp->common.int_block == INT_BLOCK_HC) {
|
|
|
+ REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
|
|
|
+ REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
|
|
|
+ } else {
|
|
|
+ REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
|
|
|
+ REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
|
|
|
+ }
|
|
|
|
|
|
#ifdef BCM_CNIC
|
|
|
/* Disable Timer scan */
|
|
|
@@ -5856,9 +6670,25 @@ static void bnx2x_reset_func(struct bnx2x *bp)
|
|
|
}
|
|
|
#endif
|
|
|
/* Clear ILT */
|
|
|
- base = FUNC_ILT_BASE(func);
|
|
|
- for (i = base; i < base + ILT_PER_FUNC; i++)
|
|
|
- bnx2x_ilt_wr(bp, i, 0);
|
|
|
+ bnx2x_clear_func_ilt(bp, func);
|
|
|
+
|
|
|
+ /* Timers bug workaround for E2: if this is vnic-3,
+ * we need to set the entire ILT range for the timers.
+ */
|
|
|
+ if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
|
|
|
+ struct ilt_client_info ilt_cli;
|
|
|
+ /* use dummy TM client */
|
|
|
+ memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
|
|
|
+ ilt_cli.start = 0;
|
|
|
+ ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
|
|
|
+ ilt_cli.client_num = ILT_CLIENT_TM;
|
|
|
+
|
|
|
+ bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
|
|
|
+ }
|
|
|
+
|
|
|
+ /* this assumes that reset_port() is called before reset_func() */
|
|
|
+ if (CHIP_IS_E2(bp))
|
|
|
+ bnx2x_pf_disable(bp);
|
|
|
|
|
|
bp->dmae_ready = 0;
|
|
|
}
|
|
|
@@ -5892,7 +6722,7 @@ static void bnx2x_reset_port(struct bnx2x *bp)
|
|
|
static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
|
|
|
{
|
|
|
DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
|
|
|
- BP_FUNC(bp), reset_code);
|
|
|
+ BP_ABS_FUNC(bp), reset_code);
|
|
|
|
|
|
switch (reset_code) {
|
|
|
case FW_MSG_CODE_DRV_UNLOAD_COMMON:
|
|
|
@@ -6024,15 +6854,20 @@ unload_error:
|
|
|
if (!BP_NOMCP(bp))
|
|
|
reset_code = bnx2x_fw_command(bp, reset_code, 0);
|
|
|
else {
|
|
|
- DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
|
|
|
- load_count[0], load_count[1], load_count[2]);
|
|
|
- load_count[0]--;
|
|
|
- load_count[1 + port]--;
|
|
|
- DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
|
|
|
- load_count[0], load_count[1], load_count[2]);
|
|
|
- if (load_count[0] == 0)
|
|
|
+ DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
|
|
|
+ "%d, %d, %d\n", BP_PATH(bp),
|
|
|
+ load_count[BP_PATH(bp)][0],
|
|
|
+ load_count[BP_PATH(bp)][1],
|
|
|
+ load_count[BP_PATH(bp)][2]);
|
|
|
+ load_count[BP_PATH(bp)][0]--;
|
|
|
+ load_count[BP_PATH(bp)][1 + port]--;
|
|
|
+ DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
|
|
|
+ "%d, %d, %d\n", BP_PATH(bp),
|
|
|
+ load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
|
|
|
+ load_count[BP_PATH(bp)][2]);
|
|
|
+ if (load_count[BP_PATH(bp)][0] == 0)
|
|
|
reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
|
|
|
- else if (load_count[1 + port] == 0)
|
|
|
+ else if (load_count[BP_PATH(bp)][1 + port] == 0)
|
|
|
reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
|
|
|
else
|
|
|
reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
|
|
|
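Aside: the NO-MCP branch above now keys the load counters by engine/path as well. The array itself is declared elsewhere in the driver; a plausible shape (an assumption, shown only to make the indexing readable) is:

/* Assumed declaration, per path/engine:
 * [path][0] - common load count, [path][1] - port 0, [path][2] - port 1.
 */
static int load_count[2][3];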
@@ -6531,39 +7366,23 @@ reset_task_exit:
|
|
|
* Init service functions
|
|
|
*/
|
|
|
|
|
|
-static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
|
|
|
-{
|
|
|
- switch (func) {
|
|
|
- case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
|
|
|
- case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
|
|
|
- case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
|
|
|
- case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
|
|
|
- case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
|
|
|
- case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
|
|
|
- case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
|
|
|
- case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
|
|
|
- default:
|
|
|
- BNX2X_ERR("Unsupported function index: %d\n", func);
|
|
|
- return (u32)(-1);
|
|
|
- }
|
|
|
+u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
|
|
|
+{
|
|
|
+ u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
|
|
|
+ u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
|
|
|
+ return base + (BP_ABS_FUNC(bp)) * stride;
|
|
|
}
|
|
|
|
|
|
-static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
|
|
|
+static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
|
|
|
{
|
|
|
- u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
|
|
|
+ u32 reg = bnx2x_get_pretend_reg(bp);
|
|
|
|
|
|
/* Flush all outstanding writes */
|
|
|
mmiowb();
|
|
|
|
|
|
/* Pretend to be function 0 */
|
|
|
REG_WR(bp, reg, 0);
|
|
|
- /* Flush the GRC transaction (in the chip) */
|
|
|
- new_val = REG_RD(bp, reg);
|
|
|
- if (new_val != 0) {
|
|
|
- BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
|
|
|
- new_val);
|
|
|
- BUG();
|
|
|
- }
|
|
|
+ REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
|
|
|
|
|
|
/* From now we are in the "like-E1" mode */
|
|
|
bnx2x_int_disable(bp);
|
|
|
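Aside: a standalone worked example of the stride arithmetic in bnx2x_get_pretend_reg() above. The two register addresses are placeholders (assumptions standing in for the real PXP2 pretend registers); only the base + func * stride computation matters.

#include <stdio.h>

#define PGL_PRETEND_FUNC_F0 0x1000u	/* placeholder address */
#define PGL_PRETEND_FUNC_F1 0x1010u	/* placeholder address */

int main(void)
{
	unsigned int base = PGL_PRETEND_FUNC_F0;
	unsigned int stride = PGL_PRETEND_FUNC_F1 - PGL_PRETEND_FUNC_F0;
	int func;

	/* Evenly spaced per-function pretend registers: F0, F0+stride, ... */
	for (func = 0; func < 8; func++)
		printf("abs func %d -> pretend reg 0x%x\n",
		       func, base + func * stride);
	return 0;
}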
@@ -6571,22 +7390,17 @@ static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
|
|
|
/* Flush all outstanding writes */
|
|
|
mmiowb();
|
|
|
|
|
|
- /* Restore the original funtion settings */
|
|
|
- REG_WR(bp, reg, orig_func);
|
|
|
- new_val = REG_RD(bp, reg);
|
|
|
- if (new_val != orig_func) {
|
|
|
- BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
|
|
|
- orig_func, new_val);
|
|
|
- BUG();
|
|
|
- }
|
|
|
+ /* Restore the original function */
|
|
|
+ REG_WR(bp, reg, BP_ABS_FUNC(bp));
|
|
|
+ REG_RD(bp, reg);
|
|
|
}
|
|
|
|
|
|
-static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
|
|
|
+static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
|
|
|
{
|
|
|
- if (CHIP_IS_E1H(bp))
|
|
|
- bnx2x_undi_int_disable_e1h(bp, func);
|
|
|
- else
|
|
|
+ if (CHIP_IS_E1(bp))
|
|
|
bnx2x_int_disable(bp);
|
|
|
+ else
|
|
|
+ bnx2x_undi_int_disable_e1h(bp);
|
|
|
}
|
|
|
|
|
|
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
|
|
|
@@ -6603,8 +7417,8 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
|
|
|
val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
|
|
|
if (val == 0x7) {
|
|
|
u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
|
|
|
- /* save our func */
|
|
|
- int func = BP_FUNC(bp);
|
|
|
+ /* save our pf_num */
|
|
|
+ int orig_pf_num = bp->pf_num;
|
|
|
u32 swap_en;
|
|
|
u32 swap_val;
|
|
|
|
|
|
@@ -6614,9 +7428,9 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
|
|
|
BNX2X_DEV_INFO("UNDI is active! reset device\n");
|
|
|
|
|
|
/* try unload UNDI on port 0 */
|
|
|
- bp->func = 0;
|
|
|
+ bp->pf_num = 0;
|
|
|
bp->fw_seq =
|
|
|
- (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
|
|
|
+ (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
|
|
|
DRV_MSG_SEQ_NUMBER_MASK);
|
|
|
reset_code = bnx2x_fw_command(bp, reset_code, 0);
|
|
|
|
|
|
@@ -6628,9 +7442,9 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
|
|
|
DRV_MSG_CODE_UNLOAD_DONE, 0);
|
|
|
|
|
|
/* unload UNDI on port 1 */
|
|
|
- bp->func = 1;
|
|
|
+ bp->pf_num = 1;
|
|
|
bp->fw_seq =
|
|
|
- (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
|
|
|
+ (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
|
|
|
DRV_MSG_SEQ_NUMBER_MASK);
|
|
|
reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
|
|
|
|
|
|
@@ -6640,7 +7454,7 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
|
|
|
/* now it's safe to release the lock */
|
|
|
bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
|
|
|
|
|
|
- bnx2x_undi_int_disable(bp, func);
|
|
|
+ bnx2x_undi_int_disable(bp);
|
|
|
|
|
|
/* close input traffic and wait for it */
|
|
|
/* Do not rcv packets to BRB */
|
|
|
@@ -6679,9 +7493,9 @@ static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
|
|
|
bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
|
|
|
|
|
|
/* restore our func and fw_seq */
|
|
|
- bp->func = func;
|
|
|
+ bp->pf_num = orig_pf_num;
|
|
|
bp->fw_seq =
|
|
|
- (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
|
|
|
+ (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
|
|
|
DRV_MSG_SEQ_NUMBER_MASK);
|
|
|
|
|
|
} else
|
|
|
@@ -6705,20 +7519,42 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
|
|
|
val = REG_RD(bp, MISC_REG_BOND_ID);
|
|
|
id |= (val & 0xf);
|
|
|
bp->common.chip_id = id;
|
|
|
- bp->link_params.chip_id = bp->common.chip_id;
|
|
|
- BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
|
|
|
-
|
|
|
- bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
|
|
|
|
|
|
/* Set doorbell size */
|
|
|
bp->db_size = (1 << BNX2X_DB_SHIFT);
|
|
|
|
|
|
+ if (CHIP_IS_E2(bp)) {
|
|
|
+ val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
|
|
|
+ if ((val & 1) == 0)
|
|
|
+ val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
|
|
|
+ else
|
|
|
+ val = (val >> 1) & 1;
|
|
|
+ BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
|
|
|
+ "2_PORT_MODE");
|
|
|
+ bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
|
|
|
+ CHIP_2_PORT_MODE;
|
|
|
+
|
|
|
+ if (CHIP_MODE_IS_4_PORT(bp))
|
|
|
+ bp->pfid = (bp->pf_num >> 1); /* 0..3 */
|
|
|
+ else
|
|
|
+ bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
|
|
|
+ } else {
|
|
|
+ bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
|
|
|
+ bp->pfid = bp->pf_num; /* 0..7 */
|
|
|
+ }
|
|
|
+
|
|
|
/*
|
|
|
* set base FW non-default (fast path) status block id, this value is
|
|
|
* used to initialize the fw_sb_id saved on the fp/queue structure to
|
|
|
* determine the id used by the FW.
|
|
|
*/
|
|
|
- bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
|
|
|
+ if (CHIP_IS_E1x(bp))
|
|
|
+ bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
|
|
|
+ else /* E2 */
|
|
|
+ bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
|
|
|
+
|
|
|
+ bp->link_params.chip_id = bp->common.chip_id;
|
|
|
+ BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
|
|
|
|
|
|
val = (REG_RD(bp, 0x2874) & 0x55);
|
|
|
if ((bp->common.chip_id & 0x1) ||
|
|
|
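Aside: making the pfid arithmetic above concrete; this standalone table just prints what pf_num >> 1 (4-port mode) and pf_num & 0x6 (2-port mode) evaluate to.

#include <stdio.h>

int main(void)
{
	int pf_num;

	/* 4-port: pfid 0,0,1,1,2,2,3,3   2-port: pfid 0,0,2,2,4,4,6,6 */
	for (pf_num = 0; pf_num < 8; pf_num++)
		printf("pf_num %d -> 4-port pfid %d, 2-port pfid %d\n",
		       pf_num, pf_num >> 1, pf_num & 0x6);
	return 0;
}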
@@ -6734,15 +7570,15 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
|
|
|
bp->common.flash_size, bp->common.flash_size);
|
|
|
|
|
|
bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
|
|
|
- bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
|
|
|
+ bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
|
|
|
+ MISC_REG_GENERIC_CR_1 :
|
|
|
+ MISC_REG_GENERIC_CR_0));
|
|
|
bp->link_params.shmem_base = bp->common.shmem_base;
|
|
|
bp->link_params.shmem2_base = bp->common.shmem2_base;
|
|
|
BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
|
|
|
bp->common.shmem_base, bp->common.shmem2_base);
|
|
|
|
|
|
- if (!bp->common.shmem_base ||
|
|
|
- (bp->common.shmem_base < 0xA0000) ||
|
|
|
- (bp->common.shmem_base >= 0xC0000)) {
|
|
|
+ if (!bp->common.shmem_base) {
|
|
|
BNX2X_DEV_INFO("MCP not active\n");
|
|
|
bp->flags |= NO_MCP_FLAG;
|
|
|
return;
|
|
|
@@ -6751,7 +7587,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
|
|
|
val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
|
|
|
if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
|
|
|
!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
|
|
|
- BNX2X_ERROR("BAD MCP validity signature\n");
|
|
|
+ BNX2X_ERR("BAD MCP validity signature\n");
|
|
|
|
|
|
bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
|
|
|
BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
|
|
|
@@ -6775,8 +7611,8 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
|
|
|
if (val < BNX2X_BC_VER) {
|
|
|
/* for now only warn
|
|
|
* later we might need to enforce this */
|
|
|
- BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
|
|
|
- "please upgrade BC\n", BNX2X_BC_VER, val);
|
|
|
+ BNX2X_ERR("This driver needs bc_ver %X but found %X, "
|
|
|
+ "please upgrade BC\n", BNX2X_BC_VER, val);
|
|
|
}
|
|
|
bp->link_params.feature_config_flags |=
|
|
|
(val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
|
|
|
@@ -6804,6 +7640,57 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
|
|
|
val, val2, val3, val4);
|
|
|
}
|
|
|
|
|
|
+#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
|
|
|
+#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
|
|
|
+
|
|
|
+static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
|
|
|
+{
|
|
|
+ int pfid = BP_FUNC(bp);
|
|
|
+ int vn = BP_E1HVN(bp);
|
|
|
+ int igu_sb_id;
|
|
|
+ u32 val;
|
|
|
+ u8 fid;
|
|
|
+
|
|
|
+ bp->igu_base_sb = 0xff;
|
|
|
+ bp->igu_sb_cnt = 0;
|
|
|
+ if (CHIP_INT_MODE_IS_BC(bp)) {
|
|
|
+ bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
|
|
|
+ bp->l2_cid_count);
|
|
|
+
|
|
|
+ bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
|
|
|
+ FP_SB_MAX_E1x;
|
|
|
+
|
|
|
+ bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
|
|
|
+ (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
|
|
|
+
|
|
|
+ return;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* IGU in normal mode - read CAM */
|
|
|
+ for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
|
|
|
+ igu_sb_id++) {
|
|
|
+ val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
|
|
|
+ if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
|
|
|
+ continue;
|
|
|
+ fid = IGU_FID(val);
|
|
|
+ if ((fid & IGU_FID_ENCODE_IS_PF)) {
|
|
|
+ if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
|
|
|
+ continue;
|
|
|
+ if (IGU_VEC(val) == 0)
|
|
|
+ /* default status block */
|
|
|
+ bp->igu_dsb_id = igu_sb_id;
|
|
|
+ else {
|
|
|
+ if (bp->igu_base_sb == 0xff)
|
|
|
+ bp->igu_base_sb = igu_sb_id;
|
|
|
+ bp->igu_sb_cnt++;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count);
|
|
|
+ if (bp->igu_sb_cnt == 0)
|
|
|
+ BNX2X_ERR("CAM configuration error\n");
|
|
|
+}
|
|
|
+
|
|
|
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
|
|
|
u32 switch_cfg)
|
|
|
{
|
|
|
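Aside: the per-entry test inside bnx2x_get_igu_cam_info() above, factored into a small sketch. It reuses only names introduced in this patch (IGU_FID, the VALID bit and the PF masks); the helper itself is illustrative, not part of the driver.

/* Sketch: does IGU CAM entry 'igu_sb_id' describe a status block of PF 'pfid'? */
static bool bnx2x_igu_cam_entry_is_pf(struct bnx2x *bp, int igu_sb_id, int pfid)
{
	u32 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);

	if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
		return false;			/* unused entry */
	if (!(IGU_FID(val) & IGU_FID_ENCODE_IS_PF))
		return false;			/* belongs to a VF */
	return (IGU_FID(val) & IGU_FID_PF_NUM_MASK) == pfid;
}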
@@ -7178,26 +8065,49 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
|
|
|
|
|
|
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
|
|
|
{
|
|
|
- int func = BP_FUNC(bp);
|
|
|
+ int func = BP_ABS_FUNC(bp);
|
|
|
+ int vn;
|
|
|
u32 val, val2;
|
|
|
int rc = 0;
|
|
|
|
|
|
bnx2x_get_common_hwinfo(bp);
|
|
|
|
|
|
- bp->common.int_block = INT_BLOCK_HC;
|
|
|
+ if (CHIP_IS_E1x(bp)) {
|
|
|
+ bp->common.int_block = INT_BLOCK_HC;
|
|
|
+
|
|
|
+ bp->igu_dsb_id = DEF_SB_IGU_ID;
|
|
|
+ bp->igu_base_sb = 0;
|
|
|
+ bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
|
|
|
+ } else {
|
|
|
+ bp->common.int_block = INT_BLOCK_IGU;
|
|
|
+ val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
|
|
|
+ if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
|
|
|
+ DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
|
|
|
+ bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
|
|
|
+ } else
|
|
|
+ DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
|
|
|
|
|
|
- bp->igu_dsb_id = DEF_SB_IGU_ID;
|
|
|
- bp->igu_base_sb = 0;
|
|
|
- bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
|
|
|
+ bnx2x_get_igu_cam_info(bp);
|
|
|
+
|
|
|
+ }
|
|
|
+ DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
|
|
|
+ bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
|
|
|
+
|
|
|
+ /*
|
|
|
+ * Initialize MF configuration
|
|
|
+ */
|
|
|
|
|
|
bp->mf_ov = 0;
|
|
|
bp->mf_mode = 0;
|
|
|
- if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
|
|
|
-
|
|
|
- bp->common.mf_cfg_base = bp->common.shmem_base +
|
|
|
+ vn = BP_E1HVN(bp);
|
|
|
+ if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
|
|
|
+ if (SHMEM2_HAS(bp, mf_cfg_addr))
|
|
|
+ bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
|
|
|
+ else
|
|
|
+ bp->common.mf_cfg_base = bp->common.shmem_base +
|
|
|
offsetof(struct shmem_region, func_mb) +
|
|
|
E1H_FUNC_MAX * sizeof(struct drv_func_mb);
|
|
|
- bp->mf_config =
|
|
|
+ bp->mf_config[vn] =
|
|
|
MF_CFG_RD(bp, func_mf_config[func].config);
|
|
|
|
|
|
val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
|
|
|
@@ -7213,16 +8123,16 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
|
|
|
FUNC_MF_CFG_E1HOV_TAG_MASK);
|
|
|
if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
|
|
|
bp->mf_ov = val;
|
|
|
- BNX2X_DEV_INFO("E1HOV for func %d is %d "
|
|
|
+ BNX2X_DEV_INFO("MF OV for func %d is %d "
|
|
|
"(0x%04x)\n",
|
|
|
func, bp->mf_ov, bp->mf_ov);
|
|
|
} else {
|
|
|
- BNX2X_ERROR("No valid E1HOV for func %d,"
|
|
|
+ BNX2X_ERROR("No valid MF OV for func %d,"
|
|
|
" aborting\n", func);
|
|
|
rc = -EPERM;
|
|
|
}
|
|
|
} else {
|
|
|
- if (BP_E1HVN(bp)) {
|
|
|
+ if (BP_VN(bp)) {
|
|
|
BNX2X_ERROR("VN %d in single function mode,"
|
|
|
" aborting\n", BP_E1HVN(bp));
|
|
|
rc = -EPERM;
|
|
|
@@ -7230,15 +8140,25 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
- /* adjust igu_sb_cnt to MF */
|
|
|
- if (IS_MF(bp))
|
|
|
+ /* adjust igu_sb_cnt to MF for E1x */
|
|
|
+ if (CHIP_IS_E1x(bp) && IS_MF(bp))
|
|
|
bp->igu_sb_cnt /= E1HVN_MAX;
|
|
|
|
|
|
+ /*
|
|
|
+ * adjust E2 sb count: to be removed when FW will support
|
|
|
+ * more than 16 L2 clients
|
|
|
+ */
|
|
|
+#define MAX_L2_CLIENTS 16
|
|
|
+ if (CHIP_IS_E2(bp))
|
|
|
+ bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
|
|
|
+ MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
|
|
|
+
|
|
|
if (!BP_NOMCP(bp)) {
|
|
|
bnx2x_get_port_hwinfo(bp);
|
|
|
|
|
|
- bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
|
|
|
- DRV_MSG_SEQ_NUMBER_MASK);
|
|
|
+ bp->fw_seq =
|
|
|
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
|
|
|
+ DRV_MSG_SEQ_NUMBER_MASK);
|
|
|
BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
|
|
|
}
|
|
|
|
|
|
@@ -7338,7 +8258,7 @@ out_not_found:
|
|
|
|
|
|
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
|
|
|
{
|
|
|
- int func = BP_FUNC(bp);
|
|
|
+ int func;
|
|
|
int timer_interval;
|
|
|
int rc;
|
|
|
|
|
|
@@ -7362,6 +8282,9 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
|
|
|
rc = bnx2x_alloc_mem_bp(bp);
|
|
|
|
|
|
bnx2x_read_fwinfo(bp);
|
|
|
+
|
|
|
+ func = BP_FUNC(bp);
|
|
|
+
|
|
|
/* need to reset chip if undi was active */
|
|
|
if (!BP_NOMCP(bp))
|
|
|
bnx2x_undi_unload(bp);
|
|
|
@@ -7650,7 +8573,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
|
|
|
bp->dev = dev;
|
|
|
bp->pdev = pdev;
|
|
|
bp->flags = 0;
|
|
|
- bp->func = PCI_FUNC(pdev->devfn);
|
|
|
+ bp->pf_num = PCI_FUNC(pdev->devfn);
|
|
|
|
|
|
rc = pci_enable_device(pdev);
|
|
|
if (rc) {
|
|
|
@@ -7964,6 +8887,8 @@ int bnx2x_init_firmware(struct bnx2x *bp)
|
|
|
fw_file_name = FW_FILE_NAME_E1;
|
|
|
else if (CHIP_IS_E1H(bp))
|
|
|
fw_file_name = FW_FILE_NAME_E1H;
|
|
|
+ else if (CHIP_IS_E2(bp))
|
|
|
+ fw_file_name = FW_FILE_NAME_E2;
|
|
|
else {
|
|
|
BNX2X_ERR("Unsupported chip revision\n");
|
|
|
return -EINVAL;
|
|
|
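Aside: for context, roughly how the file name chosen above gets used further down in bnx2x_init_firmware(); request_firmware() is the standard kernel firmware-loader API, and bp->firmware / bp->pdev are existing driver fields (treat the helper name and abbreviated error handling as assumptions).

/* Sketch: request the selected firmware blob from user space. */
static int bnx2x_request_fw(struct bnx2x *bp, const char *fw_file_name)
{
	int rc;

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc)
		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
	return rc;
}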
@@ -8047,8 +8972,25 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
|
|
|
int pcie_width, pcie_speed;
|
|
|
int rc, cid_count;
|
|
|
|
|
|
- cid_count = FP_SB_MAX_E1x + CNIC_CONTEXT_USE;
|
|
|
+ switch (ent->driver_data) {
|
|
|
+ case BCM57710:
|
|
|
+ case BCM57711:
|
|
|
+ case BCM57711E:
|
|
|
+ cid_count = FP_SB_MAX_E1x;
|
|
|
+ break;
|
|
|
+
|
|
|
+ case BCM57712:
|
|
|
+ case BCM57712E:
|
|
|
+ cid_count = FP_SB_MAX_E2;
|
|
|
+ break;
|
|
|
|
|
|
+ default:
|
|
|
+ pr_err("Unknown board_type (%ld), aborting\n",
|
|
|
+ ent->driver_data);
|
|
|
+ return ENODEV;
|
|
|
+ }
|
|
|
+
|
|
|
+ cid_count += CNIC_CONTEXT_USE;
|
|
|
/* dev zeroed in init_etherdev */
|
|
|
dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
|
|
|
if (!dev) {
|
|
|
@@ -8086,7 +9028,10 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
|
|
|
netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
|
|
|
" IRQ %d, ", board_info[ent->driver_data].name,
|
|
|
(CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
|
|
|
- pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
|
|
|
+ pcie_width,
|
|
|
+ ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
|
|
|
+ (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
|
|
|
+ "5GHz (Gen2)" : "2.5GHz",
|
|
|
dev->base_addr, bp->pdev->irq);
|
|
|
pr_cont("node addr %pM\n", dev->dev_addr);
|
|
|
|
|
|
@@ -8199,8 +9144,9 @@ static void bnx2x_eeh_recover(struct bnx2x *bp)
|
|
|
BNX2X_ERR("BAD MCP validity signature\n");
|
|
|
|
|
|
if (!BP_NOMCP(bp)) {
|
|
|
- bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
|
|
|
- & DRV_MSG_SEQ_NUMBER_MASK);
|
|
|
+ bp->fw_seq =
|
|
|
+ (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
|
|
|
+ DRV_MSG_SEQ_NUMBER_MASK);
|
|
|
BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
|
|
|
}
|
|
|
}
|
|
|
@@ -8283,7 +9229,8 @@ static void bnx2x_io_resume(struct pci_dev *pdev)
|
|
|
struct bnx2x *bp = netdev_priv(dev);
|
|
|
|
|
|
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
|
|
|
- printk(KERN_ERR "Handling parity error recovery. Try again later\n");
|
|
|
+ printk(KERN_ERR "Handling parity error recovery. "
|
|
|
+ "Try again later\n");
|
|
|
return;
|
|
|
}
|
|
|
|
|
|
@@ -8560,7 +9507,11 @@ void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
|
|
|
cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
|
|
|
cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
|
|
|
}
|
|
|
- cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
|
|
|
+ if (CHIP_IS_E2(bp))
|
|
|
+ cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
|
|
|
+ else
|
|
|
+ cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
|
|
|
+
|
|
|
cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
|
|
|
cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
|
|
|
cp->irq_arr[1].status_blk = bp->def_status_blk;
|