@@ -2317,6 +2317,7 @@ static int bnxt_alloc_rx_rings(struct bnxt *bp)
 		if (rc)
 			return rc;
 
+		ring->grp_idx = i;
 		rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
 		mem_size = rxr->rx_agg_bmap_size / 8;
 		rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
@@ -2389,6 +2390,7 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp)
 		if (rc)
 			return rc;
 
+		ring->grp_idx = txr->bnapi->index;
 		if (bp->tx_push_size) {
 			dma_addr_t mapping;
 
@@ -2442,8 +2444,10 @@ static void bnxt_free_cp_rings(struct bnxt *bp)
 
 static int bnxt_alloc_cp_rings(struct bnxt *bp)
 {
-	int i, rc;
+	int i, rc, ulp_base_vec, ulp_msix;
 
+	ulp_msix = bnxt_get_ulp_msix_num(bp);
+	ulp_base_vec = bnxt_get_ulp_msix_base(bp);
 	for (i = 0; i < bp->cp_nr_rings; i++) {
 		struct bnxt_napi *bnapi = bp->bnapi[i];
 		struct bnxt_cp_ring_info *cpr;
@@ -2458,6 +2462,11 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
 		rc = bnxt_alloc_ring(bp, ring);
 		if (rc)
 			return rc;
+
+		if (ulp_msix && i >= ulp_base_vec)
+			ring->map_idx = i + ulp_msix;
+		else
+			ring->map_idx = i;
 	}
 	return 0;
 }
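
Note on the hunks above: each ring now records which group or MSI-X table slot it belongs to, and completion rings get an explicit map_idx. When the RDMA (ULP) driver reserves a block of vectors starting at ulp_base_vec, L2 rings at or above that base are shifted up by ulp_msix, leaving a contiguous hole for RDMA. A minimal standalone sketch of that arithmetic (the ring and vector counts are made up for illustration):

#include <stdio.h>

/* Same mapping rule as the hunk above: rings below the ULP base keep
 * their index; the rest skip over the ulp_msix vectors reserved for
 * the RDMA driver.
 */
static int map_idx(int i, int ulp_msix, int ulp_base_vec)
{
	if (ulp_msix && i >= ulp_base_vec)
		return i + ulp_msix;
	return i;
}

int main(void)
{
	int i;

	/* hypothetical: 6 cp rings, 2 ULP vectors based at slot 3 */
	for (i = 0; i < 6; i++)
		printf("cp ring %d -> MSI-X slot %d\n", i, map_idx(i, 2, 3));
	return 0;
}

With these values, rings 0-2 keep slots 0-2, rings 3-5 move to slots 5-7, and slots 3-4 stay free for the ULP driver.
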
@@ -3059,12 +3068,21 @@ static void bnxt_free_stats(struct bnxt *bp)
 	u32 size, i;
 	struct pci_dev *pdev = bp->pdev;
 
+	bp->flags &= ~BNXT_FLAG_PORT_STATS;
+	bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
+
 	if (bp->hw_rx_port_stats) {
 		dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
 				  bp->hw_rx_port_stats,
 				  bp->hw_rx_port_stats_map);
 		bp->hw_rx_port_stats = NULL;
-		bp->flags &= ~BNXT_FLAG_PORT_STATS;
+	}
+
+	if (bp->hw_rx_port_stats_ext) {
+		dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
+				  bp->hw_rx_port_stats_ext,
+				  bp->hw_rx_port_stats_ext_map);
+		bp->hw_rx_port_stats_ext = NULL;
 	}
 
 	if (!bp->bnapi)
@@ -3120,6 +3138,21 @@ static int bnxt_alloc_stats(struct bnxt *bp)
 		bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
 					   sizeof(struct rx_port_stats) + 512;
 		bp->flags |= BNXT_FLAG_PORT_STATS;
+
+		/* Display extended statistics only if FW supports it */
+		if (bp->hwrm_spec_code < 0x10804 ||
+		    bp->hwrm_spec_code == 0x10900)
+			return 0;
+
+		bp->hw_rx_port_stats_ext =
+			dma_zalloc_coherent(&pdev->dev,
+					    sizeof(struct rx_port_stats_ext),
+					    &bp->hw_rx_port_stats_ext_map,
+					    GFP_KERNEL);
+		if (!bp->hw_rx_port_stats_ext)
+			return 0;
+
+		bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
 	}
 	return 0;
 }
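
The gating above deserves a gloss: extended RX port statistics are available from HWRM spec 1.8.4 (0x10804) onward, with 1.9.0 (0x10900) explicitly excluded. Restated as a standalone predicate, using the same hex-encoded spec values as the driver:

#include <stdbool.h>
#include <stdio.h>

/* Restates the firmware gate above: extended port stats need spec
 * >= 1.8.4 (0x10804), except 1.9.0 (0x10900).
 */
static bool fw_has_ext_port_stats(unsigned int hwrm_spec_code)
{
	return hwrm_spec_code >= 0x10804 && hwrm_spec_code != 0x10900;
}

int main(void)
{
	printf("1.8.3: %d, 1.8.4: %d, 1.9.0: %d, 1.9.1: %d\n",
	       fw_has_ext_port_stats(0x10803), fw_has_ext_port_stats(0x10804),
	       fw_has_ext_port_stats(0x10900), fw_has_ext_port_stats(0x10901));
	return 0;
}

Note also that the allocation path returns 0 rather than an error when the extended buffer cannot be allocated: the extended stats are optional, so the device still comes up without them.
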
@@ -3357,6 +3390,15 @@ static void bnxt_disable_int(struct bnxt *bp)
 	}
 }
 
+static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
+{
+	struct bnxt_napi *bnapi = bp->bnapi[n];
+	struct bnxt_cp_ring_info *cpr;
+
+	cpr = &bnapi->cp_ring;
+	return cpr->cp_ring_struct.map_idx;
+}
+
 static void bnxt_disable_int_sync(struct bnxt *bp)
 {
 	int i;
@@ -3364,8 +3406,11 @@ static void bnxt_disable_int_sync(struct bnxt *bp)
 	atomic_inc(&bp->intr_sem);
 
 	bnxt_disable_int(bp);
-	for (i = 0; i < bp->cp_nr_rings; i++)
-		synchronize_irq(bp->irq_tbl[i].vector);
+	for (i = 0; i < bp->cp_nr_rings; i++) {
+		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
+
+		synchronize_irq(bp->irq_tbl[map_idx].vector);
+	}
 }
 
 static void bnxt_enable_int(struct bnxt *bp)
@@ -3398,7 +3443,8 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
 	int i, intr_process, rc, tmo_count;
 	struct input *req = msg;
 	u32 *data = msg;
-	__le32 *resp_len, *valid;
+	__le32 *resp_len;
+	u8 *valid;
 	u16 cp_ring_id, len = 0;
 	struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
 	u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
@@ -3450,6 +3496,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
 
 	i = 0;
 	tmo_count = timeout * 40;
+	resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
 	if (intr_process) {
 		/* Wait until hwrm response cmpl interrupt is processed */
 		while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
@@ -3462,9 +3509,11 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
 				   le16_to_cpu(req->req_type));
 			return -1;
 		}
+		len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
+		      HWRM_RESP_LEN_SFT;
+		valid = bp->hwrm_cmd_resp_addr + len - 1;
 	} else {
 		/* Check if response len is updated */
-		resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
 		for (i = 0; i < tmo_count; i++) {
 			len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
 			      HWRM_RESP_LEN_SFT;
@@ -3480,10 +3529,12 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
 			return -1;
 		}
 
-		/* Last word of resp contains valid bit */
-		valid = bp->hwrm_cmd_resp_addr + len - 4;
+		/* Last byte of resp contains valid bit */
+		valid = bp->hwrm_cmd_resp_addr + len - 1;
 		for (i = 0; i < 5; i++) {
-			if (le32_to_cpu(*valid) & HWRM_RESP_VALID_MASK)
+			/* make sure we read from updated DMA memory */
+			dma_rmb();
+			if (*valid)
 				break;
 			udelay(1);
 		}
@@ -3496,6 +3547,11 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
 		}
 	}
 
+	/* Zero valid bit for compatibility. Valid bit in an older spec
+	 * may become a new field in a newer spec. We must make sure that
+	 * a new field not implemented by old spec will read zero.
+	 */
+	*valid = 0;
 	rc = le16_to_cpu(resp->error_code);
 	if (rc && !silent)
 		netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
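
The three hunks above change how HWRM response completion is detected: validity is now signaled by the last byte of the response rather than its last 32-bit word, the driver polls that byte with a dma_rmb() between reads so the CPU never sees the valid byte before the rest of the DMA'd response, and the byte is zeroed after use so that a future spec which redefines it as a real field reads back zero on older firmware. A userspace analogue of the offset arithmetic (not the ordering, which plain C cannot express):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Userspace analogue of the pattern above: firmware DMAs a response
 * of resp_len bytes and sets the final byte non-zero last.  The
 * driver polls that byte, then zeroes it for forward compatibility.
 * (The kernel orders the reads with dma_rmb(); this sketch only
 * illustrates the offsets, with a hypothetical response length.)
 */
int main(void)
{
	uint8_t resp[32];
	uint16_t resp_len = 16;
	uint8_t *valid = resp + resp_len - 1;

	memset(resp, 0, sizeof(resp));
	*valid = 1;			/* firmware writes this last */
	if (*valid)
		printf("response of %u bytes is complete\n", resp_len);
	*valid = 0;			/* zero for forward compatibility */
	return 0;
}
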
@@ -3577,9 +3633,13 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
 				   FUNC_DRV_RGTR_REQ_ENABLES_VER);
 
 	req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
-	req.ver_maj = DRV_VER_MAJ;
-	req.ver_min = DRV_VER_MIN;
-	req.ver_upd = DRV_VER_UPD;
+	req.flags = cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE);
+	req.ver_maj_8b = DRV_VER_MAJ;
+	req.ver_min_8b = DRV_VER_MIN;
+	req.ver_upd_8b = DRV_VER_UPD;
+	req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
+	req.ver_min = cpu_to_le16(DRV_VER_MIN);
+	req.ver_upd = cpu_to_le16(DRV_VER_UPD);
 
 	if (BNXT_PF(bp)) {
 		u32 data[8];
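
Driver registration now reports the version twice: in the legacy 8-bit fields and in new little-endian 16-bit fields, with FLAGS_16BIT_VER_MODE telling newer firmware to prefer the wider ones; older firmware ignores the flag and keeps reading the 8-bit fields. A small sketch of why the widening matters, with a hypothetical version number that would not fit in a byte:

#include <stdint.h>
#include <stdio.h>

/* Illustrates the dual encoding above: the same (made-up) version is
 * written both as a truncating u8 and as little-endian u16 bytes.
 * Only the 16-bit form survives values above 255.
 */
int main(void)
{
	uint16_t ver_maj = 300;			/* hypothetical, > 255 */
	uint8_t ver_maj_8b = (uint8_t)ver_maj;	/* truncates to 44 */
	uint8_t wire[2] = { ver_maj & 0xff, ver_maj >> 8 };

	printf("8-bit field: %u, 16-bit LE bytes: %02x %02x\n",
	       ver_maj_8b, wire[0], wire[1]);
	return 0;
}
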
@@ -3998,6 +4058,13 @@ static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
 	return rc;
 }
 
+static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
+{
+	if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
+		return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
+	return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
+}
+
 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
 {
 	unsigned int ring = 0, grp_idx;
@@ -4053,8 +4120,7 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
 	if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
 		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
 	if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
-		req.flags |=
-			cpu_to_le32(VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE);
+		req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));
 
 	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 }
@@ -4135,9 +4201,13 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
 	mutex_lock(&bp->hwrm_cmd_lock);
 	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (!rc) {
-		if (resp->flags &
-		    cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
+		u32 flags = le32_to_cpu(resp->flags);
+
+		if (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP)
 			bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
+		if (flags &
+		    VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
+			bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
 	}
 	mutex_unlock(&bp->hwrm_cmd_lock);
 	return rc;
@@ -4204,12 +4274,12 @@ static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
 
 static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
 				    struct bnxt_ring_struct *ring,
-				    u32 ring_type, u32 map_index,
-				    u32 stats_ctx_id)
+				    u32 ring_type, u32 map_index)
 {
 	int rc = 0, err = 0;
 	struct hwrm_ring_alloc_input req = {0};
 	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
+	struct bnxt_ring_grp_info *grp_info;
 	u16 ring_id;
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);
@@ -4231,10 +4301,10 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
 	case HWRM_RING_ALLOC_TX:
 		req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
 		/* Association of transmit ring with completion ring */
-		req.cmpl_ring_id =
-			cpu_to_le16(bp->grp_info[map_index].cp_fw_ring_id);
+		grp_info = &bp->grp_info[ring->grp_idx];
+		req.cmpl_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
 		req.length = cpu_to_le32(bp->tx_ring_mask + 1);
-		req.stat_ctx_id = cpu_to_le32(stats_ctx_id);
+		req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
 		req.queue_id = cpu_to_le16(ring->queue_id);
 		break;
 	case HWRM_RING_ALLOC_RX:
@@ -4321,10 +4391,11 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
 		struct bnxt_napi *bnapi = bp->bnapi[i];
 		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
 		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
+		u32 map_idx = ring->map_idx;
 
-		cpr->cp_doorbell = bp->bar1 + i * 0x80;
-		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL, i,
-					      INVALID_STATS_CTX_ID);
+		cpr->cp_doorbell = bp->bar1 + map_idx * 0x80;
+		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_CMPL,
+					      map_idx);
 		if (rc)
 			goto err_out;
 		BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
@@ -4340,11 +4411,10 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
 	for (i = 0; i < bp->tx_nr_rings; i++) {
 		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
 		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
-		u32 map_idx = txr->bnapi->index;
-		u16 fw_stats_ctx = bp->grp_info[map_idx].fw_stats_ctx;
+		u32 map_idx = i;
 
 		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_TX,
-					      map_idx, fw_stats_ctx);
+					      map_idx);
 		if (rc)
 			goto err_out;
 		txr->tx_doorbell = bp->bar1 + map_idx * 0x80;
@@ -4356,7 +4426,7 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
 		u32 map_idx = rxr->bnapi->index;
 
 		rc = hwrm_ring_alloc_send_msg(bp, ring, HWRM_RING_ALLOC_RX,
-					      map_idx, INVALID_STATS_CTX_ID);
+					      map_idx);
 		if (rc)
 			goto err_out;
 		rxr->rx_doorbell = bp->bar1 + map_idx * 0x80;
@@ -4369,13 +4439,12 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
 			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
 			struct bnxt_ring_struct *ring =
 						&rxr->rx_agg_ring_struct;
-			u32 grp_idx = rxr->bnapi->index;
+			u32 grp_idx = ring->grp_idx;
 			u32 map_idx = grp_idx + bp->rx_nr_rings;
 
 			rc = hwrm_ring_alloc_send_msg(bp, ring,
 						      HWRM_RING_ALLOC_AGG,
-						      map_idx,
-						      INVALID_STATS_CTX_ID);
+						      map_idx);
 			if (rc)
 				goto err_out;
 
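
Across the ring-allocation hunks above, every doorbell lands in BAR1 at a 0x80-byte stride indexed by map_idx: completion rings use their MSI-X map index, TX rings their ring number, RX rings their bnapi index, and AGG rings are placed after all RX rings (grp_idx + rx_nr_rings). A tiny sketch of that layout; the ring count is invented, only the stride and the AGG offset come from the code:

#include <stdio.h>

/* Doorbell offsets implied by the hunks above: BAR1 + map_idx * 0x80,
 * with AGG rings placed after the rx_nr_rings RX rings.
 */
int main(void)
{
	int rx_nr_rings = 4, i;	/* hypothetical ring count */

	for (i = 0; i < rx_nr_rings; i++)
		printf("rx %d doorbell: +0x%x, agg %d doorbell: +0x%x\n",
		       i, i * 0x80, i, (i + rx_nr_rings) * 0x80);
	return 0;
}
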
@@ -4669,20 +4738,59 @@ static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
 	return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic);
 }
 
+static int bnxt_cp_rings_in_use(struct bnxt *bp)
+{
+	int cp = bp->cp_nr_rings;
+	int ulp_msix, ulp_base;
+
+	ulp_msix = bnxt_get_ulp_msix_num(bp);
+	if (ulp_msix) {
+		ulp_base = bnxt_get_ulp_msix_base(bp);
+		cp += ulp_msix;
+		if ((ulp_base + ulp_msix) > cp)
+			cp = ulp_base + ulp_msix;
+	}
+	return cp;
+}
+
+static bool bnxt_need_reserve_rings(struct bnxt *bp)
+{
+	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+	int cp = bnxt_cp_rings_in_use(bp);
+	int rx = bp->rx_nr_rings;
+	int vnic = 1, grp = rx;
+
+	if (bp->hwrm_spec_code < 0x10601)
+		return false;
+
+	if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
+		return true;
+
+	if (bp->flags & BNXT_FLAG_RFS)
+		vnic = rx + 1;
+	if (bp->flags & BNXT_FLAG_AGG_RINGS)
+		rx <<= 1;
+	if ((bp->flags & BNXT_FLAG_NEW_RM) &&
+	    (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
+	     hw_resc->resv_hw_ring_grps != grp || hw_resc->resv_vnics != vnic))
+		return true;
+	return false;
+}
+
 static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
 			   bool shared);
 
 static int __bnxt_reserve_rings(struct bnxt *bp)
 {
 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
+	int cp = bnxt_cp_rings_in_use(bp);
 	int tx = bp->tx_nr_rings;
 	int rx = bp->rx_nr_rings;
-	int cp = bp->cp_nr_rings;
 	int grp, rx_rings, rc;
 	bool sh = false;
 	int vnic = 1;
 
-	if (bp->hwrm_spec_code < 0x10601)
+	if (!bnxt_need_reserve_rings(bp))
 		return 0;
 
 	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
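
Two things happen in the hunk above: bnxt_need_reserve_rings() is moved ahead of __bnxt_reserve_rings() (its old copy is deleted further down) and extended to compare ring-group counts, and the completion-ring count it checks now comes from bnxt_cp_rings_in_use(), which folds in any ULP MSI-X vectors, including the case where the ULP block starts beyond the L2 range. A standalone restatement of just the counting rule:

#include <stdio.h>

/* Restates bnxt_cp_rings_in_use() above in plain C. */
static int cp_rings_in_use(int cp_nr_rings, int ulp_msix, int ulp_base)
{
	int cp = cp_nr_rings;

	if (ulp_msix) {
		cp += ulp_msix;
		if (ulp_base + ulp_msix > cp)
			cp = ulp_base + ulp_msix;
	}
	return cp;
}

int main(void)
{
	/* hypothetical: 8 L2 rings, 4 ULP vectors based at 8 vs. at 16 */
	printf("%d %d\n", cp_rings_in_use(8, 4, 8),	/* 12 */
	       cp_rings_in_use(8, 4, 16));		/* 20 */
	return 0;
}
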
@@ -4691,14 +4799,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
 		vnic = rx + 1;
 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
 		rx <<= 1;
-
 	grp = bp->rx_nr_rings;
-	if (tx == hw_resc->resv_tx_rings &&
-	    (!(bp->flags & BNXT_FLAG_NEW_RM) ||
-	     (rx == hw_resc->resv_rx_rings &&
-	      grp == hw_resc->resv_hw_ring_grps &&
-	      cp == hw_resc->resv_cp_rings && vnic == hw_resc->resv_vnics)))
-		return 0;
 
 	rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic);
 	if (rc)
@@ -4742,30 +4843,6 @@ static int __bnxt_reserve_rings(struct bnxt *bp)
 	return rc;
 }
 
-static bool bnxt_need_reserve_rings(struct bnxt *bp)
-{
-	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
-	int rx = bp->rx_nr_rings;
-	int vnic = 1;
-
-	if (bp->hwrm_spec_code < 0x10601)
-		return false;
-
-	if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
-		return true;
-
-	if (bp->flags & BNXT_FLAG_RFS)
-		vnic = rx + 1;
-	if (bp->flags & BNXT_FLAG_AGG_RINGS)
-		rx <<= 1;
-	if ((bp->flags & BNXT_FLAG_NEW_RM) &&
-	    (hw_resc->resv_rx_rings != rx ||
-	     hw_resc->resv_cp_rings != bp->cp_nr_rings ||
-	     hw_resc->resv_vnics != vnic))
-		return true;
-	return false;
-}
-
 static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 				    int ring_grps, int cp_rings, int vnics)
 {
@@ -5055,7 +5132,7 @@ func_qcfg_exit:
 	return rc;
 }
 
-static int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
+int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
 {
 	struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
 	struct hwrm_func_resource_qcaps_input req = {0};
@@ -5072,6 +5149,10 @@ static int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp)
 		goto hwrm_func_resc_qcaps_exit;
 	}
 
+	hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
+	if (!all)
+		goto hwrm_func_resc_qcaps_exit;
+
 	hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
 	hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
 	hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
@@ -5178,7 +5259,7 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
 	if (rc)
 		return rc;
 	if (bp->hwrm_spec_code >= 0x10803) {
-		rc = bnxt_hwrm_func_resc_qcaps(bp);
+		rc = bnxt_hwrm_func_resc_qcaps(bp, true);
 		if (!rc)
 			bp->flags |= BNXT_FLAG_NEW_RM;
 	}
@@ -5326,6 +5407,21 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp)
 	return rc;
 }
 
+static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
+{
+	struct hwrm_port_qstats_ext_input req = {0};
+	struct bnxt_pf_info *pf = &bp->pf;
+
+	if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
+		return 0;
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
+	req.port_id = cpu_to_le16(pf->port_id);
+	req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
+	req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
+	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
 static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
 {
 	if (bp->vxlan_port_cnt) {
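
The new HWRM_PORT_QSTATS_EXT request above hands firmware the DMA address of the buffer allocated in bnxt_alloc_stats(); firmware fills it with little-endian 64-bit counters that the driver later exposes through ethtool. A userspace analogue of decoding such a counter block (the two counter values are invented; only the little-endian u64 decoding rule reflects how these buffers are laid out):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical decode of a DMA'd stats block: an array of
 * little-endian u64 counters, read byte by byte for portability.
 */
static uint64_t le64_get(const uint8_t *p)
{
	uint64_t v = 0;
	int i;

	for (i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	uint8_t buf[16] = { 0x2a, 0, 0, 0, 0, 0, 0, 0,
			    0x01, 0x01, 0, 0, 0, 0, 0, 0 };

	printf("counter 0 = %llu, counter 1 = %llu\n",	/* 42 and 257 */
	       (unsigned long long)le64_get(buf),
	       (unsigned long long)le64_get(buf + 8));
	return 0;
}
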
@@ -5418,10 +5514,9 @@ static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
 	req.fid = cpu_to_le16(0xffff);
 	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
-	req.cache_linesize = FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_64;
+	req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
 	if (size == 128)
-		req.cache_linesize =
-			FUNC_QCFG_RESP_CACHE_LINESIZE_CACHE_LINESIZE_128;
+		req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
 
 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (rc)
@@ -5740,6 +5835,7 @@ static void bnxt_setup_msix(struct bnxt *bp)
 	}
 
 	for (i = 0; i < bp->cp_nr_rings; i++) {
+		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
 		char *attr;
 
 		if (bp->flags & BNXT_FLAG_SHARED_RINGS)
@@ -5749,9 +5845,9 @@ static void bnxt_setup_msix(struct bnxt *bp)
 		else
 			attr = "tx";
 
-		snprintf(bp->irq_tbl[i].name, len, "%s-%s-%d", dev->name, attr,
-			 i);
-		bp->irq_tbl[i].handler = bnxt_msix;
+		snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
+			 attr, i);
+		bp->irq_tbl[map_idx].handler = bnxt_msix;
 	}
 }
 
@@ -5812,7 +5908,7 @@ void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max)
 	bp->hw_resc.max_cp_rings = max;
 }
 
-static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
+unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
 {
 	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
 
@@ -5824,12 +5920,44 @@ void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
 	bp->hw_resc.max_irqs = max_irqs;
 }
 
+int bnxt_get_avail_msix(struct bnxt *bp, int num)
+{
+	int max_cp = bnxt_get_max_func_cp_rings(bp);
+	int max_irq = bnxt_get_max_func_irqs(bp);
+	int total_req = bp->cp_nr_rings + num;
+	int max_idx, avail_msix;
+
+	max_idx = min_t(int, bp->total_irqs, max_cp);
+	avail_msix = max_idx - bp->cp_nr_rings;
+	if (!(bp->flags & BNXT_FLAG_NEW_RM) || avail_msix >= num)
+		return avail_msix;
+
+	if (max_irq < total_req) {
+		num = max_irq - bp->cp_nr_rings;
+		if (num <= 0)
+			return 0;
+	}
+	return num;
+}
+
+static int bnxt_get_num_msix(struct bnxt *bp)
+{
+	if (!(bp->flags & BNXT_FLAG_NEW_RM))
+		return bnxt_get_max_func_irqs(bp);
+
+	return bnxt_cp_rings_in_use(bp);
+}
+
 static int bnxt_init_msix(struct bnxt *bp)
 {
-	int i, total_vecs, rc = 0, min = 1;
+	int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
 	struct msix_entry *msix_ent;
 
-	total_vecs = bnxt_get_max_func_irqs(bp);
+	total_vecs = bnxt_get_num_msix(bp);
+	max = bnxt_get_max_func_irqs(bp);
+	if (total_vecs > max)
+		total_vecs = max;
+
 	msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
 	if (!msix_ent)
 		return -ENOMEM;
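
bnxt_get_avail_msix() above answers "how many vectors may the RDMA driver claim?": the leftovers after the L2 rings, bounded by the function's completion-ring cap and, under the new resource manager, its IRQ cap. Restated standalone, with the NEW_RM path folded in as a flag (all input values below are hypothetical):

#include <stdio.h>

/* Restatement of bnxt_get_avail_msix() above. */
static int min_int(int a, int b) { return a < b ? a : b; }

static int get_avail_msix(int num, int cp_nr_rings, int total_irqs,
			  int max_cp, int max_irq, int new_rm)
{
	int max_idx = min_int(total_irqs, max_cp);
	int avail_msix = max_idx - cp_nr_rings;

	if (!new_rm || avail_msix >= num)
		return avail_msix;
	if (max_irq < cp_nr_rings + num) {
		num = max_irq - cp_nr_rings;
		if (num <= 0)
			return 0;
	}
	return num;
}

int main(void)
{
	/* want 9 vectors; 8 L2 rings, 16 IRQs now, caps of 24 cp / 20 irq */
	printf("%d\n", get_avail_msix(9, 8, 16, 24, 20, 1));	/* 9 */
	return 0;
}
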
@@ -5843,7 +5971,8 @@ static int bnxt_init_msix(struct bnxt *bp)
 		min = 2;
 
 	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
-	if (total_vecs < 0) {
+	ulp_msix = bnxt_get_ulp_msix_num(bp);
+	if (total_vecs < 0 || total_vecs < ulp_msix) {
 		rc = -ENODEV;
 		goto msix_setup_exit;
 	}
@@ -5856,7 +5985,7 @@ static int bnxt_init_msix(struct bnxt *bp)
 	bp->total_irqs = total_vecs;
 	/* Trim rings based upon num of vectors allocated */
 	rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
-			     total_vecs, min == 1);
+			     total_vecs - ulp_msix, min == 1);
 	if (rc)
 		goto msix_setup_exit;
 
@@ -5920,9 +6049,8 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
 	bp->flags &= ~BNXT_FLAG_USING_MSIX;
 }
 
-static int bnxt_reserve_rings(struct bnxt *bp)
+int bnxt_reserve_rings(struct bnxt *bp)
 {
-	int orig_cp = bp->hw_resc.resv_cp_rings;
 	int tcs = netdev_get_num_tc(bp->dev);
 	int rc;
 
@@ -5934,9 +6062,12 @@ static int bnxt_reserve_rings(struct bnxt *bp)
 		netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
 		return rc;
 	}
-	if ((bp->flags & BNXT_FLAG_NEW_RM) && bp->cp_nr_rings > orig_cp) {
+	if ((bp->flags & BNXT_FLAG_NEW_RM) &&
+	    (bnxt_get_num_msix(bp) != bp->total_irqs)) {
+		bnxt_ulp_irq_stop(bp);
 		bnxt_clear_int_mode(bp);
 		rc = bnxt_init_int_mode(bp);
+		bnxt_ulp_irq_restart(bp, rc);
 		if (rc)
 			return rc;
 	}
@@ -5963,7 +6094,9 @@ static void bnxt_free_irq(struct bnxt *bp)
 		return;
 
 	for (i = 0; i < bp->cp_nr_rings; i++) {
-		irq = &bp->irq_tbl[i];
+		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
+
+		irq = &bp->irq_tbl[map_idx];
 		if (irq->requested) {
 			if (irq->have_cpumask) {
 				irq_set_affinity_hint(irq->vector, NULL);
@@ -5982,14 +6115,25 @@ static int bnxt_request_irq(struct bnxt *bp)
 	int i, j, rc = 0;
 	unsigned long flags = 0;
 #ifdef CONFIG_RFS_ACCEL
-	struct cpu_rmap *rmap = bp->dev->rx_cpu_rmap;
+	struct cpu_rmap *rmap;
 #endif
 
+	rc = bnxt_setup_int_mode(bp);
+	if (rc) {
+		netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
+			   rc);
+		return rc;
+	}
+#ifdef CONFIG_RFS_ACCEL
+	rmap = bp->dev->rx_cpu_rmap;
+#endif
 	if (!(bp->flags & BNXT_FLAG_USING_MSIX))
 		flags = IRQF_SHARED;
 
 	for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
-		struct bnxt_irq *irq = &bp->irq_tbl[i];
+		int map_idx = bnxt_cp_num_to_irq_num(bp, i);
+		struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
+
 #ifdef CONFIG_RFS_ACCEL
 		if (rmap && bp->bnapi[i]->rx_ring) {
 			rc = irq_cpu_rmap_add(rmap, irq->vector);
@@ -6709,13 +6853,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
 		rc = bnxt_reserve_rings(bp);
 		if (rc)
 			return rc;
-
-		rc = bnxt_setup_int_mode(bp);
-		if (rc) {
-			netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
-				   rc);
-			return rc;
-		}
 	}
 	if ((bp->flags & BNXT_FLAG_RFS) &&
 	    !(bp->flags & BNXT_FLAG_USING_MSIX)) {
@@ -7478,8 +7615,10 @@ static void bnxt_sp_task(struct work_struct *work)
 		bnxt_hwrm_tunnel_dst_port_free(
 			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
 	}
-	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
+	if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
 		bnxt_hwrm_port_qstats(bp);
+		bnxt_hwrm_port_qstats_ext(bp);
+	}
 
 	if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
 		int rc;
@@ -8193,6 +8332,7 @@ static const struct net_device_ops bnxt_netdev_ops = {
 	.ndo_set_vf_rate	= bnxt_set_vf_bw,
 	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
 	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
+	.ndo_set_vf_trust	= bnxt_set_vf_trust,
 #endif
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= bnxt_poll_controller,
@@ -8390,9 +8530,15 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
 	if (sh)
 		bp->flags |= BNXT_FLAG_SHARED_RINGS;
 	dflt_rings = netif_get_num_default_rss_queues();
-	/* Reduce default rings to reduce memory usage on multi-port cards */
-	if (bp->port_count > 1)
-		dflt_rings = min_t(int, dflt_rings, 4);
+	/* Reduce default rings on multi-port cards so that total default
+	 * rings do not exceed CPU count.
+	 */
+	if (bp->port_count > 1) {
+		int max_rings =
+			max_t(int, num_online_cpus() / bp->port_count, 1);
+
+		dflt_rings = min_t(int, dflt_rings, max_rings);
+	}
 	rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
 	if (rc)
 		return rc;
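
The hunk above replaces the old fixed cap of 4 default rings per port with num_online_cpus() / port_count (floored at 1), so the default rings summed across all ports no longer exceed the CPU count. In isolation, with example values:

#include <stdio.h>

/* Restates the default-ring cap above; CPU and port counts are
 * example values. */
static int max_int(int a, int b) { return a > b ? a : b; }
static int min_int(int a, int b) { return a < b ? a : b; }

static int dflt_rings(int rss_default, int ncpus, int port_count)
{
	if (port_count > 1)
		return min_int(rss_default,
			       max_int(ncpus / port_count, 1));
	return rss_default;
}

int main(void)
{
	/* e.g. 8 default RSS queues, 8 CPUs: a 4-port NIC gets 2 each */
	printf("%d\n", dflt_rings(8, 8, 4));
	return 0;
}
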
@@ -8431,16 +8577,15 @@ int bnxt_restore_pf_fw_resources(struct bnxt *bp)
 	int rc;
 
 	ASSERT_RTNL();
-	if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
-		return 0;
-
 	bnxt_hwrm_func_qcaps(bp);
 
 	if (netif_running(bp->dev))
 		__bnxt_close_nic(bp, true, false);
 
+	bnxt_ulp_irq_stop(bp);
 	bnxt_clear_int_mode(bp);
 	rc = bnxt_init_int_mode(bp);
+	bnxt_ulp_irq_restart(bp, rc);
 
 	if (netif_running(bp->dev)) {
 		if (rc)