@@ -423,6 +423,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 	rq->ix = c->ix;
 	rq->mdev = mdev;
 	rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+	rq->stats = &c->priv->channel_stats[c->ix].rq;
 
 	rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
 	if (IS_ERR(rq->xdp_prog)) {
@@ -646,8 +647,8 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq,
 						MLX5_ADAPTER_PAGE_SHIFT);
 	MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
 
-	mlx5_fill_page_array(&rq->wq_ctrl.buf,
-			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+	mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
+				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
 
 	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
 
@@ -836,13 +837,15 @@ err_free_rq:
 static void mlx5e_activate_rq(struct mlx5e_rq *rq)
 {
 	struct mlx5e_icosq *sq = &rq->channel->icosq;
-	u16 pi = sq->pc & sq->wq.sz_m1;
+	struct mlx5_wq_cyc *wq = &sq->wq;
 	struct mlx5e_tx_wqe *nopwqe;
 
+	u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+
 	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
 	sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
-	nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
-	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
+	nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+	mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
 }
 
 static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
@@ -885,6 +888,7 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
 {
 	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
 	struct mlx5_core_dev *mdev = c->mdev;
+	struct mlx5_wq_cyc *wq = &sq->wq;
 	int err;
 
 	sq->pdev = c->pdev;
@@ -894,10 +898,10 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
 	sq->min_inline_mode = params->tx_min_inline_mode;
 
 	param->wq.db_numa_node = cpu_to_node(c->cpu);
-	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
+	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
 	if (err)
 		return err;
-	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
+	wq->db = &wq->db[MLX5_SND_DBR];
 
 	err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
 	if (err)
@@ -940,23 +944,22 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
 {
 	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
 	struct mlx5_core_dev *mdev = c->mdev;
+	struct mlx5_wq_cyc *wq = &sq->wq;
 	int err;
 
 	sq->channel = c;
 	sq->uar_map = mdev->mlx5e_res.bfreg.map;
 
 	param->wq.db_numa_node = cpu_to_node(c->cpu);
-	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
+	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
 	if (err)
 		return err;
-	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
+	wq->db = &wq->db[MLX5_SND_DBR];
 
 	err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
 	if (err)
 		goto err_sq_wq_destroy;
 
-	sq->edge = (sq->wq.sz_m1 + 1) - MLX5E_ICOSQ_MAX_WQEBBS;
-
 	return 0;
 
 err_sq_wq_destroy:
@@ -1001,10 +1004,12 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
 			     int txq_ix,
 			     struct mlx5e_params *params,
 			     struct mlx5e_sq_param *param,
-			     struct mlx5e_txqsq *sq)
+			     struct mlx5e_txqsq *sq,
+			     int tc)
 {
 	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
 	struct mlx5_core_dev *mdev = c->mdev;
+	struct mlx5_wq_cyc *wq = &sq->wq;
 	int err;
 
 	sq->pdev = c->pdev;
@@ -1015,6 +1020,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
 	sq->txq_ix = txq_ix;
 	sq->uar_map = mdev->mlx5e_res.bfreg.map;
 	sq->min_inline_mode = params->tx_min_inline_mode;
+	sq->stats = &c->priv->channel_stats[c->ix].sq[tc];
 	INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover);
 	if (MLX5_IPSEC_DEV(c->priv->mdev))
 		set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
@@ -1022,10 +1028,10 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
 		set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
 
 	param->wq.db_numa_node = cpu_to_node(c->cpu);
-	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
+	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
 	if (err)
 		return err;
-	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
+	wq->db = &wq->db[MLX5_SND_DBR];
 
 	err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
 	if (err)
@@ -1034,8 +1040,6 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
 	INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
 	sq->dim.mode = params->tx_cq_moderation.cq_period_mode;
 
-	sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
-
 	return 0;
 
 err_sq_wq_destroy:
@@ -1095,7 +1099,8 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
 					  MLX5_ADAPTER_PAGE_SHIFT);
 	MLX5_SET64(wq, wq, dbr_addr, csp->wq_ctrl->db.dma);
 
-	mlx5_fill_page_array(&csp->wq_ctrl->buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+	mlx5_fill_page_frag_array(&csp->wq_ctrl->buf,
+				  (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
 
 	err = mlx5_core_create_sq(mdev, in, inlen, sqn);
@@ -1174,13 +1179,14 @@ static int mlx5e_open_txqsq(struct mlx5e_channel *c,
 			    int txq_ix,
 			    struct mlx5e_params *params,
 			    struct mlx5e_sq_param *param,
-			    struct mlx5e_txqsq *sq)
+			    struct mlx5e_txqsq *sq,
+			    int tc)
 {
 	struct mlx5e_create_sq_param csp = {};
 	u32 tx_rate;
 	int err;
 
-	err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq);
+	err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc);
 	if (err)
 		return err;
 
@@ -1238,6 +1244,7 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
 static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
 {
 	struct mlx5e_channel *c = sq->channel;
+	struct mlx5_wq_cyc *wq = &sq->wq;
 
 	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
 	/* prevent netif_tx_wake_queue */
@@ -1246,12 +1253,13 @@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
 	netif_tx_disable_queue(sq->txq);
 
 	/* last doorbell out, godspeed .. */
-	if (mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1)) {
+	if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
+		u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
 		struct mlx5e_tx_wqe *nop;
 
-		sq->db.wqe_info[(sq->pc & sq->wq.sz_m1)].skb = NULL;
-		nop = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
-		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nop->ctrl);
+		sq->db.wqe_info[pi].skb = NULL;
+		nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+		mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
 	}
 }
 
@@ -1366,7 +1374,7 @@ static void mlx5e_sq_recover(struct work_struct *work)
 		return;
 
 	mlx5e_reset_txqsq_cc_pc(sq);
-	sq->stats.recover++;
+	sq->stats->recover++;
 	recover->last_recover = jiffies;
 	mlx5e_activate_txqsq(sq);
 }
@@ -1535,7 +1543,7 @@ static int mlx5e_alloc_cq(struct mlx5e_channel *c,
 
 static void mlx5e_free_cq(struct mlx5e_cq *cq)
 {
-	mlx5_cqwq_destroy(&cq->wq_ctrl);
+	mlx5_wq_destroy(&cq->wq_ctrl);
 }
 
 static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
@@ -1551,7 +1559,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 	int err;
 
 	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
-		sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
+		sizeof(u64) * cq->wq_ctrl.buf.npages;
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;
@@ -1560,7 +1568,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 
 	memcpy(cqc, param->cqc, sizeof(param->cqc));
 
-	mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf,
+	mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
 				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
 
 	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
@@ -1568,7 +1576,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 	MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
 	MLX5_SET(cqc, cqc, c_eqn, eqn);
 	MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index);
-	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
+	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
 					    MLX5_ADAPTER_PAGE_SHIFT);
 	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
 
@@ -1661,14 +1669,14 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
 			  struct mlx5e_params *params,
 			  struct mlx5e_channel_param *cparam)
 {
-	int err;
-	int tc;
+	struct mlx5e_priv *priv = c->priv;
+	int err, tc, max_nch = priv->profile->max_nch(priv->mdev);
 
 	for (tc = 0; tc < params->num_tc; tc++) {
-		int txq_ix = c->ix + tc * params->num_channels;
+		int txq_ix = c->ix + tc * max_nch;
 
 		err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
-				       params, &cparam->sq, &c->sq[tc]);
+				       params, &cparam->sq, &c->sq[tc], tc);
 		if (err)
 			goto err_close_sqs;
 	}
@@ -1798,6 +1806,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
 	c->num_tc = params->num_tc;
 	c->xdp = !!params->xdp_prog;
+	c->stats = &priv->channel_stats[ix].ch;
 
 	mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
 	c->irq_desc = irq_to_desc(irq);
@@ -2630,7 +2639,7 @@ static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv)
 	struct mlx5e_txqsq *sq;
 	int i, tc;
 
-	for (i = 0; i < priv->channels.num; i++)
+	for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
 		for (tc = 0; tc < priv->profile->max_tc; tc++)
 			priv->channel_tc2txq[i][tc] = i + tc * priv->channels.num;
 
@@ -2654,6 +2663,9 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
 
 	mlx5e_build_channels_tx_maps(priv);
 	mlx5e_activate_channels(&priv->channels);
+	write_lock(&priv->stats_lock);
+	priv->channels_active = true;
+	write_unlock(&priv->stats_lock);
 	netif_tx_start_all_queues(priv->netdev);
 
 	if (MLX5_VPORT_MANAGER(priv->mdev))
@@ -2675,6 +2687,9 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
 	 */
 	netif_tx_stop_all_queues(priv->netdev);
 	netif_tx_disable(priv->netdev);
+	write_lock(&priv->stats_lock);
+	priv->channels_active = false;
+	write_unlock(&priv->stats_lock);
 	mlx5e_deactivate_channels(&priv->channels);
 }
 
@@ -3129,6 +3144,8 @@ static int mlx5e_setup_tc_mqprio(struct net_device *netdev,
 	if (err)
 		goto out;
 
+	priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
+				    new_channels.params.num_tc);
 	mlx5e_switch_priv_channels(priv, &new_channels, NULL);
 out:
 	mutex_unlock(&priv->state_lock);
@@ -3219,6 +3236,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 		stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
 		stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
 	} else {
+		mlx5e_grp_sw_update_stats(priv);
 		stats->rx_packets = sstats->rx_packets;
 		stats->rx_bytes = sstats->rx_bytes;
 		stats->tx_packets = sstats->tx_packets;
@@ -3815,7 +3833,7 @@ static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
 		return false;
 
 	netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->eqn);
-	sq->channel->stats.eq_rearm++;
+	sq->channel->stats->eq_rearm++;
 	return true;
 }
 
@@ -4239,11 +4257,13 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
 	priv->profile = profile;
 	priv->ppriv = ppriv;
 	priv->msglevel = MLX5E_MSG_LEVEL;
+	priv->max_opened_tc = 1;
 
 	mlx5e_build_nic_params(mdev, &priv->channels.params,
 			       profile->max_nch(mdev), netdev->mtu);
 
 	mutex_init(&priv->state_lock);
+	rwlock_init(&priv->stats_lock);
 
 	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
 	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
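
Taken together, the stats hunks above change where counters live: instead of being embedded in the RQ/SQ/channel structs (which are freed and reallocated on every reconfiguration), rq->stats, sq->stats and c->stats become pointers into the persistent priv->channel_stats[] array, indexed up to profile->max_nch() rather than the live channel count, and readers such as mlx5e_get_stats() fold the array on demand via mlx5e_grp_sw_update_stats(). Below is a minimal standalone C sketch of that pattern only; the type and counter field names are illustrative assumptions, not the mlx5e definitions, and the channels_active/stats_lock gating from the patch is omitted.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative model, not mlx5e code: counters persist in a per-channel
 * array sized for the maximum channel count, so closing and reopening
 * channels (fewer rings, different TC count) never resets totals. */
#define MAX_NCH 8
#define MAX_TC  4

struct ring_stats { uint64_t packets, bytes; };

struct channel_stats {			/* one slot per possible channel */
	struct ring_stats rq;
	struct ring_stats sq[MAX_TC];
};

struct priv {
	struct channel_stats channel_stats[MAX_NCH]; /* survives reconfig */
	int max_opened_tc;		/* high-water mark, as in the patch */
};

/* Fold every slot ever used into one software total (cf. the driver
 * aggregating up to max_nch instead of the live channel count). */
static void sw_stats_fold(const struct priv *priv,
			  struct ring_stats *rx, struct ring_stats *tx)
{
	memset(rx, 0, sizeof(*rx));
	memset(tx, 0, sizeof(*tx));
	for (int i = 0; i < MAX_NCH; i++) {
		const struct channel_stats *cs = &priv->channel_stats[i];

		rx->packets += cs->rq.packets;
		rx->bytes   += cs->rq.bytes;
		for (int tc = 0; tc < priv->max_opened_tc; tc++) {
			tx->packets += cs->sq[tc].packets;
			tx->bytes   += cs->sq[tc].bytes;
		}
	}
}

int main(void)
{
	struct priv p = { .max_opened_tc = 2 };
	struct ring_stats rx, tx;

	p.channel_stats[0].rq.packets = 10;	/* pretend traffic */
	p.channel_stats[0].sq[1].packets = 7;
	sw_stats_fold(&p, &rx, &tx);
	printf("rx %llu tx %llu\n",
	       (unsigned long long)rx.packets, (unsigned long long)tx.packets);
	return 0;
}

The high-water mark matters because a TC that was opened and later closed may still hold counts in its sq[tc] slots; folding up to max_opened_tc rather than the current num_tc keeps those from silently disappearing, which is exactly why the patch tracks priv->max_opened_tc in mlx5e_setup_tc_mqprio().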