@@ -1214,8 +1214,8 @@ static void mlx4_en_netpoll(struct net_device *dev)
 	struct mlx4_en_cq *cq;
 	int i;
 
-	for (i = 0; i < priv->tx_ring_num; i++) {
-		cq = priv->tx_cq[i];
+	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
+		cq = priv->tx_cq[TX][i];
 		napi_schedule(&cq->napi);
 	}
 }
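Note: throughout this patch, the driver's single flat TX bookkeeping (tx_ring_num, tx_ring[], tx_cq[]) is split into per-type arrays indexed by a new TX ring type, so regular netdev rings (TX) and XDP_TX forwarding rings (TX_XDP) are tracked independently. The netpoll hunk above is the simplest instance: it now walks only the TX type. A reduced userspace model of the new layout follows; the TX/TX_XDP/MLX4_EN_NUM_TX_TYPES names and the array fields come from the diff, everything else is illustrative and not the real mlx4_en.h:

	/* Reduced model of the per-type TX bookkeeping; illustrative only. */
	enum {
		TX = 0,			/* regular netdev TX rings */
		TX_XDP,			/* XDP_TX forwarding rings */
		MLX4_EN_NUM_TX_TYPES
	};

	struct ring;			/* stand-in for struct mlx4_en_tx_ring */

	struct model_priv {
		unsigned int tx_ring_num[MLX4_EN_NUM_TX_TYPES];
		struct ring **tx_ring[MLX4_EN_NUM_TX_TYPES];
	};

	/* Every per-ring operation now walks two axes: type, then index. */
	static void for_each_tx_ring(struct model_priv *p, void (*fn)(struct ring *))
	{
		int t, i;

		for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
			for (i = 0; i < p->tx_ring_num[t]; i++)
				fn(p->tx_ring[t][i]);
	}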
@@ -1299,12 +1299,14 @@ static void mlx4_en_tx_timeout(struct net_device *dev)
 	if (netif_msg_timer(priv))
 		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);
 
-	for (i = 0; i < priv->tx_ring_num; i++) {
+	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
+		struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][i];
+
 		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, i)))
 			continue;
 		en_warn(priv, "TX timeout on queue: %d, QP: 0x%x, CQ: 0x%x, Cons: 0x%x, Prod: 0x%x\n",
-			i, priv->tx_ring[i]->qpn, priv->tx_ring[i]->cqn,
-			priv->tx_ring[i]->cons, priv->tx_ring[i]->prod);
+			i, tx_ring->qpn, tx_ring->cqn,
+			tx_ring->cons, tx_ring->prod);
 	}
 
 	priv->port_stats.tx_timeout++;
@@ -1328,7 +1330,7 @@ mlx4_en_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
 {
 	struct mlx4_en_cq *cq;
-	int i;
+	int i, t;
 
 	/* If we haven't received a specific coalescing setting
 	 * (module param), we set the moderation parameters as follows:
@@ -1353,10 +1355,12 @@ static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
 		priv->last_moder_bytes[i] = 0;
 	}
 
-	for (i = 0; i < priv->tx_ring_num; i++) {
-		cq = priv->tx_cq[i];
-		cq->moder_cnt = priv->tx_frames;
-		cq->moder_time = priv->tx_usecs;
+	for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
+		for (i = 0; i < priv->tx_ring_num[t]; i++) {
+			cq = priv->tx_cq[t][i];
+			cq->moder_cnt = priv->tx_frames;
+			cq->moder_time = priv->tx_usecs;
+		}
 	}
 
 	/* Reset auto-moderation params */
@@ -1526,19 +1530,13 @@ static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
 static void mlx4_en_init_recycle_ring(struct mlx4_en_priv *priv,
 				      int tx_ring_idx)
 {
-	struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[tx_ring_idx];
-	int rr_index;
+	struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX_XDP][tx_ring_idx];
+	int rr_index = tx_ring_idx;
 
-	rr_index = (priv->xdp_ring_num - priv->tx_ring_num) + tx_ring_idx;
-	if (rr_index >= 0) {
-		tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
-		tx_ring->recycle_ring = priv->rx_ring[rr_index];
-		en_dbg(DRV, priv,
-		       "Set tx_ring[%d]->recycle_ring = rx_ring[%d]\n",
-		       tx_ring_idx, rr_index);
-	} else {
-		tx_ring->recycle_ring = NULL;
-	}
+	tx_ring->free_tx_desc = mlx4_en_recycle_tx_desc;
+	tx_ring->recycle_ring = priv->rx_ring[rr_index];
+	en_dbg(DRV, priv, "Set tx_ring[%d][%d]->recycle_ring = rx_ring[%d]\n",
+	       TX_XDP, tx_ring_idx, rr_index);
 }
 
 int mlx4_en_start_port(struct net_device *dev)
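Note: with XDP rings now held in their own array, XDP TX ring i pairs with rx_ring[i] one-to-one, so the old offset arithmetic (and its rr_index < 0 case for non-XDP rings, which merely cleared recycle_ring) disappears. Clearing recycle_ring for regular rings moves into the start-port loop below, and the function is now called only for TX_XDP rings.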
@@ -1548,9 +1546,8 @@ int mlx4_en_start_port(struct net_device *dev)
 	struct mlx4_en_cq *cq;
 	struct mlx4_en_tx_ring *tx_ring;
 	int rx_index = 0;
-	int tx_index = 0;
 	int err = 0;
-	int i;
+	int i, t;
 	int j;
 	u8 mc_list[16] = {0};
 
@@ -1635,43 +1632,51 @@ int mlx4_en_start_port(struct net_device *dev)
 		goto rss_err;
 
 	/* Configure tx cq's and rings */
-	for (i = 0; i < priv->tx_ring_num; i++) {
-		/* Configure cq */
-		cq = priv->tx_cq[i];
-		err = mlx4_en_activate_cq(priv, cq, i);
-		if (err) {
-			en_err(priv, "Failed allocating Tx CQ\n");
-			goto tx_err;
-		}
-		err = mlx4_en_set_cq_moder(priv, cq);
-		if (err) {
-			en_err(priv, "Failed setting cq moderation parameters\n");
-			mlx4_en_deactivate_cq(priv, cq);
-			goto tx_err;
-		}
-		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
-		cq->buf->wqe_index = cpu_to_be16(0xffff);
+	for (t = 0 ; t < MLX4_EN_NUM_TX_TYPES; t++) {
+		u8 num_tx_rings_p_up = t == TX ? priv->num_tx_rings_p_up : 1;
 
-		/* Configure ring */
-		tx_ring = priv->tx_ring[i];
-		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
-					       i / priv->num_tx_rings_p_up);
-		if (err) {
-			en_err(priv, "Failed allocating Tx ring\n");
-			mlx4_en_deactivate_cq(priv, cq);
-			goto tx_err;
-		}
-		tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
-
-		mlx4_en_init_recycle_ring(priv, i);
+		for (i = 0; i < priv->tx_ring_num[t]; i++) {
+			/* Configure cq */
+			cq = priv->tx_cq[t][i];
+			err = mlx4_en_activate_cq(priv, cq, i);
+			if (err) {
+				en_err(priv, "Failed allocating Tx CQ\n");
+				goto tx_err;
+			}
+			err = mlx4_en_set_cq_moder(priv, cq);
+			if (err) {
+				en_err(priv, "Failed setting cq moderation parameters\n");
+				mlx4_en_deactivate_cq(priv, cq);
+				goto tx_err;
+			}
+			en_dbg(DRV, priv,
+			       "Resetting index of collapsed CQ:%d to -1\n", i);
+			cq->buf->wqe_index = cpu_to_be16(0xffff);
+
+			/* Configure ring */
+			tx_ring = priv->tx_ring[t][i];
+			err = mlx4_en_activate_tx_ring(priv, tx_ring,
+						       cq->mcq.cqn,
+						       i / num_tx_rings_p_up);
+			if (err) {
+				en_err(priv, "Failed allocating Tx ring\n");
+				mlx4_en_deactivate_cq(priv, cq);
+				goto tx_err;
+			}
+			if (t != TX_XDP) {
+				tx_ring->tx_queue = netdev_get_tx_queue(dev, i);
+				tx_ring->recycle_ring = NULL;
+			} else {
+				mlx4_en_init_recycle_ring(priv, i);
+			}
 
-		/* Arm CQ for TX completions */
-		mlx4_en_arm_cq(priv, cq);
+			/* Arm CQ for TX completions */
+			mlx4_en_arm_cq(priv, cq);
 
-		/* Set initial ownership of all Tx TXBBs to SW (1) */
-		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
-			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
-		++tx_index;
+			/* Set initial ownership of all Tx TXBBs to SW (1) */
+			for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
+				*((u32 *)(tx_ring->buf + j)) = 0xffffffff;
+		}
 	}
 
 	/* Configure port */
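Note: the merged loop activates both ring types, but the user-priority (UP) argument passed to mlx4_en_activate_tx_ring() differs: regular TX queues map in groups of num_tx_rings_p_up onto consecutive UPs, while the TX_XDP pass forces the divisor to 1. A standalone sketch of the TX-side mapping, with made-up sizes:

	#include <stdio.h>

	/* Illustrative only: queue-index to user-priority mapping used by
	 * the activation loop above; the sizes here are example values. */
	int main(void)
	{
		unsigned int num_tx_rings_p_up = 8;	/* example value */
		unsigned int tx_ring_num = 32;		/* example value */

		for (unsigned int i = 0; i < tx_ring_num; i++)
			printf("tx queue %2u -> UP %u\n", i, i / num_tx_rings_p_up);
		return 0;
	}

With these values, queues 0-7 land on UP 0, queues 8-15 on UP 1, and so on.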
@@ -1746,9 +1751,18 @@ int mlx4_en_start_port(struct net_device *dev)
 	return 0;
 
 tx_err:
-	while (tx_index--) {
-		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
-		mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
+	if (t == MLX4_EN_NUM_TX_TYPES) {
+		t--;
+		i = priv->tx_ring_num[t];
+	}
+	while (t >= 0) {
+		while (i--) {
+			mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
+			mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
+		}
+		if (!t--)
+			break;
+		i = priv->tx_ring_num[t];
 	}
 	mlx4_en_destroy_drop_qp(priv);
 rss_err:
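Note: the rewritten tx_err unwind must cope with two entry conditions: a goto from inside the nested loops, where t and i point at the pair that failed, and gotos from later in mlx4_en_start_port(), where the loops completed and t equals MLX4_EN_NUM_TX_TYPES with a stale i. The leading fixup normalizes the second case; the pair of while loops then deactivates exactly the rings that were activated, newest first. A userspace model of the same control flow (ring counts and the failure point are invented for the demonstration):

	#include <stdio.h>

	#define NUM_TYPES 2

	static const int ring_num[NUM_TYPES] = { 5, 3 };	/* example sizes */

	int main(void)
	{
		int t, i;
		int fail_t = 1, fail_i = 2;	/* pretend activating (1, 2) fails */

		for (t = 0; t < NUM_TYPES; t++)
			for (i = 0; i < ring_num[t]; i++)
				if (t == fail_t && i == fail_i)
					goto tx_err;
		/* (in the driver, later failures jump to tx_err from beyond
		 * the loops, with t == NUM_TYPES and i stale) */
	tx_err:
		if (t == NUM_TYPES) {
			t--;
			i = ring_num[t];
		}
		while (t >= 0) {
			while (i--)
				printf("deactivate (%d, %d)\n", t, i);
			if (!t--)
				break;
			i = ring_num[t];
		}
		return 0;
	}

It prints (1, 1), (1, 0), then (0, 4) down to (0, 0): exactly the pairs activated before the failure, in reverse order.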
@@ -1773,7 +1787,7 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 	struct mlx4_en_dev *mdev = priv->mdev;
 	struct mlx4_en_mc_list *mclist, *tmp;
 	struct ethtool_flow_id *flow, *tmp_flow;
-	int i;
+	int i, t;
 	u8 mc_list[16] = {0};
 
 	if (!priv->port_up) {
@@ -1859,14 +1873,17 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 	mlx4_en_destroy_drop_qp(priv);
 
 	/* Free TX Rings */
-	for (i = 0; i < priv->tx_ring_num; i++) {
-		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
-		mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
+	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+		for (i = 0; i < priv->tx_ring_num[t]; i++) {
+			mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[t][i]);
+			mlx4_en_deactivate_cq(priv, priv->tx_cq[t][i]);
+		}
 	}
 	msleep(10);
 
-	for (i = 0; i < priv->tx_ring_num; i++)
-		mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);
+	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++)
+		for (i = 0; i < priv->tx_ring_num[t]; i++)
+			mlx4_en_free_tx_buf(dev, priv->tx_ring[t][i]);
 
 	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
 		mlx4_en_delete_rss_steer_rules(priv);
@@ -1915,6 +1932,7 @@ static void mlx4_en_clear_stats(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_tx_ring **tx_ring;
 	int i;
 
 	if (!mlx4_is_slave(mdev->dev))
@@ -1932,15 +1950,16 @@ static void mlx4_en_clear_stats(struct net_device *dev)
 	       sizeof(priv->tx_priority_flowstats));
 	memset(&priv->pf_stats, 0, sizeof(priv->pf_stats));
 
-	for (i = 0; i < priv->tx_ring_num; i++) {
-		priv->tx_ring[i]->bytes = 0;
-		priv->tx_ring[i]->packets = 0;
-		priv->tx_ring[i]->tx_csum = 0;
-		priv->tx_ring[i]->tx_dropped = 0;
-		priv->tx_ring[i]->queue_stopped = 0;
-		priv->tx_ring[i]->wake_queue = 0;
-		priv->tx_ring[i]->tso_packets = 0;
-		priv->tx_ring[i]->xmit_more = 0;
+	tx_ring = priv->tx_ring[TX];
+	for (i = 0; i < priv->tx_ring_num[TX]; i++) {
+		tx_ring[i]->bytes = 0;
+		tx_ring[i]->packets = 0;
+		tx_ring[i]->tx_csum = 0;
+		tx_ring[i]->tx_dropped = 0;
+		tx_ring[i]->queue_stopped = 0;
+		tx_ring[i]->wake_queue = 0;
+		tx_ring[i]->tso_packets = 0;
+		tx_ring[i]->xmit_more = 0;
 	}
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		priv->rx_ring[i]->bytes = 0;
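Note: only the regular TX rings feed the netdev statistics, so the stats-reset loop deliberately touches priv->tx_ring[TX] alone; hoisting the array into a local tx_ring avoids re-indexing priv->tx_ring[TX][i] on every counter and keeps the lines short.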
@@ -1996,17 +2015,20 @@ static int mlx4_en_close(struct net_device *dev)
 
 static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 {
-	int i;
+	int i, t;
 
 #ifdef CONFIG_RFS_ACCEL
 	priv->dev->rx_cpu_rmap = NULL;
 #endif
 
-	for (i = 0; i < priv->tx_ring_num; i++) {
-		if (priv->tx_ring && priv->tx_ring[i])
-			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
-		if (priv->tx_cq && priv->tx_cq[i])
-			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+		for (i = 0; i < priv->tx_ring_num[t]; i++) {
+			if (priv->tx_ring[t] && priv->tx_ring[t][i])
+				mlx4_en_destroy_tx_ring(priv,
+							&priv->tx_ring[t][i]);
+			if (priv->tx_cq[t] && priv->tx_cq[t][i])
+				mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
+		}
 	}
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
@@ -2022,20 +2044,22 @@ static void mlx4_en_free_resources(struct mlx4_en_priv *priv)
 static int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
 {
 	struct mlx4_en_port_profile *prof = priv->prof;
-	int i;
+	int i, t;
 	int node;
 
 	/* Create tx Rings */
-	for (i = 0; i < priv->tx_ring_num; i++) {
-		node = cpu_to_node(i % num_online_cpus());
-		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
-				      prof->tx_ring_size, i, TX, node))
-			goto err;
-
-		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
-					   prof->tx_ring_size, TXBB_SIZE,
-					   node, i))
-			goto err;
+	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+		for (i = 0; i < priv->tx_ring_num[t]; i++) {
+			node = cpu_to_node(i % num_online_cpus());
+			if (mlx4_en_create_cq(priv, &priv->tx_cq[t][i],
+					      prof->tx_ring_size, i, t, node))
+				goto err;
+
+			if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[t][i],
+						   prof->tx_ring_size,
+						   TXBB_SIZE, node, i))
+				goto err;
+		}
 	}
 
 	/* Create rx Rings */
@@ -2067,11 +2091,14 @@ err:
 		if (priv->rx_cq[i])
 			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
 	}
-	for (i = 0; i < priv->tx_ring_num; i++) {
-		if (priv->tx_ring[i])
-			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
-		if (priv->tx_cq[i])
-			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
+	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+		for (i = 0; i < priv->tx_ring_num[t]; i++) {
+			if (priv->tx_ring[t][i])
+				mlx4_en_destroy_tx_ring(priv,
+							&priv->tx_ring[t][i]);
+			if (priv->tx_cq[t][i])
+				mlx4_en_destroy_cq(priv, &priv->tx_cq[t][i]);
+		}
 	}
 	return -ENOMEM;
 }
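Note: both teardown paths, mlx4_en_free_resources() above and this allocation error path, can run against partially built state, which is why every slot is NULL-checked: the per-type pointer arrays are kzalloc()'d up front, so entries whose create step never ran are still NULL.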
@@ -2088,10 +2115,11 @@ static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
 			     struct mlx4_en_priv *src,
 			     struct mlx4_en_port_profile *prof)
 {
+	int t;
+
 	memcpy(&dst->hwtstamp_config, &prof->hwtstamp_config,
 	       sizeof(dst->hwtstamp_config));
 	dst->num_tx_rings_p_up = src->mdev->profile.num_tx_rings_p_up;
-	dst->tx_ring_num = prof->tx_ring_num;
 	dst->rx_ring_num = prof->rx_ring_num;
 	dst->flags = prof->flags;
 	dst->mdev = src->mdev;
@@ -2101,33 +2129,50 @@ static int mlx4_en_copy_priv(struct mlx4_en_priv *dst,
 	dst->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
 					 DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
 
-	dst->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
-			       GFP_KERNEL);
-	if (!dst->tx_ring)
-		return -ENOMEM;
+	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+		dst->tx_ring_num[t] = prof->tx_ring_num[t];
+		if (!dst->tx_ring_num[t])
+			continue;
 
-	dst->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
-			     GFP_KERNEL);
-	if (!dst->tx_cq) {
-		kfree(dst->tx_ring);
-		return -ENOMEM;
+		dst->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) *
+					  MAX_TX_RINGS, GFP_KERNEL);
+		if (!dst->tx_ring[t])
+			goto err_free_tx;
+
+		dst->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
+					MAX_TX_RINGS, GFP_KERNEL);
+		if (!dst->tx_cq[t]) {
+			kfree(dst->tx_ring[t]);
+			goto err_free_tx;
+		}
 	}
+
 	return 0;
+
+err_free_tx:
+	while (t--) {
+		kfree(dst->tx_ring[t]);
+		kfree(dst->tx_cq[t]);
+	}
+	return -ENOMEM;
 }
 
 static void mlx4_en_update_priv(struct mlx4_en_priv *dst,
 				struct mlx4_en_priv *src)
 {
+	int t;
 	memcpy(dst->rx_ring, src->rx_ring,
 	       sizeof(struct mlx4_en_rx_ring *) * src->rx_ring_num);
 	memcpy(dst->rx_cq, src->rx_cq,
 	       sizeof(struct mlx4_en_cq *) * src->rx_ring_num);
 	memcpy(&dst->hwtstamp_config, &src->hwtstamp_config,
 	       sizeof(dst->hwtstamp_config));
-	dst->tx_ring_num = src->tx_ring_num;
+	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+		dst->tx_ring_num[t] = src->tx_ring_num[t];
+		dst->tx_ring[t] = src->tx_ring[t];
+		dst->tx_cq[t] = src->tx_cq[t];
+	}
 	dst->rx_ring_num = src->rx_ring_num;
-	dst->tx_ring = src->tx_ring;
-	dst->tx_cq = src->tx_cq;
 	memcpy(dst->prof, src->prof, sizeof(struct mlx4_en_port_profile));
 }
 
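Note: mlx4_en_copy_priv() now allocates one ring array and one CQ array per type and unwinds with the usual while (t--) idiom on failure. The tx_cq failure branch frees the current type's tx_ring before jumping, since err_free_tx only walks the types below t; and because kfree(NULL) is a no-op, types skipped via continue are harmless to free. The same idiom, reduced to a standalone sketch (the names here are invented):

	#include <stdlib.h>

	#define NUM_TYPES 2

	/* Illustrative partial-unwind allocation: on failure, free only
	 * the types already set up, newest first. */
	static int alloc_per_type(void *bufs[NUM_TYPES], size_t len)
	{
		int t;

		for (t = 0; t < NUM_TYPES; t++) {
			bufs[t] = calloc(1, len);
			if (!bufs[t])
				goto err;
		}
		return 0;

	err:
		while (t--) {		/* visits t-1 .. 0; the failed slot is NULL */
			free(bufs[t]);
			bufs[t] = NULL;
		}
		return -1;
	}

	int main(void)
	{
		void *bufs[NUM_TYPES] = { 0 };

		return alloc_per_type(bufs, 64);
	}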
@@ -2135,14 +2180,18 @@ int mlx4_en_try_alloc_resources(struct mlx4_en_priv *priv,
 				struct mlx4_en_priv *tmp,
 				struct mlx4_en_port_profile *prof)
 {
+	int t;
+
 	mlx4_en_copy_priv(tmp, priv, prof);
 
 	if (mlx4_en_alloc_resources(tmp)) {
 		en_warn(priv,
 			"%s: Resource allocation failed, using previous configuration\n",
 			__func__);
-		kfree(tmp->tx_ring);
-		kfree(tmp->tx_cq);
+		for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+			kfree(tmp->tx_ring[t]);
+			kfree(tmp->tx_cq[t]);
+		}
 		return -ENOMEM;
 	}
 	return 0;
@@ -2161,6 +2210,7 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 	struct mlx4_en_dev *mdev = priv->mdev;
 	bool shutdown = mdev->dev->persist->interface_state &
 			MLX4_INTERFACE_STATE_SHUTDOWN;
+	int t;
 
 	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);
 
@@ -2197,8 +2247,10 @@ void mlx4_en_destroy_netdev(struct net_device *dev)
 
 	mlx4_en_free_resources(priv);
 
-	kfree(priv->tx_ring);
-	kfree(priv->tx_cq);
+	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+		kfree(priv->tx_ring[t]);
+		kfree(priv->tx_cq[t]);
+	}
 
 	if (!shutdown)
 		free_netdev(dev);
@@ -2214,7 +2266,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
 	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
 	       dev->mtu, new_mtu);
 
-	if (priv->xdp_ring_num && MLX4_EN_EFF_MTU(new_mtu) > FRAG_SZ0) {
+	if (priv->tx_ring_num[TX_XDP] && MLX4_EN_EFF_MTU(new_mtu) > FRAG_SZ0) {
 		en_err(priv, "MTU size:%d requires frags but XDP running\n",
 		       new_mtu);
 		return -EOPNOTSUPP;
@@ -2605,7 +2657,7 @@ static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
 static int mlx4_en_set_tx_maxrate(struct net_device *dev, int queue_index, u32 maxrate)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
-	struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[queue_index];
+	struct mlx4_en_tx_ring *tx_ring = priv->tx_ring[TX][queue_index];
 	struct mlx4_update_qp_params params;
 	int err;
 
@@ -2633,18 +2685,21 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 	struct mlx4_en_dev *mdev = priv->mdev;
+	struct mlx4_en_port_profile new_prof;
 	struct bpf_prog *old_prog;
+	struct mlx4_en_priv *tmp;
+	int tx_changed = 0;
 	int xdp_ring_num;
 	int port_up = 0;
 	int err;
 	int i;
 
-	xdp_ring_num = prog ? ALIGN(priv->rx_ring_num, MLX4_EN_NUM_UP) : 0;
+	xdp_ring_num = prog ? priv->rx_ring_num : 0;
 
 	/* No need to reconfigure buffers when simply swapping the
 	 * program for a new one.
 	 */
-	if (priv->xdp_ring_num == xdp_ring_num) {
+	if (priv->tx_ring_num[TX_XDP] == xdp_ring_num) {
 		if (prog) {
 			prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
 			if (IS_ERR(prog))
@@ -2668,28 +2723,41 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
 		return -EOPNOTSUPP;
 	}
 
-	if (priv->tx_ring_num < xdp_ring_num + MLX4_EN_NUM_UP) {
-		en_err(priv,
-		       "Minimum %d tx channels required to run XDP\n",
-		       (xdp_ring_num + MLX4_EN_NUM_UP) / MLX4_EN_NUM_UP);
-		return -EINVAL;
-	}
+	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
+	if (!tmp)
+		return -ENOMEM;
 
 	if (prog) {
 		prog = bpf_prog_add(prog, priv->rx_ring_num - 1);
-		if (IS_ERR(prog))
-			return PTR_ERR(prog);
+		if (IS_ERR(prog)) {
+			err = PTR_ERR(prog);
+			goto out;
+		}
 	}
 
 	mutex_lock(&mdev->state_lock);
+	memcpy(&new_prof, priv->prof, sizeof(struct mlx4_en_port_profile));
+	new_prof.tx_ring_num[TX_XDP] = xdp_ring_num;
+
+	if (priv->tx_ring_num[TX] + xdp_ring_num > MAX_TX_RINGS) {
+		tx_changed = 1;
+		new_prof.tx_ring_num[TX] =
+			MAX_TX_RINGS - ALIGN(xdp_ring_num, MLX4_EN_NUM_UP);
+		en_warn(priv, "Reducing the number of TX rings, to not exceed the max total rings number.\n");
+	}
+
+	err = mlx4_en_try_alloc_resources(priv, tmp, &new_prof);
+	if (err)
+		goto unlock_out;
+
 	if (priv->port_up) {
 		port_up = 1;
 		mlx4_en_stop_port(dev, 1);
 	}
 
-	priv->xdp_ring_num = xdp_ring_num;
-	netif_set_real_num_tx_queues(dev, priv->tx_ring_num -
-							priv->xdp_ring_num);
+	mlx4_en_safe_replace_resources(priv, tmp);
+	if (tx_changed)
+		netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
 
 	for (i = 0; i < priv->rx_ring_num; i++) {
 		old_prog = rcu_dereference_protected(
@@ -2709,15 +2777,18 @@ static int mlx4_xdp_set(struct net_device *dev, struct bpf_prog *prog)
 		}
 	}
 
+unlock_out:
 	mutex_unlock(&mdev->state_lock);
-	return 0;
+out:
+	kfree(tmp);
+	return err;
 }
 
 static bool mlx4_xdp_attached(struct net_device *dev)
 {
 	struct mlx4_en_priv *priv = netdev_priv(dev);
 
-	return !!priv->xdp_ring_num;
+	return !!priv->tx_ring_num[TX_XDP];
 }
 
 static int mlx4_xdp(struct net_device *dev, struct netdev_xdp *xdp)
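Note: mlx4_xdp_set() no longer rejects configurations lacking spare TX channels. Instead it builds a new port profile, pre-allocates every ring it needs into a shadow priv (tmp), and only then stops the port, swaps the resources in with mlx4_en_safe_replace_resources(), and restarts; if the combined ring count would exceed MAX_TX_RINGS, the profile trims the regular TX rings and the real queue count is updated after the swap. A failure before the swap leaves the running configuration untouched. The shape of that allocate-then-commit flow as a generic sketch (struct and names invented, not the driver's API):

	#include <stdlib.h>

	struct cfg {
		int xdp_rings;
		void *resources;	/* stand-in for the ring/CQ arrays */
	};

	/* Illustrative allocate-then-commit reconfiguration: the live
	 * config is replaced only once the new one is fully built. */
	static int reconfigure(struct cfg *live, int xdp_rings)
	{
		struct cfg next = *live;

		next.xdp_rings = xdp_rings;
		next.resources = malloc(1024);	/* "try_alloc_resources" */
		if (!next.resources)
			return -1;		/* live config untouched */

		/* quiesce ("stop port"), then commit, then resume */
		free(live->resources);
		*live = next;
		return 0;
	}

	int main(void)
	{
		struct cfg live = { 0, malloc(1024) };

		return reconfigure(&live, 8);
	}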
@@ -3061,7 +3132,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 {
 	struct net_device *dev;
 	struct mlx4_en_priv *priv;
-	int i;
+	int i, t;
 	int err;
 
 	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
@@ -3069,7 +3140,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	if (dev == NULL)
 		return -ENOMEM;
 
-	netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
+	netif_set_real_num_tx_queues(dev, prof->tx_ring_num[TX]);
 	netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
 
 	SET_NETDEV_DEV(dev, &mdev->dev->persist->pdev->dev);
@@ -3106,21 +3177,27 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
 				       MLX4_WQE_CTRL_SOLICITED);
 	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
-	priv->tx_ring_num = prof->tx_ring_num;
 	priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
 	netdev_rss_key_fill(priv->rss_key, sizeof(priv->rss_key));
 
-	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
-				GFP_KERNEL);
-	if (!priv->tx_ring) {
-		err = -ENOMEM;
-		goto out;
-	}
-	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq *) * MAX_TX_RINGS,
-			      GFP_KERNEL);
-	if (!priv->tx_cq) {
-		err = -ENOMEM;
-		goto out;
+	for (t = 0; t < MLX4_EN_NUM_TX_TYPES; t++) {
+		priv->tx_ring_num[t] = prof->tx_ring_num[t];
+		if (!priv->tx_ring_num[t])
+			continue;
+
+		priv->tx_ring[t] = kzalloc(sizeof(struct mlx4_en_tx_ring *) *
+					   MAX_TX_RINGS, GFP_KERNEL);
+		if (!priv->tx_ring[t]) {
+			err = -ENOMEM;
+			goto err_free_tx;
+		}
+		priv->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
+					 MAX_TX_RINGS, GFP_KERNEL);
+		if (!priv->tx_cq[t]) {
+			kfree(priv->tx_ring[t]);
+			err = -ENOMEM;
+			goto out;
+		}
 	}
 	priv->rx_ring_num = prof->rx_ring_num;
 	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
@@ -3203,7 +3280,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	else
 		dev->netdev_ops = &mlx4_netdev_ops;
 	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
-	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
+	netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
 	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
 
 	dev->ethtool_ops = &mlx4_en_ethtool_ops;
@@ -3303,7 +3380,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	netif_carrier_off(dev);
 	mlx4_en_set_default_moderation(priv);
 
-	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
+	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num[TX]);
 	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);
 
 	mlx4_en_update_loopback_state(priv->dev, priv->dev->features);
@@ -3363,6 +3440,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 
 	return 0;
 
+err_free_tx:
+	while (t--) {
+		kfree(priv->tx_ring[t]);
+		kfree(priv->tx_cq[t]);
+	}
 out:
 	mlx4_en_destroy_netdev(dev);
 	return err;