@@ -468,6 +468,53 @@ static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
 	memset(&dst_mac[ETH_ALEN], 0, 2);
 }
 
+
+static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *addr,
+				    int qpn, u64 *reg_id)
+{
+	int err;
+	struct mlx4_spec_list spec_eth_outer = { {NULL} };
+	struct mlx4_spec_list spec_vxlan = { {NULL} };
+	struct mlx4_spec_list spec_eth_inner = { {NULL} };
+
+	struct mlx4_net_trans_rule rule = {
+		.queue_mode = MLX4_NET_TRANS_Q_FIFO,
+		.exclusive = 0,
+		.allow_loopback = 1,
+		.promisc_mode = MLX4_FS_REGULAR,
+		.priority = MLX4_DOMAIN_NIC,
+	};
+
+	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
+
+	if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+		return 0; /* do nothing */
+
+	rule.port = priv->port;
+	rule.qpn = qpn;
+	INIT_LIST_HEAD(&rule.list);
+
+	spec_eth_outer.id = MLX4_NET_TRANS_RULE_ID_ETH;
+	memcpy(spec_eth_outer.eth.dst_mac, addr, ETH_ALEN);
+	memcpy(spec_eth_outer.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
+
+	spec_vxlan.id = MLX4_NET_TRANS_RULE_ID_VXLAN;	/* any vxlan header */
+	spec_eth_inner.id = MLX4_NET_TRANS_RULE_ID_ETH;	/* any inner eth header */
+
+	list_add_tail(&spec_eth_outer.list, &rule.list);
+	list_add_tail(&spec_vxlan.list, &rule.list);
+	list_add_tail(&spec_eth_inner.list, &rule.list);
+
+	err = mlx4_flow_attach(priv->mdev->dev, &rule, reg_id);
+	if (err) {
+		en_err(priv, "failed to add vxlan steering rule, err %d\n", err);
+		return err;
+	}
+	en_dbg(DRV, priv, "added vxlan steering rule, mac %pM reg_id %llx\n", addr, *reg_id);
+	return 0;
+}
+
+
 static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
 				unsigned char *mac, int *qpn, u64 *reg_id)
 {
@@ -585,6 +632,10 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
 	if (err)
 		goto steer_err;
 
+	if (mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, *qpn,
+				     &priv->tunnel_reg_id))
+		goto tunnel_err;
+
 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry) {
 		err = -ENOMEM;
@@ -599,6 +650,9 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
 	return 0;
 
 alloc_err:
+	if (priv->tunnel_reg_id)
+		mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+tunnel_err:
 	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);
 
 steer_err:
@@ -642,6 +696,11 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
 			}
 		}
 
+		if (priv->tunnel_reg_id) {
+			mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+			priv->tunnel_reg_id = 0;
+		}
+
 		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
 		       priv->port, qpn);
 		mlx4_qp_release_range(dev, qpn, 1);
@@ -1044,6 +1103,12 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
 				if (err)
 					en_err(priv, "Fail to detach multicast address\n");
 
+				if (mclist->tunnel_reg_id) {
+					err = mlx4_flow_detach(priv->mdev->dev, mclist->tunnel_reg_id);
+					if (err)
+						en_err(priv, "Failed to detach multicast address\n");
+				}
+
 				/* remove from list */
 				list_del(&mclist->list);
 				kfree(mclist);
@@ -1061,6 +1126,10 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
 				if (err)
 					en_err(priv, "Fail to attach multicast address\n");
 
+				err = mlx4_en_tunnel_steer_add(priv, &mc_list[10], priv->base_qpn,
+							       &mclist->tunnel_reg_id);
+				if (err)
+					en_err(priv, "Failed to attach multicast address\n");
 			}
 		}
 	}
@@ -1598,6 +1667,15 @@ int mlx4_en_start_port(struct net_device *dev)
 		goto tx_err;
 	}
 
+	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC);
+		if (err) {
+			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
+			       err);
+			goto tx_err;
+		}
+	}
+
 	/* Init port */
 	en_dbg(HW, priv, "Initializing port\n");
 	err = mlx4_INIT_PORT(mdev->dev, priv->port);
@@ -2400,6 +2478,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
 		dev->priv_flags |= IFF_UNICAST_FLT;
 
+	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+		dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
+					NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
+		dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+		dev->features |= NETIF_F_GSO_UDP_TUNNEL;
+	}
+
 	mdev->pndev[port] = dev;
 
 	netif_carrier_off(dev);
@@ -2429,6 +2514,15 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
 		goto out;
 	}
 
+	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+		err = mlx4_SET_PORT_VXLAN(mdev->dev, priv->port, VXLAN_STEER_BY_OUTER_MAC);
+		if (err) {
+			en_err(priv, "Failed setting port L2 tunnel configuration, err %d\n",
+			       err);
+			goto out;
+		}
+	}
+
 	/* Init port */
 	en_warn(priv, "Initializing port\n");
 	err = mlx4_INIT_PORT(mdev->dev, priv->port);
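
For context, a minimal sketch (not part of the patch) of how the new helper is meant to be paired with teardown. It uses only functions that appear in the hunks above, mlx4_en_tunnel_steer_add() and mlx4_flow_detach(), with the same signatures; the function name example_vxlan_steer and the surrounding setup (a valid priv and an already-reserved qpn, driver headers included) are assumptions for illustration only.

static int example_vxlan_steer(struct mlx4_en_priv *priv, int qpn)
{
	u64 reg_id = 0;
	int err;

	/* The helper returns 0 and leaves reg_id untouched unless the device
	 * reports MLX4_TUNNEL_OFFLOAD_MODE_VXLAN, so it is safe to call
	 * unconditionally, as mlx4_en_get_qp() does above.
	 */
	err = mlx4_en_tunnel_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
	if (err)
		return err;

	/* ... VXLAN traffic whose outer DMAC matches dev_addr is steered to qpn ... */

	/* Teardown mirrors mlx4_en_put_qp(): detach only if a rule was registered. */
	if (reg_id)
		mlx4_flow_detach(priv->mdev->dev, reg_id);
	return 0;
}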