@@ -383,7 +383,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
 				   match_v,
 				   MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
 				   0, &dest);
-	if (IS_ERR_OR_NULL(flow_rule)) {
+	if (IS_ERR(flow_rule)) {
 		pr_warn(
 			"FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
 			 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
@@ -457,7 +457,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
 
 	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
 	fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
-	if (IS_ERR_OR_NULL(fdb)) {
+	if (IS_ERR(fdb)) {
 		err = PTR_ERR(fdb);
 		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
 		goto out;
@@ -474,7 +474,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
 	eth_broadcast_addr(dmac);
 	g = mlx5_create_flow_group(fdb, flow_group_in);
-	if (IS_ERR_OR_NULL(g)) {
+	if (IS_ERR(g)) {
 		err = PTR_ERR(g);
 		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
 		goto out;
@@ -489,7 +489,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
 	eth_zero_addr(dmac);
 	dmac[0] = 0x01;
 	g = mlx5_create_flow_group(fdb, flow_group_in);
-	if (IS_ERR_OR_NULL(g)) {
+	if (IS_ERR(g)) {
 		err = PTR_ERR(g);
 		esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
 		goto out;
@@ -506,7 +506,7 @@ static int esw_create_fdb_table(struct mlx5_eswitch *esw, int nvports)
 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
 	g = mlx5_create_flow_group(fdb, flow_group_in);
-	if (IS_ERR_OR_NULL(g)) {
+	if (IS_ERR(g)) {
 		err = PTR_ERR(g);
 		esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
 		goto out;
@@ -529,7 +529,7 @@ out:
 		}
 	}
 
-	kfree(flow_group_in);
+	kvfree(flow_group_in);
 	return err;
 }
 
@@ -651,6 +651,7 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw,
 					esw_fdb_set_vport_rule(esw,
 							       mac,
 							       vport_idx);
+			iter_vaddr->mc_promisc = true;
 			break;
 		case MLX5_ACTION_DEL:
 			if (!iter_vaddr)
@@ -1060,7 +1061,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
 		return;
 
 	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
-	if (IS_ERR_OR_NULL(acl)) {
+	if (IS_ERR(acl)) {
 		err = PTR_ERR(acl);
 		esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
 			 vport->vport, err);
@@ -1075,7 +1076,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
 
 	vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
-	if (IS_ERR_OR_NULL(vlan_grp)) {
+	if (IS_ERR(vlan_grp)) {
 		err = PTR_ERR(vlan_grp);
 		esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
 			 vport->vport, err);
@@ -1086,7 +1087,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
 	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
 	drop_grp = mlx5_create_flow_group(acl, flow_group_in);
-	if (IS_ERR_OR_NULL(drop_grp)) {
+	if (IS_ERR(drop_grp)) {
 		err = PTR_ERR(drop_grp);
 		esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
 			 vport->vport, err);
@@ -1097,7 +1098,7 @@ static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
 	vport->egress.drop_grp = drop_grp;
 	vport->egress.allowed_vlans_grp = vlan_grp;
 out:
-	kfree(flow_group_in);
+	kvfree(flow_group_in);
 	if (err && !IS_ERR_OR_NULL(vlan_grp))
 		mlx5_destroy_flow_group(vlan_grp);
 	if (err && !IS_ERR_OR_NULL(acl))
@@ -1174,7 +1175,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
 		return;
 
 	acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
-	if (IS_ERR_OR_NULL(acl)) {
+	if (IS_ERR(acl)) {
 		err = PTR_ERR(acl);
 		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
 			 vport->vport, err);
@@ -1192,7 +1193,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
 
 	g = mlx5_create_flow_group(acl, flow_group_in);
-	if (IS_ERR_OR_NULL(g)) {
+	if (IS_ERR(g)) {
 		err = PTR_ERR(g);
 		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
 			 vport->vport, err);
@@ -1207,7 +1208,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
 
 	g = mlx5_create_flow_group(acl, flow_group_in);
-	if (IS_ERR_OR_NULL(g)) {
+	if (IS_ERR(g)) {
 		err = PTR_ERR(g);
 		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
 			 vport->vport, err);
@@ -1223,7 +1224,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);
 
 	g = mlx5_create_flow_group(acl, flow_group_in);
-	if (IS_ERR_OR_NULL(g)) {
+	if (IS_ERR(g)) {
 		err = PTR_ERR(g);
 		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
 			 vport->vport, err);
@@ -1236,7 +1237,7 @@ static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
 	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);
 
 	g = mlx5_create_flow_group(acl, flow_group_in);
-	if (IS_ERR_OR_NULL(g)) {
+	if (IS_ERR(g)) {
 		err = PTR_ERR(g);
 		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
 			 vport->vport, err);
@@ -1259,7 +1260,7 @@ out:
 			mlx5_destroy_flow_table(vport->ingress.acl);
 	}
 
-	kfree(flow_group_in);
+	kvfree(flow_group_in);
 }
 
 static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@@ -1363,7 +1364,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 				   match_v,
 				   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
 				   0, NULL);
-	if (IS_ERR_OR_NULL(vport->ingress.allow_rule)) {
+	if (IS_ERR(vport->ingress.allow_rule)) {
 		err = PTR_ERR(vport->ingress.allow_rule);
 		pr_warn("vport[%d] configure ingress allow rule, err(%d)\n",
 			vport->vport, err);
@@ -1380,7 +1381,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 				   match_v,
 				   MLX5_FLOW_CONTEXT_ACTION_DROP,
 				   0, NULL);
-	if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) {
+	if (IS_ERR(vport->ingress.drop_rule)) {
 		err = PTR_ERR(vport->ingress.drop_rule);
 		pr_warn("vport[%d] configure ingress drop rule, err(%d)\n",
 			vport->vport, err);
@@ -1439,7 +1440,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
 				   match_v,
 				   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
 				   0, NULL);
-	if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
+	if (IS_ERR(vport->egress.allowed_vlan)) {
 		err = PTR_ERR(vport->egress.allowed_vlan);
 		pr_warn("vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
 			vport->vport, err);
@@ -1457,7 +1458,7 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
 				   match_v,
 				   MLX5_FLOW_CONTEXT_ACTION_DROP,
 				   0, NULL);
-	if (IS_ERR_OR_NULL(vport->egress.drop_rule)) {
+	if (IS_ERR(vport->egress.drop_rule)) {
 		err = PTR_ERR(vport->egress.drop_rule);
 		pr_warn("vport[%d] configure egress drop rule failed, err(%d)\n",
 			vport->vport, err);
@@ -1491,14 +1492,11 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
 
 	/* Sync with current vport context */
 	vport->enabled_events = enable_events;
-	esw_vport_change_handle_locked(vport);
-
 	vport->enabled = true;
 
 	/* only PF is trusted by default */
 	vport->trusted = (vport_num) ? false : true;
-
-	arm_vport_context_events_cmd(esw->dev, vport_num, enable_events);
+	esw_vport_change_handle_locked(vport);
 
 	esw->enabled_vports++;
 	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
@@ -1728,11 +1726,24 @@ void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
 	(esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
 #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)
 
+static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
+{
+	((u8 *)node_guid)[7] = mac[0];
+	((u8 *)node_guid)[6] = mac[1];
+	((u8 *)node_guid)[5] = mac[2];
+	((u8 *)node_guid)[4] = 0xff;
+	((u8 *)node_guid)[3] = 0xfe;
+	((u8 *)node_guid)[2] = mac[3];
+	((u8 *)node_guid)[1] = mac[4];
+	((u8 *)node_guid)[0] = mac[5];
+}
+
 int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 			       int vport, u8 mac[ETH_ALEN])
 {
-	int err = 0;
 	struct mlx5_vport *evport;
+	u64 node_guid;
+	int err = 0;
 
 	if (!ESW_ALLOWED(esw))
 		return -EPERM;
@@ -1756,11 +1767,17 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 		return err;
 	}
 
+	node_guid_gen_from_mac(&node_guid, mac);
+	err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
+	if (err)
+		mlx5_core_warn(esw->dev,
+			       "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
+			       vport, err);
+
 	mutex_lock(&esw->state_lock);
 	if (evport->enabled)
 		err = esw_vport_ingress_config(esw, evport);
 	mutex_unlock(&esw->state_lock);
-
 	return err;
 }
 
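For reference, the node_guid_gen_from_mac() helper introduced above derives the vport node GUID from the administratively assigned MAC by inserting 0xff, 0xfe between the OUI bytes (mac[0..2]) and the NIC-specific bytes (mac[3..5]) and writing the result into the u64 from byte offset 7 down to byte offset 0; the result is then pushed to the device with mlx5_modify_nic_vport_node_guid(). Below is a minimal standalone sketch of the same byte layout, assuming a hypothetical VF MAC of 00:11:22:33:44:55; the sample address and the main() driver are illustrative only and are not part of the kernel patch.

/*
 * Standalone sketch of the MAC -> node GUID byte layout used by
 * node_guid_gen_from_mac() in the patch above.
 */
#include <stdio.h>
#include <stdint.h>

#define ETH_ALEN 6

static void node_guid_gen_from_mac(uint64_t *node_guid, const uint8_t mac[ETH_ALEN])
{
	/* OUI bytes, then 0xff 0xfe, then NIC-specific bytes,
	 * stored from byte offset 7 down to byte offset 0 of the u64.
	 */
	((uint8_t *)node_guid)[7] = mac[0];
	((uint8_t *)node_guid)[6] = mac[1];
	((uint8_t *)node_guid)[5] = mac[2];
	((uint8_t *)node_guid)[4] = 0xff;
	((uint8_t *)node_guid)[3] = 0xfe;
	((uint8_t *)node_guid)[2] = mac[3];
	((uint8_t *)node_guid)[1] = mac[4];
	((uint8_t *)node_guid)[0] = mac[5];
}

int main(void)
{
	/* Hypothetical VF MAC, e.g. as assigned with "ip link set <pf> vf <n> mac ..." */
	uint8_t mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint64_t node_guid = 0;
	int i;

	node_guid_gen_from_mac(&node_guid, mac);

	/* Dump the eight bytes from offset 7 down to offset 0:
	 * prints 00:11:22:ff:fe:33:44:55
	 */
	for (i = 7; i >= 0; i--)
		printf("%02x%c", ((uint8_t *)&node_guid)[i], i ? ':' : '\n');
	return 0;
}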