|
@@ -48,6 +48,7 @@
|
|
|
#include <linux/workqueue.h>
|
|
|
#include <linux/jiffies.h>
|
|
|
#include <linux/bitops.h>
|
|
|
+#include <linux/list.h>
|
|
|
#include <net/switchdev.h>
|
|
|
#include <generated/utsrelease.h>
|
|
|
|
|
@@ -186,33 +187,6 @@ static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
-static int mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
|
|
|
-{
|
|
|
- char sfmr_pl[MLXSW_REG_SFMR_LEN];
|
|
|
- int err;
|
|
|
-
|
|
|
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID,
|
|
|
- MLXSW_SP_VFID_BASE + vfid, 0);
|
|
|
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
|
|
|
-
|
|
|
- if (err)
|
|
|
- return err;
|
|
|
-
|
|
|
- set_bit(vfid, mlxsw_sp->active_vfids);
|
|
|
- return 0;
|
|
|
-}
|
|
|
-
|
|
|
-static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
|
|
|
-{
|
|
|
- char sfmr_pl[MLXSW_REG_SFMR_LEN];
|
|
|
-
|
|
|
- clear_bit(vfid, mlxsw_sp->active_vfids);
|
|
|
-
|
|
|
- mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
|
|
|
- MLXSW_SP_VFID_BASE + vfid, 0);
|
|
|
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
|
|
|
-}
|
|
|
-
|
|
|
static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
|
|
|
unsigned char *addr)
|
|
|
{
|
|
@@ -549,12 +523,132 @@ static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
+static struct mlxsw_sp_vfid *
|
|
|
+mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
|
|
|
+{
|
|
|
+ struct mlxsw_sp_vfid *vfid;
|
|
|
+
|
|
|
+ list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) {
|
|
|
+ if (vfid->vid == vid)
|
|
|
+ return vfid;
|
|
|
+ }
|
|
|
+
|
|
|
+ return NULL;
|
|
|
+}
|
|
|
+
|
|
|
+static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
|
|
|
+{
|
|
|
+ return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
|
|
|
+ MLXSW_SP_VFID_PORT_MAX);
|
|
|
+}
|
|
|
+
|
|
|
+static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
|
|
|
+{
|
|
|
+ u16 fid = mlxsw_sp_vfid_to_fid(vfid);
|
|
|
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
|
|
|
+
|
|
|
+ mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0);
|
|
|
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
|
|
|
+}
|
|
|
+
|
|
|
+static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
|
|
|
+{
|
|
|
+ u16 fid = mlxsw_sp_vfid_to_fid(vfid);
|
|
|
+ char sfmr_pl[MLXSW_REG_SFMR_LEN];
|
|
|
+
|
|
|
+ mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0);
|
|
|
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
|
|
|
+}
|
|
|
+
|
|
|
+static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
|
|
|
+ u16 vid)
|
|
|
+{
|
|
|
+ struct device *dev = mlxsw_sp->bus_info->dev;
|
|
|
+ struct mlxsw_sp_vfid *vfid;
|
|
|
+ u16 n_vfid;
|
|
|
+ int err;
|
|
|
+
|
|
|
+ n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
|
|
|
+ if (n_vfid == MLXSW_SP_VFID_PORT_MAX) {
|
|
|
+ dev_err(dev, "No available vFIDs\n");
|
|
|
+ return ERR_PTR(-ERANGE);
|
|
|
+ }
|
|
|
+
|
|
|
+ err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
|
|
|
+ if (err) {
|
|
|
+ dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
|
|
|
+ return ERR_PTR(err);
|
|
|
+ }
|
|
|
+
|
|
|
+ vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
|
|
|
+ if (!vfid)
|
|
|
+ goto err_allocate_vfid;
|
|
|
+
|
|
|
+ vfid->vfid = n_vfid;
|
|
|
+ vfid->vid = vid;
|
|
|
+
|
|
|
+ list_add(&vfid->list, &mlxsw_sp->port_vfids.list);
|
|
|
+ set_bit(n_vfid, mlxsw_sp->port_vfids.mapped);
|
|
|
+
|
|
|
+ return vfid;
|
|
|
+
|
|
|
+err_allocate_vfid:
|
|
|
+ __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
|
|
|
+ return ERR_PTR(-ENOMEM);
|
|
|
+}
|
|
|
+
|
|
|
+static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
|
|
|
+ struct mlxsw_sp_vfid *vfid)
|
|
|
+{
|
|
|
+ clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped);
|
|
|
+ list_del(&vfid->list);
|
|
|
+
|
|
|
+ __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);
|
|
|
+
|
|
|
+ kfree(vfid);
|
|
|
+}
|
|
|
+
|
|
|
+static struct mlxsw_sp_port *
|
|
|
+mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
|
|
|
+ struct mlxsw_sp_vfid *vfid)
|
|
|
+{
|
|
|
+ struct mlxsw_sp_port *mlxsw_sp_vport;
|
|
|
+
|
|
|
+ mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
|
|
|
+ if (!mlxsw_sp_vport)
|
|
|
+ return NULL;
|
|
|
+
|
|
|
+ /* dev will be set correctly after the VLAN device is linked
|
|
|
+ * with the real device. In case of bridge SELF invocation, dev
|
|
|
+ * will remain as is.
|
|
|
+ */
|
|
|
+ mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
|
|
|
+ mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
|
|
|
+ mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
|
|
|
+ mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
|
|
|
+ mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
|
|
|
+ mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
|
|
|
+ mlxsw_sp_vport->vport.vfid = vfid;
|
|
|
+ mlxsw_sp_vport->vport.vid = vfid->vid;
|
|
|
+
|
|
|
+ list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
|
|
|
+
|
|
|
+ return mlxsw_sp_vport;
|
|
|
+}
|
|
|
+
|
|
|
+static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
|
|
|
+{
|
|
|
+ list_del(&mlxsw_sp_vport->vport.list);
|
|
|
+ kfree(mlxsw_sp_vport);
|
|
|
+}
|
|
|
+
|
|
|
int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
|
|
|
u16 vid)
|
|
|
{
|
|
|
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
|
|
|
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
|
|
|
- char *sftr_pl;
|
|
|
+ struct mlxsw_sp_port *mlxsw_sp_vport;
|
|
|
+ struct mlxsw_sp_vfid *vfid;
|
|
|
int err;
|
|
|
|
|
|
/* VLAN 0 is added to HW filter when device goes up, but it is
|
|
@@ -563,100 +657,105 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
|
|
|
if (!vid)
|
|
|
return 0;
|
|
|
|
|
|
- if (test_bit(vid, mlxsw_sp_port->active_vfids)) {
|
|
|
+ if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
|
|
|
netdev_warn(dev, "VID=%d already configured\n", vid);
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
- if (!test_bit(vid, mlxsw_sp->active_vfids)) {
|
|
|
- err = mlxsw_sp_vfid_create(mlxsw_sp, vid);
|
|
|
- if (err) {
|
|
|
- netdev_err(dev, "Failed to create vFID=%d\n",
|
|
|
- MLXSW_SP_VFID_BASE + vid);
|
|
|
- return err;
|
|
|
+ vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
|
|
|
+ if (!vfid) {
|
|
|
+ vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
|
|
|
+ if (IS_ERR(vfid)) {
|
|
|
+ netdev_err(dev, "Failed to create vFID for VID=%d\n",
|
|
|
+ vid);
|
|
|
+ return PTR_ERR(vfid);
|
|
|
}
|
|
|
+ }
|
|
|
|
|
|
- sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
|
|
|
- if (!sftr_pl) {
|
|
|
- err = -ENOMEM;
|
|
|
- goto err_flood_table_alloc;
|
|
|
- }
|
|
|
- mlxsw_reg_sftr_pack(sftr_pl, 0, vid,
|
|
|
- MLXSW_REG_SFGC_TABLE_TYPE_FID, 0,
|
|
|
- MLXSW_PORT_CPU_PORT, true);
|
|
|
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
|
|
|
- kfree(sftr_pl);
|
|
|
+ mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
|
|
|
+ if (!mlxsw_sp_vport) {
|
|
|
+ netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
|
|
|
+ err = -ENOMEM;
|
|
|
+ goto err_port_vport_create;
|
|
|
+ }
|
|
|
+
|
|
|
+ if (!vfid->nr_vports) {
|
|
|
+ err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid,
|
|
|
+ true, false);
|
|
|
if (err) {
|
|
|
- netdev_err(dev, "Failed to configure flood table\n");
|
|
|
- goto err_flood_table_config;
|
|
|
+ netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
|
|
|
+ vfid->vfid);
|
|
|
+ goto err_vport_flood_set;
|
|
|
}
|
|
|
}
|
|
|
|
|
|
- /* In case we fail in the following steps, we intentionally do not
|
|
|
- * destroy the associated vFID.
|
|
|
- */
|
|
|
-
|
|
|
/* When adding the first VLAN interface on a bridged port we need to
|
|
|
* transition all the active 802.1Q bridge VLANs to use explicit
|
|
|
* {Port, VID} to FID mappings and set the port's mode to Virtual mode.
|
|
|
*/
|
|
|
- if (!mlxsw_sp_port->nr_vfids) {
|
|
|
+ if (list_is_singular(&mlxsw_sp_port->vports_list)) {
|
|
|
err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
|
|
|
if (err) {
|
|
|
netdev_err(dev, "Failed to set to Virtual mode\n");
|
|
|
- return err;
|
|
|
+ goto err_port_vp_mode_trans;
|
|
|
}
|
|
|
}
|
|
|
|
|
|
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
|
|
|
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
|
|
|
MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
|
|
|
- true, MLXSW_SP_VFID_BASE + vid, vid);
|
|
|
+ true,
|
|
|
+ mlxsw_sp_vfid_to_fid(vfid->vfid),
|
|
|
+ vid);
|
|
|
if (err) {
|
|
|
netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
|
|
|
- vid, MLXSW_SP_VFID_BASE + vid);
|
|
|
+ vid, vfid->vfid);
|
|
|
goto err_port_vid_to_fid_set;
|
|
|
}
|
|
|
|
|
|
- err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
|
|
|
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
|
|
|
if (err) {
|
|
|
netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
|
|
|
goto err_port_vid_learning_set;
|
|
|
}
|
|
|
|
|
|
- err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, false);
|
|
|
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
|
|
|
if (err) {
|
|
|
netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
|
|
|
vid);
|
|
|
goto err_port_add_vid;
|
|
|
}
|
|
|
|
|
|
- err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
|
|
|
+ err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
|
|
|
MLXSW_REG_SPMS_STATE_FORWARDING);
|
|
|
if (err) {
|
|
|
netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
|
|
|
goto err_port_stp_state_set;
|
|
|
}
|
|
|
|
|
|
- mlxsw_sp_port->nr_vfids++;
|
|
|
- set_bit(vid, mlxsw_sp_port->active_vfids);
|
|
|
+ vfid->nr_vports++;
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
-err_flood_table_config:
|
|
|
-err_flood_table_alloc:
|
|
|
- mlxsw_sp_vfid_destroy(mlxsw_sp, vid);
|
|
|
- return err;
|
|
|
-
|
|
|
err_port_stp_state_set:
|
|
|
- mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
|
|
|
+ mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
|
|
|
err_port_add_vid:
|
|
|
- mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
|
|
|
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
|
|
|
err_port_vid_learning_set:
|
|
|
- mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
|
|
|
+ mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
|
|
|
MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
|
|
|
- MLXSW_SP_VFID_BASE + vid, vid);
|
|
|
+ mlxsw_sp_vfid_to_fid(vfid->vfid), vid);
|
|
|
err_port_vid_to_fid_set:
|
|
|
- mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
|
|
|
+ if (list_is_singular(&mlxsw_sp_port->vports_list))
|
|
|
+ mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
|
|
|
+err_port_vp_mode_trans:
|
|
|
+ if (!vfid->nr_vports)
|
|
|
+ mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
|
|
|
+ false);
|
|
|
+err_vport_flood_set:
|
|
|
+ mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
|
|
|
+err_port_vport_create:
|
|
|
+ if (!vfid->nr_vports)
|
|
|
+ mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
|
|
|
return err;
|
|
|
}
|
|
|
|
|
@@ -664,6 +763,8 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
|
|
|
__be16 __always_unused proto, u16 vid)
|
|
|
{
|
|
|
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
|
|
|
+ struct mlxsw_sp_port *mlxsw_sp_vport;
|
|
|
+ struct mlxsw_sp_vfid *vfid;
|
|
|
int err;
|
|
|
|
|
|
/* VLAN 0 is removed from HW filter when device goes down, but
|
|
@@ -672,38 +773,42 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
|
|
|
if (!vid)
|
|
|
return 0;
|
|
|
|
|
|
- if (!test_bit(vid, mlxsw_sp_port->active_vfids)) {
|
|
|
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
|
|
|
+ if (!mlxsw_sp_vport) {
|
|
|
netdev_warn(dev, "VID=%d does not exist\n", vid);
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
|
- err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
|
|
|
+ vfid = mlxsw_sp_vport->vport.vfid;
|
|
|
+
|
|
|
+ err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
|
|
|
MLXSW_REG_SPMS_STATE_DISCARDING);
|
|
|
if (err) {
|
|
|
netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
|
|
|
return err;
|
|
|
}
|
|
|
|
|
|
- err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
|
|
|
+ err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
|
|
|
if (err) {
|
|
|
netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
|
|
|
vid);
|
|
|
return err;
|
|
|
}
|
|
|
|
|
|
- err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
|
|
|
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
|
|
|
if (err) {
|
|
|
netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
|
|
|
return err;
|
|
|
}
|
|
|
|
|
|
- err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
|
|
|
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
|
|
|
MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
|
|
|
- false, MLXSW_SP_VFID_BASE + vid,
|
|
|
+ false,
|
|
|
+ mlxsw_sp_vfid_to_fid(vfid->vfid),
|
|
|
vid);
|
|
|
if (err) {
|
|
|
netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
|
|
|
- vid, MLXSW_SP_VFID_BASE + vid);
|
|
|
+ vid, vfid->vfid);
|
|
|
return err;
|
|
|
}
|
|
|
|
|
@@ -711,7 +816,7 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
|
|
|
* transition all active 802.1Q bridge VLANs to use VID to FID
|
|
|
* mappings and set port's mode to VLAN mode.
|
|
|
*/
|
|
|
- if (mlxsw_sp_port->nr_vfids == 1) {
|
|
|
+ if (list_is_singular(&mlxsw_sp_port->vports_list)) {
|
|
|
err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
|
|
|
if (err) {
|
|
|
netdev_err(dev, "Failed to set to VLAN mode\n");
|
|
@@ -719,8 +824,12 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
|
|
|
}
|
|
|
}
|
|
|
|
|
|
- mlxsw_sp_port->nr_vfids--;
|
|
|
- clear_bit(vid, mlxsw_sp_port->active_vfids);
|
|
|
+ vfid->nr_vports--;
|
|
|
+ mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
|
|
|
+
|
|
|
+ /* Destroy the vFID if no vPorts are assigned to it anymore. */
|
|
|
+ if (!vfid->nr_vports)
|
|
|
+ mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);
|
|
|
|
|
|
return 0;
|
|
|
}
|
|
@@ -1245,6 +1354,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
|
|
|
struct mlxsw_sp_port *mlxsw_sp_port;
|
|
|
struct net_device *dev;
|
|
|
bool usable;
|
|
|
+ size_t bytes;
|
|
|
int err;
|
|
|
|
|
|
dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
|
|
@@ -1258,6 +1368,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
|
|
|
mlxsw_sp_port->learning_sync = 1;
|
|
|
mlxsw_sp_port->uc_flood = 1;
|
|
|
mlxsw_sp_port->pvid = 1;
|
|
|
+ bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
|
|
|
+ mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
|
|
|
+ if (!mlxsw_sp_port->active_vlans) {
|
|
|
+ err = -ENOMEM;
|
|
|
+ goto err_port_active_vlans_alloc;
|
|
|
+ }
|
|
|
+ INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
|
|
|
|
|
|
mlxsw_sp_port->pcpu_stats =
|
|
|
netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
|
|
@@ -1359,16 +1476,27 @@ err_port_module_check:
|
|
|
err_dev_addr_init:
|
|
|
free_percpu(mlxsw_sp_port->pcpu_stats);
|
|
|
err_alloc_stats:
|
|
|
+ kfree(mlxsw_sp_port->active_vlans);
|
|
|
+err_port_active_vlans_alloc:
|
|
|
free_netdev(dev);
|
|
|
return err;
|
|
|
}
|
|
|
|
|
|
-static void mlxsw_sp_vfids_fini(struct mlxsw_sp *mlxsw_sp)
|
|
|
+static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
|
|
|
{
|
|
|
- u16 vfid;
|
|
|
+ struct net_device *dev = mlxsw_sp_port->dev;
|
|
|
+ struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;
|
|
|
|
|
|
- for_each_set_bit(vfid, mlxsw_sp->active_vfids, VLAN_N_VID)
|
|
|
- mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
|
|
|
+ list_for_each_entry_safe(mlxsw_sp_vport, tmp,
|
|
|
+ &mlxsw_sp_port->vports_list, vport.list) {
|
|
|
+ u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
|
|
|
+
|
|
|
+ /* vPorts created for VLAN devices should already be gone
|
|
|
+ * by now, since we unregistered the port netdev.
|
|
|
+ */
|
|
|
+ WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
|
|
|
+ mlxsw_sp_port_kill_vid(dev, 0, vid);
|
|
|
+ }
|
|
|
}
|
|
|
|
|
|
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
|
|
@@ -1377,10 +1505,11 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
|
|
|
|
|
|
if (!mlxsw_sp_port)
|
|
|
return;
|
|
|
- mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
|
|
|
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
|
|
|
+ mlxsw_sp_port_vports_fini(mlxsw_sp_port);
|
|
|
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
|
|
|
free_percpu(mlxsw_sp_port->pcpu_stats);
|
|
|
+ kfree(mlxsw_sp_port->active_vlans);
|
|
|
free_netdev(mlxsw_sp_port->dev);
|
|
|
}
|
|
|
|
|
@@ -1662,16 +1791,15 @@ static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
|
|
|
enum mlxsw_sp_flood_table flood_table;
|
|
|
char sfgc_pl[MLXSW_REG_SFGC_LEN];
|
|
|
|
|
|
- if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) {
|
|
|
+ if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
|
|
|
table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
|
|
|
- flood_table = 0;
|
|
|
- } else {
|
|
|
+ else
|
|
|
table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
|
|
|
- if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
|
|
|
- flood_table = MLXSW_SP_FLOOD_TABLE_UC;
|
|
|
- else
|
|
|
- flood_table = MLXSW_SP_FLOOD_TABLE_BM;
|
|
|
- }
|
|
|
+
|
|
|
+ if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
|
|
|
+ flood_table = MLXSW_SP_FLOOD_TABLE_UC;
|
|
|
+ else
|
|
|
+ flood_table = MLXSW_SP_FLOOD_TABLE_BM;
|
|
|
|
|
|
mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
|
|
|
flood_table);
|
|
@@ -1682,9 +1810,6 @@ static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
|
|
|
{
|
|
|
int type, err;
|
|
|
|
|
|
- /* For non-offloaded netdevs, flood all traffic types to CPU
|
|
|
- * port.
|
|
|
- */
|
|
|
for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
|
|
|
if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
|
|
|
continue;
|
|
@@ -1693,15 +1818,6 @@ static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
|
|
|
MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
|
|
|
if (err)
|
|
|
return err;
|
|
|
- }
|
|
|
-
|
|
|
- /* For bridged ports, use one flooding table for unknown unicast
|
|
|
- * traffic and a second table for unregistered multicast and
|
|
|
- * broadcast.
|
|
|
- */
|
|
|
- for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
|
|
|
- if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
|
|
|
- continue;
|
|
|
|
|
|
err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
|
|
|
MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
|
|
@@ -1736,6 +1852,8 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
|
|
|
|
|
|
mlxsw_sp->core = mlxsw_core;
|
|
|
mlxsw_sp->bus_info = mlxsw_bus_info;
|
|
|
+ INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
|
|
|
+ INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
|
|
|
|
|
|
err = mlxsw_sp_base_mac_get(mlxsw_sp);
|
|
|
if (err) {
|
|
@@ -1746,7 +1864,7 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
|
|
|
err = mlxsw_sp_ports_create(mlxsw_sp);
|
|
|
if (err) {
|
|
|
dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
|
|
|
- goto err_ports_create;
|
|
|
+ return err;
|
|
|
}
|
|
|
|
|
|
err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
|
|
@@ -1796,8 +1914,6 @@ err_rx_listener_register:
|
|
|
mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
|
|
|
err_event_register:
|
|
|
mlxsw_sp_ports_remove(mlxsw_sp);
|
|
|
-err_ports_create:
|
|
|
- mlxsw_sp_vfids_fini(mlxsw_sp);
|
|
|
return err;
|
|
|
}
|
|
|
|
|
@@ -1809,7 +1925,6 @@ static void mlxsw_sp_fini(void *priv)
|
|
|
mlxsw_sp_traps_fini(mlxsw_sp);
|
|
|
mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
|
|
|
mlxsw_sp_ports_remove(mlxsw_sp);
|
|
|
- mlxsw_sp_vfids_fini(mlxsw_sp);
|
|
|
}
|
|
|
|
|
|
static struct mlxsw_config_profile mlxsw_sp_config_profile = {
|
|
@@ -1834,8 +1949,8 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = {
|
|
|
.flood_mode = 3,
|
|
|
.max_fid_offset_flood_tables = 2,
|
|
|
.fid_offset_flood_table_size = VLAN_N_VID - 1,
|
|
|
- .max_fid_flood_tables = 1,
|
|
|
- .fid_flood_table_size = VLAN_N_VID,
|
|
|
+ .max_fid_flood_tables = 2,
|
|
|
+ .fid_flood_table_size = MLXSW_SP_VFID_MAX,
|
|
|
.used_max_ib_mc = 1,
|
|
|
.max_ib_mc = 0,
|
|
|
.used_max_pkey = 1,
|
|
@@ -2147,6 +2262,54 @@ static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
|
|
|
return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
|
|
|
}
|
|
|
|
|
|
+static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
|
|
|
+ struct net_device *br_dev);
|
|
|
+
|
|
|
+static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
|
|
|
+ struct net_device *vlan_dev)
|
|
|
+{
|
|
|
+ struct mlxsw_sp_port *mlxsw_sp_vport;
|
|
|
+ u16 vid = vlan_dev_vlan_id(vlan_dev);
|
|
|
+
|
|
|
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
|
|
|
+ if (!mlxsw_sp_vport) {
|
|
|
+ WARN_ON(!mlxsw_sp_vport);
|
|
|
+ return -EINVAL;
|
|
|
+ }
|
|
|
+
|
|
|
+ mlxsw_sp_vport->dev = vlan_dev;
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
+static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
|
|
|
+ struct net_device *vlan_dev)
|
|
|
+{
|
|
|
+ struct mlxsw_sp_port *mlxsw_sp_vport;
|
|
|
+ u16 vid = vlan_dev_vlan_id(vlan_dev);
|
|
|
+
|
|
|
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
|
|
|
+ if (!mlxsw_sp_vport) {
|
|
|
+ WARN_ON(!mlxsw_sp_vport);
|
|
|
+ return -EINVAL;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* When removing a VLAN device while still bridged we should first
|
|
|
+ * remove it from the bridge, as we receive the bridge's notification
|
|
|
+ * when the vPort is already gone.
|
|
|
+ */
|
|
|
+ if (mlxsw_sp_vport->bridged) {
|
|
|
+ struct net_device *br_dev;
|
|
|
+
|
|
|
+ br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
|
|
|
+ mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev);
|
|
|
+ }
|
|
|
+
|
|
|
+ mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
|
|
|
unsigned long event, void *ptr)
|
|
|
{
|
|
@@ -2176,9 +2339,23 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
|
|
|
break;
|
|
|
case NETDEV_CHANGEUPPER:
|
|
|
upper_dev = info->upper_dev;
|
|
|
- if (!info->master)
|
|
|
- break;
|
|
|
- if (netif_is_bridge_master(upper_dev)) {
|
|
|
+ if (is_vlan_dev(upper_dev)) {
|
|
|
+ if (info->linking) {
|
|
|
+ err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
|
|
|
+ upper_dev);
|
|
|
+ if (err) {
|
|
|
+ netdev_err(dev, "Failed to link VLAN device\n");
|
|
|
+ return NOTIFY_BAD;
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
|
|
|
+ upper_dev);
|
|
|
+ if (err) {
|
|
|
+ netdev_err(dev, "Failed to unlink VLAN device\n");
|
|
|
+ return NOTIFY_BAD;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ } else if (netif_is_bridge_master(upper_dev)) {
|
|
|
if (info->linking) {
|
|
|
err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
|
|
|
if (err)
|
|
@@ -2271,6 +2448,370 @@ static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
|
|
|
return NOTIFY_DONE;
|
|
|
}
|
|
|
|
|
|
+static struct mlxsw_sp_vfid *
|
|
|
+mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
|
|
|
+ const struct net_device *br_dev)
|
|
|
+{
|
|
|
+ struct mlxsw_sp_vfid *vfid;
|
|
|
+
|
|
|
+ list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) {
|
|
|
+ if (vfid->br_dev == br_dev)
|
|
|
+ return vfid;
|
|
|
+ }
|
|
|
+
|
|
|
+ return NULL;
|
|
|
+}
|
|
|
+
|
|
|
+static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
|
|
|
+{
|
|
|
+ return vfid - MLXSW_SP_VFID_PORT_MAX;
|
|
|
+}
|
|
|
+
|
|
|
+static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
|
|
|
+{
|
|
|
+ return MLXSW_SP_VFID_PORT_MAX + br_vfid;
|
|
|
+}
|
|
|
+
|
|
|
+static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
|
|
|
+{
|
|
|
+ return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
|
|
|
+ MLXSW_SP_VFID_BR_MAX);
|
|
|
+}
|
|
|
+
|
|
|
+static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
|
|
|
+ struct net_device *br_dev)
|
|
|
+{
|
|
|
+ struct device *dev = mlxsw_sp->bus_info->dev;
|
|
|
+ struct mlxsw_sp_vfid *vfid;
|
|
|
+ u16 n_vfid;
|
|
|
+ int err;
|
|
|
+
|
|
|
+ n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
|
|
|
+ if (n_vfid == MLXSW_SP_VFID_MAX) {
|
|
|
+ dev_err(dev, "No available vFIDs\n");
|
|
|
+ return ERR_PTR(-ERANGE);
|
|
|
+ }
|
|
|
+
|
|
|
+ err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
|
|
|
+ if (err) {
|
|
|
+ dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
|
|
|
+ return ERR_PTR(err);
|
|
|
+ }
|
|
|
+
|
|
|
+ vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
|
|
|
+ if (!vfid)
|
|
|
+ goto err_allocate_vfid;
|
|
|
+
|
|
|
+ vfid->vfid = n_vfid;
|
|
|
+ vfid->br_dev = br_dev;
|
|
|
+
|
|
|
+ list_add(&vfid->list, &mlxsw_sp->br_vfids.list);
|
|
|
+ set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped);
|
|
|
+
|
|
|
+ return vfid;
|
|
|
+
|
|
|
+err_allocate_vfid:
|
|
|
+ __mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
|
|
|
+ return ERR_PTR(-ENOMEM);
|
|
|
+}
|
|
|
+
|
|
|
+static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
|
|
|
+ struct mlxsw_sp_vfid *vfid)
|
|
|
+{
|
|
|
+ u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);
|
|
|
+
|
|
|
+ clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
|
|
|
+ list_del(&vfid->list);
|
|
|
+
|
|
|
+ __mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);
|
|
|
+
|
|
|
+ kfree(vfid);
|
|
|
+}
|
|
|
+
|
|
|
+static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
|
|
|
+ struct net_device *br_dev)
|
|
|
+{
|
|
|
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
|
|
|
+ u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
|
|
|
+ struct net_device *dev = mlxsw_sp_vport->dev;
|
|
|
+ struct mlxsw_sp_vfid *vfid, *new_vfid;
|
|
|
+ int err;
|
|
|
+
|
|
|
+ vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
|
|
|
+ if (!vfid) {
|
|
|
+ WARN_ON(!vfid);
|
|
|
+ return -EINVAL;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* We need a vFID to go back to after leaving the bridge's vFID. */
|
|
|
+ new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
|
|
|
+ if (!new_vfid) {
|
|
|
+ new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
|
|
|
+ if (IS_ERR(new_vfid)) {
|
|
|
+ netdev_err(dev, "Failed to create vFID for VID=%d\n",
|
|
|
+ vid);
|
|
|
+ return PTR_ERR(new_vfid);
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ /* Invalidate existing {Port, VID} to vFID mapping and create a new
|
|
|
+ * one for the new vFID.
|
|
|
+ */
|
|
|
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
|
|
|
+ MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
|
|
|
+ false,
|
|
|
+ mlxsw_sp_vfid_to_fid(vfid->vfid),
|
|
|
+ vid);
|
|
|
+ if (err) {
|
|
|
+ netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
|
|
|
+ vfid->vfid);
|
|
|
+ goto err_port_vid_to_fid_invalidate;
|
|
|
+ }
|
|
|
+
|
|
|
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
|
|
|
+ MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
|
|
|
+ true,
|
|
|
+ mlxsw_sp_vfid_to_fid(new_vfid->vfid),
|
|
|
+ vid);
|
|
|
+ if (err) {
|
|
|
+ netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
|
|
|
+ new_vfid->vfid);
|
|
|
+ goto err_port_vid_to_fid_validate;
|
|
|
+ }
|
|
|
+
|
|
|
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
|
|
|
+ if (err) {
|
|
|
+ netdev_err(dev, "Failed to disable learning\n");
|
|
|
+ goto err_port_vid_learning_set;
|
|
|
+ }
|
|
|
+
|
|
|
+ err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
|
|
|
+ false);
|
|
|
+ if (err) {
|
|
|
+		netdev_err(dev, "Failed to clear flooding\n");
|
|
|
+ goto err_vport_flood_set;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* Switch between the vFIDs and destroy the old one if needed. */
|
|
|
+ new_vfid->nr_vports++;
|
|
|
+ mlxsw_sp_vport->vport.vfid = new_vfid;
|
|
|
+ vfid->nr_vports--;
|
|
|
+ if (!vfid->nr_vports)
|
|
|
+ mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
|
|
|
+
|
|
|
+ mlxsw_sp_vport->learning = 0;
|
|
|
+ mlxsw_sp_vport->learning_sync = 0;
|
|
|
+ mlxsw_sp_vport->uc_flood = 0;
|
|
|
+ mlxsw_sp_vport->bridged = 0;
|
|
|
+
|
|
|
+ return 0;
|
|
|
+
|
|
|
+err_vport_flood_set:
|
|
|
+err_port_vid_learning_set:
|
|
|
+err_port_vid_to_fid_validate:
|
|
|
+err_port_vid_to_fid_invalidate:
|
|
|
+ /* Rollback vFID only if new. */
|
|
|
+ if (!new_vfid->nr_vports)
|
|
|
+ mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
|
|
|
+ return err;
|
|
|
+}
|
|
|
+
|
|
|
+static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
|
|
|
+ struct net_device *br_dev)
|
|
|
+{
|
|
|
+ struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
|
|
|
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
|
|
|
+ u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
|
|
|
+ struct net_device *dev = mlxsw_sp_vport->dev;
|
|
|
+ struct mlxsw_sp_vfid *vfid;
|
|
|
+ int err;
|
|
|
+
|
|
|
+ vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
|
|
|
+ if (!vfid) {
|
|
|
+ vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
|
|
|
+ if (IS_ERR(vfid)) {
|
|
|
+ netdev_err(dev, "Failed to create bridge vFID\n");
|
|
|
+ return PTR_ERR(vfid);
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false);
|
|
|
+ if (err) {
|
|
|
+ netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
|
|
|
+ vfid->vfid);
|
|
|
+ goto err_port_flood_set;
|
|
|
+ }
|
|
|
+
|
|
|
+ err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
|
|
|
+ if (err) {
|
|
|
+ netdev_err(dev, "Failed to enable learning\n");
|
|
|
+ goto err_port_vid_learning_set;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* We need to invalidate existing {Port, VID} to vFID mapping and
|
|
|
+ * create a new one for the bridge's vFID.
|
|
|
+ */
|
|
|
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
|
|
|
+ MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
|
|
|
+ false,
|
|
|
+ mlxsw_sp_vfid_to_fid(old_vfid->vfid),
|
|
|
+ vid);
|
|
|
+ if (err) {
|
|
|
+ netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
|
|
|
+ old_vfid->vfid);
|
|
|
+ goto err_port_vid_to_fid_invalidate;
|
|
|
+ }
|
|
|
+
|
|
|
+ err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
|
|
|
+ MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
|
|
|
+ true,
|
|
|
+ mlxsw_sp_vfid_to_fid(vfid->vfid),
|
|
|
+ vid);
|
|
|
+ if (err) {
|
|
|
+ netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
|
|
|
+ vfid->vfid);
|
|
|
+ goto err_port_vid_to_fid_validate;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* Switch between the vFIDs and destroy the old one if needed. */
|
|
|
+ vfid->nr_vports++;
|
|
|
+ mlxsw_sp_vport->vport.vfid = vfid;
|
|
|
+ old_vfid->nr_vports--;
|
|
|
+ if (!old_vfid->nr_vports)
|
|
|
+ mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);
|
|
|
+
|
|
|
+ mlxsw_sp_vport->learning = 1;
|
|
|
+ mlxsw_sp_vport->learning_sync = 1;
|
|
|
+ mlxsw_sp_vport->uc_flood = 1;
|
|
|
+ mlxsw_sp_vport->bridged = 1;
|
|
|
+
|
|
|
+ return 0;
|
|
|
+
|
|
|
+err_port_vid_to_fid_validate:
|
|
|
+ mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
|
|
|
+ MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
|
|
|
+ mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid);
|
|
|
+err_port_vid_to_fid_invalidate:
|
|
|
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
|
|
|
+err_port_vid_learning_set:
|
|
|
+ mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false);
|
|
|
+err_port_flood_set:
|
|
|
+ if (!vfid->nr_vports)
|
|
|
+ mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
|
|
|
+ return err;
|
|
|
+}
|
|
|
+
|
|
|
+static bool
|
|
|
+mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
|
|
|
+ const struct net_device *br_dev)
|
|
|
+{
|
|
|
+ struct mlxsw_sp_port *mlxsw_sp_vport;
|
|
|
+
|
|
|
+ list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
|
|
|
+ vport.list) {
|
|
|
+ if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+
|
|
|
+ return true;
|
|
|
+}
|
|
|
+
|
|
|
+static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
|
|
|
+ unsigned long event, void *ptr,
|
|
|
+ u16 vid)
|
|
|
+{
|
|
|
+ struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
|
|
|
+ struct netdev_notifier_changeupper_info *info = ptr;
|
|
|
+ struct mlxsw_sp_port *mlxsw_sp_vport;
|
|
|
+ struct net_device *upper_dev;
|
|
|
+ int err;
|
|
|
+
|
|
|
+ mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
|
|
|
+
|
|
|
+ switch (event) {
|
|
|
+ case NETDEV_PRECHANGEUPPER:
|
|
|
+ upper_dev = info->upper_dev;
|
|
|
+ if (!info->master || !info->linking)
|
|
|
+ break;
|
|
|
+ if (!netif_is_bridge_master(upper_dev))
|
|
|
+ return NOTIFY_BAD;
|
|
|
+ /* We can't have multiple VLAN interfaces configured on
|
|
|
+ * the same port and being members in the same bridge.
|
|
|
+ */
|
|
|
+ if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
|
|
|
+ upper_dev))
|
|
|
+ return NOTIFY_BAD;
|
|
|
+ break;
|
|
|
+ case NETDEV_CHANGEUPPER:
|
|
|
+ upper_dev = info->upper_dev;
|
|
|
+ if (!info->master)
|
|
|
+ break;
|
|
|
+ if (info->linking) {
|
|
|
+ if (!mlxsw_sp_vport) {
|
|
|
+ WARN_ON(!mlxsw_sp_vport);
|
|
|
+ return NOTIFY_BAD;
|
|
|
+ }
|
|
|
+ err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
|
|
|
+ upper_dev);
|
|
|
+ if (err) {
|
|
|
+ netdev_err(dev, "Failed to join bridge\n");
|
|
|
+ return NOTIFY_BAD;
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ /* We ignore bridge's unlinking notifications if vPort
|
|
|
+ * is gone, since we already left the bridge when the
|
|
|
+ * VLAN device was unlinked from the real device.
|
|
|
+ */
|
|
|
+ if (!mlxsw_sp_vport)
|
|
|
+ return NOTIFY_DONE;
|
|
|
+ err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
|
|
|
+ upper_dev);
|
|
|
+ if (err) {
|
|
|
+ netdev_err(dev, "Failed to leave bridge\n");
|
|
|
+ return NOTIFY_BAD;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ return NOTIFY_DONE;
|
|
|
+}
|
|
|
+
|
|
|
+static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
|
|
|
+ unsigned long event, void *ptr,
|
|
|
+ u16 vid)
|
|
|
+{
|
|
|
+ struct net_device *dev;
|
|
|
+ struct list_head *iter;
|
|
|
+ int ret;
|
|
|
+
|
|
|
+ netdev_for_each_lower_dev(lag_dev, dev, iter) {
|
|
|
+ if (mlxsw_sp_port_dev_check(dev)) {
|
|
|
+ ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
|
|
|
+ vid);
|
|
|
+ if (ret == NOTIFY_BAD)
|
|
|
+ return ret;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ return NOTIFY_DONE;
|
|
|
+}
|
|
|
+
|
|
|
+static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
|
|
|
+ unsigned long event, void *ptr)
|
|
|
+{
|
|
|
+ struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
|
|
|
+ u16 vid = vlan_dev_vlan_id(vlan_dev);
|
|
|
+
|
|
|
+ if (mlxsw_sp_port_dev_check(real_dev))
|
|
|
+ return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
|
|
|
+ vid);
|
|
|
+ else if (netif_is_lag_master(real_dev))
|
|
|
+ return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
|
|
|
+ vid);
|
|
|
+
|
|
|
+ return NOTIFY_DONE;
|
|
|
+}
|
|
|
+
|
|
|
static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
|
|
|
unsigned long event, void *ptr)
|
|
|
{
|
|
@@ -2282,6 +2823,9 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
|
|
|
if (netif_is_lag_master(dev))
|
|
|
return mlxsw_sp_netdevice_lag_event(dev, event, ptr);
|
|
|
|
|
|
+ if (is_vlan_dev(dev))
|
|
|
+ return mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
|
|
|
+
|
|
|
return NOTIFY_DONE;
|
|
|
}
|
|
|
|