Diffstat (limited to 'drivers/net/ethernet/mellanox/mlxsw/spectrum.c')
-rw-r--r-- | drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 1210
1 file changed, 1068 insertions(+), 142 deletions(-)
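In brief, the spectrum.c changes below replace the driver's single per-VID vFID bitmap with reference-counted vFID objects split across two ranges (port vFIDs for VLAN devices on non-bridged ports, bridge vFIDs for VLAN devices enslaved to VLAN-unaware bridges), introduce per-{Port, VID} vPorts to track them, add LAG offload (SLDR/SLCOR/SLCR register programming plus netdev notifier handling for LAG uppers), and wire up ethtool set_phys_id for port identification.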
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 3be4a2355ead..ce6845d534a8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -48,6 +48,7 @@
 #include <linux/workqueue.h>
 #include <linux/jiffies.h>
 #include <linux/bitops.h>
+#include <linux/list.h>
 #include <net/switchdev.h>
 #include <generated/utsrelease.h>
 
@@ -186,33 +187,6 @@ static int mlxsw_sp_port_oper_status_get(struct mlxsw_sp_port *mlxsw_sp_port,
 	return 0;
 }
 
-static int mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
-{
-	char sfmr_pl[MLXSW_REG_SFMR_LEN];
-	int err;
-
-	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID,
-			    MLXSW_SP_VFID_BASE + vfid, 0);
-	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
-
-	if (err)
-		return err;
-
-	set_bit(vfid, mlxsw_sp->active_vfids);
-	return 0;
-}
-
-static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
-{
-	char sfmr_pl[MLXSW_REG_SFMR_LEN];
-
-	clear_bit(vfid, mlxsw_sp->active_vfids);
-
-	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID,
-			    MLXSW_SP_VFID_BASE + vfid, 0);
-	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
-}
-
 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
 				      unsigned char *addr)
 {
@@ -417,6 +391,10 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
 	return NETDEV_TX_OK;
 }
 
+static void mlxsw_sp_set_rx_mode(struct net_device *dev)
+{
+}
+
 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
 {
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
@@ -545,12 +523,132 @@ static int mlxsw_sp_port_vlan_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port)
 	return 0;
 }
 
+static struct mlxsw_sp_vfid *
+mlxsw_sp_vfid_find(const struct mlxsw_sp *mlxsw_sp, u16 vid)
+{
+	struct mlxsw_sp_vfid *vfid;
+
+	list_for_each_entry(vfid, &mlxsw_sp->port_vfids.list, list) {
+		if (vfid->vid == vid)
+			return vfid;
+	}
+
+	return NULL;
+}
+
+static u16 mlxsw_sp_avail_vfid_get(const struct mlxsw_sp *mlxsw_sp)
+{
+	return find_first_zero_bit(mlxsw_sp->port_vfids.mapped,
+				   MLXSW_SP_VFID_PORT_MAX);
+}
+
+static int __mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp, u16 vfid)
+{
+	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
+	char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_CREATE_FID, fid, 0);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
+static void __mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp, u16 vfid)
+{
+	u16 fid = mlxsw_sp_vfid_to_fid(vfid);
+	char sfmr_pl[MLXSW_REG_SFMR_LEN];
+
+	mlxsw_reg_sfmr_pack(sfmr_pl, MLXSW_REG_SFMR_OP_DESTROY_FID, fid, 0);
+	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfmr), sfmr_pl);
+}
+
+static struct mlxsw_sp_vfid *mlxsw_sp_vfid_create(struct mlxsw_sp *mlxsw_sp,
+						  u16 vid)
+{
+	struct device *dev = mlxsw_sp->bus_info->dev;
+	struct mlxsw_sp_vfid *vfid;
+	u16 n_vfid;
+	int err;
+
+	n_vfid = mlxsw_sp_avail_vfid_get(mlxsw_sp);
+	if (n_vfid == MLXSW_SP_VFID_PORT_MAX) {
+		dev_err(dev, "No available vFIDs\n");
+		return ERR_PTR(-ERANGE);
+	}
+
+	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
+	if (err) {
+		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
+		return ERR_PTR(err);
+	}
+
+	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
+	if (!vfid)
+		goto err_allocate_vfid;
+
+	vfid->vfid = n_vfid;
+	vfid->vid = vid;
+
+	list_add(&vfid->list, &mlxsw_sp->port_vfids.list);
+	set_bit(n_vfid, mlxsw_sp->port_vfids.mapped);
+
+	return vfid;
+
+err_allocate_vfid:
+	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
+	return ERR_PTR(-ENOMEM);
+}
+
+static void mlxsw_sp_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
+				  struct mlxsw_sp_vfid *vfid)
+{
+	clear_bit(vfid->vfid, mlxsw_sp->port_vfids.mapped);
+	list_del(&vfid->list);
+
+	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);
+
+	kfree(vfid);
+}
+
+static struct mlxsw_sp_port *
+mlxsw_sp_port_vport_create(struct mlxsw_sp_port *mlxsw_sp_port,
+			   struct mlxsw_sp_vfid *vfid)
+{
+	struct mlxsw_sp_port *mlxsw_sp_vport;
+
+	mlxsw_sp_vport = kzalloc(sizeof(*mlxsw_sp_vport), GFP_KERNEL);
+	if (!mlxsw_sp_vport)
+		return NULL;
+
+	/* dev will be set correctly after the VLAN device is linked
+	 * with the real device. In case of bridge SELF invocation, dev
+	 * will remain as is.
+	 */
+	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
+	mlxsw_sp_vport->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	mlxsw_sp_vport->local_port = mlxsw_sp_port->local_port;
+	mlxsw_sp_vport->stp_state = BR_STATE_FORWARDING;
+	mlxsw_sp_vport->lagged = mlxsw_sp_port->lagged;
+	mlxsw_sp_vport->lag_id = mlxsw_sp_port->lag_id;
+	mlxsw_sp_vport->vport.vfid = vfid;
+	mlxsw_sp_vport->vport.vid = vfid->vid;
+
+	list_add(&mlxsw_sp_vport->vport.list, &mlxsw_sp_port->vports_list);
+
+	return mlxsw_sp_vport;
+}
+
+static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
+{
+	list_del(&mlxsw_sp_vport->vport.list);
+	kfree(mlxsw_sp_vport);
+}
+
 int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
 			  u16 vid)
 {
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
 	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
-	char *sftr_pl;
+	struct mlxsw_sp_port *mlxsw_sp_vport;
+	struct mlxsw_sp_vfid *vfid;
 	int err;
 
 	/* VLAN 0 is added to HW filter when device goes up, but it is
@@ -559,100 +657,105 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
 	if (!vid)
 		return 0;
 
-	if (test_bit(vid, mlxsw_sp_port->active_vfids)) {
+	if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
 		netdev_warn(dev, "VID=%d already configured\n", vid);
 		return 0;
 	}
 
-	if (!test_bit(vid, mlxsw_sp->active_vfids)) {
-		err = mlxsw_sp_vfid_create(mlxsw_sp, vid);
-		if (err) {
-			netdev_err(dev, "Failed to create vFID=%d\n",
-				   MLXSW_SP_VFID_BASE + vid);
-			return err;
+	vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
+	if (!vfid) {
+		vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
+		if (IS_ERR(vfid)) {
+			netdev_err(dev, "Failed to create vFID for VID=%d\n",
+				   vid);
+			return PTR_ERR(vfid);
 		}
+	}
 
-		sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
-		if (!sftr_pl) {
-			err = -ENOMEM;
-			goto err_flood_table_alloc;
-		}
-		mlxsw_reg_sftr_pack(sftr_pl, 0, vid,
-				    MLXSW_REG_SFGC_TABLE_TYPE_FID, 0,
-				    MLXSW_PORT_CPU_PORT, true);
-		err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
-		kfree(sftr_pl);
+	mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vfid);
+	if (!mlxsw_sp_vport) {
+		netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
+		err = -ENOMEM;
+		goto err_port_vport_create;
+	}
+
+	if (!vfid->nr_vports) {
+		err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid,
+					       true, false);
 		if (err) {
-			netdev_err(dev, "Failed to configure flood table\n");
-			goto err_flood_table_config;
+			netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
+				   vfid->vfid);
+			goto err_vport_flood_set;
 		}
 	}
 
-	/* In case we fail in the following steps, we intentionally do not
-	 * destroy the associated vFID.
-	 */
-
 	/* When adding the first VLAN interface on a bridged port we need to
 	 * transition all the active 802.1Q bridge VLANs to use explicit
 	 * {Port, VID} to FID mappings and set the port's mode to Virtual mode.
 	 */
-	if (!mlxsw_sp_port->nr_vfids) {
+	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
 		err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
 		if (err) {
 			netdev_err(dev, "Failed to set to Virtual mode\n");
-			return err;
+			goto err_port_vp_mode_trans;
 		}
 	}
 
-	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
+	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
 					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
-					   true, MLXSW_SP_VFID_BASE + vid, vid);
+					   true,
+					   mlxsw_sp_vfid_to_fid(vfid->vfid),
+					   vid);
 	if (err) {
 		netdev_err(dev, "Failed to map {Port, VID=%d} to vFID=%d\n",
-			   vid, MLXSW_SP_VFID_BASE + vid);
+			   vid, vfid->vfid);
 		goto err_port_vid_to_fid_set;
 	}
 
-	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, false);
+	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
 	if (err) {
 		netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
 		goto err_port_vid_learning_set;
 	}
 
-	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, false);
+	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, false);
 	if (err) {
 		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
 			   vid);
 		goto err_port_add_vid;
 	}
 
-	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
+	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
 					  MLXSW_REG_SPMS_STATE_FORWARDING);
 	if (err) {
 		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
 		goto err_port_stp_state_set;
 	}
 
-	mlxsw_sp_port->nr_vfids++;
-	set_bit(vid, mlxsw_sp_port->active_vfids);
+	vfid->nr_vports++;
 
 	return 0;
 
-err_flood_table_config:
-err_flood_table_alloc:
-	mlxsw_sp_vfid_destroy(mlxsw_sp, vid);
-	return err;
-
 err_port_stp_state_set:
-	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+	mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
 err_port_add_vid:
-	mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
+	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
 err_port_vid_learning_set:
-	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
+	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
 				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
-				     MLXSW_SP_VFID_BASE + vid, vid);
+				     mlxsw_sp_vfid_to_fid(vfid->vfid), vid);
 err_port_vid_to_fid_set:
-	mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
+	if (list_is_singular(&mlxsw_sp_port->vports_list))
+		mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
+err_port_vp_mode_trans:
+	if (!vfid->nr_vports)
+		mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
+					 false);
+err_vport_flood_set:
+	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
+err_port_vport_create:
+	if (!vfid->nr_vports)
+		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
 	return err;
 }
 
@@ -660,6 +763,8 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
 			   __be16 __always_unused proto, u16 vid)
 {
 	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	struct mlxsw_sp_port *mlxsw_sp_vport;
+	struct mlxsw_sp_vfid *vfid;
 	int err;
 
 	/* VLAN 0 is removed from HW filter when device goes down, but
@@ -668,38 +773,42 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
 	if (!vid)
 		return 0;
 
-	if (!test_bit(vid, mlxsw_sp_port->active_vfids)) {
+	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
+	if (!mlxsw_sp_vport) {
 		netdev_warn(dev, "VID=%d does not exist\n", vid);
 		return 0;
 	}
 
-	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_port, vid,
+	vfid = mlxsw_sp_vport->vport.vfid;
+
+	err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
 					  MLXSW_REG_SPMS_STATE_DISCARDING);
 	if (err) {
 		netdev_err(dev, "Failed to set STP state for VID=%d\n", vid);
 		return err;
 	}
 
-	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
+	err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
 	if (err) {
 		netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
 			   vid);
 		return err;
 	}
 
-	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
+	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
 	if (err) {
 		netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
 		return err;
 	}
 
-	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_port,
+	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
 					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
-					   false, MLXSW_SP_VFID_BASE + vid,
+					   false,
+					   mlxsw_sp_vfid_to_fid(vfid->vfid),
 					   vid);
 	if (err) {
 		netdev_err(dev, "Failed to invalidate {Port, VID=%d} to vFID=%d mapping\n",
-			   vid, MLXSW_SP_VFID_BASE + vid);
+			   vid, vfid->vfid);
 		return err;
 	}
 
@@ -707,7 +816,7 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
 	 * transition all active 802.1Q bridge VLANs to use VID to FID
 	 * mappings and set port's mode to VLAN mode.
 	 */
-	if (mlxsw_sp_port->nr_vfids == 1) {
+	if (list_is_singular(&mlxsw_sp_port->vports_list)) {
 		err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
 		if (err) {
 			netdev_err(dev, "Failed to set to VLAN mode\n");
@@ -715,8 +824,12 @@ int mlxsw_sp_port_kill_vid(struct net_device *dev,
 		}
 	}
 
-	mlxsw_sp_port->nr_vfids--;
-	clear_bit(vid, mlxsw_sp_port->active_vfids);
+	vfid->nr_vports--;
+	mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
+
+	/* Destroy the vFID if no vPorts are assigned to it anymore. */
+	if (!vfid->nr_vports)
+		mlxsw_sp_vfid_destroy(mlxsw_sp_port->mlxsw_sp, vfid);
 
 	return 0;
 }
@@ -725,6 +838,7 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
 	.ndo_open		= mlxsw_sp_port_open,
 	.ndo_stop		= mlxsw_sp_port_stop,
 	.ndo_start_xmit		= mlxsw_sp_port_xmit,
+	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
 	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
 	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
 	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
@@ -859,6 +973,29 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev,
 	}
 }
 
+static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
+				     enum ethtool_phys_id_state state)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char mlcr_pl[MLXSW_REG_MLCR_LEN];
+	bool active;
+
+	switch (state) {
+	case ETHTOOL_ID_ACTIVE:
+		active = true;
+		break;
+	case ETHTOOL_ID_INACTIVE:
+		active = false;
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+
+	mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
+}
+
 static void mlxsw_sp_port_get_stats(struct net_device *dev,
 				    struct ethtool_stats *stats, u64 *data)
 {
@@ -1205,6 +1342,7 @@ static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
 	.get_drvinfo		= mlxsw_sp_port_get_drvinfo,
 	.get_link		= ethtool_op_get_link,
 	.get_strings		= mlxsw_sp_port_get_strings,
+	.set_phys_id		= mlxsw_sp_port_set_phys_id,
 	.get_ethtool_stats	= mlxsw_sp_port_get_stats,
 	.get_sset_count		= mlxsw_sp_port_get_sset_count,
 	.get_settings		= mlxsw_sp_port_get_settings,
@@ -1216,6 +1354,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
 	struct mlxsw_sp_port *mlxsw_sp_port;
 	struct net_device *dev;
 	bool usable;
+	size_t bytes;
 	int err;
 
 	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
@@ -1225,10 +1364,18 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port)
 	mlxsw_sp_port->dev = dev;
 	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
 	mlxsw_sp_port->local_port = local_port;
-	mlxsw_sp_port->learning = 1;
-	mlxsw_sp_port->learning_sync = 1;
-	mlxsw_sp_port->uc_flood = 1;
-	mlxsw_sp_port->pvid = 1;
+	bytes = DIV_ROUND_UP(VLAN_N_VID, BITS_PER_BYTE);
+	mlxsw_sp_port->active_vlans = kzalloc(bytes, GFP_KERNEL);
+	if (!mlxsw_sp_port->active_vlans) {
+		err = -ENOMEM;
+		goto err_port_active_vlans_alloc;
+	}
+	mlxsw_sp_port->untagged_vlans = kzalloc(bytes, GFP_KERNEL);
+	if (!mlxsw_sp_port->untagged_vlans) {
+		err = -ENOMEM;
+		goto err_port_untagged_vlans_alloc;
+	}
+	INIT_LIST_HEAD(&mlxsw_sp_port->vports_list);
 
 	mlxsw_sp_port->pcpu_stats =
 		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
@@ -1330,16 +1477,29 @@ err_port_module_check:
 err_dev_addr_init:
 	free_percpu(mlxsw_sp_port->pcpu_stats);
 err_alloc_stats:
+	kfree(mlxsw_sp_port->untagged_vlans);
+err_port_untagged_vlans_alloc:
+	kfree(mlxsw_sp_port->active_vlans);
+err_port_active_vlans_alloc:
 	free_netdev(dev);
 	return err;
 }
 
-static void mlxsw_sp_vfids_fini(struct mlxsw_sp *mlxsw_sp)
+static void mlxsw_sp_port_vports_fini(struct mlxsw_sp_port *mlxsw_sp_port)
 {
-	u16 vfid;
+	struct net_device *dev = mlxsw_sp_port->dev;
+	struct mlxsw_sp_port *mlxsw_sp_vport, *tmp;
 
-	for_each_set_bit(vfid, mlxsw_sp->active_vfids, VLAN_N_VID)
-		mlxsw_sp_vfid_destroy(mlxsw_sp, vfid);
+	list_for_each_entry_safe(mlxsw_sp_vport, tmp,
+				 &mlxsw_sp_port->vports_list, vport.list) {
+		u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
+
+		/* vPorts created for VLAN devices should already be gone
+		 * by now, since we unregistered the port netdev.
+		 */
+		WARN_ON(is_vlan_dev(mlxsw_sp_vport->dev));
+		mlxsw_sp_port_kill_vid(dev, 0, vid);
+	}
 }
 
 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
@@ -1348,10 +1508,12 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
 	if (!mlxsw_sp_port)
 		return;
-	mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
 	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
+	mlxsw_sp_port_vports_fini(mlxsw_sp_port);
 	mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
 	free_percpu(mlxsw_sp_port->pcpu_stats);
+	kfree(mlxsw_sp_port->untagged_vlans);
+	kfree(mlxsw_sp_port->active_vlans);
 	free_netdev(mlxsw_sp_port->dev);
 }
 
@@ -1633,16 +1795,15 @@ static int __mlxsw_sp_flood_init(struct mlxsw_core *mlxsw_core,
 	enum mlxsw_sp_flood_table flood_table;
 	char sfgc_pl[MLXSW_REG_SFGC_LEN];
 
-	if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID) {
+	if (bridge_type == MLXSW_REG_SFGC_BRIDGE_TYPE_VFID)
 		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID;
-		flood_table = 0;
-	} else {
+	else
 		table_type = MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
-		if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
-			flood_table = MLXSW_SP_FLOOD_TABLE_UC;
-		else
-			flood_table = MLXSW_SP_FLOOD_TABLE_BM;
-	}
+
+	if (type == MLXSW_REG_SFGC_TYPE_UNKNOWN_UNICAST)
+		flood_table = MLXSW_SP_FLOOD_TABLE_UC;
+	else
+		flood_table = MLXSW_SP_FLOOD_TABLE_BM;
 
 	mlxsw_reg_sfgc_pack(sfgc_pl, type, bridge_type, table_type,
 			    flood_table);
@@ -1653,9 +1814,6 @@ static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
 {
 	int type, err;
 
-	/* For non-offloaded netdevs, flood all traffic types to CPU
-	 * port.
-	 */
 	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
 		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
 			continue;
@@ -1664,15 +1822,6 @@ static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
 					    MLXSW_REG_SFGC_BRIDGE_TYPE_VFID);
 		if (err)
 			return err;
-	}
-
-	/* For bridged ports, use one flooding table for unknown unicast
-	 * traffic and a second table for unregistered multicast and
-	 * broadcast.
-	 */
-	for (type = 0; type < MLXSW_REG_SFGC_TYPE_MAX; type++) {
-		if (type == MLXSW_REG_SFGC_TYPE_RESERVED)
-			continue;
 
 		err = __mlxsw_sp_flood_init(mlxsw_sp->core, type,
 					    MLXSW_REG_SFGC_BRIDGE_TYPE_1Q_FID);
@@ -1683,6 +1832,22 @@ static int mlxsw_sp_flood_init(struct mlxsw_sp *mlxsw_sp)
 	return 0;
 }
 
+static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
+{
+	char slcr_pl[MLXSW_REG_SLCR_LEN];
+
+	mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
+				     MLXSW_REG_SLCR_LAG_HASH_DMAC |
+				     MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
+				     MLXSW_REG_SLCR_LAG_HASH_VLANID |
+				     MLXSW_REG_SLCR_LAG_HASH_SIP |
+				     MLXSW_REG_SLCR_LAG_HASH_DIP |
+				     MLXSW_REG_SLCR_LAG_HASH_SPORT |
+				     MLXSW_REG_SLCR_LAG_HASH_DPORT |
+				     MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
+}
+
 static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
 			 const struct mlxsw_bus_info *mlxsw_bus_info)
 {
@@ -1691,6 +1856,9 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
 
 	mlxsw_sp->core = mlxsw_core;
 	mlxsw_sp->bus_info = mlxsw_bus_info;
+	INIT_LIST_HEAD(&mlxsw_sp->port_vfids.list);
+	INIT_LIST_HEAD(&mlxsw_sp->br_vfids.list);
+	INIT_LIST_HEAD(&mlxsw_sp->br_mids.list);
 
 	err = mlxsw_sp_base_mac_get(mlxsw_sp);
 	if (err) {
@@ -1701,7 +1869,7 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
 	err = mlxsw_sp_ports_create(mlxsw_sp);
 	if (err) {
 		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
-		goto err_ports_create;
+		return err;
 	}
 
 	err = mlxsw_sp_event_register(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
@@ -1728,6 +1896,12 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
 		goto err_buffers_init;
 	}
 
+	err = mlxsw_sp_lag_init(mlxsw_sp);
+	if (err) {
+		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
+		goto err_lag_init;
+	}
+
 	err = mlxsw_sp_switchdev_init(mlxsw_sp);
 	if (err) {
 		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
@@ -1737,6 +1911,7 @@ static int mlxsw_sp_init(void *priv, struct mlxsw_core *mlxsw_core,
 	return 0;
 
 err_switchdev_init:
+err_lag_init:
 err_buffers_init:
 err_flood_init:
 	mlxsw_sp_traps_fini(mlxsw_sp);
@@ -1744,8 +1919,6 @@ err_rx_listener_register:
 	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
 err_event_register:
 	mlxsw_sp_ports_remove(mlxsw_sp);
-err_ports_create:
-	mlxsw_sp_vfids_fini(mlxsw_sp);
 	return err;
 }
 
@@ -1757,18 +1930,17 @@ static void mlxsw_sp_fini(void *priv)
 	mlxsw_sp_traps_fini(mlxsw_sp);
 	mlxsw_sp_event_unregister(mlxsw_sp, MLXSW_TRAP_ID_PUDE);
 	mlxsw_sp_ports_remove(mlxsw_sp);
-	mlxsw_sp_vfids_fini(mlxsw_sp);
 }
 
 static struct mlxsw_config_profile mlxsw_sp_config_profile = {
 	.used_max_vepa_channels		= 1,
 	.max_vepa_channels		= 0,
 	.used_max_lag			= 1,
-	.max_lag			= 64,
+	.max_lag			= MLXSW_SP_LAG_MAX,
 	.used_max_port_per_lag		= 1,
-	.max_port_per_lag		= 16,
+	.max_port_per_lag		= MLXSW_SP_PORT_PER_LAG_MAX,
 	.used_max_mid			= 1,
-	.max_mid			= 7000,
+	.max_mid			= MLXSW_SP_MID_MAX,
 	.used_max_pgt			= 1,
 	.max_pgt			= 0,
 	.used_max_system_port		= 1,
@@ -1782,8 +1954,8 @@ static struct mlxsw_config_profile mlxsw_sp_config_profile = {
 	.flood_mode			= 3,
 	.max_fid_offset_flood_tables	= 2,
 	.fid_offset_flood_table_size	= VLAN_N_VID - 1,
-	.max_fid_flood_tables		= 1,
-	.fid_flood_table_size		= VLAN_N_VID,
+	.max_fid_flood_tables		= 2,
+	.fid_flood_table_size		= MLXSW_SP_VFID_MAX,
 	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
 	.used_max_pkey			= 1,
@@ -1824,24 +1996,29 @@ static int mlxsw_sp_port_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port)
 	 */
 	err = mlxsw_sp_port_kill_vid(dev, 0, 1);
 	if (err)
-		netdev_err(dev, "Failed to remove VID 1\n");
+		return err;
 
-	return err;
+	mlxsw_sp_port->learning = 1;
+	mlxsw_sp_port->learning_sync = 1;
+	mlxsw_sp_port->uc_flood = 1;
+	mlxsw_sp_port->bridged = 1;
+
+	return 0;
 }
 
 static int mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port)
 {
 	struct net_device *dev = mlxsw_sp_port->dev;
-	int err;
+
+	mlxsw_sp_port->learning = 0;
+	mlxsw_sp_port->learning_sync = 0;
+	mlxsw_sp_port->uc_flood = 0;
+	mlxsw_sp_port->bridged = 0;
 
 	/* Add implicit VLAN interface in the device, so that untagged
 	 * packets will be classified to the default vFID.
 	 */
-	err = mlxsw_sp_port_add_vid(dev, 0, 1);
-	if (err)
-		netdev_err(dev, "Failed to add VID 1\n");
-
-	return err;
+	return mlxsw_sp_port_add_vid(dev, 0, 1);
 }
 
 static bool mlxsw_sp_master_bridge_check(struct mlxsw_sp *mlxsw_sp,
@@ -1865,19 +2042,293 @@ static void mlxsw_sp_master_bridge_dec(struct mlxsw_sp *mlxsw_sp,
 	mlxsw_sp->master_bridge.dev = NULL;
 }
 
-static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
-				    unsigned long event, void *ptr)
+static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
+{
+	char sldr_pl[MLXSW_REG_SLDR_LEN];
+
+	mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
+}
+
+static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
+{
+	char sldr_pl[MLXSW_REG_SLDR_LEN];
+
+	mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
+}
+
+static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
+				     u16 lag_id, u8 port_index)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char slcor_pl[MLXSW_REG_SLCOR_LEN];
+
+	mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
+				      lag_id, port_index);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
+}
+
+static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
+					u16 lag_id)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char slcor_pl[MLXSW_REG_SLCOR_LEN];
+
+	mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
+					 lag_id);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
+}
+
+static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
+					u16 lag_id)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char slcor_pl[MLXSW_REG_SLCOR_LEN];
+
+	mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
+					lag_id);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
+}
+
+static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
+					 u16 lag_id)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char slcor_pl[MLXSW_REG_SLCOR_LEN];
+
+	mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
+					 lag_id);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
+}
+
+static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
+				  struct net_device *lag_dev,
+				  u16 *p_lag_id)
+{
+	struct mlxsw_sp_upper *lag;
+	int free_lag_id = -1;
+	int i;
+
+	for (i = 0; i < MLXSW_SP_LAG_MAX; i++) {
+		lag = mlxsw_sp_lag_get(mlxsw_sp, i);
+		if (lag->ref_count) {
+			if (lag->dev == lag_dev) {
+				*p_lag_id = i;
+				return 0;
+			}
+		} else if (free_lag_id < 0) {
+			free_lag_id = i;
+		}
+	}
+	if (free_lag_id < 0)
+		return -EBUSY;
+	*p_lag_id = free_lag_id;
+	return 0;
+}
+
+static bool
+mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
+			  struct net_device *lag_dev,
+			  struct netdev_lag_upper_info *lag_upper_info)
+{
+	u16 lag_id;
+
+	if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
+		return false;
+	if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
+		return false;
+	return true;
+}
+
+static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
+				       u16 lag_id, u8 *p_port_index)
+{
+	int i;
+
+	for (i = 0; i < MLXSW_SP_PORT_PER_LAG_MAX; i++) {
+		if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
+			*p_port_index = i;
+			return 0;
+		}
+	}
+	return -EBUSY;
+}
+
+static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
+				  struct net_device *lag_dev)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	struct mlxsw_sp_upper *lag;
+	u16 lag_id;
+	u8 port_index;
+	int err;
+
+	err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
+	if (err)
+		return err;
+	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
+	if (!lag->ref_count) {
+		err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
+		if (err)
+			return err;
+		lag->dev = lag_dev;
+	}
+
+	err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
+	if (err)
+		return err;
+	err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
+	if (err)
+		goto err_col_port_add;
+	err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port, lag_id);
+	if (err)
+		goto err_col_port_enable;
+
+	mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
+				   mlxsw_sp_port->local_port);
+	mlxsw_sp_port->lag_id = lag_id;
+	mlxsw_sp_port->lagged = 1;
+	lag->ref_count++;
+	return 0;
+
+err_col_port_enable:
+	mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
+err_col_port_add:
+	if (!lag->ref_count)
+		mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
+	return err;
+}
+
+static int mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
+				   struct net_device *lag_dev)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	struct mlxsw_sp_upper *lag;
+	u16 lag_id = mlxsw_sp_port->lag_id;
+	int err;
+
+	if (!mlxsw_sp_port->lagged)
+		return 0;
+	lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
+	WARN_ON(lag->ref_count == 0);
+
+	err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, lag_id);
+	if (err)
+		return err;
+	err = mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
+	if (err)
+		return err;
+
+	if (lag->ref_count == 1) {
+		err = mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
+		if (err)
+			return err;
+	}
+
+	mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
+				     mlxsw_sp_port->local_port);
+	mlxsw_sp_port->lagged = 0;
+	lag->ref_count--;
+	return 0;
+}
+
+static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
+				      u16 lag_id)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char sldr_pl[MLXSW_REG_SLDR_LEN];
+
+	mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
+					 mlxsw_sp_port->local_port);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
+}
+
+static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
+					 u16 lag_id)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+	char sldr_pl[MLXSW_REG_SLDR_LEN];
+
+	mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
+					    mlxsw_sp_port->local_port);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
+}
+
+static int mlxsw_sp_port_lag_tx_en_set(struct mlxsw_sp_port *mlxsw_sp_port,
+					bool lag_tx_enabled)
+{
+	if (lag_tx_enabled)
+		return mlxsw_sp_lag_dist_port_add(mlxsw_sp_port,
+						  mlxsw_sp_port->lag_id);
+	else
+		return mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
+						     mlxsw_sp_port->lag_id);
+}
+
+static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
+				     struct netdev_lag_lower_state_info *info)
+{
+	return mlxsw_sp_port_lag_tx_en_set(mlxsw_sp_port, info->tx_enabled);
+}
+
+static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
+				       struct net_device *br_dev);
+
+static int mlxsw_sp_port_vlan_link(struct mlxsw_sp_port *mlxsw_sp_port,
+				   struct net_device *vlan_dev)
+{
+	struct mlxsw_sp_port *mlxsw_sp_vport;
+	u16 vid = vlan_dev_vlan_id(vlan_dev);
+
+	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
+	if (!mlxsw_sp_vport) {
+		WARN_ON(!mlxsw_sp_vport);
+		return -EINVAL;
+	}
+
+	mlxsw_sp_vport->dev = vlan_dev;
+
+	return 0;
+}
+
+static int mlxsw_sp_port_vlan_unlink(struct mlxsw_sp_port *mlxsw_sp_port,
+				     struct net_device *vlan_dev)
+{
+	struct mlxsw_sp_port *mlxsw_sp_vport;
+	u16 vid = vlan_dev_vlan_id(vlan_dev);
+
+	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
+	if (!mlxsw_sp_vport) {
+		WARN_ON(!mlxsw_sp_vport);
+		return -EINVAL;
+	}
+
+	/* When removing a VLAN device while still bridged we should first
+	 * remove it from the bridge, as we receive the bridge's notification
+	 * when the vPort is already gone.
+	 */
+	if (mlxsw_sp_vport->bridged) {
+		struct net_device *br_dev;
+
+		br_dev = mlxsw_sp_vport_br_get(mlxsw_sp_vport);
+		mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport, br_dev);
+	}
+
+	mlxsw_sp_vport->dev = mlxsw_sp_port->dev;
+
+	return 0;
+}
+
+static int mlxsw_sp_netdevice_port_upper_event(struct net_device *dev,
+					       unsigned long event, void *ptr)
 {
-	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 	struct netdev_notifier_changeupper_info *info;
 	struct mlxsw_sp_port *mlxsw_sp_port;
 	struct net_device *upper_dev;
 	struct mlxsw_sp *mlxsw_sp;
 	int err;
 
-	if (!mlxsw_sp_port_dev_check(dev))
-		return NOTIFY_DONE;
-
 	mlxsw_sp_port = netdev_priv(dev);
 	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
 	info = ptr;
 
@@ -1885,28 +2336,66 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
 	switch (event) {
 	case NETDEV_PRECHANGEUPPER:
 		upper_dev = info->upper_dev;
+		if (!info->master || !info->linking)
+			break;
 		/* HW limitation forbids to put ports to multiple bridges. */
-		if (info->master && info->linking &&
-		    netif_is_bridge_master(upper_dev) &&
+		if (netif_is_bridge_master(upper_dev) &&
 		    !mlxsw_sp_master_bridge_check(mlxsw_sp, upper_dev))
 			return NOTIFY_BAD;
+		if (netif_is_lag_master(upper_dev) &&
+		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
+					       info->upper_info))
+			return NOTIFY_BAD;
 		break;
 	case NETDEV_CHANGEUPPER:
 		upper_dev = info->upper_dev;
-		if (info->master &&
-		    netif_is_bridge_master(upper_dev)) {
+		if (is_vlan_dev(upper_dev)) {
+			if (info->linking) {
+				err = mlxsw_sp_port_vlan_link(mlxsw_sp_port,
+							      upper_dev);
+				if (err) {
+					netdev_err(dev, "Failed to link VLAN device\n");
+					return NOTIFY_BAD;
+				}
+			} else {
+				err = mlxsw_sp_port_vlan_unlink(mlxsw_sp_port,
+								upper_dev);
+				if (err) {
+					netdev_err(dev, "Failed to unlink VLAN device\n");
+					return NOTIFY_BAD;
+				}
+			}
+		} else if (netif_is_bridge_master(upper_dev)) {
 			if (info->linking) {
 				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port);
-				if (err)
+				if (err) {
 					netdev_err(dev, "Failed to join bridge\n");
+					return NOTIFY_BAD;
+				}
 				mlxsw_sp_master_bridge_inc(mlxsw_sp, upper_dev);
-				mlxsw_sp_port->bridged = 1;
 			} else {
 				err = mlxsw_sp_port_bridge_leave(mlxsw_sp_port);
-				if (err)
-					netdev_err(dev, "Failed to leave bridge\n");
-				mlxsw_sp_port->bridged = 0;
 				mlxsw_sp_master_bridge_dec(mlxsw_sp, upper_dev);
+				if (err) {
+					netdev_err(dev, "Failed to leave bridge\n");
+					return NOTIFY_BAD;
+				}
+			}
+		} else if (netif_is_lag_master(upper_dev)) {
+			if (info->linking) {
+				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
+							     upper_dev);
+				if (err) {
+					netdev_err(dev, "Failed to join link aggregation\n");
+					return NOTIFY_BAD;
+				}
+			} else {
+				err = mlxsw_sp_port_lag_leave(mlxsw_sp_port,
+							      upper_dev);
+				if (err) {
+					netdev_err(dev, "Failed to leave link aggregation\n");
+					return NOTIFY_BAD;
+				}
			}
 		}
 		break;
@@ -1915,6 +2404,443 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
 	return NOTIFY_DONE;
 }
 
+static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
+					       unsigned long event, void *ptr)
+{
+	struct netdev_notifier_changelowerstate_info *info;
+	struct mlxsw_sp_port *mlxsw_sp_port;
+	int err;
+
+	mlxsw_sp_port = netdev_priv(dev);
+	info = ptr;
+
+	switch (event) {
+	case NETDEV_CHANGELOWERSTATE:
+		if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
+			err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
+							info->lower_state_info);
+			if (err)
+				netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
+		}
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+static int mlxsw_sp_netdevice_port_event(struct net_device *dev,
+					 unsigned long event, void *ptr)
+{
+	switch (event) {
+	case NETDEV_PRECHANGEUPPER:
+	case NETDEV_CHANGEUPPER:
+		return mlxsw_sp_netdevice_port_upper_event(dev, event, ptr);
+	case NETDEV_CHANGELOWERSTATE:
+		return mlxsw_sp_netdevice_port_lower_event(dev, event, ptr);
+	}
+
+	return NOTIFY_DONE;
+}
+
+static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
+					unsigned long event, void *ptr)
+{
+	struct net_device *dev;
+	struct list_head *iter;
+	int ret;
+
+	netdev_for_each_lower_dev(lag_dev, dev, iter) {
+		if (mlxsw_sp_port_dev_check(dev)) {
+			ret = mlxsw_sp_netdevice_port_event(dev, event, ptr);
+			if (ret == NOTIFY_BAD)
+				return ret;
+		}
+	}
+
+	return NOTIFY_DONE;
+}
+
+static struct mlxsw_sp_vfid *
+mlxsw_sp_br_vfid_find(const struct mlxsw_sp *mlxsw_sp,
+		      const struct net_device *br_dev)
+{
+	struct mlxsw_sp_vfid *vfid;
+
+	list_for_each_entry(vfid, &mlxsw_sp->br_vfids.list, list) {
+		if (vfid->br_dev == br_dev)
+			return vfid;
+	}
+
+	return NULL;
+}
+
+static u16 mlxsw_sp_vfid_to_br_vfid(u16 vfid)
+{
+	return vfid - MLXSW_SP_VFID_PORT_MAX;
+}
+
+static u16 mlxsw_sp_br_vfid_to_vfid(u16 br_vfid)
+{
+	return MLXSW_SP_VFID_PORT_MAX + br_vfid;
+}
+
+static u16 mlxsw_sp_avail_br_vfid_get(const struct mlxsw_sp *mlxsw_sp)
+{
+	return find_first_zero_bit(mlxsw_sp->br_vfids.mapped,
+				   MLXSW_SP_VFID_BR_MAX);
+}
+
+static struct mlxsw_sp_vfid *mlxsw_sp_br_vfid_create(struct mlxsw_sp *mlxsw_sp,
+						     struct net_device *br_dev)
+{
+	struct device *dev = mlxsw_sp->bus_info->dev;
+	struct mlxsw_sp_vfid *vfid;
+	u16 n_vfid;
+	int err;
+
+	n_vfid = mlxsw_sp_br_vfid_to_vfid(mlxsw_sp_avail_br_vfid_get(mlxsw_sp));
+	if (n_vfid == MLXSW_SP_VFID_MAX) {
+		dev_err(dev, "No available vFIDs\n");
+		return ERR_PTR(-ERANGE);
+	}
+
+	err = __mlxsw_sp_vfid_create(mlxsw_sp, n_vfid);
+	if (err) {
+		dev_err(dev, "Failed to create vFID=%d\n", n_vfid);
+		return ERR_PTR(err);
+	}
+
+	vfid = kzalloc(sizeof(*vfid), GFP_KERNEL);
+	if (!vfid)
+		goto err_allocate_vfid;
+
+	vfid->vfid = n_vfid;
+	vfid->br_dev = br_dev;
+
+	list_add(&vfid->list, &mlxsw_sp->br_vfids.list);
+	set_bit(mlxsw_sp_vfid_to_br_vfid(n_vfid), mlxsw_sp->br_vfids.mapped);
+
+	return vfid;
+
+err_allocate_vfid:
+	__mlxsw_sp_vfid_destroy(mlxsw_sp, n_vfid);
+	return ERR_PTR(-ENOMEM);
+}
+
+static void mlxsw_sp_br_vfid_destroy(struct mlxsw_sp *mlxsw_sp,
+				     struct mlxsw_sp_vfid *vfid)
+{
+	u16 br_vfid = mlxsw_sp_vfid_to_br_vfid(vfid->vfid);
+
+	clear_bit(br_vfid, mlxsw_sp->br_vfids.mapped);
+	list_del(&vfid->list);
+
+	__mlxsw_sp_vfid_destroy(mlxsw_sp, vfid->vfid);
+
+	kfree(vfid);
+}
+
+static int mlxsw_sp_vport_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_vport,
+				       struct net_device *br_dev)
+{
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
+	struct net_device *dev = mlxsw_sp_vport->dev;
+	struct mlxsw_sp_vfid *vfid, *new_vfid;
+	int err;
+
+	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
+	if (!vfid) {
+		WARN_ON(!vfid);
+		return -EINVAL;
+	}
+
+	/* We need a vFID to go back to after leaving the bridge's vFID. */
+	new_vfid = mlxsw_sp_vfid_find(mlxsw_sp, vid);
+	if (!new_vfid) {
+		new_vfid = mlxsw_sp_vfid_create(mlxsw_sp, vid);
+		if (IS_ERR(new_vfid)) {
+			netdev_err(dev, "Failed to create vFID for VID=%d\n",
+				   vid);
+			return PTR_ERR(new_vfid);
+		}
+	}
+
+	/* Invalidate existing {Port, VID} to vFID mapping and create a new
+	 * one for the new vFID.
+	 */
+	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
+					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+					   false,
+					   mlxsw_sp_vfid_to_fid(vfid->vfid),
+					   vid);
+	if (err) {
+		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
+			   vfid->vfid);
+		goto err_port_vid_to_fid_invalidate;
+	}
+
+	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
+					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+					   true,
+					   mlxsw_sp_vfid_to_fid(new_vfid->vfid),
+					   vid);
+	if (err) {
+		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
+			   new_vfid->vfid);
+		goto err_port_vid_to_fid_validate;
+	}
+
+	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
+	if (err) {
+		netdev_err(dev, "Failed to disable learning\n");
+		goto err_port_vid_learning_set;
+	}
+
+	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false,
+				       false);
+	if (err) {
+		netdev_err(dev, "Failed to clear flooding\n");
+		goto err_vport_flood_set;
+	}
+
+	/* Switch between the vFIDs and destroy the old one if needed. */
+	new_vfid->nr_vports++;
+	mlxsw_sp_vport->vport.vfid = new_vfid;
+	vfid->nr_vports--;
+	if (!vfid->nr_vports)
+		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
+
+	mlxsw_sp_vport->learning = 0;
+	mlxsw_sp_vport->learning_sync = 0;
+	mlxsw_sp_vport->uc_flood = 0;
+	mlxsw_sp_vport->bridged = 0;
+
+	return 0;
+
+err_vport_flood_set:
+err_port_vid_learning_set:
+err_port_vid_to_fid_validate:
+err_port_vid_to_fid_invalidate:
+	/* Rollback vFID only if new. */
+	if (!new_vfid->nr_vports)
+		mlxsw_sp_vfid_destroy(mlxsw_sp, new_vfid);
+	return err;
+}
+
+static int mlxsw_sp_vport_bridge_join(struct mlxsw_sp_port *mlxsw_sp_vport,
+				      struct net_device *br_dev)
+{
+	struct mlxsw_sp_vfid *old_vfid = mlxsw_sp_vport->vport.vfid;
+	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_vport->mlxsw_sp;
+	u16 vid = mlxsw_sp_vport_vid_get(mlxsw_sp_vport);
+	struct net_device *dev = mlxsw_sp_vport->dev;
+	struct mlxsw_sp_vfid *vfid;
+	int err;
+
+	vfid = mlxsw_sp_br_vfid_find(mlxsw_sp, br_dev);
+	if (!vfid) {
+		vfid = mlxsw_sp_br_vfid_create(mlxsw_sp, br_dev);
+		if (IS_ERR(vfid)) {
+			netdev_err(dev, "Failed to create bridge vFID\n");
+			return PTR_ERR(vfid);
+		}
+	}
+
+	err = mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, true, false);
+	if (err) {
+		netdev_err(dev, "Failed to setup flooding for vFID=%d\n",
+			   vfid->vfid);
+		goto err_port_flood_set;
+	}
+
+	err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
+	if (err) {
+		netdev_err(dev, "Failed to enable learning\n");
+		goto err_port_vid_learning_set;
+	}
+
+	/* We need to invalidate existing {Port, VID} to vFID mapping and
+	 * create a new one for the bridge's vFID.
+	 */
+	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
+					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+					   false,
+					   mlxsw_sp_vfid_to_fid(old_vfid->vfid),
+					   vid);
+	if (err) {
+		netdev_err(dev, "Failed to invalidate {Port, VID} to vFID=%d mapping\n",
+			   old_vfid->vfid);
+		goto err_port_vid_to_fid_invalidate;
+	}
+
+	err = mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
+					   MLXSW_REG_SVFA_MT_PORT_VID_TO_FID,
+					   true,
+					   mlxsw_sp_vfid_to_fid(vfid->vfid),
+					   vid);
+	if (err) {
+		netdev_err(dev, "Failed to map {Port, VID} to vFID=%d\n",
+			   vfid->vfid);
+		goto err_port_vid_to_fid_validate;
+	}
+
+	/* Switch between the vFIDs and destroy the old one if needed. */
+	vfid->nr_vports++;
+	mlxsw_sp_vport->vport.vfid = vfid;
+	old_vfid->nr_vports--;
+	if (!old_vfid->nr_vports)
+		mlxsw_sp_vfid_destroy(mlxsw_sp, old_vfid);
+
+	mlxsw_sp_vport->learning = 1;
+	mlxsw_sp_vport->learning_sync = 1;
+	mlxsw_sp_vport->uc_flood = 1;
+	mlxsw_sp_vport->bridged = 1;
+
+	return 0;
+
+err_port_vid_to_fid_validate:
+	mlxsw_sp_port_vid_to_fid_set(mlxsw_sp_vport,
+				     MLXSW_REG_SVFA_MT_PORT_VID_TO_FID, false,
+				     mlxsw_sp_vfid_to_fid(old_vfid->vfid), vid);
+err_port_vid_to_fid_invalidate:
+	mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
+err_port_vid_learning_set:
+	mlxsw_sp_vport_flood_set(mlxsw_sp_vport, vfid->vfid, false, false);
+err_port_flood_set:
+	if (!vfid->nr_vports)
+		mlxsw_sp_br_vfid_destroy(mlxsw_sp, vfid);
+	return err;
+}
+
+static bool
+mlxsw_sp_port_master_bridge_check(const struct mlxsw_sp_port *mlxsw_sp_port,
+				  const struct net_device *br_dev)
+{
+	struct mlxsw_sp_port *mlxsw_sp_vport;
+
+	list_for_each_entry(mlxsw_sp_vport, &mlxsw_sp_port->vports_list,
+			    vport.list) {
+		if (mlxsw_sp_vport_br_get(mlxsw_sp_vport) == br_dev)
+			return false;
+	}
+
+	return true;
+}
+
+static int mlxsw_sp_netdevice_vport_event(struct net_device *dev,
+					  unsigned long event, void *ptr,
+					  u16 vid)
+{
+	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+	struct netdev_notifier_changeupper_info *info = ptr;
+	struct mlxsw_sp_port *mlxsw_sp_vport;
+	struct net_device *upper_dev;
+	int err;
+
+	mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
+
+	switch (event) {
+	case NETDEV_PRECHANGEUPPER:
+		upper_dev = info->upper_dev;
+		if (!info->master || !info->linking)
+			break;
+		if (!netif_is_bridge_master(upper_dev))
+			return NOTIFY_BAD;
+		/* We can't have multiple VLAN interfaces configured on
+		 * the same port and being members in the same bridge.
+		 */
+		if (!mlxsw_sp_port_master_bridge_check(mlxsw_sp_port,
+						       upper_dev))
+			return NOTIFY_BAD;
+		break;
+	case NETDEV_CHANGEUPPER:
+		upper_dev = info->upper_dev;
+		if (!info->master)
+			break;
+		if (info->linking) {
+			if (!mlxsw_sp_vport) {
+				WARN_ON(!mlxsw_sp_vport);
+				return NOTIFY_BAD;
+			}
+			err = mlxsw_sp_vport_bridge_join(mlxsw_sp_vport,
+							 upper_dev);
+			if (err) {
+				netdev_err(dev, "Failed to join bridge\n");
+				return NOTIFY_BAD;
+			}
+		} else {
+			/* We ignore bridge's unlinking notifications if vPort
+			 * is gone, since we already left the bridge when the
+			 * VLAN device was unlinked from the real device.
+			 */
+			if (!mlxsw_sp_vport)
+				return NOTIFY_DONE;
+			err = mlxsw_sp_vport_bridge_leave(mlxsw_sp_vport,
+							  upper_dev);
+			if (err) {
+				netdev_err(dev, "Failed to leave bridge\n");
+				return NOTIFY_BAD;
+			}
+		}
+	}
+
+	return NOTIFY_DONE;
+}
+
+static int mlxsw_sp_netdevice_lag_vport_event(struct net_device *lag_dev,
+					      unsigned long event, void *ptr,
+					      u16 vid)
+{
+	struct net_device *dev;
+	struct list_head *iter;
+	int ret;
+
+	netdev_for_each_lower_dev(lag_dev, dev, iter) {
+		if (mlxsw_sp_port_dev_check(dev)) {
+			ret = mlxsw_sp_netdevice_vport_event(dev, event, ptr,
+							     vid);
+			if (ret == NOTIFY_BAD)
+				return ret;
+		}
+	}
+
+	return NOTIFY_DONE;
+}
+
+static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
+					 unsigned long event, void *ptr)
+{
+	struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
+	u16 vid = vlan_dev_vlan_id(vlan_dev);
+
+	if (mlxsw_sp_port_dev_check(real_dev))
+		return mlxsw_sp_netdevice_vport_event(real_dev, event, ptr,
+						      vid);
+	else if (netif_is_lag_master(real_dev))
+		return mlxsw_sp_netdevice_lag_vport_event(real_dev, event, ptr,
+							  vid);
+
+	return NOTIFY_DONE;
+}
+
+static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
+				    unsigned long event, void *ptr)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+	if (mlxsw_sp_port_dev_check(dev))
+		return mlxsw_sp_netdevice_port_event(dev, event, ptr);
+
+	if (netif_is_lag_master(dev))
+		return mlxsw_sp_netdevice_lag_event(dev, event, ptr);
+
+	if (is_vlan_dev(dev))
+		return mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
+
+	return NOTIFY_DONE;
+}
+
 static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
 	.notifier_call = mlxsw_sp_netdevice_event,
 };
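The most consequential structural change in the diff is the split vFID space: per-port vFIDs are allocated from a bitmap below MLXSW_SP_VFID_PORT_MAX, while bridge vFIDs occupy the range directly above it, with mlxsw_sp_vfid_to_br_vfid()/mlxsw_sp_br_vfid_to_vfid() translating between a bridge vFID's global number and its bitmap index. The sketch below models that allocation scheme as a standalone userspace C program; the range sizes are illustrative placeholders, not the driver's actual MLXSW_SP_VFID_PORT_MAX/MLXSW_SP_VFID_BR_MAX values, and the bitmap helper only mimics the kernel's find_first_zero_bit().

```c
/* Standalone model of the split vFID allocation in the diff above: one
 * bitmap range for per-port vFIDs and one for bridge vFIDs. Range sizes
 * are illustrative assumptions, not the driver's real values.
 */
#include <stdint.h>
#include <stdio.h>

#define VFID_PORT_MAX 64 /* assumed stand-in for MLXSW_SP_VFID_PORT_MAX */
#define VFID_BR_MAX   64 /* assumed stand-in for MLXSW_SP_VFID_BR_MAX */

static uint64_t port_vfids_mapped; /* bit i set => port vFID i in use */
static uint64_t br_vfids_mapped;   /* bit i set => bridge vFID i in use */

/* Mimics find_first_zero_bit(): lowest clear bit, or size if none. */
static unsigned int first_zero_bit(uint64_t map, unsigned int size)
{
	for (unsigned int i = 0; i < size; i++)
		if (!(map & ((uint64_t)1 << i)))
			return i;
	return size;
}

/* Bridge vFIDs live directly above the port vFID range, as in the diff. */
static uint16_t br_vfid_to_vfid(uint16_t br_vfid)
{
	return VFID_PORT_MAX + br_vfid;
}

static uint16_t vfid_to_br_vfid(uint16_t vfid)
{
	return vfid - VFID_PORT_MAX;
}

static int port_vfid_alloc(void)
{
	unsigned int n = first_zero_bit(port_vfids_mapped, VFID_PORT_MAX);

	if (n == VFID_PORT_MAX)
		return -1; /* the driver returns ERR_PTR(-ERANGE) here */
	port_vfids_mapped |= (uint64_t)1 << n;
	return (int)n;
}

static int br_vfid_alloc(void)
{
	unsigned int n = first_zero_bit(br_vfids_mapped, VFID_BR_MAX);

	if (n == VFID_BR_MAX)
		return -1;
	br_vfids_mapped |= (uint64_t)1 << n;
	return (int)br_vfid_to_vfid((uint16_t)n); /* global vFID number */
}

int main(void)
{
	int port_vfid = port_vfid_alloc();
	int br_vfid = br_vfid_alloc();

	/* First port vFID is 0; first bridge vFID sits above the port range. */
	printf("port vFID=%d, bridge vFID=%d (bridge bitmap index %u)\n",
	       port_vfid, br_vfid, vfid_to_br_vfid((uint16_t)br_vfid));
	return 0;
}
```

Keeping bridge vFIDs numbered above MLXSW_SP_VFID_PORT_MAX lets both kinds share one FID namespace in hardware (via mlxsw_sp_vfid_to_fid()) while each allocator keeps its own small bitmap, which is why the error paths in the diff can destroy a vFID of either kind with the same __mlxsw_sp_vfid_destroy() helper.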