Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c')
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 141
1 file changed, 97 insertions(+), 44 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 5d331b940f4d..f0a074b2fcdf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -512,8 +512,11 @@ static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
return;
if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) &&
- tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH)
+ tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) {
+ if (ldev->ports > 2)
+ ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;
set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);
+ }
}
static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
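A note on the bucket change above: with more than two ports, hash-based port selection spreads each port's traffic across several hash buckets, so a failover only has to rewrite the failed port's buckets and flows on the surviving ports keep their affinity. A minimal standalone sketch of the idea (PORTS, BUCKETS and the affinity layout are illustrative assumptions, with BUCKETS standing in for MLX5_LAG_MAX_HASH_BUCKETS):

#include <stdint.h>

#define PORTS   4
#define BUCKETS 16	/* assumed stand-in for MLX5_LAG_MAX_HASH_BUCKETS */

/* One slot per (port, bucket) pair; each slot names the active egress port. */
static uint8_t tx_affinity[PORTS * BUCKETS];

/* Illustration only, not from this patch: a flow hash picks a slot, the
 * slot picks the port.  On failover only the failed port's slots are
 * rewritten, so flows mapped to surviving ports keep their affinity. */
static uint8_t pick_port(uint32_t flow_hash)
{
	return tx_affinity[flow_hash % (PORTS * BUCKETS)];
}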
@@ -550,6 +553,29 @@ char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
}
}
+static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev)
+{
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
+ int err;
+ int i;
+
+ for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
+ struct mlx5_eswitch *slave_esw = ldev->pf[i].dev->priv.eswitch;
+
+ err = mlx5_eswitch_offloads_single_fdb_add_one(master_esw,
+ slave_esw, ldev->ports);
+ if (err)
+ goto err;
+ }
+ return 0;
+err:
+ for (; i > MLX5_LAG_P1; i--)
+ mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
+ ldev->pf[i].dev->priv.eswitch);
+ return err;
+}
+
static int mlx5_create_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker,
enum mlx5_lag_mode mode,
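The new mlx5_lag_create_single_fdb() helper above replaces the fixed dev0/dev1 pairing with a loop over every secondary port, pairing each secondary eswitch with the master and unwinding on failure. The shape of that add-with-unwind idiom, with hypothetical attach()/detach() helpers standing in for the eswitch calls:

/* Sketch only: attach() and detach() are hypothetical stand-ins. */
static int attach_all(struct mlx5_lag *ldev)
{
	int err;
	int i;

	for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
		err = attach(ldev, i);
		if (err)
			goto err;
	}
	return 0;
err:
	/* Walk back over the visited ports; as in the helper above, the
	 * index that failed is passed to the teardown call too, so
	 * detach() is assumed to tolerate a partially attached port. */
	for (; i > MLX5_LAG_P1; i--)
		detach(ldev, i);
	return err;
}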
@@ -557,7 +583,6 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
{
bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
int err;
@@ -575,8 +600,7 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
}
if (shared_fdb) {
- err = mlx5_eswitch_offloads_config_single_fdb(dev0->priv.eswitch,
- dev1->priv.eswitch);
+ err = mlx5_lag_create_single_fdb(ldev);
if (err)
mlx5_core_err(dev0, "Can't enable single FDB mode\n");
else
@@ -647,19 +671,21 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
+ struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
bool roce_lag = __mlx5_lag_is_roce(ldev);
unsigned long flags = ldev->mode_flags;
int err;
+ int i;
ldev->mode = MLX5_LAG_MODE_NONE;
ldev->mode_flags = 0;
mlx5_lag_mp_reset(ldev);
if (test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags)) {
- mlx5_eswitch_offloads_destroy_single_fdb(dev0->priv.eswitch,
- dev1->priv.eswitch);
+ for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++)
+ mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
+ ldev->pf[i].dev->priv.eswitch);
clear_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
}
@@ -685,7 +711,7 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev)
return 0;
}
-#define MLX5_LAG_OFFLOADS_SUPPORTED_PORTS 2
+#define MLX5_LAG_OFFLOADS_SUPPORTED_PORTS 4
bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
#ifdef CONFIG_MLX5_ESWITCH
@@ -711,7 +737,7 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode)
return false;
- if (mode == MLX5_ESWITCH_OFFLOADS && ldev->ports != MLX5_LAG_OFFLOADS_SUPPORTED_PORTS)
+ if (mode == MLX5_ESWITCH_OFFLOADS && ldev->ports > MLX5_LAG_OFFLOADS_SUPPORTED_PORTS)
return false;
#else
for (i = 0; i < ldev->ports; i++)
@@ -759,7 +785,6 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
{
bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
bool roce_lag;
int err;
int i;
@@ -784,28 +809,35 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
if (shared_fdb || roce_lag)
mlx5_lag_add_devices(ldev);
- if (shared_fdb) {
- if (!(dev0->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
- mlx5_eswitch_reload_reps(dev0->priv.eswitch);
- if (!(dev1->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
- mlx5_eswitch_reload_reps(dev1->priv.eswitch);
- }
+ if (shared_fdb)
+ for (i = 0; i < ldev->ports; i++)
+ if (!(ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
+ mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
}
-bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
+static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
-
- if (is_mdev_switchdev_mode(dev0) &&
- is_mdev_switchdev_mode(dev1) &&
- mlx5_eswitch_vport_match_metadata_enabled(dev0->priv.eswitch) &&
- mlx5_eswitch_vport_match_metadata_enabled(dev1->priv.eswitch) &&
- mlx5_devcom_is_paired(dev0->priv.devcom,
- MLX5_DEVCOM_ESW_OFFLOADS) &&
- MLX5_CAP_GEN(dev1, lag_native_fdb_selection) &&
- MLX5_CAP_ESW(dev1, root_ft_on_other_esw) &&
- MLX5_CAP_ESW(dev0, esw_shared_ingress_acl))
+ struct mlx5_core_dev *dev;
+ int i;
+
+ for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
+ dev = ldev->pf[i].dev;
+ if (is_mdev_switchdev_mode(dev) &&
+ mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
+ MLX5_CAP_GEN(dev, lag_native_fdb_selection) &&
+ MLX5_CAP_ESW(dev, root_ft_on_other_esw) &&
+ mlx5_eswitch_get_npeers(dev->priv.eswitch) ==
+ MLX5_CAP_GEN(dev, num_lag_ports) - 1)
+ continue;
+ return false;
+ }
+
+ dev = ldev->pf[MLX5_LAG_P1].dev;
+ if (is_mdev_switchdev_mode(dev) &&
+ mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
+ mlx5_devcom_comp_is_ready(dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS) &&
+ MLX5_CAP_ESW(dev, esw_shared_ingress_acl) &&
+ mlx5_eswitch_get_npeers(dev->priv.eswitch) == MLX5_CAP_GEN(dev, num_lag_ports) - 1)
return true;
return false;
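In the rewritten mlx5_shared_fdb_supported(), every device, primary and secondary alike, must be in switchdev mode, have vport match metadata enabled, and see exactly num_lag_ports - 1 eswitch peers; the secondaries additionally need the native FDB selection and root-FT-on-other-eswitch capabilities, while the primary needs the shared ingress ACL capability and a ready ESW offloads devcom component. The per-device conditions shared by both branches could be factored into one predicate; a hedged refactoring sketch, not part of this patch:

/* Conditions required of every device in the bond (sketch only). */
static bool mlx5_lag_dev_shared_fdb_ok(struct mlx5_core_dev *dev)
{
	return is_mdev_switchdev_mode(dev) &&
	       mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
	       mlx5_eswitch_get_npeers(dev->priv.eswitch) ==
	       MLX5_CAP_GEN(dev, num_lag_ports) - 1;
}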
@@ -842,7 +874,6 @@ static bool mlx5_lag_should_disable_lag(struct mlx5_lag *ldev, bool do_bond)
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
struct lag_tracker tracker = { };
bool do_bond, roce_lag;
int err;
@@ -883,20 +914,24 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
for (i = 1; i < ldev->ports; i++)
mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
} else if (shared_fdb) {
+ int i;
+
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- err = mlx5_eswitch_reload_reps(dev0->priv.eswitch);
- if (!err)
- err = mlx5_eswitch_reload_reps(dev1->priv.eswitch);
+ for (i = 0; i < ldev->ports; i++) {
+ err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ if (err)
+ break;
+ }
if (err) {
dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
mlx5_deactivate_lag(ldev);
mlx5_lag_add_devices(ldev);
- mlx5_eswitch_reload_reps(dev0->priv.eswitch);
- mlx5_eswitch_reload_reps(dev1->priv.eswitch);
+ for (i = 0; i < ldev->ports; i++)
+ mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_core_err(dev0, "Failed to enable lag\n");
return;
}
@@ -1233,14 +1268,21 @@ recheck:
mlx5_ldev_put(ldev);
}
+bool mlx5_lag_is_supported(struct mlx5_core_dev *dev)
+{
+ if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+ !MLX5_CAP_GEN(dev, lag_master) ||
+ MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
+ MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
+ return false;
+ return true;
+}
+
void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
{
int err;
- if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
- !MLX5_CAP_GEN(dev, lag_master) ||
- (MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS ||
- MLX5_CAP_GEN(dev, num_lag_ports) <= 1))
+ if (!mlx5_lag_is_supported(dev))
return;
recheck:
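The new mlx5_lag_is_supported() helper gives the capability test a name other call sites can reuse; note that the old num_lag_ports <= 1 and the new num_lag_ports < 2 are the same condition, just rewritten to mirror the upper bound. A hypothetical caller (init_lag_feature() and setup_lag_feature() are made-up names):

/* Hypothetical: gate LAG-dependent setup on the new helper. */
static int init_lag_feature(struct mlx5_core_dev *dev)
{
	if (!mlx5_lag_is_supported(dev))
		return 0;	/* no LAG: missing caps or unusable port count */

	return setup_lag_feature(dev);
}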
@@ -1496,26 +1538,37 @@ u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev)
}
EXPORT_SYMBOL(mlx5_lag_get_num_ports);
-struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
+struct mlx5_core_dev *mlx5_lag_get_next_peer_mdev(struct mlx5_core_dev *dev, int *i)
{
struct mlx5_core_dev *peer_dev = NULL;
struct mlx5_lag *ldev;
unsigned long flags;
+ int idx;
spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
if (!ldev)
goto unlock;
- peer_dev = ldev->pf[MLX5_LAG_P1].dev == dev ?
- ldev->pf[MLX5_LAG_P2].dev :
- ldev->pf[MLX5_LAG_P1].dev;
+ if (*i == ldev->ports)
+ goto unlock;
+ for (idx = *i; idx < ldev->ports; idx++)
+ if (ldev->pf[idx].dev != dev)
+ break;
+
+ if (idx == ldev->ports) {
+ *i = idx;
+ goto unlock;
+ }
+ *i = idx + 1;
+
+ peer_dev = ldev->pf[idx].dev;
unlock:
spin_unlock_irqrestore(&lag_lock, flags);
return peer_dev;
}
-EXPORT_SYMBOL(mlx5_lag_get_peer_mdev);
+EXPORT_SYMBOL(mlx5_lag_get_next_peer_mdev);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
u64 *values,
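With mlx5_lag_get_peer_mdev() replaced by the cursor-based mlx5_lag_get_next_peer_mdev() above, callers iterate over all peers instead of assuming a single one: start with the cursor at 0 and call the getter until it returns NULL. A sketch of the expected call pattern; the wrapper macro and handle_peer() are assumptions, not shown in this diff:

/* Hypothetical iteration helper built on the new getter. */
#define mlx5_lag_for_each_peer_mdev(dev, peer, i)			\
	for (i = 0, peer = mlx5_lag_get_next_peer_mdev(dev, &i);	\
	     peer;							\
	     peer = mlx5_lag_get_next_peer_mdev(dev, &i))

static void visit_peers(struct mlx5_core_dev *dev)
{
	struct mlx5_core_dev *peer;
	int i;

	mlx5_lag_for_each_peer_mdev(dev, peer, i)
		handle_peer(peer);	/* hypothetical per-peer work */
}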