author    Liu, Changcheng <jerrliu@nvidia.com>    2022-09-08 02:36:26 +0300
committer Saeed Mahameed <saeedm@nvidia.com>    2022-09-27 22:50:27 +0300
commit    a83bb5df2ac604ab418fbe0a8720f55de46652eb
tree      33308b93f47884b42dc1961cd1a08e2a6795ee82 /include/linux/mlx5/driver.h
parent    8d1ac895fff96a228db20db92243e93687659ef7
RDMA/mlx5: Don't set tx affinity when lag is in hash mode
In hash mode, without setting tx affinity explicitly, the port select
flow table decides which port is used for the traffic. If the
port_select_flow_table_bypass capability is supported and tx affinity
is set explicitly for a QP/TIS, it will be added into the explicit
affinity table in FW to check which port is used for the traffic.

1. An overloaded explicit affinity table may affect performance. To
   avoid this, do not set tx affinity explicitly by default.

2. Packets of the same flow need to be transmitted on the same port.
   Because packets of the same flow use different QPs in the slow and
   fast paths, tx affinity shouldn't be set explicitly for these QPs.

Signed-off-by: Liu, Changcheng <jerrliu@nvidia.com>
Reviewed-by: Mark Bloch <mbloch@nvidia.com>
Reviewed-by: Vlad Buslov <vladbu@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Diffstat (limited to 'include/linux/mlx5/driver.h')
-rw-r--r--  include/linux/mlx5/driver.h  |  1 +
1 file changed, 1 insertion(+), 0 deletions(-)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 481506376344..9114caceb2b5 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1153,6 +1153,7 @@ int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev);
bool mlx5_lag_is_roce(struct mlx5_core_dev *dev);
bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev);
bool mlx5_lag_is_active(struct mlx5_core_dev *dev);
+bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev);
bool mlx5_lag_is_master(struct mlx5_core_dev *dev);
bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev);
struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
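The hunk above only adds the declaration of mlx5_lag_mode_is_hash(). A
minimal sketch of how a caller might consult it to decide whether to
program explicit tx affinity, per the commit message's rationale; the
helper qp_needs_tx_affinity() is hypothetical and not part of this patch:

/* Hypothetical caller sketch -- not from this commit. */
#include <linux/mlx5/driver.h>

static bool qp_needs_tx_affinity(struct mlx5_core_dev *dev)
{
	/* In hash mode the port select flow table already steers the
	 * traffic; programming explicit affinity would populate the FW
	 * explicit affinity table and may hurt performance, so skip it.
	 */
	if (mlx5_lag_mode_is_hash(dev))
		return false;

	/* Otherwise, tx affinity only makes sense while LAG is active. */
	return mlx5_lag_is_active(dev);
}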