author     Dima Chumak <dchumak@nvidia.com>        2023-08-25 09:28:36 +0300
committer  Jakub Kicinski <kuba@kernel.org>        2023-08-28 03:08:45 +0300
commit     b691b1116e820450de8a5d50eb4ce546a7de93dc (patch)
tree       4f4d8f7f4ee050ad280c74a389588a07fc6bd45d /drivers/net/ethernet/mellanox/mlx5/core/esw
parent     06bab69658a8afb493204448f29703e12e6d3960 (diff)
net/mlx5: Implement devlink port function cmds to control ipsec_packet
Implement devlink port function commands to enable / disable IPsec
packet offloads. This is used to control the IPsec capability of the
device.

When ipsec_offload is enabled for a VF, it prevents adding IPsec packet
offloads on the PF, because the two cannot be active simultaneously due
to HW constraints. Conversely, if there are any active IPsec packet
offloads on the PF, it's not allowed to enable ipsec_packet on a VF,
until PF IPsec offloads are cleared.

Signed-off-by: Dima Chumak <dchumak@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Link: https://lore.kernel.org/r/20230825062836.103744-9-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
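The mutual-exclusion policy described above can be condensed into a small
standalone C model. Everything below (struct model_esw and the model_*
functions) is naming invented for this sketch, not the driver's API; it
only encodes the policy from the commit message, not the mlx5 hardware
interaction.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model of the PF/VF exclusion rule from the commit message. It is
 * NOT the mlx5 implementation; it only encodes the policy:
 *   - ipsec_packet cannot be enabled on a VF while the PF has active
 *     IPsec packet offloads, and
 *   - the PF cannot add IPsec packet offloads while any VF still has
 *     ipsec_packet enabled.
 */
struct model_esw {
        bool pf_ipsec_active;           /* PF has active IPsec packet offloads */
        int vfs_packet_enabled;         /* VFs with ipsec_packet enabled */
};

static int model_vf_ipsec_packet_set(struct model_esw *esw, bool enable)
{
        if (!enable) {
                if (esw->vfs_packet_enabled > 0)
                        esw->vfs_packet_enabled--;
                return 0;
        }
        if (esw->pf_ipsec_active)
                return -EBUSY;          /* clear PF IPsec offloads first */
        esw->vfs_packet_enabled++;
        return 0;
}

static int model_pf_add_ipsec_offload(struct model_esw *esw)
{
        if (esw->vfs_packet_enabled > 0)
                return -EBUSY;          /* a VF holds IPsec packet offload */
        esw->pf_ipsec_active = true;
        return 0;
}

int main(void)
{
        struct model_esw esw = { .pf_ipsec_active = true };

        /* -EBUSY: PF offloads are still active */
        printf("VF enable: %d\n", model_vf_ipsec_packet_set(&esw, true));
        esw.pf_ipsec_active = false;
        /* 0: PF offloads cleared, the VF may enable ipsec_packet */
        printf("VF enable: %d\n", model_vf_ipsec_packet_set(&esw, true));
        /* -EBUSY: a VF now holds ipsec_packet */
        printf("PF offload: %d\n", model_pf_add_ipsec_offload(&esw));
        return 0;
}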
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/esw')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c |  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c        | 62
2 files changed, 63 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index 12205e913417..d8e739cbcbce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -95,6 +95,8 @@ static const struct devlink_port_ops mlx5_esw_pf_vf_dl_port_ops = {
#ifdef CONFIG_XFRM_OFFLOAD
.port_fn_ipsec_crypto_get = mlx5_devlink_port_fn_ipsec_crypto_get,
.port_fn_ipsec_crypto_set = mlx5_devlink_port_fn_ipsec_crypto_set,
+ .port_fn_ipsec_packet_get = mlx5_devlink_port_fn_ipsec_packet_get,
+ .port_fn_ipsec_packet_set = mlx5_devlink_port_fn_ipsec_packet_set,
#endif /* CONFIG_XFRM_OFFLOAD */
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c
index 187fb5f2d0cb..da10e04777cf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c
@@ -37,6 +37,7 @@ free:
enum esw_vport_ipsec_offload {
MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD,
+ MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD,
};
int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev, struct mlx5_vport *vport)
@@ -55,6 +56,7 @@ int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev, struct mlx5_vport *
if (!ipsec_enabled) {
vport->info.ipsec_crypto_enabled = false;
+ vport->info.ipsec_packet_enabled = false;
return 0;
}
@@ -69,6 +71,8 @@ int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev, struct mlx5_vport *
hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
vport->info.ipsec_crypto_enabled =
MLX5_GET(ipsec_cap, hca_cap, ipsec_crypto_offload);
+ vport->info.ipsec_packet_enabled =
+ MLX5_GET(ipsec_cap, hca_cap, ipsec_full_offload);
free:
kvfree(query_cap);
return err;
@@ -143,6 +147,9 @@ static int esw_ipsec_vf_set_bytype(struct mlx5_core_dev *dev, struct mlx5_vport
case MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD:
MLX5_SET(ipsec_cap, cap, ipsec_crypto_offload, enable);
break;
+ case MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD:
+ MLX5_SET(ipsec_cap, cap, ipsec_full_offload, enable);
+ break;
default:
ret = -EOPNOTSUPP;
goto free;
@@ -222,15 +229,28 @@ static int esw_ipsec_vf_offload_set_bytype(struct mlx5_eswitch *esw, struct mlx5
err = esw_ipsec_vf_set_bytype(dev, vport, enable, type);
if (err)
return err;
- err = esw_ipsec_vf_set_generic(dev, vport->vport, enable);
+ err = mlx5_esw_ipsec_vf_offload_get(dev, vport);
if (err)
return err;
+
+ /* The generic ipsec_offload cap can be disabled only if both
+ * ipsec_crypto_offload and ipsec_full_offload aren't enabled.
+ */
+ if (!vport->info.ipsec_crypto_enabled &&
+ !vport->info.ipsec_packet_enabled) {
+ err = esw_ipsec_vf_set_generic(dev, vport->vport, enable);
+ if (err)
+ return err;
+ }
}
switch (type) {
case MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD:
vport->info.ipsec_crypto_enabled = enable;
break;
+ case MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD:
+ vport->info.ipsec_packet_enabled = enable;
+ break;
default:
return -EINVAL;
}
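The comment added in the hunk above captures the interplay between the
generic ipsec_offload HCA capability and the two specific offload types.
A minimal, self-contained C sketch of that rule follows; the model_* names
are hypothetical and the sketch mirrors only the ordering visible above
(the generic capability is granted before a specific offload is enabled,
and dropped only once both specific offloads are disabled), not the actual
FW command flow.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative model of the rule from the comment above, not mlx5 code:
 * the generic ipsec_offload capability must be set while either specific
 * offload is enabled, and may be cleared only when both are disabled.
 */
struct model_vport {
        bool generic_cap;               /* generic ipsec_offload cap */
        bool crypto_enabled;            /* ipsec_crypto_offload */
        bool packet_enabled;            /* ipsec_full_offload (packet) */
};

enum model_offload { MODEL_CRYPTO, MODEL_PACKET };

static int model_offload_set(struct model_vport *v, enum model_offload type,
                             bool enable)
{
        if (enable)
                v->generic_cap = true;  /* generic cap is granted first */

        switch (type) {
        case MODEL_CRYPTO:
                v->crypto_enabled = enable;
                break;
        case MODEL_PACKET:
                v->packet_enabled = enable;
                break;
        default:
                return -EINVAL;
        }

        /* Drop the generic cap only when both specific offloads are off. */
        if (!v->crypto_enabled && !v->packet_enabled)
                v->generic_cap = false;

        return 0;
}

int main(void)
{
        struct model_vport v = { 0 };

        model_offload_set(&v, MODEL_CRYPTO, true);
        model_offload_set(&v, MODEL_PACKET, true);
        model_offload_set(&v, MODEL_CRYPTO, false);
        printf("generic=%d (packet still enabled)\n", v.generic_cap);
        model_offload_set(&v, MODEL_PACKET, false);
        printf("generic=%d (both disabled)\n", v.generic_cap);
        return 0;
}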
@@ -301,9 +321,49 @@ free:
return err;
}
+int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
+ u16 vport_num)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *hca_cap, *query_cap;
+ int ret;
+
+ if (!mlx5_esw_ipsec_vf_offload_supported(dev))
+ return -EOPNOTSUPP;
+
+ ret = esw_ipsec_offload_supported(dev, vport_num);
+ if (ret)
+ return ret;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ if (!query_cap)
+ return -ENOMEM;
+
+ ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_FLOW_TABLE);
+ if (ret)
+ goto out;
+
+ hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ if (!MLX5_GET(flow_table_nic_cap, hca_cap, flow_table_properties_nic_receive.decap)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+out:
+ kvfree(query_cap);
+ return ret;
+}
+
int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
bool enable)
{
return esw_ipsec_vf_offload_set_bytype(esw, vport, enable,
MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD);
}
+
+int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ bool enable)
+{
+ return esw_ipsec_vf_offload_set_bytype(esw, vport, enable,
+ MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD);
+}
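For reference, mlx5_esw_ipsec_vf_packet_offload_supported() above gates VF
packet offload on three checks: generic VF IPsec offload support, the
per-vport esw_ipsec_offload_supported() check, and the decap bit in the
NIC receive flow table properties (packet offload presumably needs the RX
flow tables to decapsulate IPsec traffic). The following is a compact,
self-contained restatement of that gating with hypothetical model_* names;
it is a sketch of the logic, not the driver's implementation.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Illustrative restatement of the gating in
 * mlx5_esw_ipsec_vf_packet_offload_supported() above: packet offload is
 * only offered when all three conditions hold. Model only, not mlx5 code.
 */
struct model_caps {
        bool vf_ipsec_offload;          /* mlx5_esw_ipsec_vf_offload_supported() */
        bool vport_esw_ipsec;           /* esw_ipsec_offload_supported() */
        bool nic_rx_decap;              /* flow_table_properties_nic_receive.decap */
};

static int model_packet_offload_supported(const struct model_caps *caps)
{
        if (!caps->vf_ipsec_offload)
                return -EOPNOTSUPP;
        if (!caps->vport_esw_ipsec)
                return -EOPNOTSUPP;
        if (!caps->nic_rx_decap)
                return -EOPNOTSUPP;
        return 0;
}

int main(void)
{
        struct model_caps caps = {
                .vf_ipsec_offload = true,
                .vport_esw_ipsec = true,
                .nic_rx_decap = false,
        };

        /* -EOPNOTSUPP: RX decap capability missing */
        printf("supported: %d\n", model_packet_offload_supported(&caps));
        caps.nic_rx_decap = true;
        /* 0: all three gates pass */
        printf("supported: %d\n", model_packet_offload_supported(&caps));
        return 0;
}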