Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/esw')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c          |  22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_debugfs.c  |   2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c    |  17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c    | 197
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c           | 369
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c        | 325
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h        |  67
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c             |   6
8 files changed, 875 insertions, 130 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index f4fe1daa4afd..e36294b7ade2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -652,30 +652,30 @@ mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
struct mlx5_esw_bridge *bridge)
{
- struct mlx5_devcom *devcom = bridge->br_offloads->esw->dev->priv.devcom;
+ struct mlx5_devcom_comp_dev *devcom = bridge->br_offloads->esw->devcom, *pos;
struct mlx5_eswitch *tmp, *peer_esw = NULL;
static struct mlx5_flow_handle *handle;
- int i;
- if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+ if (!mlx5_devcom_for_each_peer_begin(devcom))
return ERR_PTR(-ENODEV);
- mlx5_devcom_for_each_peer_entry(devcom,
- MLX5_DEVCOM_ESW_OFFLOADS,
- tmp, i) {
+ mlx5_devcom_for_each_peer_entry(devcom, tmp, pos) {
if (mlx5_esw_is_owner(tmp, vport_num, esw_owner_vhca_id)) {
peer_esw = tmp;
break;
}
}
+
if (!peer_esw) {
- mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
- return ERR_PTR(-ENODEV);
+ handle = ERR_PTR(-ENODEV);
+ goto out;
}
handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
bridge, peer_esw);
- mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+
+out:
+ mlx5_devcom_for_each_peer_end(devcom);
return handle;
}
@@ -1391,8 +1391,8 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_ow
mlx5_fc_id(counter), bridge);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
- esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d)\n",
- vport_num, err);
+ esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d,peer=%d)\n",
+ vport_num, err, peer);
goto err_ingress_flow_create;
}
entry->ingress_handle = handle;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_debugfs.c
index b6a45eff28f5..dbd7cbe6cbf3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_debugfs.c
@@ -64,7 +64,7 @@ void mlx5_esw_bridge_debugfs_init(struct net_device *br_netdev, struct mlx5_esw_
bridge->debugfs_dir = debugfs_create_dir(br_netdev->name,
bridge->br_offloads->debugfs_root);
- debugfs_create_file("fdb", 0444, bridge->debugfs_dir, bridge,
+ debugfs_create_file("fdb", 0400, bridge->debugfs_dir, bridge,
&mlx5_esw_bridge_debugfs_fops);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
index 2455f8b93c1e..7a01714b3780 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
@@ -539,30 +539,29 @@ mlx5_esw_bridge_mcast_filter_flow_create(struct mlx5_esw_bridge_port *port)
static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_filter_flow_peer_create(struct mlx5_esw_bridge_port *port)
{
- struct mlx5_devcom *devcom = port->bridge->br_offloads->esw->dev->priv.devcom;
+ struct mlx5_devcom_comp_dev *devcom = port->bridge->br_offloads->esw->devcom, *pos;
struct mlx5_eswitch *tmp, *peer_esw = NULL;
static struct mlx5_flow_handle *handle;
- int i;
- if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+ if (!mlx5_devcom_for_each_peer_begin(devcom))
return ERR_PTR(-ENODEV);
- mlx5_devcom_for_each_peer_entry(devcom,
- MLX5_DEVCOM_ESW_OFFLOADS,
- tmp, i) {
+ mlx5_devcom_for_each_peer_entry(devcom, tmp, pos) {
if (mlx5_esw_is_owner(tmp, port->vport_num, port->esw_owner_vhca_id)) {
peer_esw = tmp;
break;
}
}
+
if (!peer_esw) {
- mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
- return ERR_PTR(-ENODEV);
+ handle = ERR_PTR(-ENODEV);
+ goto out;
}
handle = mlx5_esw_bridge_mcast_flow_with_esw_create(port, peer_esw);
- mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+out:
+ mlx5_devcom_for_each_peer_end(devcom);
return handle;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index af779c700278..d8e739cbcbce 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -16,39 +16,28 @@ mlx5_esw_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_i
static bool mlx5_esw_devlink_port_supported(struct mlx5_eswitch *esw, u16 vport_num)
{
- return vport_num == MLX5_VPORT_UPLINK ||
- (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF) ||
+ return (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF) ||
mlx5_eswitch_is_vf_vport(esw, vport_num) ||
mlx5_core_is_ec_vf_vport(esw->dev, vport_num);
}
-static struct devlink_port *mlx5_esw_dl_port_alloc(struct mlx5_eswitch *esw, u16 vport_num)
+static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *esw,
+ u16 vport_num,
+ struct devlink_port *dl_port)
{
struct mlx5_core_dev *dev = esw->dev;
- struct devlink_port_attrs attrs = {};
struct netdev_phys_item_id ppid = {};
- struct devlink_port *dl_port;
u32 controller_num = 0;
bool external;
u16 pfnum;
- dl_port = kzalloc(sizeof(*dl_port), GFP_KERNEL);
- if (!dl_port)
- return NULL;
-
mlx5_esw_get_port_parent_id(dev, &ppid);
pfnum = mlx5_get_dev_index(dev);
external = mlx5_core_is_ecpf_esw_manager(dev);
if (external)
controller_num = dev->priv.eswitch->offloads.host_number + 1;
- if (vport_num == MLX5_VPORT_UPLINK) {
- attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
- attrs.phys.port_number = pfnum;
- memcpy(attrs.switch_id.id, ppid.id, ppid.id_len);
- attrs.switch_id.id_len = ppid.id_len;
- devlink_port_attrs_set(dl_port, &attrs);
- } else if (vport_num == MLX5_VPORT_PF) {
+ if (vport_num == MLX5_VPORT_PF) {
memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
devlink_port_attrs_pci_pf_set(dl_port, controller_num, pfnum, external);
@@ -60,94 +49,86 @@ static struct devlink_port *mlx5_esw_dl_port_alloc(struct mlx5_eswitch *esw, u16
} else if (mlx5_core_is_ec_vf_vport(esw->dev, vport_num)) {
memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
- devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum,
+ devlink_port_attrs_pci_vf_set(dl_port, 0, pfnum,
vport_num - 1, false);
}
- return dl_port;
}
-static void mlx5_esw_dl_port_free(struct devlink_port *dl_port)
+int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- kfree(dl_port);
-}
-
-static const struct devlink_port_ops mlx5_esw_dl_port_ops = {
- .port_fn_hw_addr_get = mlx5_devlink_port_fn_hw_addr_get,
- .port_fn_hw_addr_set = mlx5_devlink_port_fn_hw_addr_set,
- .port_fn_roce_get = mlx5_devlink_port_fn_roce_get,
- .port_fn_roce_set = mlx5_devlink_port_fn_roce_set,
- .port_fn_migratable_get = mlx5_devlink_port_fn_migratable_get,
- .port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set,
-};
-
-int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num)
-{
- struct mlx5_core_dev *dev = esw->dev;
- struct devlink_port *dl_port;
- unsigned int dl_port_index;
- struct mlx5_vport *vport;
- struct devlink *devlink;
- int err;
+ struct mlx5_devlink_port *dl_port;
+ u16 vport_num = vport->vport;
if (!mlx5_esw_devlink_port_supported(esw, vport_num))
return 0;
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
- return PTR_ERR(vport);
-
- dl_port = mlx5_esw_dl_port_alloc(esw, vport_num);
+ dl_port = kzalloc(sizeof(*dl_port), GFP_KERNEL);
if (!dl_port)
return -ENOMEM;
- devlink = priv_to_devlink(dev);
- dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
- err = devl_port_register_with_ops(devlink, dl_port, dl_port_index,
- &mlx5_esw_dl_port_ops);
- if (err)
- goto reg_err;
-
- err = devl_rate_leaf_create(dl_port, vport, NULL);
- if (err)
- goto rate_err;
+ mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(esw, vport_num,
+ &dl_port->dl_port);
vport->dl_port = dl_port;
+ mlx5_devlink_port_init(dl_port, vport);
return 0;
-
-rate_err:
- devl_port_unregister(dl_port);
-reg_err:
- mlx5_esw_dl_port_free(dl_port);
- return err;
}
-void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw,
+ struct mlx5_vport *vport)
{
- struct mlx5_vport *vport;
-
- if (!mlx5_esw_devlink_port_supported(esw, vport_num))
+ if (!vport->dl_port)
return;
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
- return;
+ kfree(vport->dl_port);
+ vport->dl_port = NULL;
+}
- if (vport->dl_port->devlink_rate) {
- mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL);
- devl_rate_leaf_destroy(vport->dl_port);
- }
+static const struct devlink_port_ops mlx5_esw_pf_vf_dl_port_ops = {
+ .port_fn_hw_addr_get = mlx5_devlink_port_fn_hw_addr_get,
+ .port_fn_hw_addr_set = mlx5_devlink_port_fn_hw_addr_set,
+ .port_fn_roce_get = mlx5_devlink_port_fn_roce_get,
+ .port_fn_roce_set = mlx5_devlink_port_fn_roce_set,
+ .port_fn_migratable_get = mlx5_devlink_port_fn_migratable_get,
+ .port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set,
+#ifdef CONFIG_XFRM_OFFLOAD
+ .port_fn_ipsec_crypto_get = mlx5_devlink_port_fn_ipsec_crypto_get,
+ .port_fn_ipsec_crypto_set = mlx5_devlink_port_fn_ipsec_crypto_set,
+ .port_fn_ipsec_packet_get = mlx5_devlink_port_fn_ipsec_packet_get,
+ .port_fn_ipsec_packet_set = mlx5_devlink_port_fn_ipsec_packet_set,
+#endif /* CONFIG_XFRM_OFFLOAD */
+};
- devl_port_unregister(vport->dl_port);
- mlx5_esw_dl_port_free(vport->dl_port);
- vport->dl_port = NULL;
+static void mlx5_esw_offloads_sf_devlink_port_attrs_set(struct mlx5_eswitch *esw,
+ struct devlink_port *dl_port,
+ u32 controller, u32 sfnum)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ struct netdev_phys_item_id ppid = {};
+ u16 pfnum;
+
+ pfnum = mlx5_get_dev_index(dev);
+ mlx5_esw_get_port_parent_id(dev, &ppid);
+ memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
+ dl_port->attrs.switch_id.id_len = ppid.id_len;
+ devlink_port_attrs_pci_sf_set(dl_port, controller, pfnum, sfnum, !!controller);
}
-struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num)
+int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ struct mlx5_devlink_port *dl_port,
+ u32 controller, u32 sfnum)
{
- struct mlx5_vport *vport;
+ mlx5_esw_offloads_sf_devlink_port_attrs_set(esw, &dl_port->dl_port, controller, sfnum);
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- return IS_ERR(vport) ? ERR_CAST(vport) : vport->dl_port;
+ vport->dl_port = dl_port;
+ mlx5_devlink_port_init(dl_port, vport);
+ return 0;
+}
+
+void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
+{
+ vport->dl_port = NULL;
}
static const struct devlink_port_ops mlx5_esw_dl_sf_port_ops = {
@@ -164,58 +145,62 @@ static const struct devlink_port_ops mlx5_esw_dl_sf_port_ops = {
#endif
};
-int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
- u16 vport_num, u32 controller, u32 sfnum)
+int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
struct mlx5_core_dev *dev = esw->dev;
- struct netdev_phys_item_id ppid = {};
+ const struct devlink_port_ops *ops;
+ struct mlx5_devlink_port *dl_port;
+ u16 vport_num = vport->vport;
unsigned int dl_port_index;
- struct mlx5_vport *vport;
struct devlink *devlink;
- u16 pfnum;
int err;
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
- return PTR_ERR(vport);
+ dl_port = vport->dl_port;
+ if (!dl_port)
+ return 0;
+
+ if (mlx5_esw_is_sf_vport(esw, vport_num))
+ ops = &mlx5_esw_dl_sf_port_ops;
+ else if (mlx5_eswitch_is_pf_vf_vport(esw, vport_num))
+ ops = &mlx5_esw_pf_vf_dl_port_ops;
+ else
+ ops = NULL;
- pfnum = mlx5_get_dev_index(dev);
- mlx5_esw_get_port_parent_id(dev, &ppid);
- memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
- dl_port->attrs.switch_id.id_len = ppid.id_len;
- devlink_port_attrs_pci_sf_set(dl_port, controller, pfnum, sfnum, !!controller);
devlink = priv_to_devlink(dev);
dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
- err = devl_port_register_with_ops(devlink, dl_port, dl_port_index,
- &mlx5_esw_dl_sf_port_ops);
+ err = devl_port_register_with_ops(devlink, &dl_port->dl_port, dl_port_index, ops);
if (err)
return err;
- err = devl_rate_leaf_create(dl_port, vport, NULL);
+ err = devl_rate_leaf_create(&dl_port->dl_port, vport, NULL);
if (err)
goto rate_err;
- vport->dl_port = dl_port;
return 0;
rate_err:
- devl_port_unregister(dl_port);
+ devl_port_unregister(&dl_port->dl_port);
return err;
}
-void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num)
+void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
- struct mlx5_vport *vport;
+ struct mlx5_devlink_port *dl_port;
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
+ if (!vport->dl_port)
return;
+ dl_port = vport->dl_port;
- if (vport->dl_port->devlink_rate) {
- mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL);
- devl_rate_leaf_destroy(vport->dl_port);
- }
+ mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL);
+ devl_rate_leaf_destroy(&dl_port->dl_port);
- devl_port_unregister(vport->dl_port);
- vport->dl_port = NULL;
+ devl_port_unregister(&dl_port->dl_port);
+}
+
+struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num)
+{
+ struct mlx5_vport *vport;
+
+ vport = mlx5_eswitch_get_vport(esw, vport_num);
+ return IS_ERR(vport) ? ERR_CAST(vport) : &vport->dl_port->dl_port;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c
new file mode 100644
index 000000000000..da10e04777cf
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec.c
@@ -0,0 +1,369 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include <linux/mlx5/device.h>
+#include <linux/mlx5/vport.h>
+#include "mlx5_core.h"
+#include "eswitch.h"
+
+static int esw_ipsec_vf_query_generic(struct mlx5_core_dev *dev, u16 vport_num, bool *result)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *hca_cap, *query_cap;
+ int err;
+
+ if (!MLX5_CAP_GEN(dev, vhca_resource_manager))
+ return -EOPNOTSUPP;
+
+ if (!mlx5_esw_ipsec_vf_offload_supported(dev)) {
+ *result = false;
+ return 0;
+ }
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ if (!query_cap)
+ return -ENOMEM;
+
+ err = mlx5_vport_get_other_func_general_cap(dev, vport_num, query_cap);
+ if (err)
+ goto free;
+
+ hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ *result = MLX5_GET(cmd_hca_cap, hca_cap, ipsec_offload);
+free:
+ kvfree(query_cap);
+ return err;
+}
+
+enum esw_vport_ipsec_offload {
+ MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD,
+ MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD,
+};
+
+int mlx5_esw_ipsec_vf_offload_get(struct mlx5_core_dev *dev, struct mlx5_vport *vport)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *hca_cap, *query_cap;
+ bool ipsec_enabled;
+ int err;
+
+ /* Querying IPsec caps only makes sense when generic ipsec_offload
+ * HCA cap is enabled
+ */
+ err = esw_ipsec_vf_query_generic(dev, vport->vport, &ipsec_enabled);
+ if (err)
+ return err;
+
+ if (!ipsec_enabled) {
+ vport->info.ipsec_crypto_enabled = false;
+ vport->info.ipsec_packet_enabled = false;
+ return 0;
+ }
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ if (!query_cap)
+ return -ENOMEM;
+
+ err = mlx5_vport_get_other_func_cap(dev, vport->vport, query_cap, MLX5_CAP_IPSEC);
+ if (err)
+ goto free;
+
+ hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ vport->info.ipsec_crypto_enabled =
+ MLX5_GET(ipsec_cap, hca_cap, ipsec_crypto_offload);
+ vport->info.ipsec_packet_enabled =
+ MLX5_GET(ipsec_cap, hca_cap, ipsec_full_offload);
+free:
+ kvfree(query_cap);
+ return err;
+}
+
+static int esw_ipsec_vf_set_generic(struct mlx5_core_dev *dev, u16 vport_num, bool ipsec_ofld)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *hca_cap, *query_cap, *cap;
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev, vhca_resource_manager))
+ return -EOPNOTSUPP;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ hca_cap = kvzalloc(set_sz, GFP_KERNEL);
+ if (!hca_cap || !query_cap) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ ret = mlx5_vport_get_other_func_general_cap(dev, vport_num, query_cap);
+ if (ret)
+ goto free;
+
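+ /* Copy the queried capability block and flip only the generic
+ * ipsec_offload bit before writing it back for this function.
+ */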
+ cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
+ memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+ MLX5_SET(cmd_hca_cap, cap, ipsec_offload, ipsec_ofld);
+
+ MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
+ MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
+ MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport_num);
+
+ MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1);
+ ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
+free:
+ kvfree(hca_cap);
+ kvfree(query_cap);
+ return ret;
+}
+
+static int esw_ipsec_vf_set_bytype(struct mlx5_core_dev *dev, struct mlx5_vport *vport,
+ bool enable, enum esw_vport_ipsec_offload type)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *hca_cap, *query_cap, *cap;
+ int ret;
+
+ if (!MLX5_CAP_GEN(dev, vhca_resource_manager))
+ return -EOPNOTSUPP;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ hca_cap = kvzalloc(set_sz, GFP_KERNEL);
+ if (!hca_cap || !query_cap) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ ret = mlx5_vport_get_other_func_cap(dev, vport->vport, query_cap, MLX5_CAP_IPSEC);
+ if (ret)
+ goto free;
+
+ cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
+ memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+
+ switch (type) {
+ case MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD:
+ MLX5_SET(ipsec_cap, cap, ipsec_crypto_offload, enable);
+ break;
+ case MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD:
+ MLX5_SET(ipsec_cap, cap, ipsec_full_offload, enable);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ goto free;
+ }
+
+ MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
+ MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
+ MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport->vport);
+
+ MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_IPSEC << 1);
+ ret = mlx5_cmd_exec_in(dev, set_hca_cap, hca_cap);
+free:
+ kvfree(hca_cap);
+ kvfree(query_cap);
+ return ret;
+}
+
+static int esw_ipsec_vf_crypto_aux_caps_set(struct mlx5_core_dev *dev, u16 vport_num, bool enable)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ struct mlx5_eswitch *esw = dev->priv.eswitch;
+ void *hca_cap, *query_cap, *cap;
+ int ret;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ hca_cap = kvzalloc(set_sz, GFP_KERNEL);
+ if (!hca_cap || !query_cap) {
+ ret = -ENOMEM;
+ goto free;
+ }
+
+ ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_ETHERNET_OFFLOADS);
+ if (ret)
+ goto free;
+
+ cap = MLX5_ADDR_OF(set_hca_cap_in, hca_cap, capability);
+ memcpy(cap, MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability),
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+ MLX5_SET(per_protocol_networking_offload_caps, cap, insert_trailer, enable);
+ MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
+ MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
+ MLX5_SET(set_hca_cap_in, hca_cap, function_id, vport_num);
+ MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
+ MLX5_SET_HCA_CAP_OP_MOD_ETHERNET_OFFLOADS << 1);
+ ret = mlx5_cmd_exec_in(esw->dev, set_hca_cap, hca_cap);
+free:
+ kvfree(hca_cap);
+ kvfree(query_cap);
+ return ret;
+}
+
+static int esw_ipsec_vf_offload_set_bytype(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ bool enable, enum esw_vport_ipsec_offload type)
+{
+ struct mlx5_core_dev *dev = esw->dev;
+ int err;
+
+ if (vport->vport == MLX5_VPORT_PF)
+ return -EOPNOTSUPP;
+
+ if (type == MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD) {
+ err = esw_ipsec_vf_crypto_aux_caps_set(dev, vport->vport, enable);
+ if (err)
+ return err;
+ }
+
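+ /* Enable order: generic ipsec_offload cap first, then the specific
+ * crypto/packet cap. Disable runs in the reverse order.
+ */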
+ if (enable) {
+ err = esw_ipsec_vf_set_generic(dev, vport->vport, enable);
+ if (err)
+ return err;
+ err = esw_ipsec_vf_set_bytype(dev, vport, enable, type);
+ if (err)
+ return err;
+ } else {
+ err = esw_ipsec_vf_set_bytype(dev, vport, enable, type);
+ if (err)
+ return err;
+ err = mlx5_esw_ipsec_vf_offload_get(dev, vport);
+ if (err)
+ return err;
+
+ /* The generic ipsec_offload cap can be disabled only if both
+ * ipsec_crypto_offload and ipsec_full_offload aren't enabled.
+ */
+ if (!vport->info.ipsec_crypto_enabled &&
+ !vport->info.ipsec_packet_enabled) {
+ err = esw_ipsec_vf_set_generic(dev, vport->vport, enable);
+ if (err)
+ return err;
+ }
+ }
+
+ switch (type) {
+ case MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD:
+ vport->info.ipsec_crypto_enabled = enable;
+ break;
+ case MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD:
+ vport->info.ipsec_packet_enabled = enable;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int esw_ipsec_offload_supported(struct mlx5_core_dev *dev, u16 vport_num)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *hca_cap, *query_cap;
+ int ret;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ if (!query_cap)
+ return -ENOMEM;
+
+ ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_GENERAL);
+ if (ret)
+ goto free;
+
+ hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ if (!MLX5_GET(cmd_hca_cap, hca_cap, log_max_dek))
+ ret = -EOPNOTSUPP;
+free:
+ kvfree(query_cap);
+ return ret;
+}
+
+bool mlx5_esw_ipsec_vf_offload_supported(struct mlx5_core_dev *dev)
+{
+ /* Old firmware doesn't support ipsec_offload capability for VFs. This
+ * can be detected by checking reformat_add_esp_trasport capability -
+ * when this cap isn't supported it means firmware cannot be trusted
+ * about what it reports for ipsec_offload cap.
+ */
+ return MLX5_CAP_FLOWTABLE_NIC_TX(dev, reformat_add_esp_trasport);
+}
+
+int mlx5_esw_ipsec_vf_crypto_offload_supported(struct mlx5_core_dev *dev,
+ u16 vport_num)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *hca_cap, *query_cap;
+ int err;
+
+ if (!mlx5_esw_ipsec_vf_offload_supported(dev))
+ return -EOPNOTSUPP;
+
+ err = esw_ipsec_offload_supported(dev, vport_num);
+ if (err)
+ return err;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ if (!query_cap)
+ return -ENOMEM;
+
+ err = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_ETHERNET_OFFLOADS);
+ if (err)
+ goto free;
+
+ hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ if (!MLX5_GET(per_protocol_networking_offload_caps, hca_cap, swp))
+ goto free;
+
+free:
+ kvfree(query_cap);
+ return err;
+}
+
+int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev,
+ u16 vport_num)
+{
+ int query_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
+ void *hca_cap, *query_cap;
+ int ret;
+
+ if (!mlx5_esw_ipsec_vf_offload_supported(dev))
+ return -EOPNOTSUPP;
+
+ ret = esw_ipsec_offload_supported(dev, vport_num);
+ if (ret)
+ return ret;
+
+ query_cap = kvzalloc(query_sz, GFP_KERNEL);
+ if (!query_cap)
+ return -ENOMEM;
+
+ ret = mlx5_vport_get_other_func_cap(dev, vport_num, query_cap, MLX5_CAP_FLOW_TABLE);
+ if (ret)
+ goto out;
+
+ hca_cap = MLX5_ADDR_OF(query_hca_cap_out, query_cap, capability);
+ if (!MLX5_GET(flow_table_nic_cap, hca_cap, flow_table_properties_nic_receive.decap)) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
+out:
+ kvfree(query_cap);
+ return ret;
+}
+
+int mlx5_esw_ipsec_vf_crypto_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ bool enable)
+{
+ return esw_ipsec_vf_offload_set_bytype(esw, vport, enable,
+ MLX5_ESW_VPORT_IPSEC_CRYPTO_OFFLOAD);
+}
+
+int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+ bool enable)
+{
+ return esw_ipsec_vf_offload_set_bytype(esw, vport, enable,
+ MLX5_ESW_VPORT_IPSEC_PACKET_OFFLOAD);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
new file mode 100644
index 000000000000..095f31f380fa
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+// Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+
+#include "fs_core.h"
+#include "eswitch.h"
+#include "en_accel/ipsec.h"
+#include "esw/ipsec_fs.h"
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+#include "en/tc_priv.h"
+#endif
+
+enum {
+ MLX5_ESW_IPSEC_RX_POL_FT_LEVEL,
+ MLX5_ESW_IPSEC_RX_ESP_FT_LEVEL,
+ MLX5_ESW_IPSEC_RX_ESP_FT_CHK_LEVEL,
+};
+
+enum {
+ MLX5_ESW_IPSEC_TX_POL_FT_LEVEL,
+ MLX5_ESW_IPSEC_TX_ESP_FT_LEVEL,
+ MLX5_ESW_IPSEC_TX_ESP_FT_CNT_LEVEL,
+};
+
+static void esw_ipsec_rx_status_drop_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ mlx5_del_flow_rules(rx->status_drop.rule);
+ mlx5_destroy_flow_group(rx->status_drop.group);
+ mlx5_fc_destroy(ipsec->mdev, rx->status_drop_cnt);
+}
+
+static void esw_ipsec_rx_status_pass_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ mlx5_del_flow_rules(rx->status.rule);
+ mlx5_chains_put_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
+}
+
+static int esw_ipsec_rx_status_drop_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table *ft = rx->ft.status;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_flow_destination dest = {};
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_fc *flow_counter;
+ struct mlx5_flow_spec *spec;
+ struct mlx5_flow_group *g;
+ u32 *flow_group_in;
+ int err = 0;
+
+ flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!flow_group_in || !spec) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+
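+ /* Reserve the last flow index of the status table for a catch-all
+ * drop rule that also counts the dropped packets.
+ */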
+ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ft->max_fte - 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ft->max_fte - 1);
+ g = mlx5_create_flow_group(ft, flow_group_in);
+ if (IS_ERR(g)) {
+ err = PTR_ERR(g);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop flow group, err=%d\n", err);
+ goto err_out;
+ }
+
+ flow_counter = mlx5_fc_create(mdev, false);
+ if (IS_ERR(flow_counter)) {
+ err = PTR_ERR(flow_counter);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule counter, err=%d\n", err);
+ goto err_cnt;
+ }
+
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
+ dest.counter_id = mlx5_fc_id(flow_counter);
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+ rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_err(mdev,
+ "Failed to add ipsec rx status drop rule, err=%d\n", err);
+ goto err_rule;
+ }
+
+ rx->status_drop.group = g;
+ rx->status_drop.rule = rule;
+ rx->status_drop_cnt = flow_counter;
+
+ kvfree(flow_group_in);
+ kvfree(spec);
+ return 0;
+
+err_rule:
+ mlx5_fc_destroy(mdev, flow_counter);
+err_cnt:
+ mlx5_destroy_flow_group(g);
+err_out:
+ kvfree(flow_group_in);
+ kvfree(spec);
+ return err;
+}
+
+static int esw_ipsec_rx_status_pass_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_act flow_act = {};
+ struct mlx5_flow_handle *rule;
+ struct mlx5_flow_spec *spec;
+ int err;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec)
+ return -ENOMEM;
+
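+ /* Forward packets whose IPsec syndrome is zero (successfully
+ * decrypted); anything else falls through to the drop rule.
+ */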
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ misc_parameters_2.ipsec_syndrome);
+ MLX5_SET(fte_match_param, spec->match_value,
+ misc_parameters_2.ipsec_syndrome, 0);
+ spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
+ spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
+ flow_act.flags = FLOW_ACT_NO_APPEND;
+ flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+ MLX5_FLOW_CONTEXT_ACTION_COUNT;
+ rule = mlx5_add_flow_rules(rx->ft.status, spec, &flow_act, dest, 2);
+ if (IS_ERR(rule)) {
+ err = PTR_ERR(rule);
+ mlx5_core_warn(ipsec->mdev,
+ "Failed to add ipsec rx status pass rule, err=%d\n", err);
+ goto err_rule;
+ }
+
+ rx->status.rule = rule;
+ kvfree(spec);
+ return 0;
+
+err_rule:
+ kvfree(spec);
+ return err;
+}
+
+void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx)
+{
+ esw_ipsec_rx_status_pass_destroy(ipsec, rx);
+ esw_ipsec_rx_status_drop_destroy(ipsec, rx);
+}
+
+int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest)
+{
+ int err;
+
+ err = esw_ipsec_rx_status_drop_create(ipsec, rx);
+ if (err)
+ return err;
+
+ err = esw_ipsec_rx_status_pass_create(ipsec, rx, dest);
+ if (err)
+ goto err_pass_create;
+
+ return 0;
+
+err_pass_create:
+ esw_ipsec_rx_status_drop_destroy(ipsec, rx);
+ return err;
+}
+
+void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx_create_attr *attr)
+{
+ attr->prio = FDB_CRYPTO_INGRESS;
+ attr->pol_level = MLX5_ESW_IPSEC_RX_POL_FT_LEVEL;
+ attr->sa_level = MLX5_ESW_IPSEC_RX_ESP_FT_LEVEL;
+ attr->status_level = MLX5_ESW_IPSEC_RX_ESP_FT_CHK_LEVEL;
+ attr->chains_ns = MLX5_FLOW_NAMESPACE_FDB;
+}
+
+int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
+ struct mlx5_flow_destination *dest)
+{
+ dest->type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
+ dest->ft = mlx5_chains_get_table(esw_chains(ipsec->mdev->priv.eswitch), 0, 1, 0);
+
+ return 0;
+}
+
+int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_act *flow_act)
+{
+ u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+ struct mlx5_core_dev *mdev = ipsec->mdev;
+ struct mlx5_modify_hdr *modify_hdr;
+ u32 mapped_id;
+ int err;
+
+ err = xa_alloc_bh(&ipsec->rx_esw->ipsec_obj_id_map, &mapped_id,
+ xa_mk_value(sa_entry->ipsec_obj_id),
+ XA_LIMIT(1, ESW_IPSEC_RX_MAPPED_ID_MASK), 0);
+ if (err)
+ return err;
+
+ /* reuse tunnel bits for ipsec,
+ * tun_id is always 0 and tun_opts is mapped to ipsec_obj_id.
+ */
+ MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
+ MLX5_SET(set_action_in, action, field,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
+ MLX5_SET(set_action_in, action, offset, ESW_ZONE_ID_BITS);
+ MLX5_SET(set_action_in, action, length,
+ ESW_TUN_ID_BITS + ESW_TUN_OPTS_BITS);
+ MLX5_SET(set_action_in, action, data, mapped_id);
+
+ modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_FDB,
+ 1, action);
+ if (IS_ERR(modify_hdr)) {
+ err = PTR_ERR(modify_hdr);
+ goto err_header_alloc;
+ }
+
+ sa_entry->rx_mapped_id = mapped_id;
+ flow_act->modify_hdr = modify_hdr;
+ flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+
+ return 0;
+
+err_header_alloc:
+ xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map, mapped_id);
+ return err;
+}
+
+void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry)
+{
+ struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
+
+ if (sa_entry->rx_mapped_id)
+ xa_erase_bh(&ipsec->rx_esw->ipsec_obj_id_map,
+ sa_entry->rx_mapped_id);
+}
+
+int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
+ u32 *ipsec_obj_id)
+{
+ struct mlx5e_ipsec *ipsec = priv->ipsec;
+ void *val;
+
+ val = xa_load(&ipsec->rx_esw->ipsec_obj_id_map, id);
+ if (!val)
+ return -ENOENT;
+
+ *ipsec_obj_id = xa_to_value(val);
+
+ return 0;
+}
+
+void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_tx_create_attr *attr)
+{
+ attr->prio = FDB_CRYPTO_EGRESS;
+ attr->pol_level = MLX5_ESW_IPSEC_TX_POL_FT_LEVEL;
+ attr->sa_level = MLX5_ESW_IPSEC_TX_ESP_FT_LEVEL;
+ attr->cnt_level = MLX5_ESW_IPSEC_TX_ESP_FT_CNT_LEVEL;
+ attr->chains_ns = MLX5_FLOW_NAMESPACE_FDB;
+}
+
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+static int mlx5_esw_ipsec_modify_flow_dests(struct mlx5_eswitch *esw,
+ struct mlx5e_tc_flow *flow)
+{
+ struct mlx5_esw_flow_attr *esw_attr;
+ struct mlx5_flow_attr *attr;
+ int err;
+
+ attr = flow->attr;
+ esw_attr = attr->esw_attr;
+ if (esw_attr->out_count - esw_attr->split_count > 1)
+ return 0;
+
+ err = mlx5_eswitch_restore_ipsec_rule(esw, flow->rule[0], esw_attr,
+ esw_attr->out_count - 1);
+
+ return err;
+}
+#endif
+
+void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev)
+{
+#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+ struct mlx5_eswitch_rep *rep;
+ struct mlx5e_rep_priv *rpriv;
+ struct rhashtable_iter iter;
+ struct mlx5e_tc_flow *flow;
+ unsigned long i;
+ int err;
+
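+ /* Walk the offloaded TC flows of every vport rep and update their
+ * destinations via mlx5_eswitch_restore_ipsec_rule().
+ */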
+ xa_for_each(&esw->offloads.vport_reps, i, rep) {
+ rpriv = rep->rep_data[REP_ETH].priv;
+ if (!rpriv || !rpriv->netdev)
+ continue;
+
+ rhashtable_walk_enter(&rpriv->tc_ht, &iter);
+ rhashtable_walk_start(&iter);
+ while ((flow = rhashtable_walk_next(&iter)) != NULL) {
+ if (IS_ERR(flow))
+ continue;
+
+ err = mlx5_esw_ipsec_modify_flow_dests(esw, flow);
+ if (err)
+ mlx5_core_warn_once(mdev,
+ "Failed to modify flow dests for IPsec");
+ }
+ rhashtable_walk_stop(&iter);
+ rhashtable_walk_exit(&iter);
+ }
+#endif
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
new file mode 100644
index 000000000000..0c90f7a8b0d3
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/ipsec_fs.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __MLX5_ESW_IPSEC_FS_H__
+#define __MLX5_ESW_IPSEC_FS_H__
+
+struct mlx5e_ipsec;
+struct mlx5e_ipsec_sa_entry;
+
+#ifdef CONFIG_MLX5_ESWITCH
+void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx);
+int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest);
+void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx_create_attr *attr);
+int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
+ struct mlx5_flow_destination *dest);
+int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_act *flow_act);
+void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry);
+int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
+ u32 *ipsec_obj_id);
+void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_tx_create_attr *attr);
+void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev);
+#else
+static inline void mlx5_esw_ipsec_rx_status_destroy(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx) {}
+
+static inline int mlx5_esw_ipsec_rx_status_create(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx *rx,
+ struct mlx5_flow_destination *dest)
+{
+ return -EINVAL;
+}
+
+static inline void mlx5_esw_ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_rx_create_attr *attr) {}
+
+static inline int mlx5_esw_ipsec_rx_status_pass_dest_get(struct mlx5e_ipsec *ipsec,
+ struct mlx5_flow_destination *dest)
+{
+ return -EINVAL;
+}
+
+static inline int mlx5_esw_ipsec_rx_setup_modify_header(struct mlx5e_ipsec_sa_entry *sa_entry,
+ struct mlx5_flow_act *flow_act)
+{
+ return -EINVAL;
+}
+
+static inline void mlx5_esw_ipsec_rx_id_mapping_remove(struct mlx5e_ipsec_sa_entry *sa_entry) {}
+
+static inline int mlx5_esw_ipsec_rx_ipsec_obj_id_search(struct mlx5e_priv *priv, u32 id,
+ u32 *ipsec_obj_id)
+{
+ return -EINVAL;
+}
+
+static inline void mlx5_esw_ipsec_tx_create_attr_set(struct mlx5e_ipsec *ipsec,
+ struct mlx5e_ipsec_tx_create_attr *attr) {}
+
+static inline void mlx5_esw_ipsec_restore_dest_uplink(struct mlx5_core_dev *mdev) {}
+#endif /* CONFIG_MLX5_ESWITCH */
+#endif /* __MLX5_ESW_IPSEC_FS_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
index 7c79476cc5f9..1887a24ee414 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
@@ -740,7 +740,7 @@ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32
static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *name,
u64 *rate, struct netlink_ext_ack *extack)
{
- u32 link_speed_max, reminder;
+ u32 link_speed_max, remainder;
u64 value;
int err;
@@ -750,8 +750,8 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *
return err;
}
- value = div_u64_rem(*rate, MLX5_LINKSPEED_UNIT, &reminder);
- if (reminder) {
+ value = div_u64_rem(*rate, MLX5_LINKSPEED_UNIT, &remainder);
+ if (remainder) {
pr_err("%s rate value %lluBps not in link speed units of 1Mbps.\n",
name, *rate);
NL_SET_ERR_MSG_MOD(extack, "TX rate value not in link speed units of 1Mbps");