author     Linus Torvalds <torvalds@linux-foundation.org>  2023-06-29 02:43:10 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-06-29 02:43:10 +0300
commit     3a8a670eeeaa40d87bd38a587438952741980c18 (patch)
tree       d5546d311271503eadf75b45d87e12720e72899f  /drivers/net/ethernet/mellanox
parent     6a8cbd9253abc1bd0df4d60c4c24fa555190376d (diff)
parent     ae230642190a51b85656d6da2df744d534d59544 (diff)
Merge tag 'net-next-6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next
Pull networking changes from Jakub Kicinski:
 "WiFi 7 and sendpage changes are the biggest pieces of work for this
  release. The latter will definitely require fixes but I think that we
  got it to a reasonable point.

  Core:

   - Rework the sendpage & splice implementations. Instead of feeding
     data into sockets page by page extend sendmsg handlers to support
     taking a reference on the data, controlled by a new flag called
     MSG_SPLICE_PAGES.

     Rework the handling of unexpected-end-of-file to invoke an
     additional callback instead of trying to predict what the right
     combination of MORE/NOTLAST flags is.

     Remove the MSG_SENDPAGE_NOTLAST flag completely.

   - Implement SCM_PIDFD, a new type of CMSG type analogous to
     SCM_CREDENTIALS, but it contains pidfd instead of plain pid

   - Enable socket busy polling with CONFIG_RT

   - Improve reliability and efficiency of reporting for ref_tracker

   - Auto-generate a user space C library for various Netlink families

  Protocols:

   - Allow TCP to shrink the advertised window when necessary, prevent
     sk_rcvbuf auto-tuning from growing the window all the way up to
     tcp_rmem[2]

   - Use per-VMA locking for "page-flipping" TCP receive zerocopy

   - Prepare TCP for device-to-device data transfers, by making sure
     that payloads are always attached to skbs as page frags

   - Make the backoff time for the first N TCP SYN retransmissions
     linear. Exponential backoff is unnecessarily conservative.

   - Create a new MPTCP getsockopt to retrieve all info
     (MPTCP_FULL_INFO)

   - Avoid waking up applications using TLS sockets until we have a
     full record

   - Allow using kernel memory for protocol ioctl callbacks, paving the
     way to issuing ioctls over io_uring

   - Add nolocalbypass option to VxLAN, forcing packets to be fully
     encapsulated even if they are destined for a local IP address

   - Make TCPv4 use consistent hash in TIME_WAIT and SYN_RECV. Ensure
     in-kernel ECMP implementation (e.g. Open vSwitch) selects the same
     link for all packets. Support L4 symmetric hashing in Open vSwitch.

   - PPPoE: make number of hash bits configurable

   - Allow DNS to be overwritten by DHCPACK in the in-kernel DHCP
     client (ipconfig)

   - Add layer 2 miss indication and filtering, allowing higher layers
     (e.g. ACL filters) to make forwarding decisions based on whether
     packet matched forwarding state in lower devices (bridge)

   - Support matching on Connectivity Fault Management (CFM) packets

   - Hide the "link becomes ready" IPv6 messages by demoting their
     printk level to debug

   - HSR: don't enable promiscuous mode if device offloads the proto

   - Support active scanning in IEEE 802.15.4

   - Continue work on Multi-Link Operation for WiFi 7

  BPF:

   - Add precision propagation for subprogs and callbacks. This allows
     maintaining verification efficiency when subprograms are used, or
     in fact passing the verifier at all for complex programs,
     especially those using open-coded iterators

   - Improve BPF's {g,s}etsockopt() length handling. Previously BPF
     assumed the length is always equal to the amount of written data.
     But some protos allow passing a NULL buffer to discover what the
     output buffer *should* be, without writing anything.

   - Accept dynptr memory as memory arguments passed to helpers

   - Add routing table ID to bpf_fib_lookup BPF helper

   - Support O_PATH FDs in BPF_OBJ_PIN and BPF_OBJ_GET commands

   - Drop bpf_capable() check in BPF_MAP_FREEZE command (used to mark
     maps as read-only)

   - Show target_{obj,btf}_id in tracing link fdinfo

   - Addition of several new kfuncs (most of the names are
     self-explanatory):
      - Add a set of new dynptr kfuncs: bpf_dynptr_adjust(),
        bpf_dynptr_is_null(), bpf_dynptr_is_rdonly(), bpf_dynptr_size()
        and bpf_dynptr_clone().
      - bpf_task_under_cgroup()
      - bpf_sock_destroy() - force closing sockets
      - bpf_cpumask_first_and(), rework bpf_cpumask_any*() kfuncs

  Netfilter:

   - Relax set/map validation checks in nf_tables. Allow checking
     presence of an entry in a map without using the value

   - Increase ip_vs_conn_tab_bits range for 64BIT builds

   - Allow updating size of a set

   - Improve NAT tuple selection when connection is closing

  Driver API:

   - Integrate netdev with LED subsystem, to allow configuring HW
     "offloaded" blinking of LEDs based on link state and activity
     (i.e. packets coming in and out)

   - Support configuring rate selection pins of SFP modules

   - Factor Clause 73 auto-negotiation code out of the drivers, provide
     common helper routines

   - Add more fool-proof helpers for managing lifetime of MDIO devices
     associated with the PCS layer

   - Allow drivers to report advanced statistics related to Time Aware
     scheduler offload (taprio)

   - Allow opting out of VF statistics in link dump, to allow more VFs
     to fit into the message

   - Split devlink instance and devlink port operations

  New hardware / drivers:

   - Ethernet:
      - Synopsys EMAC4 IP support (stmmac)
      - Marvell 88E6361 8 port (5x1GE + 3x2.5GE) switches
      - Marvell 88E6250 7 port switches
      - Microchip LAN8650/1 Rev.B0 PHYs
      - MediaTek MT7981/MT7988 built-in 1GE PHY driver

   - WiFi:
      - Realtek RTL8192FU, 2.4 GHz, b/g/n mode, 2T2R, 300 Mbps
      - Realtek RTL8723DS (SDIO variant)
      - Realtek RTL8851BE

   - CAN:
      - Fintek F81604

  Drivers:

   - Ethernet NICs:
      - Intel (100G, ice):
         - support dynamic interrupt allocation
         - use meta data match instead of VF MAC addr on slow-path
      - nVidia/Mellanox:
         - extend link aggregation to handle 4, rather than just 2
           ports
         - spawn sub-functions without any features by default
      - OcteonTX2:
         - support HTB (Tx scheduling/QoS) offload
         - make RSS hash generation configurable
         - support selecting Rx queue using TC filters
      - Wangxun (ngbe/txgbe):
         - add basic Tx/Rx packet offloads
         - add phylink support (SFP/PCS control)
      - Freescale/NXP (enetc):
         - report TAPRIO packet statistics
      - Solarflare/AMD:
         - support matching on IP ToS and UDP source port of outer
           header
         - VxLAN and GENEVE tunnel encapsulation over IPv4 or IPv6
         - add devlink dev info support for EF10

   - Virtual NICs:
      - Microsoft vNIC:
         - size the Rx indirection table based on requested
           configuration
         - support VLAN tagging
      - Amazon vNIC:
         - try to reuse Rx buffers if not fully consumed, useful for
           ARM servers running with 16kB pages
      - Google vNIC:
         - support TCP segmentation of >64kB frames

   - Ethernet embedded switches:
      - Marvell (mv88e6xxx):
         - enable USXGMII (88E6191X)
      - Microchip:
         - lan966x: add support for Egress Stage 0 ACL engine
         - lan966x: support mapping packet priority to internal switch
           priority (based on PCP or DSCP)

   - Ethernet PHYs:
      - Broadcom PHYs:
         - support for Wake-on-LAN for BCM54210E/B50212E
         - report LPI counter
      - Microsemi PHYs: support RGMII delay configuration (VSC85xx)
      - Micrel PHYs: receive timestamp in the frame (LAN8841)
      - Realtek PHYs: support optional external PHY clock
      - Altera TSE PCS: merge the driver into Lynx PCS which it is a
        variant of

   - CAN: Kvaser PCIEcan:
      - support packet timestamping

   - WiFi:
      - Intel (iwlwifi):
         - major update for new firmware and Multi-Link Operation (MLO)
         - configuration rework to drop test devices and split the
           different families
         - support for segmented PNVM images and power tables
         - new vendor entries for PPAG (platform antenna gain) feature
      - Qualcomm 802.11ax (ath11k):
         - Multiple Basic Service Set Identifier (MBSSID) and Enhanced
           MBSSID Advertisement (EMA) support in AP mode
         - support factory test mode
      - RealTek (rtw89):
         - add RSSI based antenna diversity
         - support U-NII-4 channels on 5 GHz band
      - RealTek (rtl8xxxu):
         - AP mode support for 8188f
         - support USB RX aggregation for the newer chips"

* tag 'net-next-6.5' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next: (1602 commits)
  net: scm: introduce and use scm_recv_unix helper
  af_unix: Skip SCM_PIDFD if scm->pid is NULL.
  net: lan743x: Simplify comparison
  netlink: Add __sock_i_ino() for __netlink_diag_dump().
  net: dsa: avoid suspicious RCU usage for synced VLAN-aware MAC addresses
  Revert "af_unix: Call scm_recv() only after scm_set_cred()."
  phylink: ReST-ify the phylink_pcs_neg_mode() kdoc
  libceph: Partially revert changes to support MSG_SPLICE_PAGES
  net: phy: mscc: fix packet loss due to RGMII delays
  net: mana: use vmalloc_array and vcalloc
  net: enetc: use vmalloc_array and vcalloc
  ionic: use vmalloc_array and vcalloc
  pds_core: use vmalloc_array and vcalloc
  gve: use vmalloc_array and vcalloc
  octeon_ep: use vmalloc_array and vcalloc
  net: usb: qmi_wwan: add u-blox 0x1312 composition
  perf trace: fix MSG_SPLICE_PAGES build error
  ipvlan: Fix return value of ipvlan_queue_xmit()
  netfilter: nf_tables: fix underflow in chain reference counter
  netfilter: nf_tables: unbind non-anonymous set if rule construction fails
  ...
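The SCM_PIDFD message noted above is retrieved the same way as SCM_CREDENTIALS: opt in
with a socket option, then walk the control messages returned by recvmsg(). A minimal
user-space sketch, assuming the SO_PASSPIDFD and SCM_PIDFD constants from the 6.5 UAPI
headers (the fallback values below are assumptions for older libc headers):

  /* Sketch only: receive a message plus a pidfd of the sender over a
   * Unix socket. SO_PASSPIDFD/SCM_PIDFD fallbacks are assumptions
   * taken from the 6.5 UAPI headers.
   */
  #include <sys/socket.h>
  #include <string.h>

  #ifndef SO_PASSPIDFD
  #define SO_PASSPIDFD 76		/* assumption: asm-generic/socket.h value */
  #endif
  #ifndef SCM_PIDFD
  #define SCM_PIDFD 0x04		/* assumption: linux/socket.h value */
  #endif

  static int recv_with_pidfd(int sock)
  {
  	char data[128];
  	char cbuf[CMSG_SPACE(sizeof(int))];
  	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
  	struct msghdr msg = {
  		.msg_iov = &iov, .msg_iovlen = 1,
  		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
  	};
  	int on = 1, pidfd = -1;

  	/* Ask the kernel to attach a pidfd of the sender to each message. */
  	setsockopt(sock, SOL_SOCKET, SO_PASSPIDFD, &on, sizeof(on));

  	if (recvmsg(sock, &msg, 0) < 0)
  		return -1;

  	for (struct cmsghdr *c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
  		if (c->cmsg_level == SOL_SOCKET && c->cmsg_type == SCM_PIDFD)
  			memcpy(&pidfd, CMSG_DATA(c), sizeof(pidfd));

  	return pidfd;	/* caller owns the pidfd; -1 if none was attached */
  }

Similarly, the routing table ID added to the bpf_fib_lookup helper is driven by a tbid
field plus a BPF_FIB_LOOKUP_TBID flag. A minimal TC-program sketch, assuming the 6.5
UAPI; the program name, table number, and fixed addresses (standing in for real packet
parsing) are illustrative:

  /* Sketch only: direct a FIB lookup at routing table 100. */
  #include <linux/bpf.h>
  #include <linux/pkt_cls.h>
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_endian.h>

  SEC("tc")
  int fib_in_table_100(struct __sk_buff *skb)
  {
  	struct bpf_fib_lookup p = {
  		.family   = 2,	/* AF_INET */
  		.ifindex  = skb->ingress_ifindex,
  		.ipv4_src = bpf_htonl(0xc0a80101),	/* 192.168.1.1 */
  		.ipv4_dst = bpf_htonl(0x08080808),	/* 8.8.8.8 */
  		.tbid     = 100,	/* routing table to consult */
  	};
  	/* BPF_FIB_LOOKUP_TBID is only honoured together with
  	 * BPF_FIB_LOOKUP_DIRECT.
  	 */
  	long rc = bpf_fib_lookup(skb, &p, sizeof(p),
  				 BPF_FIB_LOOKUP_DIRECT | BPF_FIB_LOOKUP_TBID);

  	return rc == BPF_FIB_LKUP_RET_SUCCESS ? TC_ACT_OK : TC_ACT_SHOT;
  }

  char _license[] SEC("license") = "GPL";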
Diffstat (limited to 'drivers/net/ethernet/mellanox')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 65
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/debugfs.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dev.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/devlink.c | 87
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c | 44
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/qos.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 26
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 137
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.h | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 189
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c | 69
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_debugfs.c | 89
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c | 37
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 203
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 80
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 451
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/events.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 27
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 160
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 141
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 31
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c | 129
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h | 37
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/events.h | 40
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h | 34
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c | 7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 212
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 42
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/rdma.c | 24
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 50
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/i2c.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c | 45
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 832
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h | 12
93 files changed, 2644 insertions, 1188 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 277738c50c56..61286b0d9b0c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1374,16 +1374,13 @@ static int mlx4_mf_bond(struct mlx4_dev *dev)
int nvfs;
struct mlx4_slaves_pport slaves_port1;
struct mlx4_slaves_pport slaves_port2;
- DECLARE_BITMAP(slaves_port_1_2, MLX4_MFUNC_MAX);
slaves_port1 = mlx4_phys_to_slaves_pport(dev, 1);
slaves_port2 = mlx4_phys_to_slaves_pport(dev, 2);
- bitmap_and(slaves_port_1_2,
- slaves_port1.slaves, slaves_port2.slaves,
- dev->persist->num_vfs + 1);
/* only single port vfs are allowed */
- if (bitmap_weight(slaves_port_1_2, dev->persist->num_vfs + 1) > 1) {
+ if (bitmap_weight_and(slaves_port1.slaves, slaves_port2.slaves,
+ dev->persist->num_vfs + 1) > 1) {
mlx4_warn(dev, "HA mode unsupported for dual ported VFs\n");
return -EINVAL;
}
@@ -3027,13 +3024,43 @@ no_msi:
}
}
+static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port,
+ enum devlink_port_type port_type)
+{
+ struct mlx4_port_info *info = container_of(devlink_port,
+ struct mlx4_port_info,
+ devlink_port);
+ enum mlx4_port_type mlx4_port_type;
+
+ switch (port_type) {
+ case DEVLINK_PORT_TYPE_AUTO:
+ mlx4_port_type = MLX4_PORT_TYPE_AUTO;
+ break;
+ case DEVLINK_PORT_TYPE_ETH:
+ mlx4_port_type = MLX4_PORT_TYPE_ETH;
+ break;
+ case DEVLINK_PORT_TYPE_IB:
+ mlx4_port_type = MLX4_PORT_TYPE_IB;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return __set_port_type(info, mlx4_port_type);
+}
+
+static const struct devlink_port_ops mlx4_devlink_port_ops = {
+ .port_type_set = mlx4_devlink_port_type_set,
+};
+
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
{
struct devlink *devlink = priv_to_devlink(mlx4_priv(dev));
struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
int err;
- err = devl_port_register(devlink, &info->devlink_port, port);
+ err = devl_port_register_with_ops(devlink, &info->devlink_port, port,
+ &mlx4_devlink_port_ops);
if (err)
return err;
@@ -3877,31 +3904,6 @@ err_disable_pdev:
return err;
}
-static int mlx4_devlink_port_type_set(struct devlink_port *devlink_port,
- enum devlink_port_type port_type)
-{
- struct mlx4_port_info *info = container_of(devlink_port,
- struct mlx4_port_info,
- devlink_port);
- enum mlx4_port_type mlx4_port_type;
-
- switch (port_type) {
- case DEVLINK_PORT_TYPE_AUTO:
- mlx4_port_type = MLX4_PORT_TYPE_AUTO;
- break;
- case DEVLINK_PORT_TYPE_ETH:
- mlx4_port_type = MLX4_PORT_TYPE_ETH;
- break;
- case DEVLINK_PORT_TYPE_IB:
- mlx4_port_type = MLX4_PORT_TYPE_IB;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return __set_port_type(info, mlx4_port_type);
-}
-
static void mlx4_devlink_param_load_driverinit_values(struct devlink *devlink)
{
struct mlx4_priv *priv = devlink_priv(devlink);
@@ -3986,7 +3988,6 @@ static int mlx4_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a
}
static const struct devlink_ops mlx4_devlink_ops = {
- .port_type_set = mlx4_devlink_port_type_set,
.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
.reload_down = mlx4_devlink_reload_down,
.reload_up = mlx4_devlink_reload_up,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index ddf1e352f51d..35f00700a4d6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -75,7 +75,8 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \
esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o
-mlx5_core-$(CONFIG_MLX5_BRIDGE) += esw/bridge.o esw/bridge_mcast.o en/rep/bridge.o
+mlx5_core-$(CONFIG_MLX5_BRIDGE) += esw/bridge.o esw/bridge_mcast.o esw/bridge_debugfs.o \
+ en/rep/bridge.o
mlx5_core-$(CONFIG_THERMAL) += thermal.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
index bb95b40d25eb..fc13b41cc9b2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -246,6 +246,7 @@ void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev)
debugfs_create_u32("fw_pages_total", 0400, pages, &dev->priv.fw_pages);
debugfs_create_u32("fw_pages_vfs", 0400, pages, &dev->priv.page_counters[MLX5_VF]);
+ debugfs_create_u32("fw_pages_ec_vfs", 0400, pages, &dev->priv.page_counters[MLX5_EC_VF]);
debugfs_create_u32("fw_pages_sfs", 0400, pages, &dev->priv.page_counters[MLX5_SF]);
debugfs_create_u32("fw_pages_host_pf", 0400, pages, &dev->priv.page_counters[MLX5_HOST_PF]);
debugfs_create_u32("fw_pages_alloc_failed", 0400, pages, &dev->priv.fw_pages_alloc_failed);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 1b33533b15de..edb06fb9bbc5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -151,12 +151,6 @@ static bool is_ib_rep_supported(struct mlx5_core_dev *dev)
if (!is_eth_rep_supported(dev))
return false;
- if (!MLX5_ESWITCH_MANAGER(dev))
- return false;
-
- if (!is_mdev_switchdev_mode(dev))
- return false;
-
if (mlx5_core_mp_enabled(dev))
return false;
@@ -323,6 +317,18 @@ static void del_adev(struct auxiliary_device *adev)
auxiliary_device_uninit(adev);
}
+void mlx5_dev_set_lightweight(struct mlx5_core_dev *dev)
+{
+ mutex_lock(&mlx5_intf_mutex);
+ dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
+ mutex_unlock(&mlx5_intf_mutex);
+}
+
+bool mlx5_dev_is_lightweight(struct mlx5_core_dev *dev)
+{
+ return dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV;
+}
+
int mlx5_attach_device(struct mlx5_core_dev *dev)
{
struct mlx5_priv *priv = &dev->priv;
@@ -457,6 +463,10 @@ static int add_drivers(struct mlx5_core_dev *dev)
if (priv->adev[i])
continue;
+ if (mlx5_adev_devices[i].is_enabled &&
+ !(mlx5_adev_devices[i].is_enabled(dev)))
+ continue;
+
if (mlx5_adev_devices[i].is_supported)
is_supported = mlx5_adev_devices[i].is_supported(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
index 4b607785d694..3d82ec890666 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c
@@ -7,7 +7,6 @@
#include "fw_reset.h"
#include "fs_core.h"
#include "eswitch.h"
-#include "lag/lag.h"
#include "esw/qos.h"
#include "sf/dev/dev.h"
#include "sf/sf.h"
@@ -142,6 +141,13 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
bool sf_dev_allocated;
int ret = 0;
+ if (mlx5_dev_is_lightweight(dev)) {
+ if (action != DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
+ return -EOPNOTSUPP;
+ mlx5_unload_one_light(dev);
+ return 0;
+ }
+
sf_dev_allocated = mlx5_sf_dev_allocated(dev);
if (sf_dev_allocated) {
/* Reload results in deleting SF device which further results in
@@ -162,9 +168,8 @@ static int mlx5_devlink_reload_down(struct devlink *devlink, bool netns_change,
return -EOPNOTSUPP;
}
- if (pci_num_vf(pdev)) {
+ if (mlx5_core_is_pf(dev) && pci_num_vf(pdev))
NL_SET_ERR_MSG_MOD(extack, "reload while VFs are present is unfavorable");
- }
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
@@ -195,6 +200,10 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a
*actions_performed = BIT(action);
switch (action) {
case DEVLINK_RELOAD_ACTION_DRIVER_REINIT:
+ if (mlx5_dev_is_lightweight(dev)) {
+ mlx5_fw_reporters_create(dev);
+ return mlx5_init_one_devl_locked(dev);
+ }
ret = mlx5_load_one_devl_locked(dev, false);
break;
case DEVLINK_RELOAD_ACTION_FW_ACTIVATE:
@@ -311,8 +320,6 @@ static const struct devlink_ops mlx5_devlink_ops = {
.eswitch_inline_mode_get = mlx5_devlink_eswitch_inline_mode_get,
.eswitch_encap_mode_set = mlx5_devlink_eswitch_encap_mode_set,
.eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get,
- .port_function_hw_addr_get = mlx5_devlink_port_function_hw_addr_get,
- .port_function_hw_addr_set = mlx5_devlink_port_function_hw_addr_set,
.rate_leaf_tx_share_set = mlx5_esw_devlink_rate_leaf_tx_share_set,
.rate_leaf_tx_max_set = mlx5_esw_devlink_rate_leaf_tx_max_set,
.rate_node_tx_share_set = mlx5_esw_devlink_rate_node_tx_share_set,
@@ -320,16 +327,9 @@ static const struct devlink_ops mlx5_devlink_ops = {
.rate_node_new = mlx5_esw_devlink_rate_node_new,
.rate_node_del = mlx5_esw_devlink_rate_node_del,
.rate_leaf_parent_set = mlx5_esw_devlink_rate_parent_set,
- .port_fn_roce_get = mlx5_devlink_port_fn_roce_get,
- .port_fn_roce_set = mlx5_devlink_port_fn_roce_set,
- .port_fn_migratable_get = mlx5_devlink_port_fn_migratable_get,
- .port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set,
#endif
#ifdef CONFIG_MLX5_SF_MANAGER
.port_new = mlx5_devlink_sf_port_new,
- .port_del = mlx5_devlink_sf_port_del,
- .port_fn_state_get = mlx5_devlink_sf_port_fn_state_get,
- .port_fn_state_set = mlx5_devlink_sf_port_fn_state_set,
#endif
.flash_update = mlx5_devlink_flash_update,
.info_get = mlx5_devlink_info_get,
@@ -437,54 +437,6 @@ static int mlx5_devlink_large_group_num_validate(struct devlink *devlink, u32 id
return 0;
}
-
-static int mlx5_devlink_esw_multiport_set(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
-
- if (!MLX5_ESWITCH_MANAGER(dev))
- return -EOPNOTSUPP;
-
- if (ctx->val.vbool)
- return mlx5_lag_mpesw_enable(dev);
-
- mlx5_lag_mpesw_disable(dev);
- return 0;
-}
-
-static int mlx5_devlink_esw_multiport_get(struct devlink *devlink, u32 id,
- struct devlink_param_gset_ctx *ctx)
-{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
-
- if (!MLX5_ESWITCH_MANAGER(dev))
- return -EOPNOTSUPP;
-
- ctx->val.vbool = mlx5_lag_is_mpesw(dev);
- return 0;
-}
-
-static int mlx5_devlink_esw_multiport_validate(struct devlink *devlink, u32 id,
- union devlink_param_value val,
- struct netlink_ext_ack *extack)
-{
- struct mlx5_core_dev *dev = devlink_priv(devlink);
-
- if (!MLX5_ESWITCH_MANAGER(dev)) {
- NL_SET_ERR_MSG_MOD(extack, "E-Switch is unsupported");
- return -EOPNOTSUPP;
- }
-
- if (mlx5_eswitch_mode(dev) != MLX5_ESWITCH_OFFLOADS) {
- NL_SET_ERR_MSG_MOD(extack,
- "E-Switch must be in switchdev mode");
- return -EBUSY;
- }
-
- return 0;
-}
-
#endif
static int mlx5_devlink_eq_depth_validate(struct devlink *devlink, u32 id,
@@ -558,12 +510,6 @@ static const struct devlink_param mlx5_devlink_params[] = {
BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL,
mlx5_devlink_large_group_num_validate),
- DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_MULTIPORT,
- "esw_multiport", DEVLINK_PARAM_TYPE_BOOL,
- BIT(DEVLINK_PARAM_CMODE_RUNTIME),
- mlx5_devlink_esw_multiport_get,
- mlx5_devlink_esw_multiport_set,
- mlx5_devlink_esw_multiport_validate),
#endif
DEVLINK_PARAM_GENERIC(IO_EQ_SIZE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
NULL, NULL, mlx5_devlink_eq_depth_validate),
@@ -576,7 +522,7 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
struct mlx5_core_dev *dev = devlink_priv(devlink);
union devlink_param_value value;
- value.vbool = MLX5_CAP_GEN(dev, roce);
+ value.vbool = MLX5_CAP_GEN(dev, roce) && !mlx5_dev_is_lightweight(dev);
devl_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
value);
@@ -626,7 +572,7 @@ static int mlx5_devlink_eth_params_register(struct devlink *devlink)
if (err)
return err;
- value.vbool = true;
+ value.vbool = !mlx5_dev_is_lightweight(dev);
devl_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH,
value);
@@ -666,6 +612,7 @@ static const struct devlink_param mlx5_devlink_rdma_params[] = {
static int mlx5_devlink_rdma_params_register(struct devlink *devlink)
{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
union devlink_param_value value;
int err;
@@ -677,7 +624,7 @@ static int mlx5_devlink_rdma_params_register(struct devlink *devlink)
if (err)
return err;
- value.vbool = true;
+ value.vbool = !mlx5_dev_is_lightweight(dev);
devl_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA,
value);
@@ -712,7 +659,7 @@ static int mlx5_devlink_vnet_params_register(struct devlink *devlink)
if (err)
return err;
- value.vbool = true;
+ value.vbool = !mlx5_dev_is_lightweight(dev);
devl_param_driverinit_value_set(devlink,
DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET,
value);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
index 9114661cd967..b0128336ff01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
@@ -76,6 +76,16 @@ int mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
if (err)
return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "generated_pkt_steering_fail",
+ VNIC_ENV_GET64(&vnic, generated_pkt_steering_fail));
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u64_pair_put(fmsg, "handled_pkt_steering_fail",
+ VNIC_ENV_GET64(&vnic, handled_pkt_steering_fail));
+ if (err)
+ return err;
+
err = devlink_fmsg_obj_nest_end(fmsg);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 8e999f238194..b1807bfb815f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -165,15 +165,6 @@ struct page_pool;
#define MLX5E_MAX_KLM_PER_WQE(mdev) \
MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev))
-#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
-
-#define mlx5e_dbg(mlevel, priv, format, ...) \
-do { \
- if (NETIF_MSG_##mlevel & (priv)->msglevel) \
- netdev_warn(priv->netdev, format, \
- ##__VA_ARGS__); \
-} while (0)
-
#define mlx5e_state_dereference(priv, p) \
rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock))
@@ -594,13 +585,6 @@ struct mlx5e_mpw_info {
#define MLX5E_MAX_RX_FRAGS 4
-/* a single cache unit is capable to serve one napi call (for non-striding rq)
- * or a MPWQE (for striding rq).
- */
-#define MLX5E_CACHE_UNIT (MLX5_MPWRQ_MAX_PAGES_PER_WQE > NAPI_POLL_WEIGHT ? \
- MLX5_MPWRQ_MAX_PAGES_PER_WQE : NAPI_POLL_WEIGHT)
-#define MLX5E_CACHE_SIZE (4 * roundup_pow_of_two(MLX5E_CACHE_UNIT))
-
struct mlx5e_rq;
typedef void (*mlx5e_fp_handle_rx_cqe)(struct mlx5e_rq*, struct mlx5_cqe64*);
typedef struct sk_buff *
@@ -887,7 +871,6 @@ struct mlx5e_priv {
#endif
/* priv data path fields - end */
- u32 msglevel;
unsigned long state;
struct mutex state_lock; /* Protects Interface state */
struct mlx5e_rq drop_rq;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index 7e8e96cc5cd0..8e25f4ef5ccc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -65,12 +65,13 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
MLX5_GET(bufferx_reg, buffer, xoff_threshold) * port_buff_cell_sz;
total_used += port_buffer->buffer[i].size;
- mlx5e_dbg(HW, priv, "buffer %d: size=%d, xon=%d, xoff=%d, epsb=%d, lossy=%d\n", i,
- port_buffer->buffer[i].size,
- port_buffer->buffer[i].xon,
- port_buffer->buffer[i].xoff,
- port_buffer->buffer[i].epsb,
- port_buffer->buffer[i].lossy);
+ netdev_dbg(priv->netdev, "buffer %d: size=%d, xon=%d, xoff=%d, epsb=%d, lossy=%d\n",
+ i,
+ port_buffer->buffer[i].size,
+ port_buffer->buffer[i].xon,
+ port_buffer->buffer[i].xoff,
+ port_buffer->buffer[i].epsb,
+ port_buffer->buffer[i].lossy);
}
port_buffer->internal_buffers_size = 0;
@@ -87,11 +88,11 @@ int mlx5e_port_query_buffer(struct mlx5e_priv *priv,
port_buffer->internal_buffers_size -
port_buffer->headroom_size;
- mlx5e_dbg(HW, priv,
- "total buffer size=%u, headroom buffer size=%u, internal buffers size=%u, spare buffer size=%u\n",
- port_buffer->port_buffer_size, port_buffer->headroom_size,
- port_buffer->internal_buffers_size,
- port_buffer->spare_buffer_size);
+ netdev_dbg(priv->netdev,
+ "total buffer size=%u, headroom buffer size=%u, internal buffers size=%u, spare buffer size=%u\n",
+ port_buffer->port_buffer_size, port_buffer->headroom_size,
+ port_buffer->internal_buffers_size,
+ port_buffer->spare_buffer_size);
out:
kfree(out);
return err;
@@ -352,7 +353,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
- mlx5e_dbg(HW, priv, "%s: xoff=%d\n", __func__, xoff);
+ netdev_dbg(priv->netdev, "%s: xoff=%d\n", __func__, xoff);
return xoff;
}
@@ -484,6 +485,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
u8 *prio2buffer)
{
u16 port_buff_cell_sz = priv->dcbx.port_buff_cell_sz;
+ struct net_device *netdev = priv->netdev;
struct mlx5e_port_buffer port_buffer;
u32 xoff = calculate_xoff(priv, mtu);
bool update_prio2buffer = false;
@@ -495,7 +497,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
int err;
int i;
- mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
+ netdev_dbg(netdev, "%s: change=%x\n", __func__, change);
max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);
err = mlx5e_port_query_buffer(priv, &port_buffer);
@@ -510,8 +512,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
}
if (change & MLX5E_PORT_BUFFER_PFC) {
- mlx5e_dbg(HW, priv, "%s: requested PFC per priority bitmask: 0x%x\n",
- __func__, pfc->pfc_en);
+ netdev_dbg(netdev, "%s: requested PFC per priority bitmask: 0x%x\n",
+ __func__, pfc->pfc_en);
err = mlx5e_port_query_priority2buffer(priv->mdev, buffer);
if (err)
return err;
@@ -526,8 +528,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (change & MLX5E_PORT_BUFFER_PRIO2BUFFER) {
update_prio2buffer = true;
for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++)
- mlx5e_dbg(HW, priv, "%s: requested to map prio[%d] to buffer %d\n",
- __func__, i, prio2buffer[i]);
+ netdev_dbg(priv->netdev, "%s: requested to map prio[%d] to buffer %d\n",
+ __func__, i, prio2buffer[i]);
err = fill_pfc_en(priv->mdev, &curr_pfc_en);
if (err)
@@ -541,10 +543,10 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (change & MLX5E_PORT_BUFFER_SIZE) {
for (i = 0; i < MLX5E_MAX_NETWORK_BUFFER; i++) {
- mlx5e_dbg(HW, priv, "%s: buffer[%d]=%d\n", __func__, i, buffer_size[i]);
+ netdev_dbg(priv->netdev, "%s: buffer[%d]=%d\n", __func__, i, buffer_size[i]);
if (!port_buffer.buffer[i].lossy && !buffer_size[i]) {
- mlx5e_dbg(HW, priv, "%s: lossless buffer[%d] size cannot be zero\n",
- __func__, i);
+ netdev_dbg(priv->netdev, "%s: lossless buffer[%d] size cannot be zero\n",
+ __func__, i);
return -EINVAL;
}
@@ -552,7 +554,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
total_used += buffer_size[i];
}
- mlx5e_dbg(HW, priv, "%s: total buffer requested=%d\n", __func__, total_used);
+ netdev_dbg(priv->netdev, "%s: total buffer requested=%d\n", __func__, total_used);
if (total_used > port_buffer.headroom_size &&
(total_used - port_buffer.headroom_size) >
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
index 2842195ee548..1874c2f0587f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c
@@ -379,6 +379,12 @@ int mlx5e_htb_setup_tc(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb_
if (!htb && htb_qopt->command != TC_HTB_CREATE)
return -EINVAL;
+ if (htb_qopt->prio) {
+ NL_SET_ERR_MSG_MOD(htb_qopt->extack,
+ "prio parameter is not supported by device with HTB offload enabled.");
+ return -EOPNOTSUPP;
+ }
+
switch (htb_qopt->command) {
case TC_HTB_CREATE:
if (!mlx5_qos_is_supported(priv->mdev)) {
@@ -515,4 +521,3 @@ int mlx5e_mqprio_rl_get_node_hw_id(struct mlx5e_mqprio_rl *rl, int tc, u32 *hw_i
*hw_id = rl->leaves_id[tc];
return 0;
}
-
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
index fd191925ab4b..560800246573 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c
@@ -136,7 +136,6 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr
struct mlx5_eswitch *esw = br_offloads->esw;
u16 vport_num, esw_owner_vhca_id;
struct netlink_ext_ack *extack;
- int ifindex = upper->ifindex;
int err = 0;
if (!netif_is_bridge_master(upper))
@@ -150,15 +149,15 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr
if (mlx5_esw_bridge_is_local(dev, rep, esw))
err = info->linking ?
- mlx5_esw_bridge_vport_link(ifindex, vport_num, esw_owner_vhca_id,
+ mlx5_esw_bridge_vport_link(upper, vport_num, esw_owner_vhca_id,
br_offloads, extack) :
- mlx5_esw_bridge_vport_unlink(ifindex, vport_num, esw_owner_vhca_id,
+ mlx5_esw_bridge_vport_unlink(upper, vport_num, esw_owner_vhca_id,
br_offloads, extack);
else if (mlx5_esw_bridge_dev_same_hw(rep, esw))
err = info->linking ?
- mlx5_esw_bridge_vport_peer_link(ifindex, vport_num, esw_owner_vhca_id,
+ mlx5_esw_bridge_vport_peer_link(upper, vport_num, esw_owner_vhca_id,
br_offloads, extack) :
- mlx5_esw_bridge_vport_peer_unlink(ifindex, vport_num, esw_owner_vhca_id,
+ mlx5_esw_bridge_vport_peer_unlink(upper, vport_num, esw_owner_vhca_id,
br_offloads, extack);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c
index 07c1895a2b23..7aa926e542d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c
@@ -25,8 +25,8 @@ struct mlx5e_tc_act_stats {
static const struct rhashtable_params act_counters_ht_params = {
.head_offset = offsetof(struct mlx5e_tc_act_stats, hash),
- .key_offset = 0,
- .key_len = offsetof(struct mlx5e_tc_act_stats, counter),
+ .key_offset = offsetof(struct mlx5e_tc_act_stats, tc_act_cookie),
+ .key_len = sizeof_field(struct mlx5e_tc_act_stats, tc_act_cookie),
.automatic_shrinking = true,
};
@@ -169,14 +169,11 @@ mlx5e_tc_act_stats_fill_stats(struct mlx5e_tc_act_stats_handle *handle,
{
struct rhashtable *ht = &handle->ht;
struct mlx5e_tc_act_stats *item;
- struct mlx5e_tc_act_stats key;
u64 pkts, bytes, lastused;
int err = 0;
- key.tc_act_cookie = fl_act->cookie;
-
rcu_read_lock();
- item = rhashtable_lookup(ht, &key, act_counters_ht_params);
+ item = rhashtable_lookup(ht, &fl_act->cookie, act_counters_ht_params);
if (!item) {
rcu_read_unlock();
err = -ENOENT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
index 0290e0dea539..4e923a2874ae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c
@@ -112,10 +112,8 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *po
int err;
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
- if (!handle) {
- kfree(handle);
+ if (!handle)
return ERR_PTR(-ENOMEM);
- }
post_attr->chain = 0;
post_attr->prio = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
index ba2b1f24ff14..6cc23af66b5b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h
@@ -94,13 +94,13 @@ struct mlx5e_tc_flow {
* destinations.
*/
struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
- struct mlx5e_tc_flow *peer_flow;
struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
struct list_head hairpin; /* flows sharing the same hairpin */
- struct list_head peer; /* flows with peer flow */
+ struct list_head peer[MLX5_MAX_PORTS]; /* flows with peer flow */
struct list_head unready; /* flows not ready to be offloaded (e.g
* due to missing route)
*/
+ struct list_head peer_flows; /* flows on peer */
struct net_device *orig_dev; /* netdev adding flow first */
int tmp_entry_index;
struct list_head tmp_list; /* temporary flow list used by neigh update */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
index c964644ee866..bac4717548c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
@@ -125,7 +125,7 @@ static inline bool mlx5e_accel_tx_begin(struct net_device *dev,
#ifdef CONFIG_MLX5_EN_TLS
/* May send WQEs. */
- if (mlx5e_ktls_skb_offloaded(skb))
+ if (tls_is_skb_tx_device_offloaded(skb))
if (unlikely(!mlx5e_ktls_handle_tx_skb(dev, sq, skb,
&state->tls)))
return false;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
index 0e4c0a093293..efb2cf74ad6a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c
@@ -846,7 +846,7 @@ bool mlx5e_ktls_handle_tx_skb(struct net_device *netdev, struct mlx5e_txqsq *sq,
tls_ctx = tls_get_ctx(skb->sk);
tls_netdev = rcu_dereference_bh(tls_ctx->netdev);
/* Don't WARN on NULL: if tls_device_down is running in parallel,
- * netdev might become NULL, even if tls_is_sk_tx_device_offloaded was
+ * netdev might become NULL, even if tls_is_skb_tx_device_offloaded was
* true. Rather continue processing this packet.
*/
if (WARN_ON_ONCE(tls_netdev && tls_netdev != netdev))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
index 2dd78dd4ad65..f87b65c560ea 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_txrx.h
@@ -49,11 +49,6 @@ mlx5e_ktls_rx_pending_resync_list(struct mlx5e_channel *c, int budget)
return budget && test_bit(MLX5E_SQ_STATE_PENDING_TLS_RX_RESYNC, &c->async_icosq.state);
}
-static inline bool mlx5e_ktls_skb_offloaded(struct sk_buff *skb)
-{
- return skb->sk && tls_is_sk_tx_device_offloaded(skb->sk);
-}
-
static inline void
mlx5e_ktls_handle_tx_wqe(struct mlx5_wqe_ctrl_seg *cseg,
struct mlx5e_accel_tx_tls_state *state)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
index 6b7b563f844a..592b165530ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
@@ -349,15 +349,6 @@ static void mlx5e_macsec_cleanup_sa(struct mlx5e_macsec *macsec,
sa->macsec_rule = NULL;
}
-static struct mlx5e_priv *macsec_netdev_priv(const struct net_device *dev)
-{
-#if IS_ENABLED(CONFIG_VLAN_8021Q)
- if (is_vlan_dev(dev))
- return netdev_priv(vlan_dev_priv(dev)->real_dev);
-#endif
- return netdev_priv(dev);
-}
-
static int mlx5e_macsec_init_sa(struct macsec_context *ctx,
struct mlx5e_macsec_sa *sa,
bool encrypt,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index bed0c2d043e7..933a7772a7a3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -570,10 +570,10 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
priv->channel_stats[arfs_rule->rxq]->rq.arfs_err++;
- mlx5e_dbg(HW, priv,
- "%s: add rule(filter id=%d, rq idx=%d, ip proto=0x%x) failed,err=%d\n",
- __func__, arfs_rule->filter_id, arfs_rule->rxq,
- tuple->ip_proto, err);
+ netdev_dbg(priv->netdev,
+ "%s: add rule(filter id=%d, rq idx=%d, ip proto=0x%x) failed,err=%d\n",
+ __func__, arfs_rule->filter_id, arfs_rule->rxq,
+ tuple->ip_proto, err);
}
out:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index ebee52a8361a..8705cffc747f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -275,10 +275,10 @@ static int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets
memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- mlx5e_dbg(HW, priv, "%s: prio_%d <=> tc_%d\n",
- __func__, i, ets->prio_tc[i]);
- mlx5e_dbg(HW, priv, "%s: tc_%d <=> tx_bw_%d%%, group_%d\n",
- __func__, i, tc_tx_bw[i], tc_group[i]);
+ netdev_dbg(priv->netdev, "%s: prio_%d <=> tc_%d\n",
+ __func__, i, ets->prio_tc[i]);
+ netdev_dbg(priv->netdev, "%s: tc_%d <=> tx_bw_%d%%, group_%d\n",
+ __func__, i, tc_tx_bw[i], tc_group[i]);
}
return err;
@@ -399,9 +399,9 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
}
if (!ret) {
- mlx5e_dbg(HW, priv,
- "%s: PFC per priority bit mask: 0x%x\n",
- __func__, pfc->pfc_en);
+ netdev_dbg(dev,
+ "%s: PFC per priority bit mask: 0x%x\n",
+ __func__, pfc->pfc_en);
}
return ret;
}
@@ -611,8 +611,8 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
}
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- mlx5e_dbg(HW, priv, "%s: tc_%d <=> max_bw %d Gbps\n",
- __func__, i, max_bw_value[i]);
+ netdev_dbg(netdev, "%s: tc_%d <=> max_bw %d Gbps\n",
+ __func__, i, max_bw_value[i]);
}
return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
@@ -640,10 +640,10 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i];
ets.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
ets.prio_tc[i] = cee_cfg->prio_to_pg_map[i];
- mlx5e_dbg(HW, priv,
- "%s: Priority group %d: tx_bw %d, rx_bw %d, prio_tc %d\n",
- __func__, i, ets.tc_tx_bw[i], ets.tc_rx_bw[i],
- ets.prio_tc[i]);
+ netdev_dbg(netdev,
+ "%s: Priority group %d: tx_bw %d, rx_bw %d, prio_tc %d\n",
+ __func__, i, ets.tc_tx_bw[i], ets.tc_rx_bw[i],
+ ets.prio_tc[i]);
}
err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 1f5a2110d31f..27861b68ced5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -1689,16 +1689,6 @@ static int mlx5e_set_fecparam(struct net_device *netdev,
return 0;
}
-static u32 mlx5e_get_msglevel(struct net_device *dev)
-{
- return ((struct mlx5e_priv *)netdev_priv(dev))->msglevel;
-}
-
-static void mlx5e_set_msglevel(struct net_device *dev, u32 val)
-{
- ((struct mlx5e_priv *)netdev_priv(dev))->msglevel = val;
-}
-
static int mlx5e_set_phys_id(struct net_device *dev,
enum ethtool_phys_id_state state)
{
@@ -1952,9 +1942,9 @@ int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val
if (err)
return err;
- mlx5e_dbg(DRV, priv, "MLX5E: RxCqeCmprss was turned %s\n",
- MLX5E_GET_PFLAG(&priv->channels.params,
- MLX5E_PFLAG_RX_CQE_COMPRESS) ? "ON" : "OFF");
+ netdev_dbg(priv->netdev, "MLX5E: RxCqeCmprss was turned %s\n",
+ MLX5E_GET_PFLAG(&priv->channels.params,
+ MLX5E_PFLAG_RX_CQE_COMPRESS) ? "ON" : "OFF");
return 0;
}
@@ -2444,8 +2434,6 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_priv_flags = mlx5e_get_priv_flags,
.set_priv_flags = mlx5e_set_priv_flags,
.self_test = mlx5e_self_test,
- .get_msglevel = mlx5e_get_msglevel,
- .set_msglevel = mlx5e_set_msglevel,
.get_fec_stats = mlx5e_get_fec_stats,
.get_fecparam = mlx5e_get_fecparam,
.set_fecparam = mlx5e_set_fecparam,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 33bfe4d7338b..934b0d5ce1b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -283,7 +283,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs,
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
*rule_p = NULL;
- fs_err(fs, "%s: add rule failed\n", __func__);
+ fs_err(fs, "add rule failed\n");
}
return err;
@@ -395,8 +395,7 @@ int mlx5e_add_vlan_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
fs->vlan->trap_rule = NULL;
- fs_err(fs, "%s: add VLAN trap rule failed, err %d\n",
- __func__, err);
+ fs_err(fs, "add VLAN trap rule failed, err %d\n", err);
return err;
}
fs->vlan->trap_rule = rule;
@@ -421,8 +420,7 @@ int mlx5e_add_mac_trap(struct mlx5e_flow_steering *fs, int trap_id, int tir_num)
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
fs->l2.trap_rule = NULL;
- fs_err(fs, "%s: add MAC trap rule failed, err %d\n",
- __func__, err);
+ fs_err(fs, "add MAC trap rule failed, err %d\n", err);
return err;
}
fs->l2.trap_rule = rule;
@@ -763,7 +761,7 @@ static int mlx5e_add_promisc_rule(struct mlx5e_flow_steering *fs)
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
*rule_p = NULL;
- fs_err(fs, "%s: add promiscuous rule failed\n", __func__);
+ fs_err(fs, "add promiscuous rule failed\n");
}
kvfree(spec);
return err;
@@ -995,7 +993,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs,
ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(ai->rule)) {
- fs_err(fs, "%s: add l2 rule(mac:%pM) failed\n", __func__, mv_dmac);
+ fs_err(fs, "add l2 rule(mac:%pM) failed\n", mv_dmac);
err = PTR_ERR(ai->rule);
ai->rule = NULL;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index a5bdf78955d7..defb1efccb78 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2402,7 +2402,7 @@ static int mlx5e_channel_stats_alloc(struct mlx5e_priv *priv, int ix, int cpu)
/* Asymmetric dynamic memory allocation.
* Freed in mlx5e_priv_arrays_free, not on channel closure.
*/
- mlx5e_dbg(DRV, priv, "Creating channel stats %d\n", ix);
+ netdev_dbg(priv->netdev, "Creating channel stats %d\n", ix);
priv->channel_stats[ix] = kvzalloc_node(sizeof(**priv->channel_stats),
GFP_KERNEL, cpu_to_node(cpu));
if (!priv->channel_stats[ix])
@@ -2780,7 +2780,7 @@ int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv)
if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS))
num_txqs += ntc;
- mlx5e_dbg(DRV, priv, "Setting num_txqs %d\n", num_txqs);
+ netdev_dbg(priv->netdev, "Setting num_txqs %d\n", num_txqs);
err = netif_set_real_num_tx_queues(priv->netdev, num_txqs);
if (err)
netdev_warn(priv->netdev, "netif_set_real_num_tx_queues failed, %d\n", err);
@@ -5586,7 +5586,6 @@ int mlx5e_priv_init(struct mlx5e_priv *priv,
/* priv init */
priv->mdev = mdev;
priv->netdev = netdev;
- priv->msglevel = MLX5E_MSG_LEVEL;
priv->max_nch = nch;
priv->max_opened_tc = 1;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 3e7041bd5705..152b62138450 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -374,7 +374,9 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep)
{
struct mlx5e_rep_sq *rep_sq, *tmp;
+ struct mlx5e_rep_sq_peer *sq_peer;
struct mlx5e_rep_priv *rpriv;
+ unsigned long i;
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return;
@@ -382,31 +384,78 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
rpriv = mlx5e_rep_to_rep_priv(rep);
list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
- if (rep_sq->send_to_vport_rule_peer)
- mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule_peer);
+ xa_for_each(&rep_sq->sq_peer, i, sq_peer) {
+ if (sq_peer->rule)
+ mlx5_eswitch_del_send_to_vport_rule(sq_peer->rule);
+
+ xa_erase(&rep_sq->sq_peer, i);
+ kfree(sq_peer);
+ }
+
+ xa_destroy(&rep_sq->sq_peer);
list_del(&rep_sq->list);
kfree(rep_sq);
}
}
+static int mlx5e_sqs2vport_add_peers_rules(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep,
+ struct mlx5_devcom *devcom,
+ struct mlx5e_rep_sq *rep_sq, int i)
+{
+ struct mlx5_eswitch *peer_esw = NULL;
+ struct mlx5_flow_handle *flow_rule;
+ int tmp;
+
+ mlx5_devcom_for_each_peer_entry(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
+ peer_esw, tmp) {
+ u16 peer_rule_idx = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
+ struct mlx5e_rep_sq_peer *sq_peer;
+ int err;
+
+ sq_peer = kzalloc(sizeof(*sq_peer), GFP_KERNEL);
+ if (!sq_peer)
+ return -ENOMEM;
+
+ flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw,
+ rep, rep_sq->sqn);
+ if (IS_ERR(flow_rule)) {
+ kfree(sq_peer);
+ return PTR_ERR(flow_rule);
+ }
+
+ sq_peer->rule = flow_rule;
+ sq_peer->peer = peer_esw;
+ err = xa_insert(&rep_sq->sq_peer, peer_rule_idx, sq_peer, GFP_KERNEL);
+ if (err) {
+ kfree(sq_peer);
+ mlx5_eswitch_del_send_to_vport_rule(flow_rule);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep,
u32 *sqns_array, int sqns_num)
{
- struct mlx5_eswitch *peer_esw = NULL;
struct mlx5_flow_handle *flow_rule;
struct mlx5e_rep_priv *rpriv;
struct mlx5e_rep_sq *rep_sq;
+ struct mlx5_devcom *devcom;
+ bool devcom_locked = false;
int err;
int i;
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return 0;
+ devcom = esw->dev->priv.devcom;
rpriv = mlx5e_rep_to_rep_priv(rep);
- if (mlx5_devcom_is_paired(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS))
- peer_esw = mlx5_devcom_get_peer_data(esw->dev->priv.devcom,
- MLX5_DEVCOM_ESW_OFFLOADS);
+ if (mlx5_devcom_comp_is_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS) &&
+ mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
+ devcom_locked = true;
for (i = 0; i < sqns_num; i++) {
rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
@@ -426,31 +475,30 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
rep_sq->send_to_vport_rule = flow_rule;
rep_sq->sqn = sqns_array[i];
- if (peer_esw) {
- flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw,
- rep, sqns_array[i]);
- if (IS_ERR(flow_rule)) {
- err = PTR_ERR(flow_rule);
+ xa_init(&rep_sq->sq_peer);
+ if (devcom_locked) {
+ err = mlx5e_sqs2vport_add_peers_rules(esw, rep, devcom, rep_sq, i);
+ if (err) {
mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
+ xa_destroy(&rep_sq->sq_peer);
kfree(rep_sq);
goto out_err;
}
- rep_sq->send_to_vport_rule_peer = flow_rule;
}
list_add(&rep_sq->list, &rpriv->vport_sqs_list);
}
- if (peer_esw)
- mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ if (devcom_locked)
+ mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
return 0;
out_err:
mlx5e_sqs2vport_stop(esw, rep);
- if (peer_esw)
- mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ if (devcom_locked)
+ mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
return err;
}
@@ -1530,17 +1578,24 @@ static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
return rpriv->netdev;
}
-static void mlx5e_vport_rep_event_unpair(struct mlx5_eswitch_rep *rep)
+static void mlx5e_vport_rep_event_unpair(struct mlx5_eswitch_rep *rep,
+ struct mlx5_eswitch *peer_esw)
{
+ u16 i = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
struct mlx5e_rep_priv *rpriv;
struct mlx5e_rep_sq *rep_sq;
+ WARN_ON_ONCE(!peer_esw);
rpriv = mlx5e_rep_to_rep_priv(rep);
list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
- if (!rep_sq->send_to_vport_rule_peer)
+ struct mlx5e_rep_sq_peer *sq_peer = xa_load(&rep_sq->sq_peer, i);
+
+ if (!sq_peer || sq_peer->peer != peer_esw)
continue;
- mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule_peer);
- rep_sq->send_to_vport_rule_peer = NULL;
+
+ mlx5_eswitch_del_send_to_vport_rule(sq_peer->rule);
+ xa_erase(&rep_sq->sq_peer, i);
+ kfree(sq_peer);
}
}
@@ -1548,24 +1603,52 @@ static int mlx5e_vport_rep_event_pair(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep,
struct mlx5_eswitch *peer_esw)
{
+ u16 i = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
struct mlx5_flow_handle *flow_rule;
+ struct mlx5e_rep_sq_peer *sq_peer;
struct mlx5e_rep_priv *rpriv;
struct mlx5e_rep_sq *rep_sq;
+ int err;
rpriv = mlx5e_rep_to_rep_priv(rep);
list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) {
- if (rep_sq->send_to_vport_rule_peer)
+ sq_peer = xa_load(&rep_sq->sq_peer, i);
+
+ if (sq_peer && sq_peer->peer)
continue;
- flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw, rep, rep_sq->sqn);
- if (IS_ERR(flow_rule))
+
+ flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw, rep,
+ rep_sq->sqn);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
goto err_out;
- rep_sq->send_to_vport_rule_peer = flow_rule;
+ }
+
+ if (sq_peer) {
+ sq_peer->rule = flow_rule;
+ sq_peer->peer = peer_esw;
+ continue;
+ }
+ sq_peer = kzalloc(sizeof(*sq_peer), GFP_KERNEL);
+ if (!sq_peer) {
+ err = -ENOMEM;
+ goto err_sq_alloc;
+ }
+ err = xa_insert(&rep_sq->sq_peer, i, sq_peer, GFP_KERNEL);
+ if (err)
+ goto err_xa;
+ sq_peer->rule = flow_rule;
+ sq_peer->peer = peer_esw;
}
return 0;
+err_xa:
+ kfree(sq_peer);
+err_sq_alloc:
+ mlx5_eswitch_del_send_to_vport_rule(flow_rule);
err_out:
- mlx5e_vport_rep_event_unpair(rep);
- return PTR_ERR(flow_rule);
+ mlx5e_vport_rep_event_unpair(rep, peer_esw);
+ return err;
}
static int mlx5e_vport_rep_event(struct mlx5_eswitch *esw,
@@ -1578,7 +1661,7 @@ static int mlx5e_vport_rep_event(struct mlx5_eswitch *esw,
if (event == MLX5_SWITCHDEV_EVENT_PAIR)
err = mlx5e_vport_rep_event_pair(esw, rep, data);
else if (event == MLX5_SWITCHDEV_EVENT_UNPAIR)
- mlx5e_vport_rep_event_unpair(rep);
+ mlx5e_vport_rep_event_unpair(rep, data);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
index 80b7f5079a5a..70640fa1ad7b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h
@@ -225,9 +225,14 @@ struct mlx5e_encap_entry {
struct rcu_head rcu;
};
+struct mlx5e_rep_sq_peer {
+ struct mlx5_flow_handle *rule;
+ void *peer;
+};
+
struct mlx5e_rep_sq {
struct mlx5_flow_handle *send_to_vport_rule;
- struct mlx5_flow_handle *send_to_vport_rule_peer;
+ struct xarray sq_peer;
u32 sqn;
struct list_head list;
};
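The en_rep.h change above swaps the single send_to_vport_rule_peer pointer for an xarray keyed by the peer's vhca_id, so one rep SQ can hold a rule per peer eswitch. A minimal sketch of that lifecycle, with invented demo_* names standing in for the driver types (the real xarray is xa_init()ed when the rep SQ is created and xa_destroy()ed on teardown, as the en_rep.c hunks do):

#include <linux/xarray.h>
#include <linux/slab.h>

struct demo_sq_peer {
	void *rule;	/* stands in for struct mlx5_flow_handle * */
	void *peer;	/* stands in for struct mlx5_eswitch * */
};

static int demo_sq_pair(struct xarray *sq_peer, unsigned long peer_vhca_id,
			void *rule, void *peer)
{
	struct demo_sq_peer *sp = xa_load(sq_peer, peer_vhca_id);
	int err;

	if (sp) {	/* slot left behind by an earlier unpair: reuse it */
		sp->rule = rule;
		sp->peer = peer;
		return 0;
	}
	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
	if (!sp)
		return -ENOMEM;
	err = xa_insert(sq_peer, peer_vhca_id, sp, GFP_KERNEL);
	if (err) {
		kfree(sp);
		return err;
	}
	sp->rule = rule;
	sp->peer = peer;
	return 0;
}

static void demo_sq_unpair(struct xarray *sq_peer, unsigned long peer_vhca_id)
{
	struct demo_sq_peer *sp = xa_erase(sq_peer, peer_vhca_id);

	kfree(sp);	/* kfree(NULL) is a no-op */
}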
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 69634829558e..704b022cd1f0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -491,9 +491,7 @@ mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinf
}
frag = &sinfo->frags[sinfo->nr_frags++];
- __skb_frag_set_page(frag, frag_page->page);
- skb_frag_off_set(frag, frag_offset);
- skb_frag_size_set(frag, len);
+ skb_frag_fill_page_desc(frag, frag_page->page, frag_offset, len);
if (page_is_pfmemalloc(frag_page->page))
xdp_buff_set_frag_pfmemalloc(xdp);
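skb_frag_fill_page_desc() is the new core helper this hunk adopts; it writes the fragment's page, offset and size in one call. Sketched equivalence (demo_fill_frag is invented):

#include <linux/skbuff.h>

static void demo_fill_frag(skb_frag_t *frag, struct page *page,
			   int off, int len)
{
	/* replaces the former three-call sequence:
	 *	__skb_frag_set_page(frag, page);
	 *	skb_frag_off_set(frag, off);
	 *	skb_frag_size_set(frag, len);
	 */
	skb_frag_fill_page_desc(frag, page, off, len);
}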
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index f1d9596905c6..4d77055abd4b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -30,7 +30,7 @@
* SOFTWARE.
*/
-#include "lib/mlx5.h"
+#include "lib/events.h"
#include "en.h"
#include "en_accel/ktls.h"
#include "en_accel/en_accel.h"
@@ -748,11 +748,22 @@ static const struct counter_desc vport_stats_desc[] = {
VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};
+static const struct counter_desc vport_loopback_stats_desc[] = {
+ { "vport_loopback_packets",
+ VPORT_COUNTER_OFF(local_loopback.packets) },
+ { "vport_loopback_bytes",
+ VPORT_COUNTER_OFF(local_loopback.octets) },
+};
+
#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)
+#define NUM_VPORT_LOOPBACK_COUNTERS(dev) \
+ (MLX5_CAP_GEN(dev, vport_counter_local_loopback) ? \
+ ARRAY_SIZE(vport_loopback_stats_desc) : 0)
static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(vport)
{
- return NUM_VPORT_COUNTERS;
+ return NUM_VPORT_COUNTERS +
+ NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev);
}
static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
@@ -761,6 +772,11 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STRS(vport)
for (i = 0; i < NUM_VPORT_COUNTERS; i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
+
+ for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ vport_loopback_stats_desc[i].format);
+
return idx;
}
@@ -771,6 +787,11 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(vport)
for (i = 0; i < NUM_VPORT_COUNTERS; i++)
data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
vport_stats_desc, i);
+
+ for (i = 0; i < NUM_VPORT_LOOPBACK_COUNTERS(priv->mdev); i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
+ vport_loopback_stats_desc, i);
+
return idx;
}
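The loopback counters only exist when the vport_counter_local_loopback capability is set, so the group's count, string and value callbacks must all apply the same gate or the ethtool strings and values drift out of sync. The shape of that contract as a standalone sketch (demo_* names invented):

#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/string.h>

struct demo_desc { char format[ETH_GSTRING_LEN]; };

static const struct demo_desc demo_base[] = { { "rx_packets" } };
static const struct demo_desc demo_gated[] = { { "vport_loopback_packets" } };

#define DEMO_NUM_GATED(cap_on)	((cap_on) ? ARRAY_SIZE(demo_gated) : 0)

static int demo_num_stats(bool cap_on)
{
	return ARRAY_SIZE(demo_base) + DEMO_NUM_GATED(cap_on);
}

static int demo_fill_strings(char *data, int idx, bool cap_on)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(demo_base); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, demo_base[i].format);
	/* same gate as demo_num_stats(), so the strings match the count */
	for (i = 0; i < DEMO_NUM_GATED(cap_on); i++)
		strcpy(data + (idx++) * ETH_GSTRING_LEN, demo_gated[i].format);
	return idx;
}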
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index b9b1da751a3b..41dc26800f48 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1667,8 +1667,11 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
{
struct mlx5e_priv *out_priv, *route_priv;
struct mlx5_core_dev *route_mdev;
+ struct mlx5_devcom *devcom;
struct mlx5_eswitch *esw;
u16 vhca_id;
+ int err;
+ int i;
out_priv = netdev_priv(out_dev);
esw = out_priv->mdev->priv.eswitch;
@@ -1676,28 +1679,25 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
route_mdev = route_priv->mdev;
vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
- if (mlx5_lag_is_active(out_priv->mdev)) {
- struct mlx5_devcom *devcom;
- int err;
-
- /* In lag case we may get devices from different eswitch instances.
- * If we failed to get vport num, it means, mostly, that we on the wrong
- * eswitch.
- */
- err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
- if (err != -ENOENT)
- return err;
-
- rcu_read_lock();
- devcom = out_priv->mdev->priv.devcom;
- esw = mlx5_devcom_get_peer_data_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
- err = esw ? mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport) : -ENODEV;
- rcu_read_unlock();
+ err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
+ if (!err)
+ return err;
+ if (!mlx5_lag_is_active(out_priv->mdev))
return err;
+
+ rcu_read_lock();
+ devcom = out_priv->mdev->priv.devcom;
+ err = -ENODEV;
+ mlx5_devcom_for_each_peer_entry_rcu(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
+ esw, i) {
+ err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
+ if (!err)
+ break;
}
+ rcu_read_unlock();
- return mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
+ return err;
}
static int
@@ -1987,47 +1987,59 @@ void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
mlx5e_flow_put(priv, flow);
}
-static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
+static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow,
+ int peer_index)
{
struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;
+ struct mlx5e_tc_flow *peer_flow;
+ struct mlx5e_tc_flow *tmp;
if (!flow_flag_test(flow, ESWITCH) ||
!flow_flag_test(flow, DUP))
return;
mutex_lock(&esw->offloads.peer_mutex);
- list_del(&flow->peer);
+ list_del(&flow->peer[peer_index]);
mutex_unlock(&esw->offloads.peer_mutex);
- flow_flag_clear(flow, DUP);
-
- if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
- mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
- kfree(flow->peer_flow);
+ list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) {
+ if (peer_index != mlx5_get_dev_index(peer_flow->priv->mdev))
+ continue;
+ if (refcount_dec_and_test(&peer_flow->refcnt)) {
+ mlx5e_tc_del_fdb_flow(peer_flow->priv, peer_flow);
+ list_del(&peer_flow->peer_flows);
+ kfree(peer_flow);
+ }
}
- flow->peer_flow = NULL;
+ if (list_empty(&flow->peer_flows))
+ flow_flag_clear(flow, DUP);
}
-static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
+static void mlx5e_tc_del_fdb_peers_flow(struct mlx5e_tc_flow *flow)
{
- struct mlx5_core_dev *dev = flow->priv->mdev;
- struct mlx5_devcom *devcom = dev->priv.devcom;
- struct mlx5_eswitch *peer_esw;
-
- peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
- if (!peer_esw)
- return;
+ int i;
- __mlx5e_tc_del_fdb_peer_flow(flow);
- mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ for (i = 0; i < MLX5_MAX_PORTS; i++) {
+ if (i == mlx5_get_dev_index(flow->priv->mdev))
+ continue;
+ mlx5e_tc_del_fdb_peer_flow(flow, i);
+ }
}
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
if (mlx5e_is_eswitch_flow(flow)) {
- mlx5e_tc_del_fdb_peer_flow(flow);
+ struct mlx5_devcom *devcom = flow->priv->mdev->priv.devcom;
+
+ if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) {
+ mlx5e_tc_del_fdb_flow(priv, flow);
+ return;
+ }
+
+ mlx5e_tc_del_fdb_peers_flow(flow);
+ mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
mlx5e_tc_del_fdb_flow(priv, flow);
} else {
mlx5e_tc_del_nic_flow(priv, flow);
@@ -2503,6 +2515,12 @@ static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
return 0;
flow_rule_match_meta(rule, &match);
+
+ if (match.mask->l2_miss) {
+ NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on \"l2_miss\"");
+ return -EOPNOTSUPP;
+ }
+
if (!match.mask->ingress_ifindex)
return 0;
@@ -4198,8 +4216,8 @@ static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
flow_flag_test(flow, INGRESS);
bool act_is_encap = !!(attr->action &
MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
- bool esw_paired = mlx5_devcom_is_paired(esw_attr->in_mdev->priv.devcom,
- MLX5_DEVCOM_ESW_OFFLOADS);
+ bool esw_paired = mlx5_devcom_comp_is_ready(esw_attr->in_mdev->priv.devcom,
+ MLX5_DEVCOM_ESW_OFFLOADS);
if (!esw_paired)
return false;
@@ -4290,6 +4308,7 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
INIT_LIST_HEAD(&flow->hairpin);
INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
INIT_LIST_HEAD(&flow->attrs);
+ INIT_LIST_HEAD(&flow->peer_flows);
refcount_set(&flow->refcnt, 1);
init_completion(&flow->init_done);
init_completion(&flow->del_hw_done);
@@ -4398,22 +4417,19 @@ out:
static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
struct mlx5e_tc_flow *flow,
- unsigned long flow_flags)
+ unsigned long flow_flags,
+ struct mlx5_eswitch *peer_esw)
{
struct mlx5e_priv *priv = flow->priv, *peer_priv;
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
- struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
struct mlx5e_tc_flow_parse_attr *parse_attr;
+ int i = mlx5_get_dev_index(peer_esw->dev);
struct mlx5e_rep_priv *peer_urpriv;
struct mlx5e_tc_flow *peer_flow;
struct mlx5_core_dev *in_mdev;
int err = 0;
- peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
- if (!peer_esw)
- return -ENODEV;
-
peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
peer_priv = netdev_priv(peer_urpriv->netdev);
@@ -4438,14 +4454,13 @@ static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
goto out;
}
- flow->peer_flow = peer_flow;
+ list_add_tail(&peer_flow->peer_flows, &flow->peer_flows);
flow_flag_set(flow, DUP);
mutex_lock(&esw->offloads.peer_mutex);
- list_add_tail(&flow->peer, &esw->offloads.peer_flows);
+ list_add_tail(&flow->peer[i], &esw->offloads.peer_flows[i]);
mutex_unlock(&esw->offloads.peer_mutex);
out:
- mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
return err;
}
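The linkage behind these en_tc.c hunks, sketched with invented demo_* names: the one-to-one peer_flow pointer becomes one-to-many, with per-peer-index membership in the eswitch's peer_flows lists.

#include <linux/list.h>

#define DEMO_MAX_PORTS	4	/* stands in for MLX5_MAX_PORTS */

struct demo_flow {
	/* list head: duplicate flows created on peer eswitches */
	struct list_head peer_flows;
	/* list nodes: entry i links this flow into its eswitch's
	 * offloads.peer_flows[i] under peer_mutex, where i is the peer's
	 * mlx5_get_dev_index()
	 */
	struct list_head peer[DEMO_MAX_PORTS];
};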
@@ -4456,30 +4471,48 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
struct net_device *filter_dev,
struct mlx5e_tc_flow **__flow)
{
+ struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *in_rep = rpriv->rep;
struct mlx5_core_dev *in_mdev = priv->mdev;
+ struct mlx5_eswitch *peer_esw;
struct mlx5e_tc_flow *flow;
int err;
+ int i;
flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
in_mdev);
if (IS_ERR(flow))
return PTR_ERR(flow);
- if (is_peer_flow_needed(flow)) {
- err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
- if (err) {
- mlx5e_tc_del_fdb_flow(priv, flow);
- goto out;
- }
+ if (!is_peer_flow_needed(flow)) {
+ *__flow = flow;
+ return 0;
}
- *__flow = flow;
+ if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) {
+ err = -ENODEV;
+ goto clean_flow;
+ }
+
+ mlx5_devcom_for_each_peer_entry(devcom,
+ MLX5_DEVCOM_ESW_OFFLOADS,
+ peer_esw, i) {
+ err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags, peer_esw);
+ if (err)
+ goto peer_clean;
+ }
+ mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+
+ *__flow = flow;
return 0;
-out:
+peer_clean:
+ mlx5e_tc_del_fdb_peers_flow(flow);
+ mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+clean_flow:
+ mlx5e_tc_del_fdb_flow(priv, flow);
return err;
}
@@ -4697,7 +4730,6 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
{
struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
struct rhashtable *tc_ht = get_tc_ht(priv, flags);
- struct mlx5_eswitch *peer_esw;
struct mlx5e_tc_flow *flow;
struct mlx5_fc *counter;
u64 lastuse = 0;
@@ -4732,23 +4764,29 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
/* Under multipath it's possible for one rule to be currently
* un-offloaded while the other rule is offloaded.
*/
- peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
- if (!peer_esw)
+ if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
goto out;
- if (flow_flag_test(flow, DUP) &&
- flow_flag_test(flow->peer_flow, OFFLOADED)) {
- u64 bytes2;
- u64 packets2;
- u64 lastuse2;
+ if (flow_flag_test(flow, DUP)) {
+ struct mlx5e_tc_flow *peer_flow;
- if (flow_flag_test(flow, USE_ACT_STATS)) {
- f->use_act_stats = true;
- } else {
- counter = mlx5e_tc_get_counter(flow->peer_flow);
+ list_for_each_entry(peer_flow, &flow->peer_flows, peer_flows) {
+ u64 packets2;
+ u64 lastuse2;
+ u64 bytes2;
+
+ if (!flow_flag_test(peer_flow, OFFLOADED))
+ continue;
+ if (flow_flag_test(flow, USE_ACT_STATS)) {
+ f->use_act_stats = true;
+ break;
+ }
+
+ counter = mlx5e_tc_get_counter(peer_flow);
if (!counter)
goto no_peer_counter;
- mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);
+ mlx5_fc_query_cached(counter, &bytes2, &packets2,
+ &lastuse2);
bytes += bytes2;
packets += packets2;
@@ -4757,7 +4795,7 @@ int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
}
no_peer_counter:
- mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
FLOW_ACTION_HW_STATS_DELAYED);
@@ -5275,9 +5313,14 @@ int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
struct mlx5e_tc_flow *flow, *tmp;
+ int i;
- list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
- __mlx5e_tc_del_fdb_peer_flow(flow);
+ for (i = 0; i < MLX5_MAX_PORTS; i++) {
+ if (i == mlx5_get_dev_index(esw->dev))
+ continue;
+ list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows[i], peer[i])
+ mlx5e_tc_del_fdb_peers_flow(flow);
+ }
}
void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
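Nearly every converted call site in this file uses the same bracket: begin pins the devcom component (or reports it unusable), the entries are walked, end releases it. The skeleton, using the real mlx5 devcom helpers around a hypothetical per-peer action:

#include "lib/devcom.h"	/* mlx5-internal devcom API, as used above */

static int demo_act_on_peer(struct mlx5_eswitch *peer_esw);	/* hypothetical */

static int demo_for_all_peers(struct mlx5_devcom *devcom)
{
	struct mlx5_eswitch *peer_esw;
	int err = 0;
	int i;

	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
		return -ENODEV;	/* component not ready, nothing to walk */

	mlx5_devcom_for_each_peer_entry(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
					peer_esw, i) {
		err = demo_act_on_peer(peer_esw);
		if (err)
			break;	/* caller unwinds whatever was already done */
	}
	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	return err;
}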
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index fbb2d963fb7e..a7d9b7cb4297 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -207,7 +207,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget)
}
ch_stats->aff_change++;
aff_change = true;
- if (budget && work_done == budget)
+ if (work_done == budget)
work_done--;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
index 2e504c7461c6..24b1ca4e4ff8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c
@@ -15,13 +15,27 @@ static void esw_acl_egress_ofld_fwd2vport_destroy(struct mlx5_vport *vport)
vport->egress.offloads.fwd_rule = NULL;
}
-static void esw_acl_egress_ofld_bounce_rule_destroy(struct mlx5_vport *vport)
+void esw_acl_egress_ofld_bounce_rule_destroy(struct mlx5_vport *vport, int rule_index)
{
- if (!vport->egress.offloads.bounce_rule)
+ struct mlx5_flow_handle *bounce_rule =
+ xa_load(&vport->egress.offloads.bounce_rules, rule_index);
+
+ if (!bounce_rule)
return;
- mlx5_del_flow_rules(vport->egress.offloads.bounce_rule);
- vport->egress.offloads.bounce_rule = NULL;
+ mlx5_del_flow_rules(bounce_rule);
+ xa_erase(&vport->egress.offloads.bounce_rules, rule_index);
+}
+
+static void esw_acl_egress_ofld_bounce_rules_destroy(struct mlx5_vport *vport)
+{
+ struct mlx5_flow_handle *bounce_rule;
+ unsigned long i;
+
+ xa_for_each(&vport->egress.offloads.bounce_rules, i, bounce_rule) {
+ mlx5_del_flow_rules(bounce_rule);
+ xa_erase(&vport->egress.offloads.bounce_rules, i);
+ }
}
static int esw_acl_egress_ofld_fwd2vport_create(struct mlx5_eswitch *esw,
@@ -96,7 +110,7 @@ static void esw_acl_egress_ofld_rules_destroy(struct mlx5_vport *vport)
{
esw_acl_egress_vlan_destroy(vport);
esw_acl_egress_ofld_fwd2vport_destroy(vport);
- esw_acl_egress_ofld_bounce_rule_destroy(vport);
+ esw_acl_egress_ofld_bounce_rules_destroy(vport);
}
static int esw_acl_egress_ofld_groups_create(struct mlx5_eswitch *esw,
@@ -194,6 +208,7 @@ int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport
vport->egress.acl = NULL;
return err;
}
+ vport->egress.type = VPORT_EGRESS_ACL_TYPE_DEFAULT;
err = esw_acl_egress_ofld_groups_create(esw, vport);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
index 45b839116212..d599e50af346 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/helper.c
@@ -35,7 +35,8 @@ esw_acl_table_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, int ns,
}
ft_attr.max_fte = size;
- ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT;
+ if (vport_num || mlx5_core_is_ecpf(esw->dev))
+ ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT;
acl = mlx5_create_vport_flow_table(root_ns, &ft_attr, vport_num);
if (IS_ERR(acl)) {
err = PTR_ERR(acl);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h
index c9f8469e9a47..536b04e83618 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ofld.h
@@ -10,6 +10,7 @@
/* Eswitch acl egress external APIs */
int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport);
void esw_acl_egress_ofld_cleanup(struct mlx5_vport *vport);
+void esw_acl_egress_ofld_bounce_rule_destroy(struct mlx5_vport *vport, int rule_index);
int mlx5_esw_acl_egress_vport_bond(struct mlx5_eswitch *esw, u16 active_vport_num,
u16 passive_vport_num);
int mlx5_esw_acl_egress_vport_unbond(struct mlx5_eswitch *esw, u16 vport_num);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
index 1ba03e219111..f4fe1daa4afd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c
@@ -647,22 +647,35 @@ mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr,
}
static struct mlx5_flow_handle *
-mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, const unsigned char *addr,
+mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, u16 esw_owner_vhca_id,
+ const unsigned char *addr,
struct mlx5_esw_bridge_vlan *vlan, u32 counter_id,
struct mlx5_esw_bridge *bridge)
{
struct mlx5_devcom *devcom = bridge->br_offloads->esw->dev->priv.devcom;
+ struct mlx5_eswitch *tmp, *peer_esw = NULL;
static struct mlx5_flow_handle *handle;
- struct mlx5_eswitch *peer_esw;
+ int i;
- peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
- if (!peer_esw)
+ if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
return ERR_PTR(-ENODEV);
+ mlx5_devcom_for_each_peer_entry(devcom,
+ MLX5_DEVCOM_ESW_OFFLOADS,
+ tmp, i) {
+ if (mlx5_esw_is_owner(tmp, vport_num, esw_owner_vhca_id)) {
+ peer_esw = tmp;
+ break;
+ }
+ }
+ if (!peer_esw) {
+ mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ return ERR_PTR(-ENODEV);
+ }
+
handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id,
bridge, peer_esw);
-
- mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
return handle;
}
@@ -821,7 +834,7 @@ mlx5_esw_bridge_egress_miss_flow_create(struct mlx5_flow_table *egress_ft,
return handle;
}
-static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
+static struct mlx5_esw_bridge *mlx5_esw_bridge_create(struct net_device *br_netdev,
struct mlx5_esw_bridge_offloads *br_offloads)
{
struct mlx5_esw_bridge *bridge;
@@ -845,11 +858,12 @@ static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex,
goto err_mdb_ht;
INIT_LIST_HEAD(&bridge->fdb_list);
- bridge->ifindex = ifindex;
+ bridge->ifindex = br_netdev->ifindex;
bridge->refcnt = 1;
bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME);
bridge->vlan_proto = ETH_P_8021Q;
list_add(&bridge->list, &br_offloads->bridges);
+ mlx5_esw_bridge_debugfs_init(br_netdev, bridge);
return bridge;
@@ -873,6 +887,7 @@ static void mlx5_esw_bridge_put(struct mlx5_esw_bridge_offloads *br_offloads,
if (--bridge->refcnt)
return;
+ mlx5_esw_bridge_debugfs_cleanup(bridge);
mlx5_esw_bridge_egress_table_cleanup(bridge);
mlx5_esw_bridge_mcast_disable(bridge);
list_del(&bridge->list);
@@ -885,14 +900,14 @@ static void mlx5_esw_bridge_put(struct mlx5_esw_bridge_offloads *br_offloads,
}
static struct mlx5_esw_bridge *
-mlx5_esw_bridge_lookup(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads)
+mlx5_esw_bridge_lookup(struct net_device *br_netdev, struct mlx5_esw_bridge_offloads *br_offloads)
{
struct mlx5_esw_bridge *bridge;
ASSERT_RTNL();
list_for_each_entry(bridge, &br_offloads->bridges, list) {
- if (bridge->ifindex == ifindex) {
+ if (bridge->ifindex == br_netdev->ifindex) {
mlx5_esw_bridge_get(bridge);
return bridge;
}
@@ -905,7 +920,7 @@ mlx5_esw_bridge_lookup(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads
return ERR_PTR(err);
}
- bridge = mlx5_esw_bridge_create(ifindex, br_offloads);
+ bridge = mlx5_esw_bridge_create(br_netdev, br_offloads);
if (IS_ERR(bridge) && list_empty(&br_offloads->bridges))
mlx5_esw_bridge_ingress_table_cleanup(br_offloads);
return bridge;
@@ -1369,8 +1384,9 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_ow
entry->ingress_counter = counter;
handle = peer ?
- mlx5_esw_bridge_ingress_flow_peer_create(vport_num, addr, vlan,
- mlx5_fc_id(counter), bridge) :
+ mlx5_esw_bridge_ingress_flow_peer_create(vport_num, esw_owner_vhca_id,
+ addr, vlan, mlx5_fc_id(counter),
+ bridge) :
mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan,
mlx5_fc_id(counter), bridge);
if (IS_ERR(handle)) {
@@ -1587,15 +1603,15 @@ static int mlx5_esw_bridge_vport_cleanup(struct mlx5_esw_bridge_offloads *br_off
return 0;
}
-static int mlx5_esw_bridge_vport_link_with_flags(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
- u16 flags,
+static int mlx5_esw_bridge_vport_link_with_flags(struct net_device *br_netdev, u16 vport_num,
+ u16 esw_owner_vhca_id, u16 flags,
struct mlx5_esw_bridge_offloads *br_offloads,
struct netlink_ext_ack *extack)
{
struct mlx5_esw_bridge *bridge;
int err;
- bridge = mlx5_esw_bridge_lookup(ifindex, br_offloads);
+ bridge = mlx5_esw_bridge_lookup(br_netdev, br_offloads);
if (IS_ERR(bridge)) {
NL_SET_ERR_MSG_MOD(extack, "Error checking for existing bridge with same ifindex");
return PTR_ERR(bridge);
@@ -1613,15 +1629,16 @@ err_vport:
return err;
}
-int mlx5_esw_bridge_vport_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
+int mlx5_esw_bridge_vport_link(struct net_device *br_netdev, u16 vport_num, u16 esw_owner_vhca_id,
struct mlx5_esw_bridge_offloads *br_offloads,
struct netlink_ext_ack *extack)
{
- return mlx5_esw_bridge_vport_link_with_flags(ifindex, vport_num, esw_owner_vhca_id, 0,
+ return mlx5_esw_bridge_vport_link_with_flags(br_netdev, vport_num, esw_owner_vhca_id, 0,
br_offloads, extack);
}
-int mlx5_esw_bridge_vport_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
+int mlx5_esw_bridge_vport_unlink(struct net_device *br_netdev, u16 vport_num,
+ u16 esw_owner_vhca_id,
struct mlx5_esw_bridge_offloads *br_offloads,
struct netlink_ext_ack *extack)
{
@@ -1633,7 +1650,7 @@ int mlx5_esw_bridge_vport_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_
NL_SET_ERR_MSG_MOD(extack, "Port is not attached to any bridge");
return -EINVAL;
}
- if (port->bridge->ifindex != ifindex) {
+ if (port->bridge->ifindex != br_netdev->ifindex) {
NL_SET_ERR_MSG_MOD(extack, "Port is attached to another bridge");
return -EINVAL;
}
@@ -1644,23 +1661,25 @@ int mlx5_esw_bridge_vport_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_
return err;
}
-int mlx5_esw_bridge_vport_peer_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
+int mlx5_esw_bridge_vport_peer_link(struct net_device *br_netdev, u16 vport_num,
+ u16 esw_owner_vhca_id,
struct mlx5_esw_bridge_offloads *br_offloads,
struct netlink_ext_ack *extack)
{
if (!MLX5_CAP_ESW(br_offloads->esw->dev, merged_eswitch))
return 0;
- return mlx5_esw_bridge_vport_link_with_flags(ifindex, vport_num, esw_owner_vhca_id,
+ return mlx5_esw_bridge_vport_link_with_flags(br_netdev, vport_num, esw_owner_vhca_id,
MLX5_ESW_BRIDGE_PORT_FLAG_PEER,
br_offloads, extack);
}
-int mlx5_esw_bridge_vport_peer_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
+int mlx5_esw_bridge_vport_peer_unlink(struct net_device *br_netdev, u16 vport_num,
+ u16 esw_owner_vhca_id,
struct mlx5_esw_bridge_offloads *br_offloads,
struct netlink_ext_ack *extack)
{
- return mlx5_esw_bridge_vport_unlink(ifindex, vport_num, esw_owner_vhca_id, br_offloads,
+ return mlx5_esw_bridge_vport_unlink(br_netdev, vport_num, esw_owner_vhca_id, br_offloads,
extack);
}
@@ -1887,6 +1906,7 @@ struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw)
xa_init(&br_offloads->ports);
br_offloads->esw = esw;
esw->br_offloads = br_offloads;
+ mlx5_esw_bridge_debugfs_offloads_init(br_offloads);
return br_offloads;
}
@@ -1902,6 +1922,7 @@ void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw)
mlx5_esw_bridge_flush(br_offloads);
WARN_ON(!xa_empty(&br_offloads->ports));
+ mlx5_esw_bridge_debugfs_offloads_cleanup(br_offloads);
esw->br_offloads = NULL;
kvfree(br_offloads);
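Both debugfs lifecycles added to bridge.c (the per-offloads directory at init time, the per-bridge directory at bridge creation) follow the standard pairing; a minimal sketch, noting that debugfs creation needs no error checks because later debugfs calls quietly accept ERR_PTR parents:

#include <linux/debugfs.h>

struct demo_obj {
	struct dentry *dir;
};

static void demo_debugfs_init(struct demo_obj *o, struct dentry *parent,
			      const char *name)
{
	/* may return an ERR_PTR; subsequent debugfs calls tolerate it */
	o->dir = debugfs_create_dir(name, parent);
}

static void demo_debugfs_cleanup(struct demo_obj *o)
{
	debugfs_remove_recursive(o->dir);
	o->dir = NULL;	/* guards against double cleanup */
}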
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
index a9dd18c73d6a..c2c7c70d99eb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h
@@ -10,6 +10,7 @@
#include <linux/xarray.h>
#include "eswitch.h"
+struct dentry;
struct mlx5_flow_table;
struct mlx5_flow_group;
@@ -17,6 +18,7 @@ struct mlx5_esw_bridge_offloads {
struct mlx5_eswitch *esw;
struct list_head bridges;
struct xarray ports;
+ struct dentry *debugfs_root;
struct notifier_block netdev_nb;
struct notifier_block nb_blk;
@@ -43,16 +45,18 @@ struct mlx5_esw_bridge_offloads {
struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw);
void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw);
-int mlx5_esw_bridge_vport_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
+int mlx5_esw_bridge_vport_link(struct net_device *br_netdev, u16 vport_num, u16 esw_owner_vhca_id,
struct mlx5_esw_bridge_offloads *br_offloads,
struct netlink_ext_ack *extack);
-int mlx5_esw_bridge_vport_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
+int mlx5_esw_bridge_vport_unlink(struct net_device *br_netdev, u16 vport_num, u16 esw_owner_vhca_id,
struct mlx5_esw_bridge_offloads *br_offloads,
struct netlink_ext_ack *extack);
-int mlx5_esw_bridge_vport_peer_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
+int mlx5_esw_bridge_vport_peer_link(struct net_device *br_netdev, u16 vport_num,
+ u16 esw_owner_vhca_id,
struct mlx5_esw_bridge_offloads *br_offloads,
struct netlink_ext_ack *extack);
-int mlx5_esw_bridge_vport_peer_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id,
+int mlx5_esw_bridge_vport_peer_unlink(struct net_device *br_netdev, u16 vport_num,
+ u16 esw_owner_vhca_id,
struct mlx5_esw_bridge_offloads *br_offloads,
struct netlink_ext_ack *extack);
void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_debugfs.c
new file mode 100644
index 000000000000..b6a45eff28f5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_debugfs.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#include <linux/debugfs.h>
+#include "bridge.h"
+#include "bridge_priv.h"
+
+static void *mlx5_esw_bridge_debugfs_start(struct seq_file *seq, loff_t *pos);
+static void *mlx5_esw_bridge_debugfs_next(struct seq_file *seq, void *v, loff_t *pos);
+static void mlx5_esw_bridge_debugfs_stop(struct seq_file *seq, void *v);
+static int mlx5_esw_bridge_debugfs_show(struct seq_file *seq, void *v);
+
+static const struct seq_operations mlx5_esw_bridge_debugfs_sops = {
+ .start = mlx5_esw_bridge_debugfs_start,
+ .next = mlx5_esw_bridge_debugfs_next,
+ .stop = mlx5_esw_bridge_debugfs_stop,
+ .show = mlx5_esw_bridge_debugfs_show,
+};
+DEFINE_SEQ_ATTRIBUTE(mlx5_esw_bridge_debugfs);
+
+static void *mlx5_esw_bridge_debugfs_start(struct seq_file *seq, loff_t *pos)
+{
+ struct mlx5_esw_bridge *bridge = seq->private;
+
+ rtnl_lock();
+ return *pos ? seq_list_start(&bridge->fdb_list, *pos - 1) : SEQ_START_TOKEN;
+}
+
+static void *mlx5_esw_bridge_debugfs_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct mlx5_esw_bridge *bridge = seq->private;
+
+ return seq_list_next(v == SEQ_START_TOKEN ? &bridge->fdb_list : v, &bridge->fdb_list, pos);
+}
+
+static void mlx5_esw_bridge_debugfs_stop(struct seq_file *seq, void *v)
+{
+ rtnl_unlock();
+}
+
+static int mlx5_esw_bridge_debugfs_show(struct seq_file *seq, void *v)
+{
+ struct mlx5_esw_bridge_fdb_entry *entry;
+ u64 packets, bytes, lastuse;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_printf(seq, "%-16s %-17s %4s %20s %20s %20s %5s\n",
+ "DEV", "MAC", "VLAN", "PACKETS", "BYTES", "LASTUSE", "FLAGS");
+ return 0;
+ }
+
+ entry = list_entry(v, struct mlx5_esw_bridge_fdb_entry, list);
+ mlx5_fc_query_cached_raw(entry->ingress_counter, &bytes, &packets, &lastuse);
+ seq_printf(seq, "%-16s %-17pM %4d %20llu %20llu %20llu %#5x\n",
+ entry->dev->name, entry->key.addr, entry->key.vid, packets, bytes, lastuse,
+ entry->flags);
+ return 0;
+}
+
+void mlx5_esw_bridge_debugfs_init(struct net_device *br_netdev, struct mlx5_esw_bridge *bridge)
+{
+ if (!bridge->br_offloads->debugfs_root)
+ return;
+
+ bridge->debugfs_dir = debugfs_create_dir(br_netdev->name,
+ bridge->br_offloads->debugfs_root);
+ debugfs_create_file("fdb", 0444, bridge->debugfs_dir, bridge,
+ &mlx5_esw_bridge_debugfs_fops);
+}
+
+void mlx5_esw_bridge_debugfs_cleanup(struct mlx5_esw_bridge *bridge)
+{
+ debugfs_remove_recursive(bridge->debugfs_dir);
+ bridge->debugfs_dir = NULL;
+}
+
+void mlx5_esw_bridge_debugfs_offloads_init(struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ if (!br_offloads->esw->debugfs_root)
+ return;
+
+ br_offloads->debugfs_root = debugfs_create_dir("bridge", br_offloads->esw->debugfs_root);
+}
+
+void mlx5_esw_bridge_debugfs_offloads_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
+{
+ debugfs_remove_recursive(br_offloads->debugfs_root);
+ br_offloads->debugfs_root = NULL;
+}
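With debugfs mounted in the usual place, the FDB dump implemented above should surface under the device's mlx5 debugfs root; the path below is illustrative only (PCI address and bridge name made up). Note that the seq_file walk holds rtnl from ->start to ->stop, so reads serialize against bridge updates:

# cat /sys/kernel/debug/mlx5/0000:08:00.0/esw/bridge/br0/fdb
DEV              MAC               VLAN   PACKETS   BYTES   LASTUSE   FLAGS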
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
index 2eae594a5e80..2455f8b93c1e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
@@ -540,16 +540,29 @@ static struct mlx5_flow_handle *
mlx5_esw_bridge_mcast_filter_flow_peer_create(struct mlx5_esw_bridge_port *port)
{
struct mlx5_devcom *devcom = port->bridge->br_offloads->esw->dev->priv.devcom;
+ struct mlx5_eswitch *tmp, *peer_esw = NULL;
static struct mlx5_flow_handle *handle;
- struct mlx5_eswitch *peer_esw;
+ int i;
- peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
- if (!peer_esw)
+ if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
return ERR_PTR(-ENODEV);
+ mlx5_devcom_for_each_peer_entry(devcom,
+ MLX5_DEVCOM_ESW_OFFLOADS,
+ tmp, i) {
+ if (mlx5_esw_is_owner(tmp, port->vport_num, port->esw_owner_vhca_id)) {
+ peer_esw = tmp;
+ break;
+ }
+ }
+ if (!peer_esw) {
+ mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ return ERR_PTR(-ENODEV);
+ }
+
handle = mlx5_esw_bridge_mcast_flow_with_esw_create(port, peer_esw);
- mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
return handle;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
index c9595801bdb4..4911cc32161b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h
@@ -199,6 +199,7 @@ struct mlx5_esw_bridge {
int refcnt;
struct list_head list;
struct mlx5_esw_bridge_offloads *br_offloads;
+ struct dentry *debugfs_dir;
struct list_head fdb_list;
struct rhashtable fdb_ht;
@@ -241,4 +242,9 @@ void mlx5_esw_bridge_port_mdb_vlan_flush(struct mlx5_esw_bridge_port *port,
struct mlx5_esw_bridge_vlan *vlan);
void mlx5_esw_bridge_mdb_flush(struct mlx5_esw_bridge *bridge);
+void mlx5_esw_bridge_debugfs_offloads_init(struct mlx5_esw_bridge_offloads *br_offloads);
+void mlx5_esw_bridge_debugfs_offloads_cleanup(struct mlx5_esw_bridge_offloads *br_offloads);
+void mlx5_esw_bridge_debugfs_init(struct net_device *br_netdev, struct mlx5_esw_bridge *bridge);
+void mlx5_esw_bridge_debugfs_cleanup(struct mlx5_esw_bridge *bridge);
+
#endif /* _MLX5_ESW_BRIDGE_PRIVATE_ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index 084a910bb4e7..af779c700278 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -18,7 +18,8 @@ static bool mlx5_esw_devlink_port_supported(struct mlx5_eswitch *esw, u16 vport_
{
return vport_num == MLX5_VPORT_UPLINK ||
(mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF) ||
- mlx5_eswitch_is_vf_vport(esw, vport_num);
+ mlx5_eswitch_is_vf_vport(esw, vport_num) ||
+ mlx5_core_is_ec_vf_vport(esw->dev, vport_num);
}
static struct devlink_port *mlx5_esw_dl_port_alloc(struct mlx5_eswitch *esw, u16 vport_num)
@@ -56,6 +57,11 @@ static struct devlink_port *mlx5_esw_dl_port_alloc(struct mlx5_eswitch *esw, u16
dl_port->attrs.switch_id.id_len = ppid.id_len;
devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum,
vport_num - 1, external);
+ } else if (mlx5_core_is_ec_vf_vport(esw->dev, vport_num)) {
+ memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
+ dl_port->attrs.switch_id.id_len = ppid.id_len;
+ devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum,
+ vport_num - 1, false);
}
return dl_port;
}
@@ -65,6 +71,15 @@ static void mlx5_esw_dl_port_free(struct devlink_port *dl_port)
kfree(dl_port);
}
+static const struct devlink_port_ops mlx5_esw_dl_port_ops = {
+ .port_fn_hw_addr_get = mlx5_devlink_port_fn_hw_addr_get,
+ .port_fn_hw_addr_set = mlx5_devlink_port_fn_hw_addr_set,
+ .port_fn_roce_get = mlx5_devlink_port_fn_roce_get,
+ .port_fn_roce_set = mlx5_devlink_port_fn_roce_set,
+ .port_fn_migratable_get = mlx5_devlink_port_fn_migratable_get,
+ .port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set,
+};
+
int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_core_dev *dev = esw->dev;
@@ -87,7 +102,8 @@ int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_
devlink = priv_to_devlink(dev);
dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
- err = devl_port_register(devlink, dl_port, dl_port_index);
+ err = devl_port_register_with_ops(devlink, dl_port, dl_port_index,
+ &mlx5_esw_dl_port_ops);
if (err)
goto reg_err;
@@ -134,6 +150,20 @@ struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u1
return IS_ERR(vport) ? ERR_CAST(vport) : vport->dl_port;
}
+static const struct devlink_port_ops mlx5_esw_dl_sf_port_ops = {
+#ifdef CONFIG_MLX5_SF_MANAGER
+ .port_del = mlx5_devlink_sf_port_del,
+#endif
+ .port_fn_hw_addr_get = mlx5_devlink_port_fn_hw_addr_get,
+ .port_fn_hw_addr_set = mlx5_devlink_port_fn_hw_addr_set,
+ .port_fn_roce_get = mlx5_devlink_port_fn_roce_get,
+ .port_fn_roce_set = mlx5_devlink_port_fn_roce_set,
+#ifdef CONFIG_MLX5_SF_MANAGER
+ .port_fn_state_get = mlx5_devlink_sf_port_fn_state_get,
+ .port_fn_state_set = mlx5_devlink_sf_port_fn_state_set,
+#endif
+};
+
int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
u16 vport_num, u32 controller, u32 sfnum)
{
@@ -156,7 +186,8 @@ int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_p
devlink_port_attrs_pci_sf_set(dl_port, controller, pfnum, sfnum, !!controller);
devlink = priv_to_devlink(dev);
dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
- err = devl_port_register(devlink, dl_port, dl_port_index);
+ err = devl_port_register_with_ops(devlink, dl_port, dl_port_index,
+ &mlx5_esw_dl_sf_port_ops);
if (err)
return err;
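devl_port_register_with_ops() attaches the function callbacks to the port itself rather than to a single driver-wide devlink_ops, which is why the SF variant above can carry port_del/state ops that VF and PF ports lack. A minimal registration sketch (demo_* names invented; the hw_addr callback signature mirrors the declarations in eswitch.h):

#include <net/devlink.h>

static int demo_hw_addr_get(struct devlink_port *port, u8 *hw_addr,
			    int *hw_addr_len, struct netlink_ext_ack *extack)
{
	*hw_addr_len = 0;	/* nothing to report in this sketch */
	return 0;
}

static const struct devlink_port_ops demo_port_ops = {
	.port_fn_hw_addr_get = demo_hw_addr_get,
};

static int demo_register_port(struct devlink *devlink,
			      struct devlink_port *dl_port,
			      unsigned int port_index)
{
	return devl_port_register_with_ops(devlink, dl_port, port_index,
					   &demo_port_ops);
}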
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
index fabe49a35a5c..255bc8b749f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c
@@ -285,9 +285,8 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
goto out;
- } else {
- esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;
}
+ esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;
/* Star rule to forward all traffic to uplink vport */
memset(&dest, 0, sizeof(dest));
@@ -299,9 +298,8 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
goto out;
- } else {
- esw->fdb_table.legacy.vepa_star_rule = flow_rule;
}
+ esw->fdb_table.legacy.vepa_star_rule = flow_rule;
out:
kvfree(spec);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 901c53751b0a..faec7d7a4400 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -31,6 +31,7 @@
*/
#include <linux/etherdevice.h>
+#include <linux/debugfs.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
@@ -41,6 +42,7 @@
#include "esw/qos.h"
#include "mlx5_core.h"
#include "lib/eq.h"
+#include "lag/lag.h"
#include "eswitch.h"
#include "fs_core.h"
#include "devlink.h"
@@ -92,7 +94,7 @@ mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_vport *vport;
- if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
+ if (!esw)
return ERR_PTR(-EPERM);
vport = xa_load(&esw->vports, vport_num);
@@ -113,7 +115,8 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
- MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+ if (vport || mlx5_core_is_ecpf(dev))
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
in, nic_vport_context);
@@ -309,11 +312,12 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
fdb_add:
/* SRIOV is enabled: Forward UC MAC to vport */
- if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY)
+ if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY) {
vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
- esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
- vport, mac, vaddr->flow_rule);
+ esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
+ vport, mac, vaddr->flow_rule);
+ }
return 0;
}
@@ -710,6 +714,9 @@ void esw_vport_change_handle_locked(struct mlx5_vport *vport)
struct mlx5_eswitch *esw = dev->priv.eswitch;
u8 mac[ETH_ALEN];
+ if (!MLX5_CAP_GEN(dev, log_max_l2_table))
+ return;
+
mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac);
esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
vport->vport, mac);
@@ -946,7 +953,8 @@ void mlx5_esw_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
vport->enabled = false;
/* Disable events from this vport */
- arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
+ if (MLX5_CAP_GEN(esw->dev, log_max_l2_table))
+ arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
@@ -1045,6 +1053,18 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
}
}
+static void mlx5_eswitch_clear_ec_vf_vports_info(struct mlx5_eswitch *esw)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+
+ mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) {
+ memset(&vport->qos, 0, sizeof(vport->qos));
+ memset(&vport->info, 0, sizeof(vport->info));
+ vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+ }
+}
+
/* Public E-Switch API */
int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
enum mlx5_eswitch_vport_event enabled_events)
@@ -1084,6 +1104,19 @@ void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
}
}
+static void mlx5_eswitch_unload_ec_vf_vports(struct mlx5_eswitch *esw,
+ u16 num_ec_vfs)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+
+ mlx5_esw_for_each_ec_vf_vport(esw, i, vport, num_ec_vfs) {
+ if (!vport->enabled)
+ continue;
+ mlx5_eswitch_unload_vport(esw, vport->vport);
+ }
+}
+
int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
enum mlx5_eswitch_vport_event enabled_events)
{
@@ -1104,6 +1137,26 @@ vf_err:
return err;
}
+static int mlx5_eswitch_load_ec_vf_vports(struct mlx5_eswitch *esw, u16 num_ec_vfs,
+ enum mlx5_eswitch_vport_event enabled_events)
+{
+ struct mlx5_vport *vport;
+ unsigned long i;
+ int err;
+
+ mlx5_esw_for_each_ec_vf_vport(esw, i, vport, num_ec_vfs) {
+ err = mlx5_eswitch_load_vport(esw, vport->vport, enabled_events);
+ if (err)
+ goto vf_err;
+ }
+
+ return 0;
+
+vf_err:
+ mlx5_eswitch_unload_ec_vf_vports(esw, num_ec_vfs);
+ return err;
+}
+
static int host_pf_enable_hca(struct mlx5_core_dev *dev)
{
if (!mlx5_core_is_ecpf(dev))
@@ -1148,6 +1201,12 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events);
if (ret)
goto ecpf_err;
+ if (mlx5_core_ec_sriov_enabled(esw->dev)) {
+ ret = mlx5_eswitch_load_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs,
+ enabled_events);
+ if (ret)
+ goto ec_vf_err;
+ }
}
/* Enable VF vports */
@@ -1158,6 +1217,9 @@ mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
return 0;
vf_err:
+ if (mlx5_core_ec_sriov_enabled(esw->dev))
+ mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs);
+ec_vf_err:
if (mlx5_ecpf_vport_exists(esw->dev))
mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
ecpf_err:
@@ -1174,8 +1236,11 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
{
mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
- if (mlx5_ecpf_vport_exists(esw->dev))
+ if (mlx5_ecpf_vport_exists(esw->dev)) {
+ if (mlx5_core_ec_sriov_enabled(esw->dev))
+ mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs);
mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);
+ }
host_pf_disable_hca(esw->dev);
mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
@@ -1219,6 +1284,9 @@ mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
host_params_context.host_num_of_vfs);
+ if (mlx5_core_ec_sriov_enabled(esw->dev))
+ esw->esw_funcs.num_ec_vfs = num_vfs;
+
kvfree(out);
}
@@ -1326,9 +1394,9 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int num_vfs)
mlx5_eswitch_event_handlers_register(esw);
- esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
+ esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
- esw->esw_funcs.num_vfs, esw->enabled_vports);
+ esw->esw_funcs.num_vfs, esw->esw_funcs.num_ec_vfs, esw->enabled_vports);
mlx5_esw_mode_change_notify(esw, esw->mode);
@@ -1350,7 +1418,7 @@ abort:
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
{
bool toggle_lag;
- int ret;
+ int ret = 0;
if (!mlx5_esw_allowed(esw))
return 0;
@@ -1370,10 +1438,21 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
vport_events = (esw->mode == MLX5_ESWITCH_LEGACY) ?
MLX5_LEGACY_SRIOV_VPORT_EVENTS : MLX5_VPORT_UC_ADDR_CHANGE;
- ret = mlx5_eswitch_load_vf_vports(esw, num_vfs, vport_events);
- if (!ret)
- esw->esw_funcs.num_vfs = num_vfs;
+ /* On the ECPF, the number of host VFs is managed via the eswitch
+ * function change event handler, so any num_vfs passed here refers
+ * to EC VFs.
+ */
+ if (!mlx5_core_is_ecpf(esw->dev)) {
+ ret = mlx5_eswitch_load_vf_vports(esw, num_vfs, vport_events);
+ if (!ret)
+ esw->esw_funcs.num_vfs = num_vfs;
+ } else if (mlx5_core_ec_sriov_enabled(esw->dev)) {
+ ret = mlx5_eswitch_load_ec_vf_vports(esw, num_vfs, vport_events);
+ if (!ret)
+ esw->esw_funcs.num_ec_vfs = num_vfs;
+ }
}
+
up_write(&esw->mode_lock);
if (toggle_lag)
@@ -1393,16 +1472,22 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
/* If driver is unloaded, this function is called twice by remove_one()
* and mlx5_unload(). Prevent the second call.
*/
- if (!esw->esw_funcs.num_vfs && !clear_vf)
+ if (!esw->esw_funcs.num_vfs && !esw->esw_funcs.num_ec_vfs && !clear_vf)
goto unlock;
- esw_info(esw->dev, "Unload vfs: mode(%s), nvfs(%d), active vports(%d)\n",
+ esw_info(esw->dev, "Unload vfs: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
- esw->esw_funcs.num_vfs, esw->enabled_vports);
-
- mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
- if (clear_vf)
- mlx5_eswitch_clear_vf_vports_info(esw);
+ esw->esw_funcs.num_vfs, esw->esw_funcs.num_ec_vfs, esw->enabled_vports);
+
+ if (!mlx5_core_is_ecpf(esw->dev)) {
+ mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
+ if (clear_vf)
+ mlx5_eswitch_clear_vf_vports_info(esw);
+ } else if (mlx5_core_ec_sriov_enabled(esw->dev)) {
+ mlx5_eswitch_unload_ec_vf_vports(esw, esw->esw_funcs.num_ec_vfs);
+ if (clear_vf)
+ mlx5_eswitch_clear_ec_vf_vports_info(esw);
+ }
if (esw->mode == MLX5_ESWITCH_OFFLOADS) {
struct devlink *devlink = priv_to_devlink(esw->dev);
@@ -1413,7 +1498,10 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
if (esw->mode == MLX5_ESWITCH_LEGACY)
mlx5_eswitch_disable_locked(esw);
- esw->esw_funcs.num_vfs = 0;
+ if (!mlx5_core_is_ecpf(esw->dev))
+ esw->esw_funcs.num_vfs = 0;
+ else
+ esw->esw_funcs.num_ec_vfs = 0;
unlock:
up_write(&esw->mode_lock);
@@ -1433,9 +1521,9 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw)
mlx5_eswitch_event_handlers_unregister(esw);
- esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
+ esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), necvfs(%d), active vports(%d)\n",
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
- esw->esw_funcs.num_vfs, esw->enabled_vports);
+ esw->esw_funcs.num_vfs, esw->esw_funcs.num_ec_vfs, esw->enabled_vports);
if (esw->fdb_table.flags & MLX5_ESW_FDB_CREATED) {
esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED;
@@ -1595,7 +1683,19 @@ static int mlx5_esw_vports_init(struct mlx5_eswitch *esw)
idx++;
}
- if (mlx5_ecpf_vport_exists(dev)) {
+ if (mlx5_core_ec_sriov_enabled(esw->dev)) {
+ int ec_vf_base_num = mlx5_core_ec_vf_vport_base(dev);
+
+ for (i = 0; i < mlx5_core_max_ec_vfs(esw->dev); i++) {
+ err = mlx5_esw_vport_alloc(esw, idx, ec_vf_base_num + i);
+ if (err)
+ goto err;
+ idx++;
+ }
+ }
+
+ if (mlx5_ecpf_vport_exists(dev) ||
+ mlx5_core_is_ecpf_esw_manager(dev)) {
err = mlx5_esw_vport_alloc(esw, idx, MLX5_VPORT_ECPF);
if (err)
goto err;
@@ -1611,22 +1711,60 @@ err:
return err;
}
+static int mlx5_devlink_esw_multiport_set(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ if (!MLX5_ESWITCH_MANAGER(dev))
+ return -EOPNOTSUPP;
+
+ if (ctx->val.vbool)
+ return mlx5_lag_mpesw_enable(dev);
+
+ mlx5_lag_mpesw_disable(dev);
+ return 0;
+}
+
+static int mlx5_devlink_esw_multiport_get(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct mlx5_core_dev *dev = devlink_priv(devlink);
+
+ ctx->val.vbool = mlx5_lag_is_mpesw(dev);
+ return 0;
+}
+
+static const struct devlink_param mlx5_eswitch_params[] = {
+ DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_ID_ESW_MULTIPORT,
+ "esw_multiport", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ mlx5_devlink_esw_multiport_get,
+ mlx5_devlink_esw_multiport_set, NULL),
+};
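Because esw_multiport is registered with only the runtime cmode, it is toggled from userspace via the devlink tool; illustrative invocation (device address made up):

$ devlink dev param set pci/0000:08:00.0 name esw_multiport value true cmode runtime
$ devlink dev param show pci/0000:08:00.0 name esw_multiport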
+
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
struct mlx5_eswitch *esw;
int err;
- if (!MLX5_VPORT_MANAGER(dev))
+ if (!MLX5_VPORT_MANAGER(dev) && !MLX5_ESWITCH_MANAGER(dev))
return 0;
esw = kzalloc(sizeof(*esw), GFP_KERNEL);
if (!esw)
return -ENOMEM;
+ err = devl_params_register(priv_to_devlink(dev), mlx5_eswitch_params,
+ ARRAY_SIZE(mlx5_eswitch_params));
+ if (err)
+ goto free_esw;
+
esw->dev = dev;
esw->manager_vport = mlx5_eswitch_manager_vport(dev);
esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);
+ esw->debugfs_root = debugfs_create_dir("esw", mlx5_debugfs_get_dev_root(dev));
esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
if (!esw->work_queue) {
err = -ENOMEM;
@@ -1680,13 +1818,17 @@ reps_err:
abort:
if (esw->work_queue)
destroy_workqueue(esw->work_queue);
+ debugfs_remove_recursive(esw->debugfs_root);
+ devl_params_unregister(priv_to_devlink(dev), mlx5_eswitch_params,
+ ARRAY_SIZE(mlx5_eswitch_params));
+free_esw:
kfree(esw);
return err;
}
void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{
- if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
+ if (!esw)
return;
esw_info(esw->dev, "cleanup\n");
@@ -1703,6 +1845,9 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
mutex_destroy(&esw->offloads.decap_tbl_lock);
esw_offloads_cleanup(esw);
mlx5_esw_vports_cleanup(esw);
+ debugfs_remove_recursive(esw->debugfs_root);
+ devl_params_unregister(priv_to_devlink(esw->dev), mlx5_eswitch_params,
+ ARRAY_SIZE(mlx5_eswitch_params));
kfree(esw);
}
@@ -1763,12 +1908,6 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
static bool mlx5_esw_check_port_type(struct mlx5_eswitch *esw, u16 vport_num, xa_mark_t mark)
{
- struct mlx5_vport *vport;
-
- vport = mlx5_eswitch_get_vport(esw, vport_num);
- if (IS_ERR(vport))
- return false;
-
return xa_get_mark(&esw->vports, vport_num, mark);
}
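mlx5_esw_check_port_type() can now trust the xarray mark alone because marks are applied when the vport entry is inserted and disappear with it; absent entries simply test false. The general idiom, sketched with an invented mark name:

#include <linux/xarray.h>

#define DEMO_MARK_VF	XA_MARK_0	/* stands in for MLX5_ESW_VPT_VF */

static bool demo_is_vf(struct xarray *vports, unsigned long vport_num)
{
	/* absent entries test false, same as unmarked ones */
	return xa_get_mark(vports, vport_num, DEMO_MARK_VF);
}

static int demo_add_vf(struct xarray *vports, unsigned long vport_num,
		       void *vport)
{
	int err = xa_insert(vports, vport_num, vport, GFP_KERNEL);

	if (!err)
		xa_set_mark(vports, vport_num, DEMO_MARK_VF);
	return err;
}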
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index add6cfa432a5..ae0dc8a3060d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -123,8 +123,14 @@ struct vport_ingress {
} offloads;
};
+enum vport_egress_acl_type {
+ VPORT_EGRESS_ACL_TYPE_DEFAULT,
+ VPORT_EGRESS_ACL_TYPE_SHARED_FDB,
+};
+
struct vport_egress {
struct mlx5_flow_table *acl;
+ enum vport_egress_acl_type type;
struct mlx5_flow_handle *allowed_vlan;
struct mlx5_flow_group *vlan_grp;
union {
@@ -136,7 +142,7 @@ struct vport_egress {
struct {
struct mlx5_flow_group *fwd_grp;
struct mlx5_flow_handle *fwd_rule;
- struct mlx5_flow_handle *bounce_rule;
+ struct xarray bounce_rules;
struct mlx5_flow_group *bounce_grp;
} offloads;
};
@@ -218,7 +224,7 @@ struct mlx5_eswitch_fdb {
struct mlx5_flow_group *send_to_vport_grp;
struct mlx5_flow_group *send_to_vport_meta_grp;
struct mlx5_flow_group *peer_miss_grp;
- struct mlx5_flow_handle **peer_miss_rules;
+ struct mlx5_flow_handle **peer_miss_rules[MLX5_MAX_PORTS];
struct mlx5_flow_group *miss_grp;
struct mlx5_flow_handle **send_to_vport_meta_rules;
struct mlx5_flow_handle *miss_rule_uni;
@@ -249,7 +255,7 @@ struct mlx5_esw_offload {
struct mlx5_flow_group *vport_rx_drop_group;
struct mlx5_flow_handle *vport_rx_drop_rule;
struct xarray vport_reps;
- struct list_head peer_flows;
+ struct list_head peer_flows[MLX5_MAX_PORTS];
struct mutex peer_mutex;
struct mutex encap_tbl_lock; /* protects encap_tbl */
DECLARE_HASHTABLE(encap_tbl, 8);
@@ -283,6 +289,7 @@ struct mlx5_host_work {
struct mlx5_esw_functions {
struct mlx5_nb nb;
u16 num_vfs;
+ u16 num_ec_vfs;
};
enum {
@@ -297,6 +304,8 @@ enum {
MLX5_ESW_FDB_CREATED = BIT(0),
};
+struct dentry;
+
struct mlx5_eswitch {
struct mlx5_core_dev *dev;
struct mlx5_nb nb;
@@ -305,6 +314,7 @@ struct mlx5_eswitch {
struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
struct esw_mc_addr mc_promisc;
/* end of legacy */
+ struct dentry *debugfs_root;
struct workqueue_struct *work_queue;
struct xarray vports;
u32 flags;
@@ -337,12 +347,13 @@ struct mlx5_eswitch {
int mode;
u16 manager_vport;
u16 first_host_vport;
+ u8 num_peers;
struct mlx5_esw_functions esw_funcs;
struct {
u32 large_group_num;
} params;
struct blocking_notifier_head n_head;
- bool paired[MLX5_MAX_PORTS];
+ struct xarray paired;
};
void esw_offloads_disable(struct mlx5_eswitch *esw);
@@ -506,12 +517,12 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
struct netlink_ext_ack *extack);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
enum devlink_eswitch_encap_mode *encap);
-int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port,
- u8 *hw_addr, int *hw_addr_len,
- struct netlink_ext_ack *extack);
-int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
- const u8 *hw_addr, int hw_addr_len,
- struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
+ u8 *hw_addr, int *hw_addr_len,
+ struct netlink_ext_ack *extack);
+int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port,
+ const u8 *hw_addr, int hw_addr_len,
+ struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_roce_get(struct devlink_port *port, bool *is_enabled,
struct netlink_ext_ack *extack);
int mlx5_devlink_port_fn_roce_set(struct devlink_port *port, bool enable,
@@ -578,6 +589,13 @@ mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
return esw->manager_vport == vport_num;
}
+static inline bool mlx5_esw_is_owner(struct mlx5_eswitch *esw, u16 vport_num,
+ u16 esw_owner_vhca_id)
+{
+ return esw_owner_vhca_id == MLX5_CAP_GEN(esw->dev, vhca_id) ||
+ (vport_num == MLX5_VPORT_UPLINK && mlx5_lag_is_master(esw->dev));
+}
+
static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
{
return mlx5_core_is_ecpf_esw_manager(dev) ?
@@ -640,6 +658,19 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
#define mlx5_esw_for_each_host_func_vport(esw, index, vport, last) \
mlx5_esw_for_each_vport_marked(esw, index, vport, last, MLX5_ESW_VPT_HOST_FN)
+/* This macro should only be used if EC SRIOV is enabled.
+ *
+ * Because there were no more marks available on the xarray, this uses a
+ * for_each_range approach. The range is only valid when EC SRIOV is enabled.
+ */
+#define mlx5_esw_for_each_ec_vf_vport(esw, index, vport, last) \
+ xa_for_each_range(&((esw)->vports), \
+ index, \
+ vport, \
+ MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base), \
+ MLX5_CAP_GEN_2((esw->dev), ec_vf_vport_base) +\
+ (last) - 1)
+
struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink);
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num);
@@ -686,6 +717,14 @@ mlx5_esw_vporttbl_put(struct mlx5_eswitch *esw, struct mlx5_vport_tbl_attr *attr
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag);
+void mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
+ u32 *flow_group_in,
+ int match_params);
+
+void mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
+ u16 vport,
+ struct mlx5_flow_spec *spec);
+
int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num);
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num);
@@ -740,9 +779,9 @@ void esw_vport_change_handle_locked(struct mlx5_vport *vport);
bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller);
-int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
- struct mlx5_eswitch *slave_esw);
-void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
+int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
+ struct mlx5_eswitch *slave_esw, int max_slaves);
+void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
struct mlx5_eswitch *slave_esw);
int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw);
@@ -757,6 +796,13 @@ static inline int mlx5_eswitch_num_vfs(struct mlx5_eswitch *esw)
return 0;
}
+static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw)
+{
+ if (mlx5_esw_allowed(esw))
+ return esw->num_peers;
+ return 0;
+}
+
static inline struct mlx5_flow_table *
mlx5_eswitch_get_slow_fdb(struct mlx5_eswitch *esw)
{
@@ -794,16 +840,18 @@ mlx5_esw_vport_to_devlink_port_index(const struct mlx5_core_dev *dev,
}
static inline int
-mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
- struct mlx5_eswitch *slave_esw)
+mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
+ struct mlx5_eswitch *slave_esw, int max_slaves)
{
return 0;
}
static inline void
-mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
+mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
struct mlx5_eswitch *slave_esw) {}
+static inline int mlx5_eswitch_get_npeers(struct mlx5_eswitch *esw) { return 0; }
+
static inline int
mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 8d19c20d3447..bdfe609cc9ec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -55,13 +55,6 @@
#define mlx5_esw_for_each_rep(esw, i, rep) \
xa_for_each(&((esw)->offloads.vport_reps), i, rep)
-#define mlx5_esw_for_each_sf_rep(esw, i, rep) \
- xa_for_each_marked(&((esw)->offloads.vport_reps), i, rep, MLX5_ESW_VPT_SF)
-
-#define mlx5_esw_for_each_vf_rep(esw, index, rep) \
- mlx5_esw_for_each_entry_marked(&((esw)->offloads.vport_reps), index, \
- rep, (esw)->esw_funcs.num_vfs, MLX5_ESW_VPT_VF)
-
/* There are two match-all miss flows, one for unicast dst mac and
* one for multicast.
*/
@@ -838,6 +831,7 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec;
void *misc;
+ u16 vport;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
@@ -847,20 +841,43 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
- /* source vport is the esw manager */
- MLX5_SET(fte_match_set_misc, misc, source_port, from_esw->manager_vport);
- if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
- MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
- MLX5_CAP_GEN(from_esw->dev, vhca_id));
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
- MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
- if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
- MLX5_SET_TO_ONES(fte_match_set_misc, misc,
- source_eswitch_owner_vhca_id);
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
+
+ /* source vport is the esw manager */
+ vport = from_esw->manager_vport;
+
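+ /* When metadata matching is enabled the source vport is carried in
+ * metadata_reg_c_0; otherwise it is matched via the source_port misc
+ * field (plus the owner vhca_id on merged eswitches).
+ */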
+ if (mlx5_eswitch_vport_match_metadata_enabled(on_esw)) {
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_for_match(from_esw, vport));
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
+ MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
+ mlx5_eswitch_get_vport_metadata_mask());
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
+ } else {
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
+ MLX5_SET(fte_match_set_misc, misc, source_port, vport);
+
+ if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
+ MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
+ MLX5_CAP_GEN(from_esw->dev, vhca_id));
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+
+ if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc,
+ source_eswitch_owner_vhca_id);
+
+ spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
+ }
+
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport.num = rep->vport;
dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
@@ -1052,6 +1069,9 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
void *misc;
int err;
+ if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev))
+ return 0;
+
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
@@ -1108,11 +1128,32 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
flows[vport->index] = flow;
}
- esw->fdb_table.offloads.peer_miss_rules = flows;
+ if (mlx5_core_ec_sriov_enabled(esw->dev)) {
+ mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
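+ /* The peer may expose fewer EC VFs than we do; stop at its limit. */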
+ if (i >= mlx5_core_max_ec_vfs(peer_dev))
+ break;
+ esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
+ spec, vport->vport);
+ flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
+ spec, &flow_act, &dest, 1);
+ if (IS_ERR(flow)) {
+ err = PTR_ERR(flow);
+ goto add_ec_vf_flow_err;
+ }
+ flows[vport->index] = flow;
+ }
+ }
+ esw->fdb_table.offloads.peer_miss_rules[mlx5_get_dev_index(peer_dev)] = flows;
kvfree(spec);
return 0;
+add_ec_vf_flow_err:
+ mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
+ if (!flows[vport->index])
+ continue;
+ mlx5_del_flow_rules(flows[vport->index]);
+ }
add_vf_flow_err:
mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
if (!flows[vport->index])
@@ -1136,13 +1177,28 @@ alloc_flows_err:
return err;
}
-static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
+static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
+ struct mlx5_core_dev *peer_dev)
{
+ u16 peer_index = mlx5_get_dev_index(peer_dev);
struct mlx5_flow_handle **flows;
struct mlx5_vport *vport;
unsigned long i;
- flows = esw->fdb_table.offloads.peer_miss_rules;
+ flows = esw->fdb_table.offloads.peer_miss_rules[peer_index];
+ if (!flows)
+ return;
+
+ if (mlx5_core_ec_sriov_enabled(esw->dev)) {
+ mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) {
+ /* The flow for a particular vport could be NULL if the other ECPF
+ * has fewer or no VFs enabled.
+ */
+ if (!flows[vport->index])
+ continue;
+ mlx5_del_flow_rules(flows[vport->index]);
+ }
+ }
mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
mlx5_del_flow_rules(flows[vport->index]);
@@ -1156,7 +1212,9 @@ static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
mlx5_del_flow_rules(flows[vport->index]);
}
+
kvfree(flows);
+ esw->fdb_table.offloads.peer_miss_rules[peer_index] = NULL;
}
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
@@ -1269,8 +1327,10 @@ esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32
-static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
- u32 *flow_group_in)
+void
+mlx5_esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
+ u32 *flow_group_in,
+ int match_params)
{
void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
flow_group_in,
@@ -1279,7 +1339,7 @@ static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
MLX5_SET(create_flow_group_in, flow_group_in,
match_criteria_enable,
- MLX5_MATCH_MISC_PARAMETERS_2);
+ MLX5_MATCH_MISC_PARAMETERS_2 | match_params);
MLX5_SET(fte_match_param, match_criteria,
misc_parameters_2.metadata_reg_c_0,
@@ -1287,7 +1347,7 @@ static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
} else {
MLX5_SET(create_flow_group_in, flow_group_in,
match_criteria_enable,
- MLX5_MATCH_MISC_PARAMETERS);
+ MLX5_MATCH_MISC_PARAMETERS | match_params);
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
misc_parameters.source_port);
@@ -1463,14 +1523,13 @@ esw_create_send_to_vport_group(struct mlx5_eswitch *esw,
memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
- MLX5_MATCH_MISC_PARAMETERS);
+ mlx5_esw_set_flow_group_source_port(esw, flow_group_in, MLX5_MATCH_MISC_PARAMETERS);
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
-
MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
- if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
+
+ if (!mlx5_eswitch_vport_match_metadata_enabled(esw) &&
+ MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
MLX5_SET_TO_ONES(fte_match_param, match_criteria,
misc_parameters.source_eswitch_owner_vhca_id);
MLX5_SET(create_flow_group_in, flow_group_in,
@@ -1548,6 +1607,7 @@ esw_create_peer_esw_miss_group(struct mlx5_eswitch *esw,
u32 *flow_group_in,
int *ix)
{
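+ /* Reserve room for peer miss rules from up to (MLX5_MAX_PORTS - 1)
+ * peers, each contributing at most (total_vports - 1) entries.
+ */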
+ int max_peer_ports = (esw->total_vports - 1) * (MLX5_MAX_PORTS - 1);
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct mlx5_flow_group *g;
void *match_criteria;
@@ -1558,7 +1618,7 @@ esw_create_peer_esw_miss_group(struct mlx5_eswitch *esw,
memset(flow_group_in, 0, inlen);
- esw_set_flow_group_source_port(esw, flow_group_in);
+ mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);
if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
match_criteria = MLX5_ADDR_OF(create_flow_group_in,
@@ -1574,8 +1634,8 @@ esw_create_peer_esw_miss_group(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, *ix);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
- *ix + esw->total_vports - 1);
- *ix += esw->total_vports;
+ *ix + max_peer_ports);
+ *ix += max_peer_ports + 1;
g = mlx5_create_flow_group(fdb, flow_group_in);
if (IS_ERR(g)) {
@@ -1677,7 +1737,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
* total vports of the peer (currently it also uses esw->total_vports).
*/
table_size = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) +
- esw->total_vports * 2 + MLX5_ESW_MISS_FLOWS;
+ esw->total_vports * MLX5_MAX_PORTS + MLX5_ESW_MISS_FLOWS;
/* create the slow path fdb with encap set, so further table instances
* can be created at run time while VFs are probed, if the FW allows that.
@@ -1844,8 +1904,7 @@ static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
if (!flow_group_in)
return -ENOMEM;
- /* create vport rx group */
- esw_set_flow_group_source_port(esw, flow_group_in);
+ mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);
@@ -1915,21 +1974,13 @@ static void esw_destroy_vport_rx_drop_group(struct mlx5_eswitch *esw)
mlx5_destroy_flow_group(esw->offloads.vport_rx_drop_group);
}
-struct mlx5_flow_handle *
-mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
- struct mlx5_flow_destination *dest)
+void
+mlx5_esw_set_spec_source_port(struct mlx5_eswitch *esw,
+ u16 vport,
+ struct mlx5_flow_spec *spec)
{
- struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_handle *flow_rule;
- struct mlx5_flow_spec *spec;
void *misc;
- spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
- if (!spec) {
- flow_rule = ERR_PTR(-ENOMEM);
- goto out;
- }
-
if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
@@ -1949,6 +2000,23 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
}
+}
+
+struct mlx5_flow_handle *
+mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
+ struct mlx5_flow_destination *dest)
+{
+ struct mlx5_flow_act flow_act = {0};
+ struct mlx5_flow_handle *flow_rule;
+ struct mlx5_flow_spec *spec;
+
+ spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+ if (!spec) {
+ flow_rule = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ mlx5_esw_set_spec_source_port(esw, vport, spec);
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
@@ -2142,6 +2210,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
"Failed setting eswitch to offloads");
esw->mode = MLX5_ESWITCH_LEGACY;
mlx5_rescan_drivers(esw->dev);
+ return err;
}
if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
if (mlx5_eswitch_inline_mode_get(esw,
@@ -2151,19 +2220,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
"Inline mode is different between vports");
}
}
- return err;
-}
-
-static void mlx5_esw_offloads_rep_mark_set(struct mlx5_eswitch *esw,
- struct mlx5_eswitch_rep *rep,
- xa_mark_t mark)
-{
- bool mark_set;
-
- /* Copy the mark from vport to its rep */
- mark_set = xa_get_mark(&esw->vports, rep->vport, mark);
- if (mark_set)
- xa_set_mark(&esw->offloads.vport_reps, rep->vport, mark);
+ return 0;
}
static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)
@@ -2185,9 +2242,6 @@ static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx
if (err)
goto insert_err;
- mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_HOST_FN);
- mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_VF);
- mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_SF);
return 0;
insert_err:
@@ -2328,37 +2382,13 @@ static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
esw->offloads.rep_ops[rep_type]->unload(rep);
}
-static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type)
-{
- struct mlx5_eswitch_rep *rep;
- unsigned long i;
-
- mlx5_esw_for_each_sf_rep(esw, i, rep)
- __esw_offloads_unload_rep(esw, rep, rep_type);
-}
-
static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
{
struct mlx5_eswitch_rep *rep;
unsigned long i;
- __unload_reps_sf_vport(esw, rep_type);
-
- mlx5_esw_for_each_vf_rep(esw, i, rep)
- __esw_offloads_unload_rep(esw, rep, rep_type);
-
- if (mlx5_ecpf_vport_exists(esw->dev)) {
- rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
- __esw_offloads_unload_rep(esw, rep, rep_type);
- }
-
- if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
- rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
+ mlx5_esw_for_each_rep(esw, i, rep)
__esw_offloads_unload_rep(esw, rep, rep_type);
- }
-
- rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
- __esw_offloads_unload_rep(esw, rep, rep_type);
}
int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
@@ -2476,6 +2506,7 @@ static int __esw_set_master_egress_rule(struct mlx5_core_dev *master,
struct mlx5_vport *vport,
struct mlx5_flow_table *acl)
{
+ u16 slave_index = MLX5_CAP_GEN(slave, vhca_id);
struct mlx5_flow_handle *flow_rule = NULL;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_act flow_act = {};
@@ -2491,8 +2522,7 @@ static int __esw_set_master_egress_rule(struct mlx5_core_dev *master,
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
- MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
- MLX5_CAP_GEN(slave, vhca_id));
+ MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id, slave_index);
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
@@ -2507,49 +2537,43 @@ static int __esw_set_master_egress_rule(struct mlx5_core_dev *master,
flow_rule = mlx5_add_flow_rules(acl, spec, &flow_act,
&dest, 1);
- if (IS_ERR(flow_rule))
+ if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
- else
- vport->egress.offloads.bounce_rule = flow_rule;
+ } else {
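+ /* Track the bounce rule per slave, keyed by the slave's vhca_id. */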
+ err = xa_insert(&vport->egress.offloads.bounce_rules,
+ slave_index, flow_rule, GFP_KERNEL);
+ if (err)
+ mlx5_del_flow_rules(flow_rule);
+ }
kvfree(spec);
return err;
}
-static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
- struct mlx5_core_dev *slave)
+static int esw_master_egress_create_resources(struct mlx5_eswitch *esw,
+ struct mlx5_flow_namespace *egress_ns,
+ struct mlx5_vport *vport, size_t count)
{
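+ /* The egress ACL now holds one bounce rule per slave, so size the
+ * flow table for 'count' entries instead of a single FTE.
+ */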
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
- struct mlx5_eswitch *esw = master->priv.eswitch;
struct mlx5_flow_table_attr ft_attr = {
- .max_fte = 1, .prio = 0, .level = 0,
- .flags = MLX5_FLOW_TABLE_OTHER_VPORT,
+ .max_fte = count, .prio = 0, .level = 0,
};
- struct mlx5_flow_namespace *egress_ns;
struct mlx5_flow_table *acl;
struct mlx5_flow_group *g;
- struct mlx5_vport *vport;
void *match_criteria;
u32 *flow_group_in;
int err;
- vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
- if (IS_ERR(vport))
- return PTR_ERR(vport);
-
- egress_ns = mlx5_get_flow_vport_acl_namespace(master,
- MLX5_FLOW_NAMESPACE_ESW_EGRESS,
- vport->index);
- if (!egress_ns)
- return -EINVAL;
-
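+ /* Already created for a previous slave; reuse the shared resources. */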
if (vport->egress.acl)
- return -EINVAL;
+ return 0;
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
if (!flow_group_in)
return -ENOMEM;
+ if (vport->vport || mlx5_core_is_ecpf(esw->dev))
+ ft_attr.flags = MLX5_FLOW_TABLE_OTHER_VPORT;
+
acl = mlx5_create_vport_flow_table(egress_ns, &ft_attr, vport->vport);
if (IS_ERR(acl)) {
err = PTR_ERR(acl);
@@ -2568,7 +2592,7 @@ static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
MLX5_SET(create_flow_group_in, flow_group_in,
source_eswitch_owner_vhca_id_valid, 1);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, count);
g = mlx5_create_flow_group(acl, flow_group_in);
if (IS_ERR(g)) {
@@ -2576,19 +2600,15 @@ static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
goto err_group;
}
- err = __esw_set_master_egress_rule(master, slave, vport, acl);
- if (err)
- goto err_rule;
-
vport->egress.acl = acl;
vport->egress.offloads.bounce_grp = g;
+ vport->egress.type = VPORT_EGRESS_ACL_TYPE_SHARED_FDB;
+ xa_init_flags(&vport->egress.offloads.bounce_rules, XA_FLAGS_ALLOC);
kvfree(flow_group_in);
return 0;
-err_rule:
- mlx5_destroy_flow_group(g);
err_group:
mlx5_destroy_flow_table(acl);
out:
@@ -2596,18 +2616,74 @@ out:
return err;
}
-static void esw_unset_master_egress_rule(struct mlx5_core_dev *dev)
+static void esw_master_egress_destroy_resources(struct mlx5_vport *vport)
+{
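+ /* Keep the table and group while any bounce rule remains; only the
+ * last deleter tears them down.
+ */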
+ if (!xa_empty(&vport->egress.offloads.bounce_rules))
+ return;
+ mlx5_destroy_flow_group(vport->egress.offloads.bounce_grp);
+ vport->egress.offloads.bounce_grp = NULL;
+ mlx5_destroy_flow_table(vport->egress.acl);
+ vport->egress.acl = NULL;
+}
+
+static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
+ struct mlx5_core_dev *slave, size_t count)
+{
+ struct mlx5_eswitch *esw = master->priv.eswitch;
+ u16 slave_index = MLX5_CAP_GEN(slave, vhca_id);
+ struct mlx5_flow_namespace *egress_ns;
+ struct mlx5_vport *vport;
+ int err;
+
+ vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
+ if (IS_ERR(vport))
+ return PTR_ERR(vport);
+
+ egress_ns = mlx5_get_flow_vport_acl_namespace(master,
+ MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+ vport->index);
+ if (!egress_ns)
+ return -EINVAL;
+
+ if (vport->egress.acl && vport->egress.type != VPORT_EGRESS_ACL_TYPE_SHARED_FDB)
+ return 0;
+
+ err = esw_master_egress_create_resources(esw, egress_ns, vport, count);
+ if (err)
+ return err;
+
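+ /* A bounce rule for this slave must not exist yet. */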
+ if (xa_load(&vport->egress.offloads.bounce_rules, slave_index))
+ return -EINVAL;
+
+ err = __esw_set_master_egress_rule(master, slave, vport, vport->egress.acl);
+ if (err)
+ goto err_rule;
+
+ return 0;
+
+err_rule:
+ esw_master_egress_destroy_resources(vport);
+ return err;
+}
+
+static void esw_unset_master_egress_rule(struct mlx5_core_dev *dev,
+ struct mlx5_core_dev *slave_dev)
{
struct mlx5_vport *vport;
vport = mlx5_eswitch_get_vport(dev->priv.eswitch,
dev->priv.eswitch->manager_vport);
- esw_acl_egress_ofld_cleanup(vport);
+ esw_acl_egress_ofld_bounce_rule_destroy(vport, MLX5_CAP_GEN(slave_dev, vhca_id));
+
+ if (xa_empty(&vport->egress.offloads.bounce_rules)) {
+ esw_acl_egress_ofld_cleanup(vport);
+ xa_destroy(&vport->egress.offloads.bounce_rules);
+ }
}
-int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
- struct mlx5_eswitch *slave_esw)
+int mlx5_eswitch_offloads_single_fdb_add_one(struct mlx5_eswitch *master_esw,
+ struct mlx5_eswitch *slave_esw, int max_slaves)
{
int err;
@@ -2617,7 +2693,7 @@ int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
return err;
err = esw_set_master_egress_rule(master_esw->dev,
- slave_esw->dev);
+ slave_esw->dev, max_slaves);
if (err)
goto err_acl;
@@ -2625,21 +2701,21 @@ int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
err_acl:
esw_set_slave_root_fdb(NULL, slave_esw->dev);
-
return err;
}
-void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
+void mlx5_eswitch_offloads_single_fdb_del_one(struct mlx5_eswitch *master_esw,
struct mlx5_eswitch *slave_esw)
{
- esw_unset_master_egress_rule(master_esw->dev);
esw_set_slave_root_fdb(NULL, slave_esw->dev);
+ esw_unset_master_egress_rule(master_esw->dev, slave_esw->dev);
}
#define ESW_OFFLOADS_DEVCOM_PAIR (0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
-static void mlx5_esw_offloads_rep_event_unpair(struct mlx5_eswitch *esw)
+static void mlx5_esw_offloads_rep_event_unpair(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch *peer_esw)
{
const struct mlx5_eswitch_rep_ops *ops;
struct mlx5_eswitch_rep *rep;
@@ -2652,18 +2728,19 @@ static void mlx5_esw_offloads_rep_event_unpair(struct mlx5_eswitch *esw)
ops = esw->offloads.rep_ops[rep_type];
if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
ops->event)
- ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_UNPAIR, NULL);
+ ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_UNPAIR, peer_esw);
}
}
}
-static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
+static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw,
+ struct mlx5_eswitch *peer_esw)
{
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
mlx5e_tc_clean_fdb_peer_flows(esw);
#endif
- mlx5_esw_offloads_rep_event_unpair(esw);
- esw_del_fdb_peer_miss_rules(esw);
+ mlx5_esw_offloads_rep_event_unpair(esw, peer_esw);
+ esw_del_fdb_peer_miss_rules(esw, peer_esw->dev);
}
static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
@@ -2694,7 +2771,7 @@ static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
return 0;
err_out:
- mlx5_esw_offloads_unpair(esw);
+ mlx5_esw_offloads_unpair(esw, peer_esw);
return err;
}
@@ -2702,7 +2779,9 @@ static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
struct mlx5_eswitch *peer_esw,
bool pair)
{
+ u8 peer_idx = mlx5_get_dev_index(peer_esw->dev);
struct mlx5_flow_root_namespace *peer_ns;
+ u8 idx = mlx5_get_dev_index(esw->dev);
struct mlx5_flow_root_namespace *ns;
int err;
@@ -2710,18 +2789,18 @@ static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
ns = esw->dev->priv.steering->fdb_root_ns;
if (pair) {
- err = mlx5_flow_namespace_set_peer(ns, peer_ns);
+ err = mlx5_flow_namespace_set_peer(ns, peer_ns, peer_idx);
if (err)
return err;
- err = mlx5_flow_namespace_set_peer(peer_ns, ns);
+ err = mlx5_flow_namespace_set_peer(peer_ns, ns, idx);
if (err) {
- mlx5_flow_namespace_set_peer(ns, NULL);
+ mlx5_flow_namespace_set_peer(ns, NULL, peer_idx);
return err;
}
} else {
- mlx5_flow_namespace_set_peer(ns, NULL);
- mlx5_flow_namespace_set_peer(peer_ns, NULL);
+ mlx5_flow_namespace_set_peer(ns, NULL, peer_idx);
+ mlx5_flow_namespace_set_peer(peer_ns, NULL, idx);
}
return 0;
@@ -2734,15 +2813,21 @@ static int mlx5_esw_offloads_devcom_event(int event,
struct mlx5_eswitch *esw = my_data;
struct mlx5_devcom *devcom = esw->dev->priv.devcom;
struct mlx5_eswitch *peer_esw = event_data;
+ u16 esw_i, peer_esw_i;
+ bool esw_paired;
int err;
+ peer_esw_i = MLX5_CAP_GEN(peer_esw->dev, vhca_id);
+ esw_i = MLX5_CAP_GEN(esw->dev, vhca_id);
+ esw_paired = !!xa_load(&esw->paired, peer_esw_i);
+
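+ /* Pairings are tracked in an xarray keyed by the peer's vhca_id, so an
+ * eswitch can now be paired with more than one peer at a time.
+ */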
switch (event) {
case ESW_OFFLOADS_DEVCOM_PAIR:
if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
break;
- if (esw->paired[mlx5_get_dev_index(peer_esw->dev)])
+ if (esw_paired)
break;
err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
@@ -2756,28 +2841,43 @@ static int mlx5_esw_offloads_devcom_event(int event,
if (err)
goto err_pair;
- esw->paired[mlx5_get_dev_index(peer_esw->dev)] = true;
- peer_esw->paired[mlx5_get_dev_index(esw->dev)] = true;
- mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
+ err = xa_insert(&esw->paired, peer_esw_i, peer_esw, GFP_KERNEL);
+ if (err)
+ goto err_xa;
+
+ err = xa_insert(&peer_esw->paired, esw_i, esw, GFP_KERNEL);
+ if (err)
+ goto err_peer_xa;
+
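+ /* Count the pairing on both sides and mark the devcom component ready. */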
+ esw->num_peers++;
+ peer_esw->num_peers++;
+ mlx5_devcom_comp_set_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
break;
case ESW_OFFLOADS_DEVCOM_UNPAIR:
- if (!esw->paired[mlx5_get_dev_index(peer_esw->dev)])
+ if (!esw_paired)
break;
- mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
- esw->paired[mlx5_get_dev_index(peer_esw->dev)] = false;
- peer_esw->paired[mlx5_get_dev_index(esw->dev)] = false;
- mlx5_esw_offloads_unpair(peer_esw);
- mlx5_esw_offloads_unpair(esw);
+ peer_esw->num_peers--;
+ esw->num_peers--;
+ if (!esw->num_peers && !peer_esw->num_peers)
+ mlx5_devcom_comp_set_ready(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
+ xa_erase(&peer_esw->paired, esw_i);
+ xa_erase(&esw->paired, peer_esw_i);
+ mlx5_esw_offloads_unpair(peer_esw, esw);
+ mlx5_esw_offloads_unpair(esw, peer_esw);
mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
break;
}
return 0;
+err_peer_xa:
+ xa_erase(&esw->paired, peer_esw_i);
+err_xa:
+ mlx5_esw_offloads_unpair(peer_esw, esw);
err_pair:
- mlx5_esw_offloads_unpair(esw);
+ mlx5_esw_offloads_unpair(esw, peer_esw);
err_peer:
mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
err_out:
@@ -2789,24 +2889,29 @@ err_out:
void mlx5_esw_offloads_devcom_init(struct mlx5_eswitch *esw)
{
struct mlx5_devcom *devcom = esw->dev->priv.devcom;
+ int i;
- INIT_LIST_HEAD(&esw->offloads.peer_flows);
+ for (i = 0; i < MLX5_MAX_PORTS; i++)
+ INIT_LIST_HEAD(&esw->offloads.peer_flows[i]);
mutex_init(&esw->offloads.peer_mutex);
if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
return;
- if (!mlx5_is_lag_supported(esw->dev))
+ if (!mlx5_lag_is_supported(esw->dev))
return;
+ xa_init(&esw->paired);
mlx5_devcom_register_component(devcom,
MLX5_DEVCOM_ESW_OFFLOADS,
mlx5_esw_offloads_devcom_event,
esw);
+ esw->num_peers = 0;
mlx5_devcom_send_event(devcom,
MLX5_DEVCOM_ESW_OFFLOADS,
- ESW_OFFLOADS_DEVCOM_PAIR, esw);
+ ESW_OFFLOADS_DEVCOM_PAIR,
+ ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
}
void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
@@ -2816,13 +2921,15 @@ void mlx5_esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
return;
- if (!mlx5_is_lag_supported(esw->dev))
+ if (!mlx5_lag_is_supported(esw->dev))
return;
mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
+ ESW_OFFLOADS_DEVCOM_UNPAIR,
ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
+ xa_destroy(&esw->paired);
}
bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
@@ -2834,9 +2941,6 @@ bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
MLX5_FDB_TO_VPORT_REG_C_0))
return false;
- if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
- return false;
-
return true;
}
@@ -3247,6 +3351,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
/* Representor will control the vport link state */
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
+ if (mlx5_core_ec_sriov_enabled(esw->dev))
+ mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs)
+ vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
/* Uplink vport rep must load first. */
err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
@@ -3285,7 +3392,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
/* If changing from switchdev to legacy mode without sriov enabled,
* no need to create legacy fdb.
*/
- if (!mlx5_sriov_is_enabled(esw->dev))
+ if (!mlx5_core_is_pf(esw->dev) || !mlx5_sriov_is_enabled(esw->dev))
return 0;
err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS);
@@ -3484,8 +3591,27 @@ static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
goto revert_inline_mode;
}
}
+ if (mlx5_core_ec_sriov_enabled(esw->dev)) {
+ mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) {
+ err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
+ if (err) {
+ err_vport_num = vport->vport;
+ NL_SET_ERR_MSG_MOD(extack,
+ "Failed to set min inline on vport");
+ goto revert_ec_vf_inline_mode;
+ }
+ }
+ }
return 0;
+revert_ec_vf_inline_mode:
+ mlx5_esw_for_each_ec_vf_vport(esw, i, vport, esw->esw_funcs.num_ec_vfs) {
+ if (vport->vport == err_vport_num)
+ break;
+ mlx5_modify_nic_vport_min_inline(dev,
+ vport->vport,
+ esw->offloads.inline_mode);
+ }
revert_inline_mode:
mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
if (vport->vport == err_vport_num)
@@ -3835,9 +3961,6 @@ static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num,
int err;
*vhca_id = 0;
- if (mlx5_esw_is_manager_vport(esw, vport_num) ||
- !MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
- return -EPERM;
query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
if (!query_ctx)
@@ -3926,9 +4049,9 @@ is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num)
mlx5_esw_is_sf_vport(esw, vport_num);
}
-int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port,
- u8 *hw_addr, int *hw_addr_len,
- struct netlink_ext_ack *extack)
+int mlx5_devlink_port_fn_hw_addr_get(struct devlink_port *port,
+ u8 *hw_addr, int *hw_addr_len,
+ struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw;
struct mlx5_vport *vport;
@@ -3955,9 +4078,9 @@ int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port,
return 0;
}
-int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port,
- const u8 *hw_addr, int hw_addr_len,
- struct netlink_ext_ack *extack)
+int mlx5_devlink_port_fn_hw_addr_set(struct devlink_port *port,
+ const u8 *hw_addr, int hw_addr_len,
+ struct netlink_ext_ack *extack)
{
struct mlx5_eswitch *esw;
u16 vport_num;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c
index 718cf09c28ce..3ec892d51f57 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/events.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c
@@ -5,7 +5,7 @@
#include "mlx5_core.h"
#include "lib/eq.h"
-#include "lib/mlx5.h"
+#include "lib/events.h"
struct mlx5_event_nb {
struct mlx5_nb nb;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index ec83e6483d1a..91dcb0dcad10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -139,7 +139,8 @@ static void mlx5_cmd_stub_modify_header_dealloc(struct mlx5_flow_root_namespace
}
static int mlx5_cmd_stub_set_peer(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_root_namespace *peer_ns)
+ struct mlx5_flow_root_namespace *peer_ns,
+ u8 peer_idx)
{
return 0;
}
@@ -243,16 +244,22 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns,
ft->type == FS_FT_FDB &&
mlx5_lag_is_shared_fdb(dev) &&
mlx5_lag_is_master(dev)) {
- err = mlx5_cmd_set_slave_root_fdb(dev,
- mlx5_lag_get_peer_mdev(dev),
- !disconnect, (!disconnect) ?
- ft->id : 0);
- if (err && !disconnect) {
- MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
- MLX5_SET(set_flow_table_root_in, in, table_id,
- ns->root_ft->id);
- mlx5_cmd_exec_in(dev, set_flow_table_root, in);
+ struct mlx5_core_dev *peer_dev;
+ int i;
+
+ mlx5_lag_for_each_peer_mdev(dev, peer_dev, i) {
+ err = mlx5_cmd_set_slave_root_fdb(dev, peer_dev, !disconnect,
+ (!disconnect) ? ft->id : 0);
+ if (err && !disconnect) {
+ MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
+ MLX5_SET(set_flow_table_root_in, in, table_id,
+ ns->root_ft->id);
+ mlx5_cmd_exec_in(dev, set_flow_table_root, in);
+ }
+ if (err)
+ break;
}
+
}
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index 8ef4254b9ea1..b6b9a5a20591 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -93,7 +93,8 @@ struct mlx5_flow_cmds {
struct mlx5_modify_hdr *modify_hdr);
int (*set_peer)(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_root_namespace *peer_ns);
+ struct mlx5_flow_root_namespace *peer_ns,
+ u8 peer_idx);
int (*create_ns)(struct mlx5_flow_root_namespace *ns);
int (*destroy_ns)(struct mlx5_flow_root_namespace *ns);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 19da02c41616..4ef04aa28771 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -3620,7 +3620,8 @@ void mlx5_destroy_match_definer(struct mlx5_core_dev *dev,
}
int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_root_namespace *peer_ns)
+ struct mlx5_flow_root_namespace *peer_ns,
+ u8 peer_idx)
{
if (peer_ns && ns->mode != peer_ns->mode) {
mlx5_core_err(ns->dev,
@@ -3628,7 +3629,7 @@ int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
return -EINVAL;
}
- return ns->cmds->set_peer(ns, peer_ns);
+ return ns->cmds->set_peer(ns, peer_ns, peer_idx);
}
/* This function should be called only at init stage of the namespace.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index b043190e50a8..03e64c4c245d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -302,7 +302,8 @@ void mlx5_fc_update_sampling_interval(struct mlx5_core_dev *dev,
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_fw_cmds(void);
int mlx5_flow_namespace_set_peer(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_root_namespace *peer_ns);
+ struct mlx5_flow_root_namespace *peer_ns,
+ u8 peer_idx);
int mlx5_flow_namespace_set_mode(struct mlx5_flow_namespace *ns,
enum mlx5_flow_steering_mode mode);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 7bb7be01225a..fb2035a5ec99 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -196,14 +196,11 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
- if (MLX5_CAP_GEN(dev, vport_group_manager) &&
- MLX5_ESWITCH_MANAGER(dev)) {
+ if (MLX5_ESWITCH_MANAGER(dev)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
if (err)
return err;
- }
- if (MLX5_ESWITCH_MANAGER(dev)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
index 50022e7565f1..fb7874da3caa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c
@@ -21,6 +21,7 @@ struct mlx5_fw_reset {
struct workqueue_struct *wq;
struct work_struct fw_live_patch_work;
struct work_struct reset_request_work;
+ struct work_struct reset_unload_work;
struct work_struct reset_reload_work;
struct work_struct reset_now_work;
struct work_struct reset_abort_work;
@@ -30,6 +31,26 @@ struct mlx5_fw_reset {
int ret;
};
+enum {
+ MLX5_FW_RST_STATE_IDLE = 0,
+ MLX5_FW_RST_STATE_TOGGLE_REQ = 4,
+};
+
+enum {
+ MLX5_RST_STATE_BIT_NUM = 12,
+ MLX5_RST_ACK_BIT_NUM = 22,
+};
+
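+/* The reset state and host ack are exchanged through the 'initializing'
+ * word of the initialization segment: a 4-bit state field at bit 12 and
+ * a write-one ack bit at bit 22.
+ */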
+static u8 mlx5_get_fw_rst_state(struct mlx5_core_dev *dev)
+{
+ return (ioread32be(&dev->iseg->initializing) >> MLX5_RST_STATE_BIT_NUM) & 0xF;
+}
+
+static void mlx5_set_fw_rst_ack(struct mlx5_core_dev *dev)
+{
+ iowrite32be(BIT(MLX5_RST_ACK_BIT_NUM), &dev->iseg->initializing);
+}
+
static int mlx5_fw_reset_enable_remote_dev_reset_set(struct devlink *devlink, u32 id,
struct devlink_param_gset_ctx *ctx)
{
@@ -155,7 +176,7 @@ int mlx5_fw_reset_set_live_patch(struct mlx5_core_dev *dev)
return mlx5_reg_mfrl_set(dev, MLX5_MFRL_REG_RESET_LEVEL0, 0, 0, false);
}
-static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
+static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev, bool unloaded)
{
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
@@ -163,7 +184,8 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev)
if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags)) {
complete(&fw_reset->done);
} else {
- mlx5_unload_one(dev, false);
+ if (!unloaded)
+ mlx5_unload_one(dev, false);
if (mlx5_health_wait_pci_up(dev))
mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n");
else
@@ -204,7 +226,7 @@ static void mlx5_sync_reset_reload_work(struct work_struct *work)
mlx5_sync_reset_clear_reset_requested(dev, false);
mlx5_enter_error_state(dev, true);
- mlx5_fw_reset_complete_reload(dev);
+ mlx5_fw_reset_complete_reload(dev, false);
}
#define MLX5_RESET_POLL_INTERVAL (HZ / 10)
@@ -276,6 +298,44 @@ static void mlx5_fw_live_patch_event(struct work_struct *work)
mlx5_core_err(dev, "Failed to reload FW tracer\n");
}
+static int mlx5_check_dev_ids(struct mlx5_core_dev *dev, u16 dev_id)
+{
+ struct pci_bus *bridge_bus = dev->pdev->bus;
+ struct pci_dev *sdev;
+ u16 sdev_id;
+ int err;
+
+ /* Check that all functions under the pci bridge are PFs of
+ * this device; otherwise fail this function.
+ */
+ list_for_each_entry(sdev, &bridge_bus->devices, bus_list) {
+ err = pci_read_config_word(sdev, PCI_DEVICE_ID, &sdev_id);
+ if (err)
+ return err;
+ if (sdev_id != dev_id) {
+ mlx5_core_warn(dev, "unrecognized dev_id (0x%x)\n", sdev_id);
+ return -EPERM;
+ }
+ }
+ return 0;
+}
+
+static bool mlx5_is_reset_now_capable(struct mlx5_core_dev *dev)
+{
+ u16 dev_id;
+ int err;
+
+ if (!MLX5_CAP_GEN(dev, fast_teardown)) {
+ mlx5_core_warn(dev, "fast teardown is not supported by firmware\n");
+ return false;
+ }
+
+ err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
+ if (err)
+ return false;
+ return (!mlx5_check_dev_ids(dev, dev_id));
+}
+
static void mlx5_sync_reset_request_event(struct work_struct *work)
{
struct mlx5_fw_reset *fw_reset = container_of(work, struct mlx5_fw_reset,
@@ -283,7 +343,8 @@ static void mlx5_sync_reset_request_event(struct work_struct *work)
struct mlx5_core_dev *dev = fw_reset->dev;
int err;
- if (test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags)) {
+ if (test_bit(MLX5_FW_RESET_FLAGS_NACK_RESET_REQUEST, &fw_reset->reset_flags) ||
+ !mlx5_is_reset_now_capable(dev)) {
err = mlx5_fw_reset_set_reset_sync_nack(dev);
mlx5_core_warn(dev, "PCI Sync FW Update Reset Nack %s",
err ? "Failed" : "Sent");
@@ -303,26 +364,18 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev)
{
struct pci_bus *bridge_bus = dev->pdev->bus;
struct pci_dev *bridge = bridge_bus->self;
- u16 reg16, dev_id, sdev_id;
unsigned long timeout;
struct pci_dev *sdev;
+ u16 reg16, dev_id;
int cap, err;
u32 reg32;
- /* Check that all functions under the pci bridge are PFs of
- * this device otherwise fail this function.
- */
err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &dev_id);
if (err)
return err;
- list_for_each_entry(sdev, &bridge_bus->devices, bus_list) {
- err = pci_read_config_word(sdev, PCI_DEVICE_ID, &sdev_id);
- if (err)
- return err;
- if (sdev_id != dev_id)
- return -EPERM;
- }
-
+ err = mlx5_check_dev_ids(dev, dev_id);
+ if (err)
+ return err;
cap = pci_find_capability(bridge, PCI_CAP_ID_EXP);
if (!cap)
return -EOPNOTSUPP;
@@ -427,7 +480,70 @@ static void mlx5_sync_reset_now_event(struct work_struct *work)
mlx5_enter_error_state(dev, true);
done:
fw_reset->ret = err;
- mlx5_fw_reset_complete_reload(dev);
+ mlx5_fw_reset_complete_reload(dev, false);
+}
+
+static void mlx5_sync_reset_unload_event(struct work_struct *work)
+{
+ struct mlx5_fw_reset *fw_reset;
+ struct mlx5_core_dev *dev;
+ unsigned long timeout;
+ bool reset_action;
+ u8 rst_state;
+ int err;
+
+ fw_reset = container_of(work, struct mlx5_fw_reset, reset_unload_work);
+ dev = fw_reset->dev;
+
+ if (mlx5_sync_reset_clear_reset_requested(dev, false))
+ return;
+
+ mlx5_core_warn(dev, "Sync Reset Unload. Function is forced down.\n");
+
+ err = mlx5_cmd_fast_teardown_hca(dev);
+ if (err)
+ mlx5_core_warn(dev, "Fast teardown failed, unloading, err %d\n", err);
+ else
+ mlx5_enter_error_state(dev, true);
+
+ if (test_bit(MLX5_FW_RESET_FLAGS_PENDING_COMP, &fw_reset->reset_flags))
+ mlx5_unload_one_devl_locked(dev, false);
+ else
+ mlx5_unload_one(dev, false);
+
+ mlx5_set_fw_rst_ack(dev);
+ mlx5_core_warn(dev, "Sync Reset Unload done, device reset expected\n");
+
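+ /* Poll the FW reset state until firmware either requests a link toggle
+ * or returns to idle, within the RESET_UNLOAD timeout.
+ */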
+ reset_action = false;
+ timeout = jiffies + msecs_to_jiffies(mlx5_tout_ms(dev, RESET_UNLOAD));
+ do {
+ rst_state = mlx5_get_fw_rst_state(dev);
+ if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ ||
+ rst_state == MLX5_FW_RST_STATE_IDLE) {
+ reset_action = true;
+ break;
+ }
+ msleep(20);
+ } while (!time_after(jiffies, timeout));
+
+ if (!reset_action) {
+ mlx5_core_err(dev, "Got timeout waiting for sync reset action, state = %u\n",
+ rst_state);
+ fw_reset->ret = -ETIMEDOUT;
+ goto done;
+ }
+
+ mlx5_core_warn(dev, "Sync Reset, got reset action. rst_state = %u\n", rst_state);
+ if (rst_state == MLX5_FW_RST_STATE_TOGGLE_REQ) {
+ err = mlx5_pci_link_toggle(dev);
+ if (err) {
+ mlx5_core_warn(dev, "mlx5_pci_link_toggle failed, err %d\n", err);
+ fw_reset->ret = err;
+ }
+ }
+
+done:
+ mlx5_fw_reset_complete_reload(dev, true);
}
static void mlx5_sync_reset_abort_event(struct work_struct *work)
@@ -452,6 +568,9 @@ static void mlx5_sync_reset_events_handle(struct mlx5_fw_reset *fw_reset, struct
case MLX5_SYNC_RST_STATE_RESET_REQUEST:
queue_work(fw_reset->wq, &fw_reset->reset_request_work);
break;
+ case MLX5_SYNC_RST_STATE_RESET_UNLOAD:
+ queue_work(fw_reset->wq, &fw_reset->reset_unload_work);
+ break;
case MLX5_SYNC_RST_STATE_RESET_NOW:
queue_work(fw_reset->wq, &fw_reset->reset_now_work);
break;
@@ -486,10 +605,13 @@ static int fw_reset_event_notifier(struct notifier_block *nb, unsigned long acti
int mlx5_fw_reset_wait_reset_done(struct mlx5_core_dev *dev)
{
unsigned long pci_sync_update_timeout = mlx5_tout_ms(dev, PCI_SYNC_UPDATE);
- unsigned long timeout = msecs_to_jiffies(pci_sync_update_timeout);
struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;
+ unsigned long timeout;
int err;
+ if (MLX5_CAP_GEN(dev, pci_sync_for_fw_update_with_driver_unload))
+ pci_sync_update_timeout += mlx5_tout_ms(dev, RESET_UNLOAD);
+ timeout = msecs_to_jiffies(pci_sync_update_timeout);
if (!wait_for_completion_timeout(&fw_reset->done, timeout)) {
mlx5_core_warn(dev, "FW sync reset timeout after %lu seconds\n",
pci_sync_update_timeout / 1000);
@@ -526,6 +648,7 @@ void mlx5_drain_fw_reset(struct mlx5_core_dev *dev)
set_bit(MLX5_FW_RESET_FLAGS_DROP_NEW_REQUESTS, &fw_reset->reset_flags);
cancel_work_sync(&fw_reset->fw_live_patch_work);
cancel_work_sync(&fw_reset->reset_request_work);
+ cancel_work_sync(&fw_reset->reset_unload_work);
cancel_work_sync(&fw_reset->reset_reload_work);
cancel_work_sync(&fw_reset->reset_now_work);
cancel_work_sync(&fw_reset->reset_abort_work);
@@ -564,6 +687,7 @@ int mlx5_fw_reset_init(struct mlx5_core_dev *dev)
INIT_WORK(&fw_reset->fw_live_patch_work, mlx5_fw_live_patch_event);
INIT_WORK(&fw_reset->reset_request_work, mlx5_sync_reset_request_event);
+ INIT_WORK(&fw_reset->reset_unload_work, mlx5_sync_reset_unload_event);
INIT_WORK(&fw_reset->reset_reload_work, mlx5_sync_reset_reload_work);
INIT_WORK(&fw_reset->reset_now_work, mlx5_sync_reset_now_event);
INIT_WORK(&fw_reset->reset_abort_work, mlx5_sync_reset_abort_event);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 871c32dda66e..187cb2c464f8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -39,6 +39,7 @@
#include "mlx5_core.h"
#include "lib/eq.h"
#include "lib/mlx5.h"
+#include "lib/events.h"
#include "lib/pci_vsc.h"
#include "lib/tout.h"
#include "diag/fw_tracer.h"
@@ -719,7 +720,7 @@ static const struct devlink_health_reporter_ops mlx5_fw_fatal_reporter_ops = {
#define MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD 30000
#define MLX5_FW_REPORTER_DEFAULT_GRACEFUL_PERIOD MLX5_FW_REPORTER_VF_GRACEFUL_PERIOD
-static void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
+void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
struct devlink *devlink = priv_to_devlink(dev);
@@ -735,17 +736,17 @@ static void mlx5_fw_reporters_create(struct mlx5_core_dev *dev)
}
health->fw_reporter =
- devlink_health_reporter_create(devlink, &mlx5_fw_reporter_ops,
- 0, dev);
+ devl_health_reporter_create(devlink, &mlx5_fw_reporter_ops,
+ 0, dev);
if (IS_ERR(health->fw_reporter))
mlx5_core_warn(dev, "Failed to create fw reporter, err = %ld\n",
PTR_ERR(health->fw_reporter));
health->fw_fatal_reporter =
- devlink_health_reporter_create(devlink,
- &mlx5_fw_fatal_reporter_ops,
- grace_period,
- dev);
+ devl_health_reporter_create(devlink,
+ &mlx5_fw_fatal_reporter_ops,
+ grace_period,
+ dev);
if (IS_ERR(health->fw_fatal_reporter))
mlx5_core_warn(dev, "Failed to create fw fatal reporter, err = %ld\n",
PTR_ERR(health->fw_fatal_reporter));
@@ -777,7 +778,8 @@ void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
- queue_work(health->wq, &health->fatal_report_work);
+ if (!mlx5_dev_is_lightweight(dev))
+ queue_work(health->wq, &health->fatal_report_work);
}
#define MLX5_MSEC_PER_HOUR (MSEC_PER_SEC * 60 * 60)
@@ -905,10 +907,15 @@ void mlx5_health_cleanup(struct mlx5_core_dev *dev)
int mlx5_health_init(struct mlx5_core_dev *dev)
{
+ struct devlink *devlink = priv_to_devlink(dev);
struct mlx5_core_health *health;
char *name;
- mlx5_fw_reporters_create(dev);
+ if (!mlx5_dev_is_lightweight(dev)) {
+ devl_lock(devlink);
+ mlx5_fw_reporters_create(dev);
+ devl_unlock(devlink);
+ }
mlx5_reporter_vnic_create(dev);
health = &dev->priv.health;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
index 5d331b940f4d..f0a074b2fcdf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
@@ -512,8 +512,11 @@ static void mlx5_lag_set_port_sel_mode_offloads(struct mlx5_lag *ldev,
return;
if (MLX5_CAP_PORT_SELECTION(dev0->dev, port_select_flow_table) &&
- tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH)
+ tracker->tx_type == NETDEV_LAG_TX_TYPE_HASH) {
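+ /* With more than two ports, spread traffic across the maximum
+ * number of hash buckets.
+ */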
+ if (ldev->ports > 2)
+ ldev->buckets = MLX5_LAG_MAX_HASH_BUCKETS;
set_bit(MLX5_LAG_MODE_FLAG_HASH_BASED, flags);
+ }
}
static int mlx5_lag_set_flags(struct mlx5_lag *ldev, enum mlx5_lag_mode mode,
@@ -550,6 +553,29 @@ char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags)
}
}
+static int mlx5_lag_create_single_fdb(struct mlx5_lag *ldev)
+{
+ struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
+ struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
+ int err;
+ int i;
+
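+ /* Chain every slave eswitch to the master FDB; on failure, unwind the
+ * slaves that were already added.
+ */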
+ for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
+ struct mlx5_eswitch *slave_esw = ldev->pf[i].dev->priv.eswitch;
+
+ err = mlx5_eswitch_offloads_single_fdb_add_one(master_esw,
+ slave_esw, ldev->ports);
+ if (err)
+ goto err;
+ }
+ return 0;
+err:
+ for (; i > MLX5_LAG_P1; i--)
+ mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
+ ldev->pf[i].dev->priv.eswitch);
+ return err;
+}
+
static int mlx5_create_lag(struct mlx5_lag *ldev,
struct lag_tracker *tracker,
enum mlx5_lag_mode mode,
@@ -557,7 +583,6 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
{
bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
int err;
@@ -575,8 +600,7 @@ static int mlx5_create_lag(struct mlx5_lag *ldev,
}
if (shared_fdb) {
- err = mlx5_eswitch_offloads_config_single_fdb(dev0->priv.eswitch,
- dev1->priv.eswitch);
+ err = mlx5_lag_create_single_fdb(ldev);
if (err)
mlx5_core_err(dev0, "Can't enable single FDB mode\n");
else
@@ -647,19 +671,21 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
int mlx5_deactivate_lag(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
+ struct mlx5_eswitch *master_esw = dev0->priv.eswitch;
u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {};
bool roce_lag = __mlx5_lag_is_roce(ldev);
unsigned long flags = ldev->mode_flags;
int err;
+ int i;
ldev->mode = MLX5_LAG_MODE_NONE;
ldev->mode_flags = 0;
mlx5_lag_mp_reset(ldev);
if (test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags)) {
- mlx5_eswitch_offloads_destroy_single_fdb(dev0->priv.eswitch,
- dev1->priv.eswitch);
+ for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++)
+ mlx5_eswitch_offloads_single_fdb_del_one(master_esw,
+ ldev->pf[i].dev->priv.eswitch);
clear_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &flags);
}
@@ -685,7 +711,7 @@ int mlx5_deactivate_lag(struct mlx5_lag *ldev)
return 0;
}
-#define MLX5_LAG_OFFLOADS_SUPPORTED_PORTS 2
+#define MLX5_LAG_OFFLOADS_SUPPORTED_PORTS 4
bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
{
#ifdef CONFIG_MLX5_ESWITCH
@@ -711,7 +737,7 @@ bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
if (mlx5_eswitch_mode(ldev->pf[i].dev) != mode)
return false;
- if (mode == MLX5_ESWITCH_OFFLOADS && ldev->ports != MLX5_LAG_OFFLOADS_SUPPORTED_PORTS)
+ if (mode == MLX5_ESWITCH_OFFLOADS && ldev->ports > MLX5_LAG_OFFLOADS_SUPPORTED_PORTS)
return false;
#else
for (i = 0; i < ldev->ports; i++)
@@ -759,7 +785,6 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
{
bool shared_fdb = test_bit(MLX5_LAG_MODE_FLAG_SHARED_FDB, &ldev->mode_flags);
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
bool roce_lag;
int err;
int i;
@@ -784,28 +809,35 @@ void mlx5_disable_lag(struct mlx5_lag *ldev)
if (shared_fdb || roce_lag)
mlx5_lag_add_devices(ldev);
- if (shared_fdb) {
- if (!(dev0->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
- mlx5_eswitch_reload_reps(dev0->priv.eswitch);
- if (!(dev1->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
- mlx5_eswitch_reload_reps(dev1->priv.eswitch);
- }
+ if (shared_fdb)
+ for (i = 0; i < ldev->ports; i++)
+ if (!(ldev->pf[i].dev->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV))
+ mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
}
-bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
+static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev)
{
- struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
-
- if (is_mdev_switchdev_mode(dev0) &&
- is_mdev_switchdev_mode(dev1) &&
- mlx5_eswitch_vport_match_metadata_enabled(dev0->priv.eswitch) &&
- mlx5_eswitch_vport_match_metadata_enabled(dev1->priv.eswitch) &&
- mlx5_devcom_is_paired(dev0->priv.devcom,
- MLX5_DEVCOM_ESW_OFFLOADS) &&
- MLX5_CAP_GEN(dev1, lag_native_fdb_selection) &&
- MLX5_CAP_ESW(dev1, root_ft_on_other_esw) &&
- MLX5_CAP_ESW(dev0, esw_shared_ingress_acl))
+ struct mlx5_core_dev *dev;
+ int i;
+
+ for (i = MLX5_LAG_P1 + 1; i < ldev->ports; i++) {
+ dev = ldev->pf[i].dev;
+ if (is_mdev_switchdev_mode(dev) &&
+ mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
+ MLX5_CAP_GEN(dev, lag_native_fdb_selection) &&
+ MLX5_CAP_ESW(dev, root_ft_on_other_esw) &&
+ mlx5_eswitch_get_npeers(dev->priv.eswitch) ==
+ MLX5_CAP_GEN(dev, num_lag_ports) - 1)
+ continue;
+ return false;
+ }
+
+ dev = ldev->pf[MLX5_LAG_P1].dev;
+ if (is_mdev_switchdev_mode(dev) &&
+ mlx5_eswitch_vport_match_metadata_enabled(dev->priv.eswitch) &&
+ mlx5_devcom_comp_is_ready(dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS) &&
+ MLX5_CAP_ESW(dev, esw_shared_ingress_acl) &&
+ mlx5_eswitch_get_npeers(dev->priv.eswitch) == MLX5_CAP_GEN(dev, num_lag_ports) - 1)
return true;
return false;
@@ -842,7 +874,6 @@ static bool mlx5_lag_should_disable_lag(struct mlx5_lag *ldev, bool do_bond)
static void mlx5_do_bond(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
- struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev;
struct lag_tracker tracker = { };
bool do_bond, roce_lag;
int err;
@@ -883,20 +914,24 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
for (i = 1; i < ldev->ports; i++)
mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
} else if (shared_fdb) {
+ int i;
+
dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
- err = mlx5_eswitch_reload_reps(dev0->priv.eswitch);
- if (!err)
- err = mlx5_eswitch_reload_reps(dev1->priv.eswitch);
+ for (i = 0; i < ldev->ports; i++) {
+ err = mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
+ if (err)
+ break;
+ }
if (err) {
dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
mlx5_rescan_drivers_locked(dev0);
mlx5_deactivate_lag(ldev);
mlx5_lag_add_devices(ldev);
- mlx5_eswitch_reload_reps(dev0->priv.eswitch);
- mlx5_eswitch_reload_reps(dev1->priv.eswitch);
+ for (i = 0; i < ldev->ports; i++)
+ mlx5_eswitch_reload_reps(ldev->pf[i].dev->priv.eswitch);
mlx5_core_err(dev0, "Failed to enable lag\n");
return;
}
@@ -1233,14 +1268,21 @@ recheck:
mlx5_ldev_put(ldev);
}
+bool mlx5_lag_is_supported(struct mlx5_core_dev *dev)
+{
+ if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+ !MLX5_CAP_GEN(dev, lag_master) ||
+ MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
+ MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
+ return false;
+ return true;
+}
+
void mlx5_lag_add_mdev(struct mlx5_core_dev *dev)
{
int err;
- if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
- !MLX5_CAP_GEN(dev, lag_master) ||
- (MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS ||
- MLX5_CAP_GEN(dev, num_lag_ports) <= 1))
+ if (!mlx5_lag_is_supported(dev))
return;
recheck:
@@ -1496,26 +1538,37 @@ u8 mlx5_lag_get_num_ports(struct mlx5_core_dev *dev)
}
EXPORT_SYMBOL(mlx5_lag_get_num_ports);
-struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev)
+struct mlx5_core_dev *mlx5_lag_get_next_peer_mdev(struct mlx5_core_dev *dev, int *i)
{
struct mlx5_core_dev *peer_dev = NULL;
struct mlx5_lag *ldev;
unsigned long flags;
+ int idx;
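+ /* *i is the iteration cursor: resume from it, skip our own device, and
+ * advance it past the returned peer. Callers normally iterate via the
+ * mlx5_lag_for_each_peer_mdev() wrapper.
+ */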
spin_lock_irqsave(&lag_lock, flags);
ldev = mlx5_lag_dev(dev);
if (!ldev)
goto unlock;
- peer_dev = ldev->pf[MLX5_LAG_P1].dev == dev ?
- ldev->pf[MLX5_LAG_P2].dev :
- ldev->pf[MLX5_LAG_P1].dev;
+ if (*i == ldev->ports)
+ goto unlock;
+ for (idx = *i; idx < ldev->ports; idx++)
+ if (ldev->pf[idx].dev != dev)
+ break;
+
+ if (idx == ldev->ports) {
+ *i = idx;
+ goto unlock;
+ }
+ *i = idx + 1;
+
+ peer_dev = ldev->pf[idx].dev;
unlock:
spin_unlock_irqrestore(&lag_lock, flags);
return peer_dev;
}
-EXPORT_SYMBOL(mlx5_lag_get_peer_mdev);
+EXPORT_SYMBOL(mlx5_lag_get_next_peer_mdev);
int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev,
u64 *values,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
index bc1f1dd3e283..a061b1873e27 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h
@@ -74,15 +74,7 @@ struct mlx5_lag {
struct lag_mpesw lag_mpesw;
};
-static inline bool mlx5_is_lag_supported(struct mlx5_core_dev *dev)
-{
- if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
- !MLX5_CAP_GEN(dev, lag_master) ||
- MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
- MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
- return false;
- return true;
-}
+bool mlx5_lag_is_supported(struct mlx5_core_dev *dev);
static inline struct mlx5_lag *
mlx5_lag_dev(struct mlx5_core_dev *dev)
@@ -111,7 +103,6 @@ int mlx5_activate_lag(struct mlx5_lag *ldev,
bool shared_fdb);
int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev,
struct net_device *ndev);
-bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev);
char *mlx5_get_str_port_sel_mode(enum mlx5_lag_mode mode, unsigned long flags);
void mlx5_infer_tx_enabled(struct lag_tracker *tracker, u8 num_ports,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
index d85a8dfc153d..b1aa494c76ba 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mp.c
@@ -7,13 +7,14 @@
#include "lag/mp.h"
#include "mlx5_core.h"
#include "eswitch.h"
-#include "lib/mlx5.h"
+#include "lib/events.h"
static bool __mlx5_lag_is_multipath(struct mlx5_lag *ldev)
{
return ldev->mode == MLX5_LAG_MODE_MULTIPATH;
}
+#define MLX5_LAG_MULTIPATH_OFFLOADS_SUPPORTED_PORTS 2
static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
{
if (!mlx5_lag_is_ready(ldev))
@@ -22,6 +23,9 @@ static bool mlx5_lag_multipath_check_prereq(struct mlx5_lag *ldev)
if (__mlx5_lag_is_active(ldev) && !__mlx5_lag_is_multipath(ldev))
return false;
+ if (ldev->ports > MLX5_LAG_MULTIPATH_OFFLOADS_SUPPORTED_PORTS)
+ return false;
+
return mlx5_esw_multipath_prereq(ldev->pf[MLX5_LAG_P1].dev,
ldev->pf[MLX5_LAG_P2].dev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
index 0c0ef600f643..4bf15391525c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/mpesw.c
@@ -6,7 +6,7 @@
#include "lag/lag.h"
#include "eswitch.h"
#include "esw/acl/ofld.h"
-#include "lib/mlx5.h"
+#include "lib/events.h"
static void mlx5_mpesw_metadata_cleanup(struct mlx5_lag *ldev)
{
@@ -65,6 +65,7 @@ err_metadata:
return err;
}
+#define MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS 2
static int enable_mpesw(struct mlx5_lag *ldev)
{
struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev;
@@ -74,6 +75,9 @@ static int enable_mpesw(struct mlx5_lag *ldev)
if (ldev->mode != MLX5_LAG_MODE_NONE)
return -EINVAL;
+ if (ldev->ports > MLX5_LAG_MPESW_OFFLOADS_SUPPORTED_PORTS)
+ return -EOPNOTSUPP;
+
if (mlx5_eswitch_mode(dev0) != MLX5_ESWITCH_OFFLOADS ||
!MLX5_CAP_PORT_SELECTION(dev0, port_select_flow_table) ||
!MLX5_CAP_GEN(dev0, create_lag_when_not_master_up) ||
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 932fbc843c69..973babfaff25 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -93,17 +93,23 @@ static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
}
-static bool mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev *mdev, s64 delta)
+static s32 mlx5_ptp_getmaxphase(struct ptp_clock_info *ptp)
{
- s64 min = MLX5_MTUTC_OPERATION_ADJUST_TIME_MIN;
- s64 max = MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX;
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev;
- if (MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range)) {
- min = MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MIN;
- max = MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX;
- }
+ mdev = container_of(clock, struct mlx5_core_dev, clock);
+
+ return MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range) ?
+ MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX :
+ MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX;
+}
+
+static bool mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev *mdev, s64 delta)
+{
+ s64 max = mlx5_ptp_getmaxphase(&mdev->clock.ptp_info);
- if (delta < min || delta > max)
+ if (delta < -max || delta > max)
return false;
return true;
@@ -351,14 +357,6 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
{
- struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
- struct mlx5_core_dev *mdev;
-
- mdev = container_of(clock, struct mlx5_core_dev, clock);
-
- if (!mlx5_is_mtutc_time_adj_cap(mdev, delta))
- return -ERANGE;
-
return mlx5_ptp_adjtime(ptp, delta);
}
@@ -734,6 +732,7 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
.pps = 0,
.adjfine = mlx5_ptp_adjfine,
.adjphase = mlx5_ptp_adjphase,
+ .getmaxphase = mlx5_ptp_getmaxphase,
.adjtime = mlx5_ptp_adjtime,
.gettimex64 = mlx5_ptp_gettimex,
.settime64 = mlx5_ptp_settime,
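mlx5_ptp_adjphase() can drop its local range check (see the hunk above) because the PTP core is expected to validate the requested offset against the new .getmaxphase callback before calling into the driver; a rough sketch of that assumed core-side check, with ops being the driver's struct ptp_clock_info:

	s32 max_phase_adj = ops->getmaxphase(ops);

	if (offset < -max_phase_adj || offset > max_phase_adj)
		return -ERANGE;
	err = ops->adjphase(ops, offset);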
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
index b7d779d08d83..78c94b22bdc0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c
@@ -19,7 +19,7 @@ struct mlx5_devcom_component {
mlx5_devcom_event_handler_t handler;
struct rw_semaphore sem;
- bool paired;
+ bool ready;
};
struct mlx5_devcom_list {
@@ -75,13 +75,14 @@ struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev)
if (!mlx5_core_is_pf(dev))
return NULL;
- if (MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_DEVCOM_PORTS_SUPPORTED)
+ if (MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_DEVCOM_PORTS_SUPPORTED)
return NULL;
mlx5_dev_list_lock();
sguid0 = mlx5_query_nic_system_image_guid(dev);
list_for_each_entry(iter, &devcom_list, list) {
- struct mlx5_core_dev *tmp_dev = NULL;
+ /* There is at least one device in iter */
+ struct mlx5_core_dev *tmp_dev;
idx = -1;
for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++) {
@@ -193,7 +194,7 @@ void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom,
int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id,
- int event,
+ int event, int rollback_event,
void *event_data)
{
struct mlx5_devcom_component *comp;
@@ -210,84 +211,134 @@ int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
if (i != devcom->idx && data) {
err = comp->handler(event, data, event_data);
- break;
+ if (err)
+ goto rollback;
}
}
up_write(&comp->sem);
+ return 0;
+
+rollback:
+ while (i--) {
+ void *data = rcu_dereference_protected(comp->device[i].data,
+ lockdep_is_held(&comp->sem));
+
+ if (i != devcom->idx && data)
+ comp->handler(rollback_event, data, event_data);
+ }
+
+ up_write(&comp->sem);
return err;
}
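A hedged sketch of how the paired event arguments are meant to be used (the event names follow the eswitch-offloads convention and are assumptions here): if any peer's handler fails, the peers already notified are walked back with the rollback event, so no peer is left half-paired.

	err = mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
				     ESW_OFFLOADS_DEVCOM_PAIR,   /* forward event */
				     ESW_OFFLOADS_DEVCOM_UNPAIR, /* rollback event */
				     esw);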
-void mlx5_devcom_set_paired(struct mlx5_devcom *devcom,
- enum mlx5_devcom_components id,
- bool paired)
+void mlx5_devcom_comp_set_ready(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id,
+ bool ready)
{
struct mlx5_devcom_component *comp;
comp = &devcom->priv->components[id];
WARN_ON(!rwsem_is_locked(&comp->sem));
- WRITE_ONCE(comp->paired, paired);
+ WRITE_ONCE(comp->ready, ready);
}
-bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
- enum mlx5_devcom_components id)
+bool mlx5_devcom_comp_is_ready(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id)
{
if (IS_ERR_OR_NULL(devcom))
return false;
- return READ_ONCE(devcom->priv->components[id].paired);
+ return READ_ONCE(devcom->priv->components[id].ready);
}
-void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
- enum mlx5_devcom_components id)
+bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id)
{
struct mlx5_devcom_component *comp;
- int i;
if (IS_ERR_OR_NULL(devcom))
- return NULL;
+ return false;
comp = &devcom->priv->components[id];
down_read(&comp->sem);
- if (!READ_ONCE(comp->paired)) {
+ if (!READ_ONCE(comp->ready)) {
up_read(&comp->sem);
- return NULL;
+ return false;
}
- for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
- if (i != devcom->idx)
- break;
+ return true;
+}
+
+void mlx5_devcom_for_each_peer_end(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id)
+{
+ struct mlx5_devcom_component *comp = &devcom->priv->components[id];
- return rcu_dereference_protected(comp->device[i].data, lockdep_is_held(&comp->sem));
+ up_read(&comp->sem);
}
-void *mlx5_devcom_get_peer_data_rcu(struct mlx5_devcom *devcom, enum mlx5_devcom_components id)
+void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id,
+ int *i)
{
struct mlx5_devcom_component *comp;
- int i;
+ void *ret;
+ int idx;
- if (IS_ERR_OR_NULL(devcom))
- return NULL;
+ comp = &devcom->priv->components[id];
- for (i = 0; i < MLX5_DEVCOM_PORTS_SUPPORTED; i++)
- if (i != devcom->idx)
- break;
+ if (*i == MLX5_DEVCOM_PORTS_SUPPORTED)
+ return NULL;
+ for (idx = *i; idx < MLX5_DEVCOM_PORTS_SUPPORTED; idx++) {
+ if (idx != devcom->idx) {
+ ret = rcu_dereference_protected(comp->device[idx].data,
+ lockdep_is_held(&comp->sem));
+ if (ret)
+ break;
+ }
+ }
- comp = &devcom->priv->components[id];
- /* This can change concurrently, however 'data' pointer will remain
- * valid for the duration of RCU read section.
- */
- if (!READ_ONCE(comp->paired))
+ if (idx == MLX5_DEVCOM_PORTS_SUPPORTED) {
+ *i = idx;
return NULL;
+ }
+ *i = idx + 1;
- return rcu_dereference(comp->device[i].data);
+ return ret;
}
-void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom,
- enum mlx5_devcom_components id)
+void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id,
+ int *i)
{
- struct mlx5_devcom_component *comp = &devcom->priv->components[id];
+ struct mlx5_devcom_component *comp;
+ void *ret;
+ int idx;
- up_read(&comp->sem);
+ comp = &devcom->priv->components[id];
+
+ if (*i == MLX5_DEVCOM_PORTS_SUPPORTED)
+ return NULL;
+ for (idx = *i; idx < MLX5_DEVCOM_PORTS_SUPPORTED; idx++) {
+ if (idx != devcom->idx) {
+ /* This can change concurrently, however the 'data' pointer will
+ * remain valid for the duration of the RCU read section.
+ */
+ if (!READ_ONCE(comp->ready))
+ return NULL;
+ ret = rcu_dereference(comp->device[idx].data);
+ if (ret)
+ break;
+ }
+ }
+
+ if (idx == MLX5_DEVCOM_PORTS_SUPPORTED) {
+ *i = idx;
+ return NULL;
+ }
+ *i = idx + 1;
+
+ return ret;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
index 9a496f4722da..d953a01b8eaa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h
@@ -6,7 +6,7 @@
#include <linux/mlx5/driver.h>
-#define MLX5_DEVCOM_PORTS_SUPPORTED 2
+#define MLX5_DEVCOM_PORTS_SUPPORTED 4
enum mlx5_devcom_components {
MLX5_DEVCOM_ESW_OFFLOADS,
@@ -30,20 +30,33 @@ void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom,
int mlx5_devcom_send_event(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id,
- int event,
+ int event, int rollback_event,
void *event_data);
-void mlx5_devcom_set_paired(struct mlx5_devcom *devcom,
- enum mlx5_devcom_components id,
- bool paired);
-bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom,
- enum mlx5_devcom_components id);
+void mlx5_devcom_comp_set_ready(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id,
+ bool ready);
+bool mlx5_devcom_comp_is_ready(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id);
-void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom,
- enum mlx5_devcom_components id);
-void *mlx5_devcom_get_peer_data_rcu(struct mlx5_devcom *devcom, enum mlx5_devcom_components id);
-void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom,
+bool mlx5_devcom_for_each_peer_begin(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id);
+void mlx5_devcom_for_each_peer_end(struct mlx5_devcom *devcom,
enum mlx5_devcom_components id);
+void *mlx5_devcom_get_next_peer_data(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id, int *i);
-#endif
+#define mlx5_devcom_for_each_peer_entry(devcom, id, data, i) \
+ for (i = 0, data = mlx5_devcom_get_next_peer_data(devcom, id, &i); \
+ data; \
+ data = mlx5_devcom_get_next_peer_data(devcom, id, &i))
+
+void *mlx5_devcom_get_next_peer_data_rcu(struct mlx5_devcom *devcom,
+ enum mlx5_devcom_components id, int *i);
+#define mlx5_devcom_for_each_peer_entry_rcu(devcom, id, data, i) \
+ for (i = 0, data = mlx5_devcom_get_next_peer_data_rcu(devcom, id, &i); \
+ data; \
+ data = mlx5_devcom_get_next_peer_data_rcu(devcom, id, &i))
+
+#endif
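A minimal sketch of the locked iteration pattern these helpers and macros enable, assuming the data registered for the component is a struct mlx5_eswitch pointer and handle_peer() is a hypothetical per-peer callback:

	struct mlx5_eswitch *peer_esw;
	int i;

	if (!mlx5_devcom_for_each_peer_begin(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
		return;
	mlx5_devcom_for_each_peer_entry(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
					peer_esw, i)
		handle_peer(peer_esw);
	mlx5_devcom_for_each_peer_end(devcom, MLX5_DEVCOM_ESW_OFFLOADS);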
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/events.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/events.h
new file mode 100644
index 000000000000..a0f7faea317b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/events.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+#ifndef __LIB_EVENTS_H__
+#define __LIB_EVENTS_H__
+
+#include "mlx5_core.h"
+
+#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
+#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF
+
+enum port_module_event_status_type {
+ MLX5_MODULE_STATUS_PLUGGED = 0x1,
+ MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
+ MLX5_MODULE_STATUS_ERROR = 0x3,
+ MLX5_MODULE_STATUS_DISABLED = 0x4,
+ MLX5_MODULE_STATUS_NUM,
+};
+
+enum port_module_event_error_type {
+ MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED = 0x0,
+ MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX = 0x1,
+ MLX5_MODULE_EVENT_ERROR_BUS_STUCK = 0x2,
+ MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT = 0x3,
+ MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST = 0x4,
+ MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER = 0x5,
+ MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE = 0x6,
+ MLX5_MODULE_EVENT_ERROR_BAD_CABLE = 0x7,
+ MLX5_MODULE_EVENT_ERROR_PCIE_POWER_SLOT_EXCEEDED = 0xc,
+ MLX5_MODULE_EVENT_ERROR_NUM,
+};
+
+struct mlx5_pme_stats {
+ u64 status_counters[MLX5_MODULE_STATUS_NUM];
+ u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
+};
+
+void mlx5_get_pme_stats(struct mlx5_core_dev *dev, struct mlx5_pme_stats *stats);
+int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data);
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
index ccf12f7db6f0..2b5826a785c4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
@@ -45,40 +45,6 @@ int mlx5_crdump_enable(struct mlx5_core_dev *dev);
void mlx5_crdump_disable(struct mlx5_core_dev *dev);
int mlx5_crdump_collect(struct mlx5_core_dev *dev, u32 *cr_data);
-/* TODO move to lib/events.h */
-
-#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF
-#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF
-
-enum port_module_event_status_type {
- MLX5_MODULE_STATUS_PLUGGED = 0x1,
- MLX5_MODULE_STATUS_UNPLUGGED = 0x2,
- MLX5_MODULE_STATUS_ERROR = 0x3,
- MLX5_MODULE_STATUS_DISABLED = 0x4,
- MLX5_MODULE_STATUS_NUM,
-};
-
-enum port_module_event_error_type {
- MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED = 0x0,
- MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX = 0x1,
- MLX5_MODULE_EVENT_ERROR_BUS_STUCK = 0x2,
- MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT = 0x3,
- MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST = 0x4,
- MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER = 0x5,
- MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE = 0x6,
- MLX5_MODULE_EVENT_ERROR_BAD_CABLE = 0x7,
- MLX5_MODULE_EVENT_ERROR_PCIE_POWER_SLOT_EXCEEDED = 0xc,
- MLX5_MODULE_EVENT_ERROR_NUM,
-};
-
-struct mlx5_pme_stats {
- u64 status_counters[MLX5_MODULE_STATUS_NUM];
- u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM];
-};
-
-void mlx5_get_pme_stats(struct mlx5_core_dev *dev, struct mlx5_pme_stats *stats);
-int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data);
-
static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
{
return devlink_net(priv_to_devlink(dev));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
index 8ff16318e32d..4450091e181a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
@@ -99,7 +99,7 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev)
int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
struct mlx5_mpfs *mpfs;
- if (!MLX5_ESWITCH_MANAGER(dev))
+ if (!MLX5_ESWITCH_MANAGER(dev) || l2table_size == 1)
return 0;
mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
index 696e45e2bd06..e223e0e46433 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.c
@@ -24,7 +24,8 @@ static const u32 tout_def_sw_val[MAX_TIMEOUT_TYPES] = {
[MLX5_TO_TEARDOWN_MS] = 3000,
[MLX5_TO_FSM_REACTIVATE_MS] = 5000,
[MLX5_TO_RECLAIM_PAGES_MS] = 5000,
- [MLX5_TO_RECLAIM_VFS_PAGES_MS] = 120000
+ [MLX5_TO_RECLAIM_VFS_PAGES_MS] = 120000,
+ [MLX5_TO_RESET_UNLOAD_MS] = 300000
};
static void tout_set(struct mlx5_core_dev *dev, u64 val, enum mlx5_timeouts_types type)
@@ -118,7 +119,8 @@ u64 _mlx5_tout_ms(struct mlx5_core_dev *dev, enum mlx5_timeouts_types type)
#define MLX5_TIMEOUT_FILL(fld, reg_out, dev, to_type, to_extra) \
({ \
u64 fw_to = MLX5_TIMEOUT_QUERY(fld, reg_out); \
- tout_set(dev, fw_to + (to_extra), to_type); \
+ if (fw_to) \
+ tout_set(dev, fw_to + (to_extra), to_type); \
fw_to; \
})
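The new fw_to guard keeps the software default when firmware reports 0 for a timeout it does not implement. For example, on firmware that predates reset_unload_to:

	MLX5_TIMEOUT_FILL(reset_unload_to, out, dev, MLX5_TO_RESET_UNLOAD_MS, 0);
	/* fw_to == 0, so tout_set() is skipped and the 300000 ms default
	 * from tout_def_sw_val[] stays in effect.
	 */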
@@ -146,6 +148,7 @@ static int tout_query_dtor(struct mlx5_core_dev *dev)
MLX5_TIMEOUT_FILL(fsm_reactivate_to, out, dev, MLX5_TO_FSM_REACTIVATE_MS, 0);
MLX5_TIMEOUT_FILL(reclaim_pages_to, out, dev, MLX5_TO_RECLAIM_PAGES_MS, 0);
MLX5_TIMEOUT_FILL(reclaim_vfs_pages_to, out, dev, MLX5_TO_RECLAIM_VFS_PAGES_MS, 0);
+ MLX5_TIMEOUT_FILL(reset_unload_to, out, dev, MLX5_TO_RESET_UNLOAD_MS, 0);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
index bc9e9aeda847..99e0a05526fe 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/tout.h
@@ -26,6 +26,7 @@ enum mlx5_timeouts_types {
MLX5_TO_FSM_REACTIVATE_MS,
MLX5_TO_RECLAIM_PAGES_MS,
MLX5_TO_RECLAIM_VFS_PAGES_MS,
+ MLX5_TO_RESET_UNLOAD_MS,
MAX_TIMEOUT_TYPES
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index d6ee016deae1..88dbea6631d5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -619,6 +619,9 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
if (MLX5_CAP_GEN_MAX(dev, pci_sync_for_fw_update_event))
MLX5_SET(cmd_hca_cap, set_hca_cap, pci_sync_for_fw_update_event, 1);
+ if (MLX5_CAP_GEN_MAX(dev, pci_sync_for_fw_update_with_driver_unload))
+ MLX5_SET(cmd_hca_cap, set_hca_cap,
+ pci_sync_for_fw_update_with_driver_unload, 1);
if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports))
MLX5_SET(cmd_hca_cap,
@@ -1118,7 +1121,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_devcom_unregister_device(dev->priv.devcom);
}
-static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot, u64 timeout)
+static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeout)
{
int err;
@@ -1183,28 +1186,56 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot, u64 timeout
goto reclaim_boot_pages;
}
+ return 0;
+
+reclaim_boot_pages:
+ mlx5_reclaim_startup_pages(dev);
+err_disable_hca:
+ mlx5_core_disable_hca(dev, 0);
+stop_health_poll:
+ mlx5_stop_health_poll(dev, boot);
+err_cmd_cleanup:
+ mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
+ mlx5_cmd_cleanup(dev);
+
+ return err;
+}
+
+static void mlx5_function_disable(struct mlx5_core_dev *dev, bool boot)
+{
+ mlx5_reclaim_startup_pages(dev);
+ mlx5_core_disable_hca(dev, 0);
+ mlx5_stop_health_poll(dev, boot);
+ mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
+ mlx5_cmd_cleanup(dev);
+}
+
+static int mlx5_function_open(struct mlx5_core_dev *dev)
+{
+ int err;
+
err = set_hca_ctrl(dev);
if (err) {
mlx5_core_err(dev, "set_hca_ctrl failed\n");
- goto reclaim_boot_pages;
+ return err;
}
err = set_hca_cap(dev);
if (err) {
mlx5_core_err(dev, "set_hca_cap failed\n");
- goto reclaim_boot_pages;
+ return err;
}
err = mlx5_satisfy_startup_pages(dev, 0);
if (err) {
mlx5_core_err(dev, "failed to allocate init pages\n");
- goto reclaim_boot_pages;
+ return err;
}
err = mlx5_cmd_init_hca(dev, sw_owner_id);
if (err) {
mlx5_core_err(dev, "init hca failed\n");
- goto reclaim_boot_pages;
+ return err;
}
mlx5_set_driver_version(dev);
@@ -1212,26 +1243,13 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot, u64 timeout
err = mlx5_query_hca_caps(dev);
if (err) {
mlx5_core_err(dev, "query hca failed\n");
- goto reclaim_boot_pages;
+ return err;
}
mlx5_start_health_fw_log_up(dev);
-
return 0;
-
-reclaim_boot_pages:
- mlx5_reclaim_startup_pages(dev);
-err_disable_hca:
- mlx5_core_disable_hca(dev, 0);
-stop_health_poll:
- mlx5_stop_health_poll(dev, boot);
-err_cmd_cleanup:
- mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
- mlx5_cmd_cleanup(dev);
-
- return err;
}
-static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
+static int mlx5_function_close(struct mlx5_core_dev *dev)
{
int err;
@@ -1240,15 +1258,33 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
return err;
}
- mlx5_reclaim_startup_pages(dev);
- mlx5_core_disable_hca(dev, 0);
- mlx5_stop_health_poll(dev, boot);
- mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
- mlx5_cmd_cleanup(dev);
return 0;
}
+static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot, u64 timeout)
+{
+ int err;
+
+ err = mlx5_function_enable(dev, boot, timeout);
+ if (err)
+ return err;
+
+ err = mlx5_function_open(dev);
+ if (err)
+ mlx5_function_disable(dev, boot);
+ return err;
+}
+
+static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
+{
+ int err = mlx5_function_close(dev);
+
+ if (!err)
+ mlx5_function_disable(dev, boot);
+ return err;
+}
+
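The setup/teardown split is what allows the light-probe path added below to bring up only the command interface: enable/disable are paired on their own, without open/close. A sketch of the consuming call order, drawn from the mlx5_init_one_light() hunk later in this patch:

	err = mlx5_function_enable(dev, true, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
	...
	err = mlx5_query_hca_caps_light(dev); /* minimal caps only */
	...
	mlx5_function_disable(dev, true); /* on error or uninit */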
static int mlx5_load(struct mlx5_core_dev *dev)
{
int err;
@@ -1391,12 +1427,11 @@ static void mlx5_unload(struct mlx5_core_dev *dev)
mlx5_put_uars_page(dev, dev->priv.uar);
}
-int mlx5_init_one(struct mlx5_core_dev *dev)
+int mlx5_init_one_devl_locked(struct mlx5_core_dev *dev)
{
- struct devlink *devlink = priv_to_devlink(dev);
+ bool light_probe = mlx5_dev_is_lightweight(dev);
int err = 0;
- devl_lock(devlink);
mutex_lock(&dev->intf_state_mutex);
dev->state = MLX5_DEVICE_STATE_UP;
@@ -1410,9 +1445,14 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
goto function_teardown;
}
- err = mlx5_devlink_params_register(priv_to_devlink(dev));
- if (err)
- goto err_devlink_params_reg;
+ /* In case of light probe, devlink params are already registered.
+ * Hence, don't register them again.
+ */
+ if (!light_probe) {
+ err = mlx5_devlink_params_register(priv_to_devlink(dev));
+ if (err)
+ goto err_devlink_params_reg;
+ }
err = mlx5_load(dev);
if (err)
@@ -1425,14 +1465,14 @@ int mlx5_init_one(struct mlx5_core_dev *dev)
goto err_register;
mutex_unlock(&dev->intf_state_mutex);
- devl_unlock(devlink);
return 0;
err_register:
clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
mlx5_unload(dev);
err_load:
- mlx5_devlink_params_unregister(priv_to_devlink(dev));
+ if (!light_probe)
+ mlx5_devlink_params_unregister(priv_to_devlink(dev));
err_devlink_params_reg:
mlx5_cleanup_once(dev);
function_teardown:
@@ -1440,6 +1480,16 @@ function_teardown:
err_function:
dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
mutex_unlock(&dev->intf_state_mutex);
+ return err;
+}
+
+int mlx5_init_one(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ int err;
+
+ devl_lock(devlink);
+ err = mlx5_init_one_devl_locked(dev);
devl_unlock(devlink);
return err;
}
@@ -1557,6 +1607,100 @@ void mlx5_unload_one(struct mlx5_core_dev *dev, bool suspend)
devl_unlock(devlink);
}
+/* In case of light probe, we don't need a full query of hca_caps, only the caps below.
+ * A full query of hca_caps will be done when the device reloads.
+ */
+static int mlx5_query_hca_caps_light(struct mlx5_core_dev *dev)
+{
+ int err;
+
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
+ if (err)
+ return err;
+
+ if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, nic_flow_table) ||
+ MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN_64(dev, general_obj_types) &
+ MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_VDPA_EMULATION);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int mlx5_init_one_light(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ int err;
+
+ dev->state = MLX5_DEVICE_STATE_UP;
+ err = mlx5_function_enable(dev, true, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
+ if (err) {
+ mlx5_core_warn(dev, "mlx5_function_enable err=%d\n", err);
+ goto out;
+ }
+
+ err = mlx5_query_hca_caps_light(dev);
+ if (err) {
+ mlx5_core_warn(dev, "mlx5_query_hca_caps_light err=%d\n", err);
+ goto query_hca_caps_err;
+ }
+
+ devl_lock(devlink);
+ err = mlx5_devlink_params_register(priv_to_devlink(dev));
+ devl_unlock(devlink);
+ if (err) {
+ mlx5_core_warn(dev, "mlx5_devlink_param_reg err = %d\n", err);
+ goto query_hca_caps_err;
+ }
+
+ return 0;
+
+query_hca_caps_err:
+ mlx5_function_disable(dev, true);
+out:
+ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+ return err;
+}
+
+void mlx5_uninit_one_light(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+
+ devl_lock(devlink);
+ mlx5_devlink_params_unregister(priv_to_devlink(dev));
+ devl_unlock(devlink);
+ if (dev->state != MLX5_DEVICE_STATE_UP)
+ return;
+ mlx5_function_disable(dev, true);
+}
+
+/* The xxx_light() functions are used to configure the device without a full
+ * init (light init). e.g.: there is no point in reloading a device into the
+ * light state, hence mlx5_load_one_light() isn't needed.
+ */
+
+void mlx5_unload_one_light(struct mlx5_core_dev *dev)
+{
+ if (dev->state != MLX5_DEVICE_STATE_UP)
+ return;
+ mlx5_function_disable(dev, false);
+ dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
+}
+
static const int types[] = {
MLX5_CAP_GENERAL,
MLX5_CAP_GENERAL_2,
@@ -1809,7 +1953,7 @@ static void remove_one(struct pci_dev *pdev)
mlx5_drain_fw_reset(dev);
mlx5_drain_health_wq(dev);
devlink_unregister(devlink);
- mlx5_sriov_disable(pdev);
+ mlx5_sriov_disable(pdev, false);
mlx5_thermal_uninit(dev);
mlx5_crdump_disable(dev);
mlx5_uninit_one(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 229520405d4a..c4be257c043d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -195,7 +195,7 @@ void mlx5_sriov_cleanup(struct mlx5_core_dev *dev);
int mlx5_sriov_attach(struct mlx5_core_dev *dev);
void mlx5_sriov_detach(struct mlx5_core_dev *dev);
int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs);
-void mlx5_sriov_disable(struct pci_dev *pdev);
+void mlx5_sriov_disable(struct pci_dev *pdev, bool num_vf_change);
int mlx5_core_sriov_set_msix_vec_count(struct pci_dev *vf, int msix_vec_count);
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id);
int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id);
@@ -240,11 +240,14 @@ int mlx5_attach_device(struct mlx5_core_dev *dev);
void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend);
int mlx5_register_device(struct mlx5_core_dev *dev);
void mlx5_unregister_device(struct mlx5_core_dev *dev);
+void mlx5_dev_set_lightweight(struct mlx5_core_dev *dev);
+bool mlx5_dev_is_lightweight(struct mlx5_core_dev *dev);
struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev);
void mlx5_dev_list_lock(void);
void mlx5_dev_list_unlock(void);
int mlx5_dev_list_trylock(void);
+void mlx5_fw_reporters_create(struct mlx5_core_dev *dev);
int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size);
int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
@@ -307,16 +310,20 @@ static inline bool mlx5_core_is_sf(const struct mlx5_core_dev *dev)
int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx);
void mlx5_mdev_uninit(struct mlx5_core_dev *dev);
int mlx5_init_one(struct mlx5_core_dev *dev);
+int mlx5_init_one_devl_locked(struct mlx5_core_dev *dev);
void mlx5_uninit_one(struct mlx5_core_dev *dev);
void mlx5_unload_one(struct mlx5_core_dev *dev, bool suspend);
void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev, bool suspend);
int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery);
int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery);
+int mlx5_init_one_light(struct mlx5_core_dev *dev);
+void mlx5_uninit_one_light(struct mlx5_core_dev *dev);
+void mlx5_unload_one_light(struct mlx5_core_dev *dev);
-int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap, u16 function_id,
+int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap, u16 vport,
u16 opmod);
-#define mlx5_vport_get_other_func_general_cap(dev, fid, out) \
- mlx5_vport_get_other_func_cap(dev, fid, out, MLX5_CAP_GENERAL)
+#define mlx5_vport_get_other_func_general_cap(dev, vport, out) \
+ mlx5_vport_get_other_func_cap(dev, vport, out, MLX5_CAP_GENERAL)
void mlx5_events_work_enqueue(struct mlx5_core_dev *dev, struct work_struct *work);
static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev)
@@ -331,4 +338,31 @@ bool mlx5_rdma_supported(struct mlx5_core_dev *dev);
bool mlx5_vnet_supported(struct mlx5_core_dev *dev);
bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev);
+static inline u16 mlx5_core_ec_vf_vport_base(const struct mlx5_core_dev *dev)
+{
+ return MLX5_CAP_GEN_2(dev, ec_vf_vport_base);
+}
+
+static inline u16 mlx5_core_ec_sriov_enabled(const struct mlx5_core_dev *dev)
+{
+ return mlx5_core_is_ecpf(dev) && mlx5_core_ec_vf_vport_base(dev);
+}
+
+static inline bool mlx5_core_is_ec_vf_vport(const struct mlx5_core_dev *dev, u16 vport_num)
+{
+ int base_vport = mlx5_core_ec_vf_vport_base(dev);
+ int max_vport = base_vport + mlx5_core_max_ec_vfs(dev);
+
+ if (!mlx5_core_ec_sriov_enabled(dev))
+ return false;
+
+ return (vport_num >= base_vport && vport_num < max_vport);
+}
+
+static inline int mlx5_vport_to_func_id(const struct mlx5_core_dev *dev, u16 vport, bool ec_vf_func)
+{
+ return ec_vf_func ? vport - mlx5_core_ec_vf_vport_base(dev)
+ : vport;
+}
+
#endif /* __MLX5_CORE_H__ */
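A worked example of the new EC VF helpers, using hypothetical capability values ec_vf_vport_base = 0x100 and max_ec_vfs = 4 on an ECPF:

	mlx5_core_is_ec_vf_vport(dev, 0x102);     /* true: 0x100 <= 0x102 < 0x104 */
	mlx5_vport_to_func_id(dev, 0x102, true);  /* 0x102 - 0x100 = 2 */
	mlx5_vport_to_func_id(dev, 0x102, false); /* 0x102, passed through */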
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index 95dc67fb3001..dcf58efac159 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -79,7 +79,13 @@ static u16 func_id_to_type(struct mlx5_core_dev *dev, u16 func_id, bool ec_funct
if (!func_id)
return mlx5_core_is_ecpf(dev) && !ec_function ? MLX5_HOST_PF : MLX5_PF;
- return func_id <= mlx5_core_max_vfs(dev) ? MLX5_VF : MLX5_SF;
+ if (func_id <= max(mlx5_core_max_vfs(dev), mlx5_core_max_ec_vfs(dev))) {
+ if (ec_function)
+ return MLX5_EC_VF;
+ else
+ return MLX5_VF;
+ }
+ return MLX5_SF;
}
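With the EC VF page type in place, func_id_to_type() resolves as follows (hypothetical caps: max_vfs = 8, max_ec_vfs = 4):

	func_id_to_type(dev, 0, false); /* MLX5_PF, or MLX5_HOST_PF on an ECPF */
	func_id_to_type(dev, 3, true);  /* MLX5_EC_VF */
	func_id_to_type(dev, 3, false); /* MLX5_VF */
	func_id_to_type(dev, 9, false); /* MLX5_SF: above max(max_vfs, max_ec_vfs) */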
static u32 mlx5_get_ec_function(u32 function)
@@ -730,6 +736,9 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
WARN(dev->priv.page_counters[MLX5_HOST_PF],
"External host PF FW pages counter is %d after reclaiming all pages\n",
dev->priv.page_counters[MLX5_HOST_PF]);
+ WARN(dev->priv.page_counters[MLX5_EC_VF],
+ "EC VFs FW pages counter is %d after reclaiming all pages\n",
+ dev->priv.page_counters[MLX5_EC_VF]);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
index 98412bd5a696..cba2a4afb5fd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c
@@ -41,6 +41,15 @@ struct mlx5_irq_table {
struct mlx5_irq_pool *sf_comp_pool;
};
+static int mlx5_core_func_to_vport(const struct mlx5_core_dev *dev,
+ int func,
+ bool ec_vf_func)
+{
+ if (!ec_vf_func)
+ return func;
+ return mlx5_core_ec_vf_vport_base(dev) + func - 1;
+}
+
/**
* mlx5_get_default_msix_vec_count - Get the default number of MSI-X vectors
* to be assigned to each VF.
@@ -79,6 +88,8 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
void *hca_cap = NULL, *query_cap = NULL, *cap;
int num_vf_msix, min_msix, max_msix;
+ bool ec_vf_function;
+ int vport;
int ret;
num_vf_msix = MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix);
@@ -104,7 +115,9 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
goto out;
}
- ret = mlx5_vport_get_other_func_general_cap(dev, function_id, query_cap);
+ ec_vf_function = mlx5_core_ec_sriov_enabled(dev);
+ vport = mlx5_core_func_to_vport(dev, function_id, ec_vf_function);
+ ret = mlx5_vport_get_other_func_general_cap(dev, vport, query_cap);
if (ret)
goto out;
@@ -115,6 +128,7 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int function_id,
MLX5_SET(set_hca_cap_in, hca_cap, opcode, MLX5_CMD_OP_SET_HCA_CAP);
MLX5_SET(set_hca_cap_in, hca_cap, other_function, 1);
+ MLX5_SET(set_hca_cap_in, hca_cap, ec_vf_function, ec_vf_function);
MLX5_SET(set_hca_cap_in, hca_cap, function_id, function_id);
MLX5_SET(set_hca_cap_in, hca_cap, op_mod,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
index 540cf05f6373..a42f6cd99b74 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/rdma.c
@@ -30,9 +30,8 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev)
struct mlx5_flow_spec *spec;
struct mlx5_flow_table *ft;
struct mlx5_flow_group *fg;
- void *match_criteria;
+ struct mlx5_eswitch *esw;
u32 *flow_group_in;
- void *misc;
int err;
if (!(MLX5_CAP_FLOWTABLE_RDMA_RX(dev, ft_support) &&
@@ -63,12 +62,8 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev)
goto free;
}
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
- MLX5_MATCH_MISC_PARAMETERS);
- match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
- match_criteria);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria,
- misc_parameters.source_port);
+ esw = dev->priv.eswitch;
+ mlx5_esw_set_flow_group_source_port(esw, flow_group_in, 0);
fg = mlx5_create_flow_group(ft, flow_group_in);
if (IS_ERR(fg)) {
@@ -77,14 +72,7 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev)
goto destroy_flow_table;
}
- spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
- misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
- misc_parameters);
- MLX5_SET(fte_match_set_misc, misc, source_port,
- dev->priv.eswitch->manager_vport);
- misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
- misc_parameters);
- MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+ mlx5_esw_set_spec_source_port(esw, esw->manager_vport, spec);
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, NULL, 0);
@@ -115,7 +103,7 @@ free:
static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev)
{
- mlx5_core_roce_gid_set(dev, 0, 0, 0,
+ mlx5_core_roce_gid_set(dev, 0, MLX5_ROCE_VERSION_2, 0,
NULL, NULL, false, 0, 1);
}
@@ -135,7 +123,7 @@ static int mlx5_rdma_add_roce_addr(struct mlx5_core_dev *dev)
mlx5_rdma_make_default_gid(dev, &gid);
return mlx5_core_roce_gid_set(dev, 0,
- MLX5_ROCE_VERSION_1,
+ MLX5_ROCE_VERSION_2,
0, gid.raw, mac,
false, 0, 1);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
index 0692363cf80e..8fe82f1191bb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c
@@ -3,6 +3,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/device.h>
+#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"
#include "dev.h"
#include "devlink.h"
@@ -28,6 +29,10 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
mdev->priv.adev_idx = adev->id;
sf_dev->mdev = mdev;
+ /* Only local SFs do light probe */
+ if (MLX5_ESWITCH_MANAGER(sf_dev->parent_mdev))
+ mlx5_dev_set_lightweight(mdev);
+
err = mlx5_mdev_init(mdev, MLX5_SF_PROF);
if (err) {
mlx5_core_warn(mdev, "mlx5_mdev_init on err=%d\n", err);
@@ -41,7 +46,10 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia
goto remap_err;
}
- err = mlx5_init_one(mdev);
+ if (MLX5_ESWITCH_MANAGER(sf_dev->parent_mdev))
+ err = mlx5_init_one_light(mdev);
+ else
+ err = mlx5_init_one(mdev);
if (err) {
mlx5_core_warn(mdev, "mlx5_init_one err=%d\n", err);
goto init_one_err;
@@ -65,7 +73,10 @@ static void mlx5_sf_dev_remove(struct auxiliary_device *adev)
mlx5_drain_health_wq(sf_dev->mdev);
devlink_unregister(devlink);
- mlx5_uninit_one(sf_dev->mdev);
+ if (mlx5_dev_is_lightweight(sf_dev->mdev))
+ mlx5_uninit_one_light(sf_dev->mdev);
+ else
+ mlx5_uninit_one(sf_dev->mdev);
iounmap(sf_dev->mdev->iseg);
mlx5_mdev_uninit(sf_dev->mdev);
mlx5_devlink_free(devlink);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
index 7d955a4d9f14..6a3fa30b2bf2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c
@@ -28,7 +28,6 @@ struct mlx5_sf_table {
struct mutex sf_state_lock; /* Serializes sf state among user cmds & vhca event handler. */
struct notifier_block esw_nb;
struct notifier_block vhca_nb;
- u8 ecpu: 1;
};
static struct mlx5_sf *
@@ -283,7 +282,7 @@ out:
static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
const struct devlink_port_new_attrs *new_attr,
struct netlink_ext_ack *extack,
- unsigned int *new_port_index)
+ struct devlink_port **dl_port)
{
struct mlx5_eswitch *esw = dev->priv.eswitch;
struct mlx5_sf *sf;
@@ -297,7 +296,7 @@ static int mlx5_sf_add(struct mlx5_core_dev *dev, struct mlx5_sf_table *table,
new_attr->controller, new_attr->sfnum);
if (err)
goto esw_err;
- *new_port_index = sf->port_index;
+ *dl_port = &sf->dl_port;
trace_mlx5_sf_add(dev, sf->port_index, sf->controller, sf->hw_fn_id, new_attr->sfnum);
return 0;
@@ -339,7 +338,7 @@ mlx5_sf_new_check_attr(struct mlx5_core_dev *dev, const struct devlink_port_new_
int mlx5_devlink_sf_port_new(struct devlink *devlink,
const struct devlink_port_new_attrs *new_attr,
struct netlink_ext_ack *extack,
- unsigned int *new_port_index)
+ struct devlink_port **dl_port)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_sf_table *table;
@@ -355,7 +354,7 @@ int mlx5_devlink_sf_port_new(struct devlink *devlink,
"Port add is only supported in eswitch switchdev mode or SF ports are disabled.");
return -EOPNOTSUPP;
}
- err = mlx5_sf_add(dev, table, new_attr, extack, new_port_index);
+ err = mlx5_sf_add(dev, table, new_attr, extack, dl_port);
mlx5_sf_table_put(table);
return err;
}
@@ -379,7 +378,8 @@ static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf)
}
}
-int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index,
+int mlx5_devlink_sf_port_del(struct devlink *devlink,
+ struct devlink_port *dl_port,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
@@ -394,7 +394,7 @@ int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index,
"Port del is only supported in eswitch switchdev mode or SF ports are disabled.");
return -EOPNOTSUPP;
}
- sf = mlx5_sf_lookup_by_index(table, port_index);
+ sf = mlx5_sf_lookup_by_index(table, dl_port->index);
if (!sf) {
err = -ENODEV;
goto sf_err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
index 3a480e06ecc0..860f9ddb7107 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h
@@ -21,8 +21,9 @@ void mlx5_sf_table_cleanup(struct mlx5_core_dev *dev);
int mlx5_devlink_sf_port_new(struct devlink *devlink,
const struct devlink_port_new_attrs *add_attr,
struct netlink_ext_ack *extack,
- unsigned int *new_port_index);
-int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index,
+ struct devlink_port **dl_port);
+int mlx5_devlink_sf_port_del(struct devlink *devlink,
+ struct devlink_port *dl_port,
struct netlink_ext_ack *extack);
int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port,
enum devlink_port_fn_state *state,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 20d7662c10fb..4e42a3b9b8ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -37,7 +37,7 @@
#include "mlx5_irq.h"
#include "eswitch.h"
-static int sriov_restore_guids(struct mlx5_core_dev *dev, int vf)
+static int sriov_restore_guids(struct mlx5_core_dev *dev, int vf, u16 func_id)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
struct mlx5_hca_vport_context *in;
@@ -59,7 +59,7 @@ static int sriov_restore_guids(struct mlx5_core_dev *dev, int vf)
!!(in->node_guid) * MLX5_HCA_VPORT_SEL_NODE_GUID |
!!(in->policy) * MLX5_HCA_VPORT_SEL_STATE_POLICY;
- err = mlx5_core_modify_hca_vport_context(dev, 1, 1, vf + 1, in);
+ err = mlx5_core_modify_hca_vport_context(dev, 1, 1, func_id, in);
if (err)
mlx5_core_warn(dev, "modify vport context failed, unable to restore VF %d settings\n", vf);
@@ -73,9 +73,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
int err, vf, num_msix_count;
-
- if (!MLX5_ESWITCH_MANAGER(dev))
- goto enable_vfs_hca;
+ int vport_num;
err = mlx5_eswitch_enable(dev->priv.eswitch, num_vfs);
if (err) {
@@ -84,7 +82,6 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
return err;
}
-enable_vfs_hca:
num_msix_count = mlx5_get_default_msix_vec_count(dev, num_vfs);
for (vf = 0; vf < num_vfs; vf++) {
/* Notify the VF before its enablement to let it set
@@ -108,7 +105,10 @@ enable_vfs_hca:
sriov->vfs_ctx[vf].enabled = 1;
if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) {
- err = sriov_restore_guids(dev, vf);
+ vport_num = mlx5_core_ec_sriov_enabled(dev) ?
+ mlx5_core_ec_vf_vport_base(dev) + vf
+ : vf + 1;
+ err = sriov_restore_guids(dev, vf, vport_num);
if (err) {
mlx5_core_warn(dev,
"failed to restore VF %d settings, err %d\n",
@@ -123,9 +123,11 @@ enable_vfs_hca:
}
static void
-mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
+mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf, bool num_vf_change)
{
struct mlx5_core_sriov *sriov = &dev->priv.sriov;
+ bool wait_for_ec_vf_pages = true;
+ bool wait_for_vf_pages = true;
int err;
int vf;
@@ -147,11 +149,30 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
mlx5_eswitch_disable_sriov(dev->priv.eswitch, clear_vf);
+ /* There are a number of scenarios when SRIOV is being disabled:
+ * 1. VFs or EC VFs were created and are now being set back to 0 (num_vf_change == true).
+ * - If EC SRIOV is enabled, this flow is happening on the
+ * embedded platform; wait only for the EC VF pages.
+ * - If EC SRIOV is not enabled, this flow is happening on a non-embedded
+ * platform; wait for the VF pages.
+ *
+ * 2. The driver is being unloaded. In this case wait for all pages.
+ */
+ if (num_vf_change) {
+ if (mlx5_core_ec_sriov_enabled(dev))
+ wait_for_vf_pages = false;
+ else
+ wait_for_ec_vf_pages = false;
+ }
+
+ if (wait_for_ec_vf_pages && mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_EC_VF]))
+ mlx5_core_warn(dev, "timeout reclaiming EC VFs pages\n");
+
/* For ECPFs, skip waiting for host VF pages until ECPF is destroyed */
if (mlx5_core_is_ecpf(dev))
return;
- if (mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_VF]))
+ if (wait_for_vf_pages && mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_VF]))
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
}
@@ -172,12 +193,12 @@ static int mlx5_sriov_enable(struct pci_dev *pdev, int num_vfs)
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
mlx5_core_warn(dev, "pci_enable_sriov failed : %d\n", err);
- mlx5_device_disable_sriov(dev, num_vfs, true);
+ mlx5_device_disable_sriov(dev, num_vfs, true, true);
}
return err;
}
-void mlx5_sriov_disable(struct pci_dev *pdev)
+void mlx5_sriov_disable(struct pci_dev *pdev, bool num_vf_change)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
struct devlink *devlink = priv_to_devlink(dev);
@@ -185,7 +206,7 @@ void mlx5_sriov_disable(struct pci_dev *pdev)
pci_disable_sriov(pdev);
devl_lock(devlink);
- mlx5_device_disable_sriov(dev, num_vfs, true);
+ mlx5_device_disable_sriov(dev, num_vfs, true, num_vf_change);
devl_unlock(devlink);
}
@@ -200,7 +221,7 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (num_vfs)
err = mlx5_sriov_enable(pdev, num_vfs);
else
- mlx5_sriov_disable(pdev);
+ mlx5_sriov_disable(pdev, true);
if (!err)
sriov->num_vfs = num_vfs;
@@ -245,7 +266,7 @@ void mlx5_sriov_detach(struct mlx5_core_dev *dev)
if (!mlx5_core_is_pf(dev))
return;
- mlx5_device_disable_sriov(dev, pci_num_vf(dev->pdev), false);
+ mlx5_device_disable_sriov(dev, pci_num_vf(dev->pdev), false, false);
}
static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev)
@@ -284,6 +305,7 @@ int mlx5_sriov_init(struct mlx5_core_dev *dev)
total_vfs = pci_sriov_get_totalvfs(pdev);
sriov->max_vfs = mlx5_get_max_vfs(dev);
sriov->num_vfs = pci_num_vf(pdev);
+ sriov->max_ec_vfs = mlx5_core_ec_sriov_enabled(dev) ? pci_sriov_get_totalvfs(dev->pdev) : 0;
sriov->vfs_ctx = kcalloc(total_vfs, sizeof(*sriov->vfs_ctx), GFP_KERNEL);
if (!sriov->vfs_ctx)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
index 0f783e7906cb..e739ec6cdf90 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
@@ -2077,8 +2077,9 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn,
struct mlx5dr_action *action;
u8 peer_vport;
- peer_vport = vhca_id_valid && (vhca_id != dmn->info.caps.gvmi);
- vport_dmn = peer_vport ? dmn->peer_dmn : dmn;
+ peer_vport = vhca_id_valid && mlx5_core_is_pf(dmn->mdev) &&
+ (vhca_id != dmn->info.caps.gvmi);
+ vport_dmn = peer_vport ? dmn->peer_dmn[vhca_id] : dmn;
if (!vport_dmn) {
mlx5dr_dbg(dmn, "No peer vport domain for given vhca_id\n");
return NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
index 1aa525e509f1..7491911ebcb5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c
@@ -34,6 +34,7 @@ int mlx5dr_cmd_query_esw_vport_context(struct mlx5_core_dev *mdev,
int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
u16 vport_number, u16 *gvmi)
{
+ bool ec_vf_func = other_vport ? mlx5_core_is_ec_vf_vport(mdev, vport_number) : false;
u32 in[MLX5_ST_SZ_DW(query_hca_cap_in)] = {};
int out_size;
void *out;
@@ -46,7 +47,8 @@ int mlx5dr_cmd_query_gvmi(struct mlx5_core_dev *mdev, bool other_vport,
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, other_function, other_vport);
- MLX5_SET(query_hca_cap_in, in, function_id, vport_number);
+ MLX5_SET(query_hca_cap_in, in, function_id, mlx5_vport_to_func_id(mdev, vport_number, ec_vf_func));
+ MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func);
MLX5_SET(query_hca_cap_in, in, op_mod,
MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE << 1 |
HCA_CAP_OPMOD_GET_CUR);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
index 9a2dfe6ebe31..75dc85dc24ef 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c
@@ -555,17 +555,18 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *dmn)
}
void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
- struct mlx5dr_domain *peer_dmn)
+ struct mlx5dr_domain *peer_dmn,
+ u8 peer_idx)
{
mlx5dr_domain_lock(dmn);
- if (dmn->peer_dmn)
- refcount_dec(&dmn->peer_dmn->refcount);
+ if (dmn->peer_dmn[peer_idx])
+ refcount_dec(&dmn->peer_dmn[peer_idx]->refcount);
- dmn->peer_dmn = peer_dmn;
+ dmn->peer_dmn[peer_idx] = peer_dmn;
- if (dmn->peer_dmn)
- refcount_inc(&dmn->peer_dmn->refcount);
+ if (dmn->peer_dmn[peer_idx])
+ refcount_inc(&dmn->peer_dmn[peer_idx]->refcount);
mlx5dr_domain_unlock(dmn);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
index 2010d4ac6519..69d7a8f3c402 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c
@@ -1647,6 +1647,7 @@ dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
u8 *tag)
{
struct mlx5dr_match_misc *misc = &value->misc;
+ int id = misc->source_eswitch_owner_vhca_id;
struct mlx5dr_cmd_vport_cap *vport_cap;
struct mlx5dr_domain *dmn = sb->dmn;
struct mlx5dr_domain *vport_dmn;
@@ -1657,11 +1658,11 @@ dr_ste_v0_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
if (sb->vhca_id_valid) {
/* Find port GVMI based on the eswitch_owner_vhca_id */
- if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
+ if (id == dmn->info.caps.gvmi)
vport_dmn = dmn;
- else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
- dmn->peer_dmn->info.caps.gvmi))
- vport_dmn = dmn->peer_dmn;
+ else if (id < MLX5_MAX_PORTS && dmn->peer_dmn[id] &&
+ (id == dmn->peer_dmn[id]->info.caps.gvmi))
+ vport_dmn = dmn->peer_dmn[id];
else
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
index 4c0704ad166b..f4ef0b22b991 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c
@@ -1979,6 +1979,7 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
u8 *tag)
{
struct mlx5dr_match_misc *misc = &value->misc;
+ int id = misc->source_eswitch_owner_vhca_id;
struct mlx5dr_cmd_vport_cap *vport_cap;
struct mlx5dr_domain *dmn = sb->dmn;
struct mlx5dr_domain *vport_dmn;
@@ -1988,11 +1989,11 @@ static int dr_ste_v1_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
if (sb->vhca_id_valid) {
/* Find port GVMI based on the eswitch_owner_vhca_id */
- if (misc->source_eswitch_owner_vhca_id == dmn->info.caps.gvmi)
+ if (id == dmn->info.caps.gvmi)
vport_dmn = dmn;
- else if (dmn->peer_dmn && (misc->source_eswitch_owner_vhca_id ==
- dmn->peer_dmn->info.caps.gvmi))
- vport_dmn = dmn->peer_dmn;
+ else if (id < MLX5_MAX_PORTS && dmn->peer_dmn[id] &&
+ (id == dmn->peer_dmn[id]->info.caps.gvmi))
+ vport_dmn = dmn->peer_dmn[id];
else
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
index 678a993ab053..1622dbbe6b97 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h
@@ -935,7 +935,7 @@ struct mlx5dr_domain_info {
};
struct mlx5dr_domain {
- struct mlx5dr_domain *peer_dmn;
+ struct mlx5dr_domain *peer_dmn[MLX5_MAX_PORTS];
struct mlx5_core_dev *mdev;
u32 pdn;
struct mlx5_uars_page *uar;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index cc215beb7436..6aac5f006bf8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -780,14 +780,15 @@ restore_fte:
}
static int mlx5_cmd_dr_set_peer(struct mlx5_flow_root_namespace *ns,
- struct mlx5_flow_root_namespace *peer_ns)
+ struct mlx5_flow_root_namespace *peer_ns,
+ u8 peer_idx)
{
struct mlx5dr_domain *peer_domain = NULL;
if (peer_ns)
peer_domain = peer_ns->fs_dr_domain.dr_domain;
mlx5dr_domain_set_peer(ns->fs_dr_domain.dr_domain,
- peer_domain);
+ peer_domain, peer_idx);
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
index d1c04f43d86d..24cbb33ecd6c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h
@@ -48,7 +48,8 @@ int mlx5dr_domain_destroy(struct mlx5dr_domain *domain);
int mlx5dr_domain_sync(struct mlx5dr_domain *domain, u32 flags);
void mlx5dr_domain_set_peer(struct mlx5dr_domain *dmn,
- struct mlx5dr_domain *peer_dmn);
+ struct mlx5dr_domain *peer_dmn,
+ u8 peer_idx);
struct mlx5dr_table *
mlx5dr_table_create(struct mlx5dr_domain *domain, u32 level, u32 flags,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index ba7e3df22413..5a31fb47ffa5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -288,7 +288,8 @@ int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
MLX5_SET(query_nic_vport_context_in, in, allowed_list_type, list_type);
MLX5_SET(query_nic_vport_context_in, in, vport_number, vport);
- MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
+ if (vport || mlx5_core_is_ecpf(dev))
+ MLX5_SET(query_nic_vport_context_in, in, other_vport, 1);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
if (err)
@@ -1160,23 +1161,26 @@ u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
-int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out,
+int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 vport, void *out,
u16 opmod)
{
+ bool ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport);
u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)] = {};
opmod = (opmod << 1) | (HCA_CAP_OPMOD_GET_MAX & 0x01);
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
- MLX5_SET(query_hca_cap_in, in, function_id, function_id);
+ MLX5_SET(query_hca_cap_in, in, function_id, mlx5_vport_to_func_id(dev, vport, ec_vf_func));
MLX5_SET(query_hca_cap_in, in, other_function, true);
+ MLX5_SET(query_hca_cap_in, in, ec_vf_function, ec_vf_func);
return mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
}
EXPORT_SYMBOL_GPL(mlx5_vport_get_other_func_cap);
int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap,
- u16 function_id, u16 opmod)
+ u16 vport, u16 opmod)
{
+ bool ec_vf_func = mlx5_core_is_ec_vf_vport(dev, vport);
int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
void *set_hca_cap;
void *set_ctx;
@@ -1190,8 +1194,10 @@ int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap
MLX5_SET(set_hca_cap_in, set_ctx, op_mod, opmod << 1);
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
memcpy(set_hca_cap, hca_cap, MLX5_ST_SZ_BYTES(cmd_hca_cap));
- MLX5_SET(set_hca_cap_in, set_ctx, function_id, function_id);
+ MLX5_SET(set_hca_cap_in, set_ctx, function_id,
+ mlx5_vport_to_func_id(dev, vport, ec_vf_func));
MLX5_SET(set_hca_cap_in, set_ctx, other_function, true);
+ MLX5_SET(set_hca_cap_in, set_ctx, ec_vf_function, ec_vf_func);
ret = mlx5_cmd_exec_in(dev, set_hca_cap, set_ctx);
kfree(set_ctx);
diff --git a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h
index b001e5258091..47f6cc0401c3 100644
--- a/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h
+++ b/drivers/net/ethernet/mellanox/mlxfw/mlxfw_mfa2_format.h
@@ -44,7 +44,7 @@ MLXFW_MFA2_TLV(multi, struct mlxfw_mfa2_tlv_multi,
MLXFW_MFA2_TLV_MULTI_PART);
struct mlxfw_mfa2_tlv_psid {
- u8 psid[0];
+ DECLARE_FLEX_ARRAY(u8, psid);
} __packed;
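DECLARE_FLEX_ARRAY() is needed here because a C99 flexible array member cannot be the sole member of a struct; the macro (roughly) wraps it together with an empty struct so the type stays zero-sized while the compiler and fortify checks see a proper flexible array. An approximate expansion:

	struct mlxfw_mfa2_tlv_psid {
		struct {
			struct { } __empty_psid;
			u8 psid[];
		};
	} __packed;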
MLXFW_MFA2_TLV_VARSIZE(psid, struct mlxfw_mfa2_tlv_psid,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 22db0bb15c45..1ccf3b73ed72 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -1723,8 +1723,6 @@ static const struct devlink_ops mlxsw_devlink_ops = {
BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
.reload_down = mlxsw_devlink_core_bus_device_reload_down,
.reload_up = mlxsw_devlink_core_bus_device_reload_up,
- .port_split = mlxsw_devlink_port_split,
- .port_unsplit = mlxsw_devlink_port_unsplit,
.sb_pool_get = mlxsw_devlink_sb_pool_get,
.sb_pool_set = mlxsw_devlink_sb_pool_set,
.sb_port_pool_get = mlxsw_devlink_sb_port_pool_get,
@@ -3116,6 +3114,11 @@ u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
}
EXPORT_SYMBOL(mlxsw_core_res_get);
+static const struct devlink_port_ops mlxsw_devlink_port_ops = {
+ .port_split = mlxsw_devlink_port_split,
+ .port_unsplit = mlxsw_devlink_port_unsplit,
+};
+
static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
enum devlink_port_flavour flavour,
u8 slot_index, u32 port_number, bool split,
@@ -3150,7 +3153,8 @@ static int __mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u16 local_port,
devlink_port_linecard_set(devlink_port,
linecard->devlink_linecard);
}
- err = devl_port_register(devlink, devlink_port, local_port);
+ err = devl_port_register_with_ops(devlink, devlink_port, local_port,
+ &mlxsw_devlink_port_ops);
if (err)
memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
return err;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
index bd1a51a0a540..f0b2963ebac3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c
@@ -42,6 +42,7 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = {
MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_64_95, 0x34, 4),
MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_32_63, 0x38, 4),
MLXSW_AFK_ELEMENT_INFO_BUF(DST_IP_0_31, 0x3C, 4),
+ MLXSW_AFK_ELEMENT_INFO_U32(FDB_MISS, 0x40, 0, 1),
};
struct mlxsw_afk {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
index 3a037fe47211..65a4abadc7db 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h
@@ -35,6 +35,7 @@ enum mlxsw_afk_element {
MLXSW_AFK_ELEMENT_IP_DSCP,
MLXSW_AFK_ELEMENT_VIRT_ROUTER_MSB,
MLXSW_AFK_ELEMENT_VIRT_ROUTER_LSB,
+ MLXSW_AFK_ELEMENT_FDB_MISS,
MLXSW_AFK_ELEMENT_MAX,
};
@@ -69,7 +70,7 @@ struct mlxsw_afk_element_info {
MLXSW_AFK_ELEMENT_INFO(MLXSW_AFK_ELEMENT_TYPE_BUF, \
_element, _offset, 0, _size)
-#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x40
+#define MLXSW_AFK_ELEMENT_STORAGE_SIZE 0x44
struct mlxsw_afk_element_inst { /* element instance in actual block */
enum mlxsw_afk_element element;
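The size bump follows from the new element: FDB_MISS is a single bit, but it
is placed at byte offset 0x40 of the key storage blob, so the blob must grow
by one more 32-bit word, from 0x40 to 0x44 bytes.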
diff --git a/drivers/net/ethernet/mellanox/mlxsw/i2c.c b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
index 2c586c2308ae..41298835a11e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/i2c.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/i2c.c
@@ -751,7 +751,7 @@ static void mlxsw_i2c_remove(struct i2c_client *client)
int mlxsw_i2c_driver_register(struct i2c_driver *i2c_driver)
{
- i2c_driver->probe_new = mlxsw_i2c_probe;
+ i2c_driver->probe = mlxsw_i2c_probe;
i2c_driver->remove = mlxsw_i2c_remove;
return i2c_add_driver(i2c_driver);
}
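This is part of the tree-wide i2c conversion in which .probe was given the
one-argument signature that .probe_new used to carry. A sketch of the current
callback shape (example_probe is a placeholder name; the matched table entry,
when needed, comes from i2c_client_get_device_id() instead of a second probe
argument):

	static int example_probe(struct i2c_client *client)
	{
		/* The matched table entry is now fetched explicitly. */
		const struct i2c_device_id *id =
			i2c_client_get_device_id(client);

		return id ? 0 : -ENODEV;
	}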
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 02a327744a61..25a01dafde1b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -4337,8 +4337,8 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);
/* Join a router interface configured on the LAG, if one exists */

- err = mlxsw_sp_port_vlan_router_join(mlxsw_sp_port->default_vlan,
- lag_dev, extack);
+ err = mlxsw_sp_router_port_join_lag(mlxsw_sp_port, lag_dev,
+ extack);
if (err)
goto err_router_join;
@@ -5139,14 +5139,6 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
return notifier_from_errno(err);
}
-static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = {
- .notifier_call = mlxsw_sp_inetaddr_valid_event,
-};
-
-static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = {
- .notifier_call = mlxsw_sp_inet6addr_valid_event,
-};
-
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
{0, },
@@ -5191,12 +5183,9 @@ static int __init mlxsw_sp_module_init(void)
{
int err;
- register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
- register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
-
err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
if (err)
- goto err_sp1_core_driver_register;
+ return err;
err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
if (err)
@@ -5242,9 +5231,6 @@ err_sp3_core_driver_register:
mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
err_sp2_core_driver_register:
mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
-err_sp1_core_driver_register:
- unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
- unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
return err;
}
@@ -5258,8 +5244,6 @@ static void __exit mlxsw_sp_module_exit(void)
mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
- unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb);
- unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb);
}
module_init(mlxsw_sp_module_init);
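Note that the inetaddr/inet6addr validator notifiers are not being dropped
here: the handlers become static in spectrum_router.c (see the hunks below),
and their registration presumably moves into router init so that it is scoped
per driver instance rather than per module.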
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 4c22f8004514..231e364cbb7c 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -755,14 +755,6 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp);
void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp,
const struct net_device *macvlan_dev);
-int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
- unsigned long event, void *ptr);
-int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
- unsigned long event, void *ptr);
-int
-mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
- struct net_device *l3_dev,
- struct netlink_ext_ack *extack);
void
mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan);
void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
index 00c32320f891..4dea39f2b304 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c
@@ -123,10 +123,12 @@ const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = {
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(FDB_MISS, 0x00, 3, 1),
MLXSW_AFK_ELEMENT_INST_BUF(DMAC_0_31, 0x04, 4),
};
static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_1[] = {
+ MLXSW_AFK_ELEMENT_INST_U32(FDB_MISS, 0x00, 3, 1),
MLXSW_AFK_ELEMENT_INST_BUF(SMAC_0_31, 0x04, 4),
};
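Both MAC-oriented key blocks gain the FDB_MISS bit at the same position
(byte 0, bit 3, width 1), so a rule can match on the layer-2 miss indication
whichever of the two blocks ends up being used for the key.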
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
index 5416093c0e35..c8a356accdf8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c
@@ -221,7 +221,7 @@ start_again:
for (; i < rif_count; i++) {
struct mlxsw_sp_rif *rif = mlxsw_sp_rif_by_index(mlxsw_sp, i);
- if (!rif || !mlxsw_sp_rif_dev(rif))
+ if (!rif || !mlxsw_sp_rif_has_dev(rif))
continue;
err = mlxsw_sp_erif_entry_get(mlxsw_sp, &entry, rif,
counters_enabled);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
index 594cdcb90b3d..72917f09e806 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c
@@ -281,39 +281,38 @@ static int mlxsw_sp_flower_parse_actions(struct mlxsw_sp *mlxsw_sp,
return 0;
}
-static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
- struct flow_cls_offload *f,
- struct mlxsw_sp_flow_block *block)
+static int
+mlxsw_sp_flower_parse_meta_iif(struct mlxsw_sp_acl_rule_info *rulei,
+ const struct mlxsw_sp_flow_block *block,
+ const struct flow_match_meta *match,
+ struct netlink_ext_ack *extack)
{
- struct flow_rule *rule = flow_cls_offload_flow_rule(f);
struct mlxsw_sp_port *mlxsw_sp_port;
struct net_device *ingress_dev;
- struct flow_match_meta match;
- if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
+ if (!match->mask->ingress_ifindex)
return 0;
- flow_rule_match_meta(rule, &match);
- if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
- NL_SET_ERR_MSG_MOD(f->common.extack, "Unsupported ingress ifindex mask");
+ if (match->mask->ingress_ifindex != 0xFFFFFFFF) {
+ NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
return -EINVAL;
}
ingress_dev = __dev_get_by_index(block->net,
- match.key->ingress_ifindex);
+ match->key->ingress_ifindex);
if (!ingress_dev) {
- NL_SET_ERR_MSG_MOD(f->common.extack, "Can't find specified ingress port to match on");
+ NL_SET_ERR_MSG_MOD(extack, "Can't find specified ingress port to match on");
return -EINVAL;
}
if (!mlxsw_sp_port_dev_check(ingress_dev)) {
- NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on non-mlxsw ingress port");
+ NL_SET_ERR_MSG_MOD(extack, "Can't match on non-mlxsw ingress port");
return -EINVAL;
}
mlxsw_sp_port = netdev_priv(ingress_dev);
if (mlxsw_sp_port->mlxsw_sp != block->mlxsw_sp) {
- NL_SET_ERR_MSG_MOD(f->common.extack, "Can't match on a port from different device");
+ NL_SET_ERR_MSG_MOD(extack, "Can't match on a port from different device");
return -EINVAL;
}
@@ -321,9 +320,29 @@ static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
mlxsw_sp_port->local_port,
0xFFFFFFFF);
+
return 0;
}
+static int mlxsw_sp_flower_parse_meta(struct mlxsw_sp_acl_rule_info *rulei,
+ struct flow_cls_offload *f,
+ struct mlxsw_sp_flow_block *block)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_match_meta match;
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
+ return 0;
+
+ flow_rule_match_meta(rule, &match);
+
+ mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_FDB_MISS,
+ match.key->l2_miss, match.mask->l2_miss);
+
+ return mlxsw_sp_flower_parse_meta_iif(rulei, block, &match,
+ f->common.extack);
+}
+
static void mlxsw_sp_flower_parse_ipv4(struct mlxsw_sp_acl_rule_info *rulei,
struct flow_cls_offload *f)
{
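The l2_miss key consumed above comes from the flower META dissector key, which
was extended for the layer-2 miss series. As a sketch (see
include/net/flow_dissector.h for the authoritative layout):

	struct flow_dissector_key_meta {
		int ingress_ifindex;
		u16 ingress_iftype;
		u8 l2_miss;
	};

Splitting out mlxsw_sp_flower_parse_meta_iif() lets the META key carry two
independent matches: the l2_miss bit is always copied into the FDB_MISS ACL
element (a zero mask makes it a no-op), while the ingress-port part bails out
early when its mask is zero.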
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
index 1f6bc0c7e91d..69cd689dbc83 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -704,12 +704,12 @@ void mlxsw_sp_mr_vif_del(struct mlxsw_sp_mr_table *mr_table, vifi_t vif_index)
static struct mlxsw_sp_mr_vif *
mlxsw_sp_mr_dev_vif_lookup(struct mlxsw_sp_mr_table *mr_table,
- const struct net_device *dev)
+ const struct mlxsw_sp_rif *rif)
{
vifi_t vif_index;
for (vif_index = 0; vif_index < MAXVIFS; vif_index++)
- if (mr_table->vifs[vif_index].dev == dev)
+ if (mlxsw_sp_rif_dev_is(rif, mr_table->vifs[vif_index].dev))
return &mr_table->vifs[vif_index];
return NULL;
}
@@ -717,13 +717,12 @@ mlxsw_sp_mr_dev_vif_lookup(struct mlxsw_sp_mr_table *mr_table,
int mlxsw_sp_mr_rif_add(struct mlxsw_sp_mr_table *mr_table,
const struct mlxsw_sp_rif *rif)
{
- const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
struct mlxsw_sp_mr_vif *mr_vif;
- if (!rif_dev)
+ if (!mlxsw_sp_rif_has_dev(rif))
return 0;
- mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
+ mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif);
if (!mr_vif)
return 0;
return mlxsw_sp_mr_vif_resolve(mr_table, mr_vif->dev, mr_vif,
@@ -733,13 +732,12 @@ int mlxsw_sp_mr_rif_add(struct mlxsw_sp_mr_table *mr_table,
void mlxsw_sp_mr_rif_del(struct mlxsw_sp_mr_table *mr_table,
const struct mlxsw_sp_rif *rif)
{
- const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
struct mlxsw_sp_mr_vif *mr_vif;
- if (!rif_dev)
+ if (!mlxsw_sp_rif_has_dev(rif))
return;
- mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
+ mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif);
if (!mr_vif)
return;
mlxsw_sp_mr_vif_unresolve(mr_table, mr_vif->dev, mr_vif);
@@ -748,17 +746,16 @@ void mlxsw_sp_mr_rif_del(struct mlxsw_sp_mr_table *mr_table,
void mlxsw_sp_mr_rif_mtu_update(struct mlxsw_sp_mr_table *mr_table,
const struct mlxsw_sp_rif *rif, int mtu)
{
- const struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
struct mlxsw_sp *mlxsw_sp = mr_table->mlxsw_sp;
struct mlxsw_sp_mr_route_vif_entry *rve;
struct mlxsw_sp_mr *mr = mlxsw_sp->mr;
struct mlxsw_sp_mr_vif *mr_vif;
- if (!rif_dev)
+ if (!mlxsw_sp_rif_has_dev(rif))
return;
/* Search for a VIF that uses that RIF */
- mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif_dev);
+ mr_vif = mlxsw_sp_mr_dev_vif_lookup(mr_table, rif);
if (!mr_vif)
return;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
index d309b77a0194..bb8eeb86edf7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c
@@ -11,10 +11,12 @@
#include "spectrum_nve.h"
#define MLXSW_SP_NVE_VXLAN_IPV4_SUPPORTED_FLAGS (VXLAN_F_UDP_ZERO_CSUM_TX | \
- VXLAN_F_LEARN)
+ VXLAN_F_LEARN | \
+ VXLAN_F_LOCALBYPASS)
#define MLXSW_SP_NVE_VXLAN_IPV6_SUPPORTED_FLAGS (VXLAN_F_IPV6 | \
VXLAN_F_UDP_ZERO_CSUM6_TX | \
- VXLAN_F_UDP_ZERO_CSUM6_RX)
+ VXLAN_F_UDP_ZERO_CSUM6_RX | \
+ VXLAN_F_LOCALBYPASS)
static bool mlxsw_sp_nve_vxlan_ipv4_flags_check(const struct vxlan_config *cfg,
struct netlink_ext_ack *extack)
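With VXLAN_F_LOCALBYPASS added to both masks, VxLAN devices remain eligible
for offload whether local bypass is left enabled (the default) or turned off
via the new nolocalbypass option.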
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 4a73e2fe95ef..445ba7fe3c40 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -51,10 +51,27 @@ struct mlxsw_sp_vr;
struct mlxsw_sp_lpm_tree;
struct mlxsw_sp_rif_ops;
-struct mlxsw_sp_rif {
+struct mlxsw_sp_crif_key {
+ struct net_device *dev;
+};
+
+struct mlxsw_sp_crif {
+ struct mlxsw_sp_crif_key key;
+ struct rhash_head ht_node;
+ bool can_destroy;
struct list_head nexthop_list;
+ struct mlxsw_sp_rif *rif;
+};
+
+static const struct rhashtable_params mlxsw_sp_crif_ht_params = {
+ .key_offset = offsetof(struct mlxsw_sp_crif, key),
+ .key_len = sizeof_field(struct mlxsw_sp_crif, key),
+ .head_offset = offsetof(struct mlxsw_sp_crif, ht_node),
+};
+
+struct mlxsw_sp_rif {
+ struct mlxsw_sp_crif *crif; /* NULL for underlay RIF */
struct list_head neigh_list;
- struct net_device *dev; /* NULL for underlay RIF */
struct mlxsw_sp_fid *fid;
unsigned char addr[ETH_ALEN];
int mtu;
@@ -71,6 +88,13 @@ struct mlxsw_sp_rif {
bool counter_egress_valid;
};
+static struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
+{
+ if (!rif->crif)
+ return NULL;
+ return rif->crif->key.dev;
+}
+
struct mlxsw_sp_rif_params {
struct net_device *dev;
union {
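This is the core data-structure change of the series: a CRIF (roughly, a
candidate RIF) exists for every netdevice the router cares about, owns the
nexthop list, and optionally points at a live RIF. The RIF in turn reaches
its netdevice only through rif->crif->key.dev, which is what the new
mlxsw_sp_rif_dev() accessor below encapsulates.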
@@ -96,8 +120,8 @@ struct mlxsw_sp_rif_subport {
struct mlxsw_sp_rif_ipip_lb {
struct mlxsw_sp_rif common;
struct mlxsw_sp_rif_ipip_lb_config lb_config;
- u16 ul_vr_id; /* Reserved for Spectrum-2. */
- u16 ul_rif_id; /* Reserved for Spectrum. */
+ u16 ul_vr_id; /* Spectrum-1. */
+ u16 ul_rif_id; /* Spectrum-2+. */
};
struct mlxsw_sp_rif_params_ipip_lb {
@@ -748,10 +772,11 @@ static bool mlxsw_sp_vr_is_used(const struct mlxsw_sp_vr *vr)
static struct mlxsw_sp_vr *mlxsw_sp_vr_find_unused(struct mlxsw_sp *mlxsw_sp)
{
+ int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
struct mlxsw_sp_vr *vr;
int i;
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
+ for (i = 0; i < max_vrs; i++) {
vr = &mlxsw_sp->router->vrs[i];
if (!mlxsw_sp_vr_is_used(vr))
return vr;
@@ -792,12 +817,13 @@ static u32 mlxsw_sp_fix_tb_id(u32 tb_id)
static struct mlxsw_sp_vr *mlxsw_sp_vr_find(struct mlxsw_sp *mlxsw_sp,
u32 tb_id)
{
+ int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
struct mlxsw_sp_vr *vr;
int i;
tb_id = mlxsw_sp_fix_tb_id(tb_id);
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
+ for (i = 0; i < max_vrs; i++) {
vr = &mlxsw_sp->router->vrs[i];
if (mlxsw_sp_vr_is_used(vr) && vr->tb_id == tb_id)
return vr;
@@ -959,6 +985,7 @@ static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib *fib,
struct mlxsw_sp_lpm_tree *new_tree)
{
+ int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
enum mlxsw_sp_l3proto proto = fib->proto;
struct mlxsw_sp_lpm_tree *old_tree;
u8 old_id, new_id = new_tree->id;
@@ -968,7 +995,7 @@ static int mlxsw_sp_vrs_lpm_tree_replace(struct mlxsw_sp *mlxsw_sp,
old_tree = mlxsw_sp->router->lpm.proto_trees[proto];
old_id = old_tree->id;
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
+ for (i = 0; i < max_vrs; i++) {
vr = &mlxsw_sp->router->vrs[i];
if (!mlxsw_sp_vr_lpm_tree_should_replace(vr, proto, old_id))
continue;
@@ -1052,6 +1079,61 @@ u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev)
return tb_id;
}
+static void
+mlxsw_sp_crif_init(struct mlxsw_sp_crif *crif, struct net_device *dev)
+{
+ crif->key.dev = dev;
+ INIT_LIST_HEAD(&crif->nexthop_list);
+}
+
+static struct mlxsw_sp_crif *
+mlxsw_sp_crif_alloc(struct net_device *dev)
+{
+ struct mlxsw_sp_crif *crif;
+
+ crif = kzalloc(sizeof(*crif), GFP_KERNEL);
+ if (!crif)
+ return NULL;
+
+ mlxsw_sp_crif_init(crif, dev);
+ return crif;
+}
+
+static void mlxsw_sp_crif_free(struct mlxsw_sp_crif *crif)
+{
+ if (WARN_ON(crif->rif))
+ return;
+
+ WARN_ON(!list_empty(&crif->nexthop_list));
+ kfree(crif);
+}
+
+static int mlxsw_sp_crif_insert(struct mlxsw_sp_router *router,
+ struct mlxsw_sp_crif *crif)
+{
+ return rhashtable_insert_fast(&router->crif_ht, &crif->ht_node,
+ mlxsw_sp_crif_ht_params);
+}
+
+static void mlxsw_sp_crif_remove(struct mlxsw_sp_router *router,
+ struct mlxsw_sp_crif *crif)
+{
+ rhashtable_remove_fast(&router->crif_ht, &crif->ht_node,
+ mlxsw_sp_crif_ht_params);
+}
+
+static struct mlxsw_sp_crif *
+mlxsw_sp_crif_lookup(struct mlxsw_sp_router *router,
+ const struct net_device *dev)
+{
+ struct mlxsw_sp_crif_key key = {
+ .dev = (struct net_device *)dev,
+ };
+
+ return rhashtable_lookup_fast(&router->crif_ht, &key,
+ mlxsw_sp_crif_ht_params);
+}
+
static struct mlxsw_sp_rif *
mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_rif_params *params,
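The CRIF table is a plain rhashtable keyed by the netdevice pointer. The
insert/remove/lookup helpers above cover the per-entry operations; the table
itself is presumably set up and torn down in router init/fini, outside this
excerpt, with the usual lifecycle:

	err = rhashtable_init(&router->crif_ht, &mlxsw_sp_crif_ht_params);
	if (err)
		goto err_crif_ht_init;
	/* ... */
	rhashtable_destroy(&router->crif_ht);

The lookup's cast away from const is needed because the key struct stores a
non-const pointer while callers naturally pass const netdevices.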
@@ -1557,6 +1639,7 @@ mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
u16 ul_rif_id, bool enable)
{
struct mlxsw_sp_rif_ipip_lb_config lb_cf = lb_rif->lb_config;
+ struct net_device *dev = mlxsw_sp_rif_dev(&lb_rif->common);
enum mlxsw_reg_ritr_loopback_ipip_options ipip_options;
struct mlxsw_sp_rif *rif = &lb_rif->common;
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
@@ -1569,7 +1652,7 @@ mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
case MLXSW_SP_L3_PROTO_IPV4:
saddr4 = be32_to_cpu(lb_cf.saddr.addr4);
mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
- rif->rif_index, rif->vr_id, rif->dev->mtu);
+ rif->rif_index, rif->vr_id, dev->mtu);
mlxsw_reg_ritr_loopback_ipip4_pack(ritr_pl, lb_cf.lb_ipipt,
ipip_options, ul_vr_id,
ul_rif_id, saddr4,
@@ -1579,7 +1662,7 @@ mlxsw_sp_rif_ipip_lb_op(struct mlxsw_sp_rif_ipip_lb *lb_rif, u16 ul_vr_id,
case MLXSW_SP_L3_PROTO_IPV6:
saddr6 = &lb_cf.saddr.addr6;
mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_LOOPBACK_IF,
- rif->rif_index, rif->vr_id, rif->dev->mtu);
+ rif->rif_index, rif->vr_id, dev->mtu);
mlxsw_reg_ritr_loopback_ipip6_pack(ritr_pl, lb_cf.lb_ipipt,
ipip_options, ul_vr_id,
ul_rif_id, saddr6,
@@ -1639,9 +1722,29 @@ static void mlxsw_sp_netdevice_ipip_ol_down_event(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
}
-static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
+static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif);
+
+static void mlxsw_sp_rif_migrate_destroy(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *old_rif,
- struct mlxsw_sp_rif *new_rif);
+ struct mlxsw_sp_rif *new_rif,
+ bool migrate_nhs)
+{
+ struct mlxsw_sp_crif *crif = old_rif->crif;
+ struct mlxsw_sp_crif mock_crif = {};
+
+ if (migrate_nhs)
+ mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
+
+ /* Plant a mock CRIF so that destroying the old RIF doesn't unoffload
+ * our nexthops and IPIP tunnels, and doesn't sever the crif->rif link.
+ */
+ mlxsw_sp_crif_init(&mock_crif, crif->key.dev);
+ old_rif->crif = &mock_crif;
+ mock_crif.rif = old_rif;
+ mlxsw_sp_rif_destroy(old_rif);
+}
+
static int
mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
@@ -1659,18 +1762,11 @@ mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
return PTR_ERR(new_lb_rif);
ipip_entry->ol_lb = new_lb_rif;
- if (keep_encap)
- mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
- &new_lb_rif->common);
-
- mlxsw_sp_rif_destroy(&old_lb_rif->common);
-
+ mlxsw_sp_rif_migrate_destroy(mlxsw_sp, &old_lb_rif->common,
+ &new_lb_rif->common, keep_encap);
return 0;
}
-static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *rif);
-
/**
* __mlxsw_sp_ipip_entry_update_tunnel - Update offload related to IPIP entry.
* @mlxsw_sp: mlxsw_sp.
@@ -2329,7 +2425,7 @@ static void mlxsw_sp_router_neigh_ent_ipv4_process(struct mlxsw_sp *mlxsw_sp,
}
dipn = htonl(dip);
- dev = mlxsw_sp->router->rifs[rif]->dev;
+ dev = mlxsw_sp_rif_dev(mlxsw_sp->router->rifs[rif]);
n = neigh_lookup(&arp_tbl, &dipn, dev);
if (!n)
return;
@@ -2357,7 +2453,7 @@ static void mlxsw_sp_router_neigh_ent_ipv6_process(struct mlxsw_sp *mlxsw_sp,
return;
}
- dev = mlxsw_sp->router->rifs[rif]->dev;
+ dev = mlxsw_sp_rif_dev(mlxsw_sp->router->rifs[rif]);
n = neigh_lookup(&nd_tbl, &dip, dev);
if (!n)
return;
@@ -2745,13 +2841,12 @@ static void mlxsw_sp_router_update_priority_work(struct work_struct *work)
}
static int mlxsw_sp_router_schedule_work(struct net *net,
- struct notifier_block *nb,
+ struct mlxsw_sp_router *router,
+ struct neighbour *n,
void (*cb)(struct work_struct *))
{
struct mlxsw_sp_netevent_work *net_work;
- struct mlxsw_sp_router *router;
- router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
if (!net_eq(net, mlxsw_sp_net(router->mlxsw_sp)))
return NOTIFY_DONE;
@@ -2761,19 +2856,31 @@ static int mlxsw_sp_router_schedule_work(struct net *net,
INIT_WORK(&net_work->work, cb);
net_work->mlxsw_sp = router->mlxsw_sp;
+ net_work->n = n;
mlxsw_core_schedule_work(&net_work->work);
return NOTIFY_DONE;
}
+static bool mlxsw_sp_dev_lower_is_port(struct net_device *dev)
+{
+ struct mlxsw_sp_port *mlxsw_sp_port;
+
+ rcu_read_lock();
+ mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
+ rcu_read_unlock();
+ return !!mlxsw_sp_port;
+}
+
static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
- struct mlxsw_sp_netevent_work *net_work;
- struct mlxsw_sp_port *mlxsw_sp_port;
- struct mlxsw_sp *mlxsw_sp;
+ struct mlxsw_sp_router *router;
unsigned long interval;
struct neigh_parms *p;
struct neighbour *n;
+ struct net *net;
+
+ router = container_of(nb, struct mlxsw_sp_router, netevent_nb);
switch (event) {
case NETEVENT_DELAY_PROBE_TIME_UPDATE:
@@ -2787,51 +2894,37 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
/* We are in atomic context and can't take RTNL mutex,
* so use RCU variant to walk the device chain.
*/
- mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(p->dev);
- if (!mlxsw_sp_port)
+ if (!mlxsw_sp_dev_lower_is_port(p->dev))
return NOTIFY_DONE;
- mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
interval = jiffies_to_msecs(NEIGH_VAR(p, DELAY_PROBE_TIME));
- mlxsw_sp->router->neighs_update.interval = interval;
-
- mlxsw_sp_port_dev_put(mlxsw_sp_port);
+ router->neighs_update.interval = interval;
break;
case NETEVENT_NEIGH_UPDATE:
n = ptr;
+ net = neigh_parms_net(n->parms);
if (n->tbl->family != AF_INET && n->tbl->family != AF_INET6)
return NOTIFY_DONE;
- mlxsw_sp_port = mlxsw_sp_port_lower_dev_hold(n->dev);
- if (!mlxsw_sp_port)
+ if (!mlxsw_sp_dev_lower_is_port(n->dev))
return NOTIFY_DONE;
- net_work = kzalloc(sizeof(*net_work), GFP_ATOMIC);
- if (!net_work) {
- mlxsw_sp_port_dev_put(mlxsw_sp_port);
- return NOTIFY_BAD;
- }
-
- INIT_WORK(&net_work->work, mlxsw_sp_router_neigh_event_work);
- net_work->mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- net_work->n = n;
-
/* Take a reference to ensure the neighbour won't be
* destroyed until we drop the reference in delayed
* work.
*/
neigh_clone(n);
- mlxsw_core_schedule_work(&net_work->work);
- mlxsw_sp_port_dev_put(mlxsw_sp_port);
- break;
+ return mlxsw_sp_router_schedule_work(net, router, n,
+ mlxsw_sp_router_neigh_event_work);
+
case NETEVENT_IPV4_MPATH_HASH_UPDATE:
case NETEVENT_IPV6_MPATH_HASH_UPDATE:
- return mlxsw_sp_router_schedule_work(ptr, nb,
+ return mlxsw_sp_router_schedule_work(ptr, router, NULL,
mlxsw_sp_router_mp_hash_event_work);
case NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE:
- return mlxsw_sp_router_schedule_work(ptr, nb,
+ return mlxsw_sp_router_schedule_work(ptr, router, NULL,
mlxsw_sp_router_update_priority_work);
}
@@ -2902,7 +2995,7 @@ struct mlxsw_sp_nexthop_key {
struct mlxsw_sp_nexthop {
struct list_head neigh_list_node; /* member of neigh entry list */
- struct list_head rif_list_node;
+ struct list_head crif_list_node;
struct list_head router_list_node;
struct mlxsw_sp_nexthop_group_info *nhgi; /* pointer back to the group
* this nexthop belongs to
@@ -2915,7 +3008,7 @@ struct mlxsw_sp_nexthop {
int nh_weight;
int norm_nh_weight;
int num_adj_entries;
- struct mlxsw_sp_rif *rif;
+ struct mlxsw_sp_crif *crif;
u8 should_offload:1, /* set indicates this nexthop should be written
* to the adjacency table.
*/
@@ -2935,6 +3028,14 @@ struct mlxsw_sp_nexthop {
bool counter_valid;
};
+static struct net_device *
+mlxsw_sp_nexthop_dev(const struct mlxsw_sp_nexthop *nh)
+{
+ if (!nh->crif)
+ return NULL;
+ return nh->crif->key.dev;
+}
+
enum mlxsw_sp_nexthop_group_type {
MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV4,
MLXSW_SP_NEXTHOP_GROUP_TYPE_IPV6,
@@ -2952,9 +3053,18 @@ struct mlxsw_sp_nexthop_group_info {
is_resilient:1;
struct list_head list; /* member in nh_res_grp_list */
struct mlxsw_sp_nexthop nexthops[];
-#define nh_rif nexthops[0].rif
};
+static struct mlxsw_sp_rif *
+mlxsw_sp_nhgi_rif(const struct mlxsw_sp_nexthop_group_info *nhgi)
+{
+ struct mlxsw_sp_crif *crif = nhgi->nexthops[0].crif;
+
+ if (!crif)
+ return NULL;
+ return crif->rif;
+}
+
struct mlxsw_sp_nexthop_group_vr_key {
u16 vr_id;
enum mlxsw_sp_l3proto proto;
@@ -3076,7 +3186,9 @@ int mlxsw_sp_nexthop_indexes(struct mlxsw_sp_nexthop *nh, u32 *p_adj_index,
struct mlxsw_sp_rif *mlxsw_sp_nexthop_rif(struct mlxsw_sp_nexthop *nh)
{
- return nh->rif;
+ if (WARN_ON(!nh->crif))
+ return NULL;
+ return nh->crif->rif;
}
bool mlxsw_sp_nexthop_group_has_ipip(struct mlxsw_sp_nexthop *nh)
@@ -3461,11 +3573,12 @@ static int __mlxsw_sp_nexthop_eth_update(struct mlxsw_sp *mlxsw_sp,
bool force, char *ratr_pl)
{
struct mlxsw_sp_neigh_entry *neigh_entry = nh->neigh_entry;
+ struct mlxsw_sp_rif *rif = mlxsw_sp_nexthop_rif(nh);
enum mlxsw_reg_ratr_op op;
u16 rif_index;
- rif_index = nh->rif ? nh->rif->rif_index :
- mlxsw_sp->router->lb_rif_index;
+ rif_index = rif ? rif->rif_index :
+ mlxsw_sp->router->lb_crif->rif->rif_index;
op = force ? MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY :
MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY_ON_ACTIVITY;
mlxsw_reg_ratr_pack(ratr_pl, op, true, MLXSW_REG_RATR_TYPE_ETHERNET,
@@ -4008,16 +4121,18 @@ mlxsw_sp_nexthop_dead_neigh_replace(struct mlxsw_sp *mlxsw_sp,
{
struct neighbour *n, *old_n = neigh_entry->key.n;
struct mlxsw_sp_nexthop *nh;
+ struct net_device *dev;
bool entry_connected;
u8 nud_state, dead;
int err;
nh = list_first_entry(&neigh_entry->nexthop_list,
struct mlxsw_sp_nexthop, neigh_list_node);
+ dev = mlxsw_sp_nexthop_dev(nh);
- n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
+ n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, dev);
if (!n) {
- n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
+ n = neigh_create(nh->neigh_tbl, &nh->gw_addr, dev);
if (IS_ERR(n))
return PTR_ERR(n);
neigh_event_send(n, NULL);
@@ -4081,44 +4196,49 @@ mlxsw_sp_nexthop_neigh_update(struct mlxsw_sp *mlxsw_sp,
}
}
-static void mlxsw_sp_nexthop_rif_init(struct mlxsw_sp_nexthop *nh,
- struct mlxsw_sp_rif *rif)
+static void mlxsw_sp_nexthop_crif_init(struct mlxsw_sp_nexthop *nh,
+ struct mlxsw_sp_crif *crif)
{
- if (nh->rif)
+ if (nh->crif)
return;
- nh->rif = rif;
- list_add(&nh->rif_list_node, &rif->nexthop_list);
+ nh->crif = crif;
+ list_add(&nh->crif_list_node, &crif->nexthop_list);
}
-static void mlxsw_sp_nexthop_rif_fini(struct mlxsw_sp_nexthop *nh)
+static void mlxsw_sp_nexthop_crif_fini(struct mlxsw_sp_nexthop *nh)
{
- if (!nh->rif)
+ if (!nh->crif)
return;
- list_del(&nh->rif_list_node);
- nh->rif = NULL;
+ list_del(&nh->crif_list_node);
+ nh->crif = NULL;
}
static int mlxsw_sp_nexthop_neigh_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
struct mlxsw_sp_neigh_entry *neigh_entry;
+ struct net_device *dev;
struct neighbour *n;
u8 nud_state, dead;
int err;
+ if (WARN_ON(!nh->crif->rif))
+ return 0;
+
if (!nh->nhgi->gateway || nh->neigh_entry)
return 0;
+ dev = mlxsw_sp_nexthop_dev(nh);
/* Take a reference on the neighbour here, ensuring that it is
* not destroyed before the nexthop entry is finished.
* The reference is taken either in neigh_lookup() or
* in neigh_create() in case n is not found.
*/
- n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
+ n = neigh_lookup(nh->neigh_tbl, &nh->gw_addr, dev);
if (!n) {
- n = neigh_create(nh->neigh_tbl, &nh->gw_addr, nh->rif->dev);
+ n = neigh_create(nh->neigh_tbl, &nh->gw_addr, dev);
if (IS_ERR(n))
return PTR_ERR(n);
neigh_event_send(n, NULL);
@@ -4197,15 +4317,20 @@ static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh,
struct mlxsw_sp_ipip_entry *ipip_entry)
{
+ struct mlxsw_sp_crif *crif;
bool removing;
if (!nh->nhgi->gateway || nh->ipip_entry)
return;
+ crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, ipip_entry->ol_dev);
+ if (WARN_ON(!crif))
+ return;
+
nh->ipip_entry = ipip_entry;
removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
__mlxsw_sp_nexthop_neigh_update(nh, removing);
- mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
+ mlxsw_sp_nexthop_crif_init(nh, crif);
}
static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
@@ -4237,7 +4362,7 @@ static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
{
const struct mlxsw_sp_ipip_ops *ipip_ops;
struct mlxsw_sp_ipip_entry *ipip_entry;
- struct mlxsw_sp_rif *rif;
+ struct mlxsw_sp_crif *crif;
int err;
ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
@@ -4251,11 +4376,15 @@ static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
}
nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
- rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
- if (!rif)
+ crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, dev);
+ if (!crif)
+ return 0;
+
+ mlxsw_sp_nexthop_crif_init(nh, crif);
+
+ if (!crif->rif)
return 0;
- mlxsw_sp_nexthop_rif_init(nh, rif);
err = mlxsw_sp_nexthop_neigh_init(mlxsw_sp, nh);
if (err)
goto err_neigh_init;
@@ -4263,25 +4392,30 @@ static int mlxsw_sp_nexthop_type_init(struct mlxsw_sp *mlxsw_sp,
return 0;
err_neigh_init:
- mlxsw_sp_nexthop_rif_fini(nh);
+ mlxsw_sp_nexthop_crif_fini(nh);
return err;
}
-static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop *nh)
+static void mlxsw_sp_nexthop_type_rif_gone(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
{
switch (nh->type) {
case MLXSW_SP_NEXTHOP_TYPE_ETH:
mlxsw_sp_nexthop_neigh_fini(mlxsw_sp, nh);
- mlxsw_sp_nexthop_rif_fini(nh);
break;
case MLXSW_SP_NEXTHOP_TYPE_IPIP:
- mlxsw_sp_nexthop_rif_fini(nh);
mlxsw_sp_nexthop_ipip_fini(mlxsw_sp, nh);
break;
}
}
+static void mlxsw_sp_nexthop_type_fini(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh)
+{
+ mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh);
+ mlxsw_sp_nexthop_crif_fini(nh);
+}
+
static int mlxsw_sp_nexthop4_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop_group *nh_grp,
struct mlxsw_sp_nexthop *nh,
@@ -4368,16 +4502,17 @@ static void mlxsw_sp_nexthop4_event(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif)
{
+ struct net_device *dev = mlxsw_sp_rif_dev(rif);
struct mlxsw_sp_nexthop *nh;
bool removing;
- list_for_each_entry(nh, &rif->nexthop_list, rif_list_node) {
+ list_for_each_entry(nh, &rif->crif->nexthop_list, crif_list_node) {
switch (nh->type) {
case MLXSW_SP_NEXTHOP_TYPE_ETH:
removing = false;
break;
case MLXSW_SP_NEXTHOP_TYPE_IPIP:
- removing = !mlxsw_sp_ipip_netdev_ul_up(rif->dev);
+ removing = !mlxsw_sp_ipip_netdev_ul_up(dev);
break;
default:
WARN_ON(1);
@@ -4389,25 +4524,14 @@ static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
}
}
-static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *old_rif,
- struct mlxsw_sp_rif *new_rif)
-{
- struct mlxsw_sp_nexthop *nh;
-
- list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
- list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
- nh->rif = new_rif;
- mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
-}
-
static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif)
{
struct mlxsw_sp_nexthop *nh, *tmp;
- list_for_each_entry_safe(nh, tmp, &rif->nexthop_list, rif_list_node) {
- mlxsw_sp_nexthop_type_fini(mlxsw_sp, nh);
+ list_for_each_entry_safe(nh, tmp, &rif->crif->nexthop_list,
+ crif_list_node) {
+ mlxsw_sp_nexthop_type_rif_gone(mlxsw_sp, nh);
mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh->nhgi->nh_grp);
}
}
@@ -4427,7 +4551,7 @@ static int mlxsw_sp_adj_trap_entry_init(struct mlxsw_sp *mlxsw_sp)
mlxsw_reg_ratr_pack(ratr_pl, MLXSW_REG_RATR_OP_WRITE_WRITE_ENTRY, true,
MLXSW_REG_RATR_TYPE_ETHERNET,
mlxsw_sp->router->adj_trap_index,
- mlxsw_sp->router->lb_rif_index);
+ mlxsw_sp->router->lb_crif->rif->rif_index);
mlxsw_reg_ratr_trap_action_set(ratr_pl, trap_action);
mlxsw_reg_ratr_trap_id_set(ratr_pl, MLXSW_TRAP_ID_RTR_EGRESS0);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ratr), ratr_pl);
@@ -4743,21 +4867,19 @@ static bool mlxsw_sp_nexthop_obj_is_gateway(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_nexthop_obj_blackhole_init(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
- u16 lb_rif_index = mlxsw_sp->router->lb_rif_index;
-
nh->action = MLXSW_SP_NEXTHOP_ACTION_DISCARD;
nh->should_offload = 1;
/* While nexthops that discard packets do not forward packets
* via an egress RIF, they still need to be programmed using a
* valid RIF, so use the loopback RIF created during init.
*/
- nh->rif = mlxsw_sp->router->rifs[lb_rif_index];
+ nh->crif = mlxsw_sp->router->lb_crif;
}
static void mlxsw_sp_nexthop_obj_blackhole_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh)
{
- nh->rif = NULL;
+ nh->crif = NULL;
nh->should_offload = 0;
}
@@ -5491,7 +5613,7 @@ mlxsw_sp_fib_entry_should_offload(const struct mlxsw_sp_fib_entry *fib_entry)
case MLXSW_SP_FIB_ENTRY_TYPE_REMOTE:
return !!nh_group->nhgi->adj_index_valid;
case MLXSW_SP_FIB_ENTRY_TYPE_LOCAL:
- return !!nh_group->nhgi->nh_rif;
+ return !!mlxsw_sp_nhgi_rif(nh_group->nhgi);
case MLXSW_SP_FIB_ENTRY_TYPE_BLACKHOLE:
case MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP:
case MLXSW_SP_FIB_ENTRY_TYPE_NVE_DECAP:
@@ -5509,9 +5631,10 @@ mlxsw_sp_rt6_nexthop(struct mlxsw_sp_nexthop_group *nh_grp,
for (i = 0; i < nh_grp->nhgi->count; i++) {
struct mlxsw_sp_nexthop *nh = &nh_grp->nhgi->nexthops[i];
+ struct net_device *dev = mlxsw_sp_nexthop_dev(nh);
struct fib6_info *rt = mlxsw_sp_rt6->rt;
- if (nh->rif && nh->rif->dev == rt->fib6_nh->fib_nh_dev &&
+ if (dev && dev == rt->fib6_nh->fib_nh_dev &&
ipv6_addr_equal((const struct in6_addr *) &nh->gw_addr,
&rt->fib6_nh->fib_nh_gw6))
return nh;
@@ -5752,7 +5875,8 @@ static int mlxsw_sp_fib_entry_op_remote(struct mlxsw_sp *mlxsw_sp,
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
adjacency_index = nhgi->adj_index;
ecmp_size = nhgi->ecmp_size;
- } else if (!nhgi->adj_index_valid && nhgi->count && nhgi->nh_rif) {
+ } else if (!nhgi->adj_index_valid && nhgi->count &&
+ mlxsw_sp_nhgi_rif(nhgi)) {
trap_action = MLXSW_REG_RALUE_TRAP_ACTION_NOP;
adjacency_index = mlxsw_sp->router->adj_trap_index;
ecmp_size = 1;
@@ -5771,7 +5895,7 @@ static int mlxsw_sp_fib_entry_op_local(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry,
enum mlxsw_reg_ralue_op op)
{
- struct mlxsw_sp_rif *rif = fib_entry->nh_group->nhgi->nh_rif;
+ struct mlxsw_sp_rif *rif = mlxsw_sp_nhgi_rif(fib_entry->nh_group->nhgi);
enum mlxsw_reg_ralue_trap_action trap_action;
char ralue_pl[MLXSW_REG_RALUE_LEN];
u16 trap_id = 0;
@@ -7298,9 +7422,10 @@ static void mlxsw_sp_vr_fib_flush(struct mlxsw_sp *mlxsw_sp,
static void mlxsw_sp_router_fib_flush(struct mlxsw_sp *mlxsw_sp)
{
+ int max_vrs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS);
int i, j;
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_VRS); i++) {
+ for (i = 0; i < max_vrs; i++) {
struct mlxsw_sp_vr *vr = &mlxsw_sp->router->vrs[i];
if (!mlxsw_sp_vr_is_used(vr))
@@ -7699,11 +7824,12 @@ static struct mlxsw_sp_rif *
mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp,
const struct net_device *dev)
{
+ int max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
int i;
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
+ for (i = 0; i < max_rifs; i++)
if (mlxsw_sp->router->rifs[i] &&
- mlxsw_sp->router->rifs[i]->dev == dev)
+ mlxsw_sp_rif_dev_is(mlxsw_sp->router->rifs[i], dev))
return mlxsw_sp->router->rifs[i];
return NULL;
@@ -7761,33 +7887,52 @@ static int mlxsw_sp_router_rif_disable(struct mlxsw_sp *mlxsw_sp, u16 rif)
static void mlxsw_sp_router_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif)
{
+ /* Signal to nexthop cleanup that the RIF is going away. */
+ rif->crif->rif = NULL;
+
mlxsw_sp_router_rif_disable(mlxsw_sp, rif->rif_index);
mlxsw_sp_nexthop_rif_gone_sync(mlxsw_sp, rif);
mlxsw_sp_neigh_rif_gone_sync(mlxsw_sp, rif);
}
+static bool __mlxsw_sp_dev_addr_list_empty(const struct net_device *dev)
+{
+ struct inet6_dev *inet6_dev;
+ struct in_device *idev;
+
+ idev = __in_dev_get_rcu(dev);
+ if (idev && idev->ifa_list)
+ return false;
+
+ inet6_dev = __in6_dev_get(dev);
+ if (inet6_dev && !list_empty(&inet6_dev->addr_list))
+ return false;
+
+ return true;
+}
+
+static bool mlxsw_sp_dev_addr_list_empty(const struct net_device *dev)
+{
+ bool addr_list_empty;
+
+ rcu_read_lock();
+ addr_list_empty = __mlxsw_sp_dev_addr_list_empty(dev);
+ rcu_read_unlock();
+
+ return addr_list_empty;
+}
+
static bool
mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
unsigned long event)
{
- struct inet6_dev *inet6_dev;
- bool addr_list_empty = true;
- struct in_device *idev;
+ bool addr_list_empty;
switch (event) {
case NETDEV_UP:
return rif == NULL;
case NETDEV_DOWN:
- rcu_read_lock();
- idev = __in_dev_get_rcu(dev);
- if (idev && idev->ifa_list)
- addr_list_empty = false;
-
- inet6_dev = __in6_dev_get(dev);
- if (addr_list_empty && inet6_dev &&
- !list_empty(&inet6_dev->addr_list))
- addr_list_empty = false;
- rcu_read_unlock();
+ addr_list_empty = mlxsw_sp_dev_addr_list_empty(dev);
/* macvlans do not have a RIF, but rather piggy back on the
* RIF of their lower device.
@@ -7796,7 +7941,7 @@ mlxsw_sp_rif_should_config(struct mlxsw_sp_rif *rif, struct net_device *dev,
return true;
if (rif && addr_list_empty &&
- !netif_is_l3_slave(rif->dev))
+ !netif_is_l3_slave(mlxsw_sp_rif_dev(rif)))
return true;
/* It is possible we already removed the RIF ourselves
* if it was assigned to a netdev that is now a bridge
@@ -7854,27 +7999,39 @@ static void mlxsw_sp_rif_index_free(struct mlxsw_sp *mlxsw_sp, u16 rif_index,
static struct mlxsw_sp_rif *mlxsw_sp_rif_alloc(size_t rif_size, u16 rif_index,
u16 vr_id,
- struct net_device *l3_dev)
+ struct mlxsw_sp_crif *crif)
{
+ struct net_device *l3_dev = crif ? crif->key.dev : NULL;
struct mlxsw_sp_rif *rif;
rif = kzalloc(rif_size, GFP_KERNEL);
if (!rif)
return NULL;
- INIT_LIST_HEAD(&rif->nexthop_list);
INIT_LIST_HEAD(&rif->neigh_list);
if (l3_dev) {
ether_addr_copy(rif->addr, l3_dev->dev_addr);
rif->mtu = l3_dev->mtu;
- rif->dev = l3_dev;
}
rif->vr_id = vr_id;
rif->rif_index = rif_index;
+ if (crif) {
+ rif->crif = crif;
+ crif->rif = rif;
+ }
return rif;
}
+static void mlxsw_sp_rif_free(struct mlxsw_sp_rif *rif)
+{
+ WARN_ON(!list_empty(&rif->neigh_list));
+
+ if (rif->crif)
+ rif->crif->rif = NULL;
+ kfree(rif);
+}
+
struct mlxsw_sp_rif *mlxsw_sp_rif_by_index(const struct mlxsw_sp *mlxsw_sp,
u16 rif_index)
{
@@ -7893,7 +8050,8 @@ u16 mlxsw_sp_ipip_lb_rif_index(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif)
{
- u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(lb_rif->common.dev);
+ struct net_device *dev = mlxsw_sp_rif_dev(&lb_rif->common);
+ u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
struct mlxsw_sp_vr *ul_vr;
ul_vr = mlxsw_sp_vr_get(lb_rif->common.mlxsw_sp, ul_tb_id, NULL);
@@ -8070,12 +8228,18 @@ mlxsw_sp_router_hwstats_notify_schedule(struct net_device *dev)
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif)
{
- return rif->dev->ifindex;
+ return mlxsw_sp_rif_dev(rif)->ifindex;
}
-const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif)
+bool mlxsw_sp_rif_has_dev(const struct mlxsw_sp_rif *rif)
+{
+ return !!mlxsw_sp_rif_dev(rif);
+}
+
+bool mlxsw_sp_rif_dev_is(const struct mlxsw_sp_rif *rif,
+ const struct net_device *dev)
{
- return rif->dev;
+ return mlxsw_sp_rif_dev(rif) == dev;
}
static void mlxsw_sp_rif_push_l3_stats(struct mlxsw_sp_rif *rif)
@@ -8083,7 +8247,7 @@ static void mlxsw_sp_rif_push_l3_stats(struct mlxsw_sp_rif *rif)
struct rtnl_hw_stats64 stats = {};
if (!mlxsw_sp_router_port_l3_stats_fetch(rif, &stats))
- netdev_offload_xstats_push_delta(rif->dev,
+ netdev_offload_xstats_push_delta(mlxsw_sp_rif_dev(rif),
NETDEV_OFFLOAD_XSTATS_TYPE_L3,
&stats);
}
@@ -8098,6 +8262,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
const struct mlxsw_sp_rif_ops *ops;
struct mlxsw_sp_fid *fid = NULL;
enum mlxsw_sp_rif_type type;
+ struct mlxsw_sp_crif *crif;
struct mlxsw_sp_rif *rif;
struct mlxsw_sp_vr *vr;
u16 rif_index;
@@ -8117,12 +8282,18 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
goto err_rif_index_alloc;
}
- rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, params->dev);
+ crif = mlxsw_sp_crif_lookup(mlxsw_sp->router, params->dev);
+ if (WARN_ON(!crif)) {
+ err = -ENOENT;
+ goto err_crif_lookup;
+ }
+
+ rif = mlxsw_sp_rif_alloc(ops->rif_size, rif_index, vr->id, crif);
if (!rif) {
err = -ENOMEM;
goto err_rif_alloc;
}
- dev_hold(rif->dev);
+ dev_hold(params->dev);
mlxsw_sp->router->rifs[rif_index] = rif;
rif->mlxsw_sp = mlxsw_sp;
rif->ops = ops;
@@ -8150,12 +8321,12 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
goto err_mr_rif_add;
}
- if (netdev_offload_xstats_enabled(rif->dev,
+ if (netdev_offload_xstats_enabled(params->dev,
NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
err = mlxsw_sp_router_port_l3_stats_enable(rif);
if (err)
goto err_stats_enable;
- mlxsw_sp_router_hwstats_notify_schedule(rif->dev);
+ mlxsw_sp_router_hwstats_notify_schedule(params->dev);
} else {
mlxsw_sp_rif_counters_alloc(rif);
}
@@ -8173,9 +8344,10 @@ err_configure:
mlxsw_sp_fid_put(fid);
err_fid_get:
mlxsw_sp->router->rifs[rif_index] = NULL;
- dev_put(rif->dev);
- kfree(rif);
+ dev_put(params->dev);
+ mlxsw_sp_rif_free(rif);
err_rif_alloc:
+err_crif_lookup:
mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
err_rif_index_alloc:
vr->rif_count--;
@@ -8185,8 +8357,10 @@ err_rif_index_alloc:
static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
{
+ struct net_device *dev = mlxsw_sp_rif_dev(rif);
const struct mlxsw_sp_rif_ops *ops = rif->ops;
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
+ struct mlxsw_sp_crif *crif = rif->crif;
struct mlxsw_sp_fid *fid = rif->fid;
u8 rif_entries = rif->rif_entries;
u16 rif_index = rif->rif_index;
@@ -8197,11 +8371,10 @@ static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
mlxsw_sp_router_rif_gone_sync(mlxsw_sp, rif);
vr = &mlxsw_sp->router->vrs[rif->vr_id];
- if (netdev_offload_xstats_enabled(rif->dev,
- NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
+ if (netdev_offload_xstats_enabled(dev, NETDEV_OFFLOAD_XSTATS_TYPE_L3)) {
mlxsw_sp_rif_push_l3_stats(rif);
mlxsw_sp_router_port_l3_stats_disable(rif);
- mlxsw_sp_router_hwstats_notify_schedule(rif->dev);
+ mlxsw_sp_router_hwstats_notify_schedule(dev);
} else {
mlxsw_sp_rif_counters_free(rif);
}
@@ -8213,11 +8386,14 @@ static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif)
/* Loopback RIFs are not associated with a FID. */
mlxsw_sp_fid_put(fid);
mlxsw_sp->router->rifs[rif->rif_index] = NULL;
- dev_put(rif->dev);
- kfree(rif);
+ dev_put(dev);
+ mlxsw_sp_rif_free(rif);
mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
vr->rif_count--;
mlxsw_sp_vr_put(mlxsw_sp, vr);
+
+ if (crif->can_destroy)
+ mlxsw_sp_crif_free(crif);
}
void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp,
@@ -8549,25 +8725,20 @@ __mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
mlxsw_sp_rif_subport_put(rif);
}
-int
-mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
- struct net_device *l3_dev,
- struct netlink_ext_ack *extack)
+static int
+mlxsw_sp_port_vlan_router_join_existing(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan,
+ struct net_device *l3_dev,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port_vlan->mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_rif *rif;
- int err = 0;
- mutex_lock(&mlxsw_sp->router->lock);
- rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev);
- if (!rif)
- goto out;
+ lockdep_assert_held(&mlxsw_sp->router->lock);
- err = __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, l3_dev,
- extack);
-out:
- mutex_unlock(&mlxsw_sp->router->lock);
- return err;
+ if (!mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev))
+ return 0;
+
+ return __mlxsw_sp_port_vlan_router_join(mlxsw_sp_port_vlan, l3_dev,
+ extack);
}
void
@@ -8874,8 +9045,8 @@ out:
return notifier_from_errno(err);
}
-int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
- unsigned long event, void *ptr)
+static int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
{
struct in_validator_info *ivi = (struct in_validator_info *) ptr;
struct net_device *dev = ivi->ivi_dev->dev;
@@ -8957,8 +9128,8 @@ static int mlxsw_sp_inet6addr_event(struct notifier_block *nb,
return NOTIFY_DONE;
}
-int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
- unsigned long event, void *ptr)
+static int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
{
struct in6_validator_info *i6vi = (struct in6_validator_info *) ptr;
struct net_device *dev = i6vi->i6vi_dev->dev;
@@ -9004,7 +9175,7 @@ mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif,
struct netlink_ext_ack *extack)
{
- struct net_device *dev = rif->dev;
+ struct net_device *dev = mlxsw_sp_rif_dev(rif);
u8 old_mac_profile;
u16 fid_index;
int err;
@@ -9088,6 +9259,104 @@ static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif,
return -ENOBUFS;
}
+static bool mlxsw_sp_router_netdevice_interesting(struct mlxsw_sp *mlxsw_sp,
+ struct net_device *dev)
+{
+ struct vlan_dev_priv *vlan;
+
+ if (netif_is_lag_master(dev) ||
+ netif_is_bridge_master(dev) ||
+ mlxsw_sp_port_dev_check(dev) ||
+ mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev) ||
+ netif_is_l3_master(dev))
+ return true;
+
+ if (!is_vlan_dev(dev))
+ return false;
+
+ vlan = vlan_dev_priv(dev);
+ return netif_is_lag_master(vlan->real_dev) ||
+ netif_is_bridge_master(vlan->real_dev) ||
+ mlxsw_sp_port_dev_check(vlan->real_dev);
+}
+
+static struct mlxsw_sp_crif *
+mlxsw_sp_crif_register(struct mlxsw_sp_router *router, struct net_device *dev)
+{
+ struct mlxsw_sp_crif *crif;
+ int err;
+
+ if (WARN_ON(mlxsw_sp_crif_lookup(router, dev)))
+ return NULL;
+
+ crif = mlxsw_sp_crif_alloc(dev);
+ if (!crif)
+ return ERR_PTR(-ENOMEM);
+
+ err = mlxsw_sp_crif_insert(router, crif);
+ if (err)
+ goto err_netdev_insert;
+
+ return crif;
+
+err_netdev_insert:
+ mlxsw_sp_crif_free(crif);
+ return ERR_PTR(err);
+}
+
+static void mlxsw_sp_crif_unregister(struct mlxsw_sp_router *router,
+ struct mlxsw_sp_crif *crif)
+{
+ struct mlxsw_sp_nexthop *nh, *tmp;
+
+ mlxsw_sp_crif_remove(router, crif);
+
+ list_for_each_entry_safe(nh, tmp, &crif->nexthop_list, crif_list_node)
+ mlxsw_sp_nexthop_type_fini(router->mlxsw_sp, nh);
+
+ if (crif->rif)
+ crif->can_destroy = true;
+ else
+ mlxsw_sp_crif_free(crif);
+}
+
+static int mlxsw_sp_netdevice_register(struct mlxsw_sp_router *router,
+ struct net_device *dev)
+{
+ struct mlxsw_sp_crif *crif;
+
+ if (!mlxsw_sp_router_netdevice_interesting(router->mlxsw_sp, dev))
+ return 0;
+
+ crif = mlxsw_sp_crif_register(router, dev);
+ return PTR_ERR_OR_ZERO(crif);
+}
+
+static void mlxsw_sp_netdevice_unregister(struct mlxsw_sp_router *router,
+ struct net_device *dev)
+{
+ struct mlxsw_sp_crif *crif;
+
+ if (!mlxsw_sp_router_netdevice_interesting(router->mlxsw_sp, dev))
+ return;
+
+ /* netdev_run_todo(), by way of netdev_wait_allrefs_any(), rebroadcasts
+ * the NETDEV_UNREGISTER message, so we can get here twice. If that's
+ * what happened, the netdevice state is NETREG_UNREGISTERED. In that
+ * case, we expect to have collected the CRIF already, and warn if it
+ * still exists. Otherwise we expect the CRIF to exist.
+ */
+ crif = mlxsw_sp_crif_lookup(router, dev);
+ if (dev->reg_state == NETREG_UNREGISTERED) {
+ if (!WARN_ON(crif))
+ return;
+ }
+ if (WARN_ON(!crif))
+ return;
+
+ mlxsw_sp_crif_unregister(router, crif);
+}
+
static bool mlxsw_sp_is_offload_xstats_event(unsigned long event)
{
switch (event) {
@@ -9254,6 +9523,46 @@ mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event,
return err;
}
+static int
+mlxsw_sp_port_vid_router_join_existing(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid, struct net_device *dev,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+
+ mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port,
+ vid);
+ if (WARN_ON(!mlxsw_sp_port_vlan))
+ return -EINVAL;
+
+ return mlxsw_sp_port_vlan_router_join_existing(mlxsw_sp_port_vlan,
+ dev, extack);
+}
+
+static int __mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *lag_dev,
+ struct netlink_ext_ack *extack)
+{
+ u16 default_vid = MLXSW_SP_DEFAULT_VID;
+
+ return mlxsw_sp_port_vid_router_join_existing(mlxsw_sp_port,
+ default_vid, lag_dev,
+ extack);
+}
+
+int mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *lag_dev,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ mutex_lock(&mlxsw_sp_port->mlxsw_sp->router->lock);
+ err = __mlxsw_sp_router_port_join_lag(mlxsw_sp_port, lag_dev, extack);
+ mutex_unlock(&mlxsw_sp_port->mlxsw_sp->router->lock);
+
+ return err;
+}
+
static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
@@ -9267,6 +9576,15 @@ static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb,
mutex_lock(&mlxsw_sp->router->lock);
+ if (event == NETDEV_REGISTER) {
+ err = mlxsw_sp_netdevice_register(router, dev);
+ if (err)
+ /* No need to roll this back, UNREGISTER will collect it
+ * anyhow.
+ */
+ goto out;
+ }
+
if (mlxsw_sp_is_offload_xstats_event(event))
err = mlxsw_sp_netdevice_offload_xstats_cmd(mlxsw_sp, dev,
event, ptr);
@@ -9281,6 +9599,10 @@ static int mlxsw_sp_router_netdevice_event(struct notifier_block *nb,
else if (mlxsw_sp_is_vrf_event(event, ptr))
err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
+ if (event == NETDEV_UNREGISTER)
+ mlxsw_sp_netdevice_unregister(router, dev);
+
+out:
mutex_unlock(&mlxsw_sp->router->lock);
return notifier_from_errno(err);
@@ -9300,15 +9622,16 @@ static int __mlxsw_sp_rif_macvlan_flush(struct net_device *dev,
static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif)
{
+ struct net_device *dev = mlxsw_sp_rif_dev(rif);
struct netdev_nested_priv priv = {
.data = (void *)rif,
};
- if (!netif_is_macvlan_port(rif->dev))
+ if (!netif_is_macvlan_port(dev))
return 0;
- netdev_warn(rif->dev, "Router interface is deleted. Upper macvlans will not work\n");
- return netdev_walk_all_upper_dev_rcu(rif->dev,
+ netdev_warn(dev, "Router interface is deleted. Upper macvlans will not work\n");
+ return netdev_walk_all_upper_dev_rcu(dev,
__mlxsw_sp_rif_macvlan_flush, &priv);
}
@@ -9329,6 +9652,7 @@ static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif,
static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
{
+ struct net_device *dev = mlxsw_sp_rif_dev(rif);
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_rif_subport *rif_subport;
char ritr_pl[MLXSW_REG_RITR_LEN];
@@ -9336,8 +9660,8 @@ static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
rif_subport = mlxsw_sp_rif_subport_rif(rif);
mlxsw_reg_ritr_pack(ritr_pl, enable, MLXSW_REG_RITR_SP_IF,
- rif->rif_index, rif->vr_id, rif->dev->mtu);
- mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
+ rif->rif_index, rif->vr_id, dev->mtu);
+ mlxsw_reg_ritr_mac_pack(ritr_pl, dev->dev_addr);
mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
efid = mlxsw_sp_fid_index(rif->fid);
mlxsw_reg_ritr_sp_if_pack(ritr_pl, rif_subport->lag,
@@ -9350,6 +9674,7 @@ static int mlxsw_sp_rif_subport_op(struct mlxsw_sp_rif *rif, bool enable)
static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
struct netlink_ext_ack *extack)
{
+ struct net_device *dev = mlxsw_sp_rif_dev(rif);
u8 mac_profile;
int err;
@@ -9363,7 +9688,7 @@ static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
if (err)
goto err_rif_subport_op;
- err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), true);
if (err)
goto err_rif_fdb_op;
@@ -9375,7 +9700,7 @@ static int mlxsw_sp_rif_subport_configure(struct mlxsw_sp_rif *rif,
return 0;
err_fid_rif_set:
- mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
mlxsw_sp_rif_subport_op(rif, false);
@@ -9386,10 +9711,11 @@ err_rif_subport_op:
static void mlxsw_sp_rif_subport_deconfigure(struct mlxsw_sp_rif *rif)
{
+ struct net_device *dev = mlxsw_sp_rif_dev(rif);
struct mlxsw_sp_fid *fid = rif->fid;
mlxsw_sp_fid_rif_unset(fid);
- mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
mlxsw_sp_rif_macvlan_flush(rif);
mlxsw_sp_rif_subport_op(rif, false);
@@ -9415,12 +9741,13 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_subport_ops = {
static int mlxsw_sp_rif_fid_op(struct mlxsw_sp_rif *rif, u16 fid, bool enable)
{
enum mlxsw_reg_ritr_if_type type = MLXSW_REG_RITR_FID_IF;
+ struct net_device *dev = mlxsw_sp_rif_dev(rif);
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
char ritr_pl[MLXSW_REG_RITR_LEN];
mlxsw_reg_ritr_pack(ritr_pl, enable, type, rif->rif_index, rif->vr_id,
- rif->dev->mtu);
- mlxsw_reg_ritr_mac_pack(ritr_pl, rif->dev->dev_addr);
+ dev->mtu);
+ mlxsw_reg_ritr_mac_pack(ritr_pl, dev->dev_addr);
mlxsw_reg_ritr_if_mac_profile_id_set(ritr_pl, rif->mac_profile_id);
mlxsw_reg_ritr_fid_if_fid_set(ritr_pl, fid);
@@ -9435,6 +9762,7 @@ u16 mlxsw_sp_router_port(const struct mlxsw_sp *mlxsw_sp)
static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
struct netlink_ext_ack *extack)
{
+ struct net_device *dev = mlxsw_sp_rif_dev(rif);
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
u16 fid_index = mlxsw_sp_fid_index(rif->fid);
u8 mac_profile;
@@ -9460,7 +9788,7 @@ static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
if (err)
goto err_fid_bc_flood_set;
- err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), true);
if (err)
goto err_rif_fdb_op;
@@ -9472,7 +9800,7 @@ static int mlxsw_sp_rif_fid_configure(struct mlxsw_sp_rif *rif,
return 0;
err_fid_rif_set:
- mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
@@ -9489,12 +9817,13 @@ err_rif_fid_op:
static void mlxsw_sp_rif_fid_deconfigure(struct mlxsw_sp_rif *rif)
{
+ struct net_device *dev = mlxsw_sp_rif_dev(rif);
u16 fid_index = mlxsw_sp_fid_index(rif->fid);
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_fid *fid = rif->fid;
mlxsw_sp_fid_rif_unset(fid);
- mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
mlxsw_sp_fid_index(fid), false);
mlxsw_sp_rif_macvlan_flush(rif);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
@@ -9509,7 +9838,9 @@ static struct mlxsw_sp_fid *
mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif,
struct netlink_ext_ack *extack)
{
- return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex);
+ int rif_ifindex = mlxsw_sp_rif_dev_ifindex(rif);
+
+ return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif_ifindex);
}
static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
@@ -9517,7 +9848,7 @@ static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
struct switchdev_notifier_fdb_info info = {};
struct net_device *dev;
- dev = br_fdb_find_port(rif->dev, mac, 0);
+ dev = br_fdb_find_port(mlxsw_sp_rif_dev(rif), mac, 0);
if (!dev)
return;
@@ -9540,17 +9871,18 @@ static struct mlxsw_sp_fid *
mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
struct netlink_ext_ack *extack)
{
+ struct net_device *dev = mlxsw_sp_rif_dev(rif);
struct net_device *br_dev;
u16 vid;
int err;
- if (is_vlan_dev(rif->dev)) {
- vid = vlan_dev_vlan_id(rif->dev);
- br_dev = vlan_dev_real_dev(rif->dev);
+ if (is_vlan_dev(dev)) {
+ vid = vlan_dev_vlan_id(dev);
+ br_dev = vlan_dev_real_dev(dev);
if (WARN_ON(!netif_is_bridge_master(br_dev)))
return ERR_PTR(-EINVAL);
} else {
- err = br_vlan_get_pvid(rif->dev, &vid);
+ err = br_vlan_get_pvid(dev, &vid);
if (err < 0 || !vid) {
NL_SET_ERR_MSG_MOD(extack, "Couldn't determine bridge PVID");
return ERR_PTR(-EINVAL);
@@ -9562,12 +9894,13 @@ mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif,
static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
{
+ struct net_device *rif_dev = mlxsw_sp_rif_dev(rif);
struct switchdev_notifier_fdb_info info = {};
u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
struct net_device *br_dev;
struct net_device *dev;
- br_dev = is_vlan_dev(rif->dev) ? vlan_dev_real_dev(rif->dev) : rif->dev;
+ br_dev = is_vlan_dev(rif_dev) ? vlan_dev_real_dev(rif_dev) : rif_dev;
dev = br_fdb_find_port(br_dev, mac, vid);
if (!dev)
return;
@@ -9581,11 +9914,12 @@ static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac)
static int mlxsw_sp_rif_vlan_op(struct mlxsw_sp_rif *rif, u16 vid, u16 efid,
bool enable)
{
+ struct net_device *dev = mlxsw_sp_rif_dev(rif);
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
char ritr_pl[MLXSW_REG_RITR_LEN];
mlxsw_reg_ritr_vlan_if_pack(ritr_pl, enable, rif->rif_index, rif->vr_id,
- rif->dev->mtu, rif->dev->dev_addr,
+ dev->mtu, dev->dev_addr,
rif->mac_profile_id, vid, efid);
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl);
@@ -9594,6 +9928,7 @@ static int mlxsw_sp_rif_vlan_op(struct mlxsw_sp_rif *rif, u16 vid, u16 efid,
static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif, u16 efid,
struct netlink_ext_ack *extack)
{
+ struct net_device *dev = mlxsw_sp_rif_dev(rif);
u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
u8 mac_profile;
@@ -9619,7 +9954,7 @@ static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif, u16 efid,
if (err)
goto err_fid_bc_flood_set;
- err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ err = mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), true);
if (err)
goto err_rif_fdb_op;
@@ -9631,7 +9966,7 @@ static int mlxsw_sp_rif_vlan_configure(struct mlxsw_sp_rif *rif, u16 efid,
return 0;
err_fid_rif_set:
- mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), false);
err_rif_fdb_op:
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
@@ -9648,11 +9983,12 @@ err_rif_vlan_fid_op:
static void mlxsw_sp_rif_vlan_deconfigure(struct mlxsw_sp_rif *rif)
{
+ struct net_device *dev = mlxsw_sp_rif_dev(rif);
u16 vid = mlxsw_sp_fid_8021q_vid(rif->fid);
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
mlxsw_sp_fid_rif_unset(rif->fid);
- mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, rif->dev->dev_addr,
+ mlxsw_sp_rif_fdb_op(rif->mlxsw_sp, dev->dev_addr,
mlxsw_sp_fid_index(rif->fid), false);
mlxsw_sp_rif_macvlan_flush(rif);
mlxsw_sp_fid_flood_set(rif->fid, MLXSW_SP_FLOOD_TYPE_BC,
@@ -9719,12 +10055,13 @@ mlxsw_sp1_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
- u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
+ struct net_device *dev = mlxsw_sp_rif_dev(rif);
+ u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_vr *ul_vr;
int err;
- ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, NULL);
+ ul_vr = mlxsw_sp_vr_get(mlxsw_sp, ul_tb_id, extack);
if (IS_ERR(ul_vr))
return PTR_ERR(ul_vr);
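Threading extack through instead of NULL means a failed virtual-router allocation can now be explained to user space rather than surfacing as a bare errno. A sketch of the reporting end, assuming mlxsw_sp_vr_get() fails roughly like this when the VR table is exhausted:

        vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
        if (!vr) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Exceeded number of supported virtual routers");
                return ERR_PTR(-EBUSY);
        }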
@@ -9786,6 +10123,7 @@ mlxsw_sp_rif_ipip_lb_ul_rif_op(struct mlxsw_sp_rif *ul_rif, bool enable)
static struct mlxsw_sp_rif *
mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
+ struct mlxsw_sp_crif *ul_crif,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp_rif *ul_rif;
@@ -9799,7 +10137,8 @@ mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
return ERR_PTR(err);
}
- ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id, NULL);
+ ul_rif = mlxsw_sp_rif_alloc(sizeof(*ul_rif), rif_index, vr->id,
+ ul_crif);
if (!ul_rif) {
err = -ENOMEM;
goto err_rif_alloc;
@@ -9817,7 +10156,7 @@ mlxsw_sp_ul_rif_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_vr *vr,
ul_rif_op_err:
mlxsw_sp->router->rifs[rif_index] = NULL;
- kfree(ul_rif);
+ mlxsw_sp_rif_free(ul_rif);
err_rif_alloc:
mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
return ERR_PTR(err);
@@ -9832,12 +10171,13 @@ static void mlxsw_sp_ul_rif_destroy(struct mlxsw_sp_rif *ul_rif)
atomic_sub(rif_entries, &mlxsw_sp->router->rifs_count);
mlxsw_sp_rif_ipip_lb_ul_rif_op(ul_rif, false);
mlxsw_sp->router->rifs[ul_rif->rif_index] = NULL;
- kfree(ul_rif);
+ mlxsw_sp_rif_free(ul_rif);
mlxsw_sp_rif_index_free(mlxsw_sp, rif_index, rif_entries);
}
static struct mlxsw_sp_rif *
mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
+ struct mlxsw_sp_crif *ul_crif,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp_vr *vr;
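The kfree() calls above become mlxsw_sp_rif_free() because tearing down a RIF now also has to detach it from its CRIF. A plausible shape for the helper, with the crif/rif back-pointers assumed from this series:

static void mlxsw_sp_rif_free(struct mlxsw_sp_rif *rif)
{
        WARN_ON(!list_empty(&rif->neigh_list));

        /* Clear the CRIF's back-pointer so the CRIF can outlive this
         * RIF and later be bound to a new one.
         */
        if (rif->crif)
                rif->crif->rif = NULL;
        kfree(rif);
}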
@@ -9850,7 +10190,7 @@ mlxsw_sp_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 tb_id,
if (refcount_inc_not_zero(&vr->ul_rif_refcnt))
return vr->ul_rif;
- vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, extack);
+ vr->ul_rif = mlxsw_sp_ul_rif_create(mlxsw_sp, vr, ul_crif, extack);
if (IS_ERR(vr->ul_rif)) {
err = PTR_ERR(vr->ul_rif);
goto err_ul_rif_create;
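The lookup above is the usual refcounted get-or-create: refcount_inc_not_zero() takes a reference only while the count is non-zero, so the caller either reuses the live UL RIF or creates one and holds the first reference. The tail of the function presumably completes the pattern along these lines (a sketch; the error label is assumed):

        refcount_set(&vr->ul_rif_refcnt, 1);    /* creator holds the first ref */
        return vr->ul_rif;

err_ul_rif_create:
        mlxsw_sp_vr_put(mlxsw_sp, vr);          /* drop the VR reference taken above */
        return ERR_PTR(err);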
@@ -9888,7 +10228,7 @@ int mlxsw_sp_router_ul_rif_get(struct mlxsw_sp *mlxsw_sp, u32 ul_tb_id,
int err = 0;
mutex_lock(&mlxsw_sp->router->lock);
- ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
+ ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL, NULL);
if (IS_ERR(ul_rif)) {
err = PTR_ERR(ul_rif);
goto out;
@@ -9918,12 +10258,13 @@ mlxsw_sp2_rif_ipip_lb_configure(struct mlxsw_sp_rif *rif,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp_rif_ipip_lb *lb_rif = mlxsw_sp_rif_ipip_lb_rif(rif);
- u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(rif->dev);
+ struct net_device *dev = mlxsw_sp_rif_dev(rif);
+ u32 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(dev);
struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp;
struct mlxsw_sp_rif *ul_rif;
int err;
- ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL);
+ ul_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, ul_tb_id, NULL, extack);
if (IS_ERR(ul_rif))
return PTR_ERR(ul_rif);
@@ -10041,11 +10382,12 @@ err_rifs_table_init:
static void mlxsw_sp_rifs_fini(struct mlxsw_sp *mlxsw_sp)
{
+ int max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS);
struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
int i;
WARN_ON_ONCE(atomic_read(&mlxsw_sp->router->rifs_count));
- for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++)
+ for (i = 0; i < max_rifs; i++)
WARN_ON_ONCE(mlxsw_sp->router->rifs[i]);
devl_resource_occ_get_unregister(devlink, MLXSW_SP_RESOURCE_RIFS);
@@ -10444,28 +10786,41 @@ static void __mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rgcr), rgcr_pl);
}
-static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp)
+static int mlxsw_sp_lb_rif_init(struct mlxsw_sp *mlxsw_sp,
+ struct netlink_ext_ack *extack)
{
- u16 lb_rif_index;
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
+ struct mlxsw_sp_rif *lb_rif;
int err;
+ router->lb_crif = mlxsw_sp_crif_alloc(NULL);
+ if (!router->lb_crif)
+ return -ENOMEM;
+
/* Create a generic loopback RIF associated with the main table
* (default VRF). Any table can be used, but the main table exists
- * anyway, so we do not waste resources.
+ * anyway, so we do not waste resources. Loopback RIFs are usually
+ * created with a NULL CRIF, but this RIF is used as a fallback RIF
+ * for blackhole nexthops, and nexthops expect to have a valid CRIF.
*/
- err = mlxsw_sp_router_ul_rif_get(mlxsw_sp, RT_TABLE_MAIN,
- &lb_rif_index);
- if (err)
- return err;
-
- mlxsw_sp->router->lb_rif_index = lb_rif_index;
+ lb_rif = mlxsw_sp_ul_rif_get(mlxsw_sp, RT_TABLE_MAIN, router->lb_crif,
+ extack);
+ if (IS_ERR(lb_rif)) {
+ err = PTR_ERR(lb_rif);
+ goto err_ul_rif_get;
+ }
return 0;
+
+err_ul_rif_get:
+ mlxsw_sp_crif_free(router->lb_crif);
+ return err;
}
static void mlxsw_sp_lb_rif_fini(struct mlxsw_sp *mlxsw_sp)
{
- mlxsw_sp_router_ul_rif_put(mlxsw_sp, mlxsw_sp->router->lb_rif_index);
+ mlxsw_sp_ul_rif_put(mlxsw_sp->router->lb_crif->rif);
+ mlxsw_sp_crif_free(mlxsw_sp->router->lb_crif);
}
static int mlxsw_sp1_router_init(struct mlxsw_sp *mlxsw_sp)
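The loopback CRIF itself is a small allocation keyed by a netdev, NULL in this case. A sketch of the alloc/free pair assumed by mlxsw_sp_lb_rif_init() above (struct layout and names guessed from this series):

static struct mlxsw_sp_crif *mlxsw_sp_crif_alloc(struct net_device *dev)
{
        struct mlxsw_sp_crif *crif;

        crif = kzalloc(sizeof(*crif), GFP_KERNEL);
        if (!crif)
                return NULL;    /* hence the NULL check and -ENOMEM above */

        crif->key.dev = dev;
        INIT_LIST_HEAD(&crif->nexthop_list);
        return crif;
}

static void mlxsw_sp_crif_free(struct mlxsw_sp_crif *crif)
{
        WARN_ON(crif->rif);     /* any bound RIF must detach first */
        kfree(crif);
}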
@@ -10525,14 +10881,19 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_router_init;
- err = mlxsw_sp_rifs_init(mlxsw_sp);
- if (err)
- goto err_rifs_init;
-
err = mlxsw_sp->router_ops->ipips_init(mlxsw_sp);
if (err)
goto err_ipips_init;
+ err = rhashtable_init(&mlxsw_sp->router->crif_ht,
+ &mlxsw_sp_crif_ht_params);
+ if (err)
+ goto err_crif_ht_init;
+
+ err = mlxsw_sp_rifs_init(mlxsw_sp);
+ if (err)
+ goto err_rifs_init;
+
err = rhashtable_init(&mlxsw_sp->router->nexthop_ht,
&mlxsw_sp_nexthop_ht_params);
if (err)
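crif_ht hashes CRIFs by their netdev so that netdevice events can be mapped back to a CRIF; it is initialized before the RIF code because RIFs now hang off CRIFs. A sketch of the assumed key and table parameters (names are consistent guesses, not verbatim):

struct mlxsw_sp_crif_key {
        struct net_device *dev;
};

struct mlxsw_sp_crif {
        struct mlxsw_sp_crif_key key;   /* hash key: the netdev */
        struct rhash_head ht_node;      /* membership in crif_ht */
        struct list_head nexthop_list;  /* nexthops resolved via this CRIF */
        struct mlxsw_sp_rif *rif;       /* NULL while no RIF is bound */
};

static const struct rhashtable_params mlxsw_sp_crif_ht_params = {
        .key_offset = offsetof(struct mlxsw_sp_crif, key),
        .key_len = sizeof(struct mlxsw_sp_crif_key),
        .head_offset = offsetof(struct mlxsw_sp_crif, ht_node),
};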
@@ -10556,7 +10917,7 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_vrs_init;
- err = mlxsw_sp_lb_rif_init(mlxsw_sp);
+ err = mlxsw_sp_lb_rif_init(mlxsw_sp, extack);
if (err)
goto err_lb_rif_init;
@@ -10582,6 +10943,17 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp,
if (err)
goto err_register_inet6addr_notifier;
+ router->inetaddr_valid_nb.notifier_call = mlxsw_sp_inetaddr_valid_event;
+ err = register_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
+ if (err)
+ goto err_register_inetaddr_valid_notifier;
+
+ router->inet6addr_valid_nb.notifier_call = mlxsw_sp_inet6addr_valid_event;
+ err = register_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
+ if (err)
+ goto err_register_inet6addr_valid_notifier;
+
mlxsw_sp->router->netevent_nb.notifier_call =
mlxsw_sp_router_netevent_event;
err = register_netevent_notifier(&mlxsw_sp->router->netevent_nb);
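Unlike the plain inetaddr notifiers, the validator notifiers run before an address is installed, so the driver can veto the change and report why. The rough shape of the IPv4 callback registered above; the body here is illustrative only, the driver's real checks live elsewhere in this file:

static int mlxsw_sp_inetaddr_valid_event(struct notifier_block *nb,
                                         unsigned long event, void *ptr)
{
        struct in_validator_info *ivi = ptr;    /* <linux/inetdevice.h> */
        struct netlink_ext_ack *extack = ivi->extack;

        if (event != NETDEV_UP)
                return NOTIFY_DONE;

        if (false /* illustrative: some unsupported configuration */) {
                NL_SET_ERR_MSG_MOD(extack, "Unsupported configuration");
                return notifier_from_errno(-EOPNOTSUPP);
        }

        return NOTIFY_DONE;
}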
@@ -10621,6 +10993,10 @@ err_register_fib_notifier:
err_register_nexthop_notifier:
unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
err_register_netevent_notifier:
+ unregister_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
+err_register_inet6addr_valid_notifier:
+ unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
+err_register_inetaddr_valid_notifier:
unregister_inet6addr_notifier(&router->inet6addr_nb);
err_register_inet6addr_notifier:
unregister_inetaddr_notifier(&router->inetaddr_nb);
@@ -10643,10 +11019,12 @@ err_lpm_init:
err_nexthop_group_ht_init:
rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
err_nexthop_ht_init:
- mlxsw_sp_ipips_fini(mlxsw_sp);
-err_ipips_init:
mlxsw_sp_rifs_fini(mlxsw_sp);
err_rifs_init:
+ rhashtable_destroy(&mlxsw_sp->router->crif_ht);
+err_crif_ht_init:
+ mlxsw_sp_ipips_fini(mlxsw_sp);
+err_ipips_init:
__mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
@@ -10658,15 +11036,18 @@ err_router_ops_init:
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
{
+ struct mlxsw_sp_router *router = mlxsw_sp->router;
+
unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
- &mlxsw_sp->router->netdevice_nb);
- unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp),
- &mlxsw_sp->router->fib_nb);
+ &router->netdevice_nb);
+ unregister_fib_notifier(mlxsw_sp_net(mlxsw_sp), &router->fib_nb);
unregister_nexthop_notifier(mlxsw_sp_net(mlxsw_sp),
- &mlxsw_sp->router->nexthop_nb);
- unregister_netevent_notifier(&mlxsw_sp->router->netevent_nb);
- unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb);
- unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb);
+ &router->nexthop_nb);
+ unregister_netevent_notifier(&router->netevent_nb);
+ unregister_inet6addr_validator_notifier(&router->inet6addr_valid_nb);
+ unregister_inetaddr_validator_notifier(&router->inetaddr_valid_nb);
+ unregister_inet6addr_notifier(&router->inet6addr_nb);
+ unregister_inetaddr_notifier(&router->inetaddr_nb);
mlxsw_core_flush_owq();
mlxsw_sp_mp_hash_fini(mlxsw_sp);
mlxsw_sp_neigh_fini(mlxsw_sp);
@@ -10674,12 +11055,13 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_vrs_fini(mlxsw_sp);
mlxsw_sp_mr_fini(mlxsw_sp);
mlxsw_sp_lpm_fini(mlxsw_sp);
- rhashtable_destroy(&mlxsw_sp->router->nexthop_group_ht);
- rhashtable_destroy(&mlxsw_sp->router->nexthop_ht);
- mlxsw_sp_ipips_fini(mlxsw_sp);
+ rhashtable_destroy(&router->nexthop_group_ht);
+ rhashtable_destroy(&router->nexthop_ht);
mlxsw_sp_rifs_fini(mlxsw_sp);
+ rhashtable_destroy(&router->crif_ht);
+ mlxsw_sp_ipips_fini(mlxsw_sp);
__mlxsw_sp_router_fini(mlxsw_sp);
- cancel_delayed_work_sync(&mlxsw_sp->router->nh_grp_activity_dw);
- mutex_destroy(&mlxsw_sp->router->lock);
- kfree(mlxsw_sp->router);
+ cancel_delayed_work_sync(&router->nh_grp_activity_dw);
+ mutex_destroy(&router->lock);
+ kfree(router);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
index 37d6e4c80e6a..9a2669a08480 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h
@@ -20,6 +20,7 @@ struct mlxsw_sp_router_nve_decap {
struct mlxsw_sp_router {
struct mlxsw_sp *mlxsw_sp;
+ struct rhashtable crif_ht;
struct gen_pool *rifs_table;
struct mlxsw_sp_rif **rifs;
struct idr rif_mac_profiles_idr;
@@ -52,12 +53,14 @@ struct mlxsw_sp_router {
struct notifier_block inetaddr_nb;
struct notifier_block inet6addr_nb;
struct notifier_block netdevice_nb;
+ struct notifier_block inetaddr_valid_nb;
+ struct notifier_block inet6addr_valid_nb;
const struct mlxsw_sp_rif_ops **rif_ops_arr;
const struct mlxsw_sp_ipip_ops **ipip_ops_arr;
struct mlxsw_sp_router_nve_decap nve_decap_config;
struct mutex lock; /* Protects shared router resources */
struct mlxsw_sp_fib_entry_op_ctx *ll_op_ctx;
- u16 lb_rif_index;
+ struct mlxsw_sp_crif *lb_crif;
const struct mlxsw_sp_adj_grp_size_range *adj_grp_size_ranges;
size_t adj_grp_size_ranges_count;
struct delayed_work nh_grp_activity_dw;
@@ -91,7 +94,9 @@ u16 mlxsw_sp_ipip_lb_ul_vr_id(const struct mlxsw_sp_rif_ipip_lb *rif);
u16 mlxsw_sp_ipip_lb_ul_rif_id(const struct mlxsw_sp_rif_ipip_lb *lb_rif);
u32 mlxsw_sp_ipip_dev_ul_tb_id(const struct net_device *ol_dev);
int mlxsw_sp_rif_dev_ifindex(const struct mlxsw_sp_rif *rif);
-const struct net_device *mlxsw_sp_rif_dev(const struct mlxsw_sp_rif *rif);
+bool mlxsw_sp_rif_has_dev(const struct mlxsw_sp_rif *rif);
+bool mlxsw_sp_rif_dev_is(const struct mlxsw_sp_rif *rif,
+ const struct net_device *dev);
int mlxsw_sp_rif_counter_value_get(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif,
enum mlxsw_sp_rif_counter_dir dir,
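Since a RIF can now exist without a netdev, callers cannot compare rif->dev themselves; the two new queries cover the common cases. Plausible one-line implementations, assuming the mlxsw_sp_rif_dev() accessor sketched earlier:

bool mlxsw_sp_rif_has_dev(const struct mlxsw_sp_rif *rif)
{
        return !!mlxsw_sp_rif_dev(rif);
}

bool mlxsw_sp_rif_dev_is(const struct mlxsw_sp_rif *rif,
                         const struct net_device *dev)
{
        return mlxsw_sp_rif_dev(rif) == dev;
}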
@@ -166,5 +171,8 @@ int mlxsw_sp_ipip_ecn_encap_init(struct mlxsw_sp *mlxsw_sp);
int mlxsw_sp_ipip_ecn_decap_init(struct mlxsw_sp *mlxsw_sp);
struct net_device *
mlxsw_sp_ipip_netdev_ul_dev_get(const struct net_device *ol_dev);
+int mlxsw_sp_router_port_join_lag(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct net_device *lag_dev,
+ struct netlink_ext_ack *extack);
#endif /* _MLXSW_ROUTER_H_*/