From 56ea7ed103b46970e171eb1c95916f393d64eeff Mon Sep 17 00:00:00 2001 From: Vinicius Costa Gomes Date: Thu, 13 May 2021 17:31:03 -0700 Subject: igc: Fix use-after-free error during reset Cleans the next descriptor to watch (next_to_watch) when cleaning the TX ring. Failure to do so can cause invalid memory accesses. If igc_poll() runs while the controller is being reset this can lead to the driver try to free a skb that was already freed. Log message: [ 101.525242] refcount_t: underflow; use-after-free. [ 101.525251] WARNING: CPU: 1 PID: 646 at lib/refcount.c:28 refcount_warn_saturate+0xab/0xf0 [ 101.525259] Modules linked in: sch_etf(E) sch_mqprio(E) rfkill(E) intel_rapl_msr(E) intel_rapl_common(E) x86_pkg_temp_thermal(E) intel_powerclamp(E) coretemp(E) binfmt_misc(E) kvm_intel(E) kvm(E) irqbypass(E) crc32_pclmul(E) ghash_clmulni_intel(E) aesni_intel(E) mei_wdt(E) libaes(E) crypto_simd(E) cryptd(E) glue_helper(E) snd_hda_codec_hdmi(E) rapl(E) intel_cstate(E) snd_hda_intel(E) snd_intel_dspcfg(E) sg(E) soundwire_intel(E) intel_uncore(E) at24(E) soundwire_generic_allocation(E) iTCO_wdt(E) soundwire_cadence(E) intel_pmc_bxt(E) serio_raw(E) snd_hda_codec(E) iTCO_vendor_support(E) watchdog(E) snd_hda_core(E) snd_hwdep(E) snd_soc_core(E) snd_compress(E) snd_pcsp(E) soundwire_bus(E) snd_pcm(E) evdev(E) snd_timer(E) mei_me(E) snd(E) soundcore(E) mei(E) configfs(E) ip_tables(E) x_tables(E) autofs4(E) ext4(E) crc32c_generic(E) crc16(E) mbcache(E) jbd2(E) sd_mod(E) t10_pi(E) crc_t10dif(E) crct10dif_generic(E) i915(E) ahci(E) libahci(E) ehci_pci(E) igb(E) xhci_pci(E) ehci_hcd(E) [ 101.525303] drm_kms_helper(E) dca(E) xhci_hcd(E) libata(E) crct10dif_pclmul(E) cec(E) crct10dif_common(E) tsn(E) igc(E) e1000e(E) ptp(E) i2c_i801(E) crc32c_intel(E) psmouse(E) i2c_algo_bit(E) i2c_smbus(E) scsi_mod(E) lpc_ich(E) pps_core(E) usbcore(E) drm(E) button(E) video(E) [ 101.525318] CPU: 1 PID: 646 Comm: irq/37-enp7s0-T Tainted: G E 5.10.30-rt37-tsn1-rt-ipipe #ipipe [ 101.525320] Hardware name: SIEMENS AG SIMATIC IPC427D/A5E31233588, BIOS V17.02.09 03/31/2017 [ 101.525322] RIP: 0010:refcount_warn_saturate+0xab/0xf0 [ 101.525325] Code: 05 31 48 44 01 01 e8 f0 c6 42 00 0f 0b c3 80 3d 1f 48 44 01 00 75 90 48 c7 c7 78 a8 f3 a6 c6 05 0f 48 44 01 01 e8 d1 c6 42 00 <0f> 0b c3 80 3d fe 47 44 01 00 0f 85 6d ff ff ff 48 c7 c7 d0 a8 f3 [ 101.525327] RSP: 0018:ffffbdedc0917cb8 EFLAGS: 00010286 [ 101.525329] RAX: 0000000000000000 RBX: ffff98fd6becbf40 RCX: 0000000000000001 [ 101.525330] RDX: 0000000000000001 RSI: ffffffffa6f2700c RDI: 00000000ffffffff [ 101.525332] RBP: ffff98fd6becc14c R08: ffffffffa7463d00 R09: ffffbdedc0917c50 [ 101.525333] R10: ffffffffa74c3578 R11: 0000000000000034 R12: 00000000ffffff00 [ 101.525335] R13: ffff98fd6b0b1000 R14: 0000000000000039 R15: ffff98fd6be35c40 [ 101.525337] FS: 0000000000000000(0000) GS:ffff98fd6e240000(0000) knlGS:0000000000000000 [ 101.525339] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 101.525341] CR2: 00007f34135a3a70 CR3: 0000000150210003 CR4: 00000000001706e0 [ 101.525343] Call Trace: [ 101.525346] sock_wfree+0x9c/0xa0 [ 101.525353] unix_destruct_scm+0x7b/0xa0 [ 101.525358] skb_release_head_state+0x40/0x90 [ 101.525362] skb_release_all+0xe/0x30 [ 101.525364] napi_consume_skb+0x57/0x160 [ 101.525367] igc_poll+0xb7/0xc80 [igc] [ 101.525376] ? sched_clock+0x5/0x10 [ 101.525381] ? sched_clock_cpu+0xe/0x100 [ 101.525385] net_rx_action+0x14c/0x410 [ 101.525388] __do_softirq+0xe9/0x2f4 [ 101.525391] __local_bh_enable_ip+0xe3/0x110 [ 101.525395] ? 
irq_finalize_oneshot.part.47+0xe0/0xe0 [ 101.525398] irq_forced_thread_fn+0x6a/0x80 [ 101.525401] irq_thread+0xe8/0x180 [ 101.525403] ? wake_threads_waitq+0x30/0x30 [ 101.525406] ? irq_thread_check_affinity+0xd0/0xd0 [ 101.525408] kthread+0x183/0x1a0 [ 101.525412] ? kthread_park+0x80/0x80 [ 101.525415] ret_from_fork+0x22/0x30 Fixes: 13b5b7fd6a4a ("igc: Add support for Tx/Rx rings") Reported-by: Erez Geva Signed-off-by: Vinicius Costa Gomes Tested-by: Dvora Fuxbrumer Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/igc/igc_main.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 95323095094d..1d00a63eb935 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -232,6 +232,8 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring) igc_unmap_tx_buffer(tx_ring->dev, tx_buffer); } + tx_buffer->next_to_watch = NULL; + /* move us one more past the eop_desc for start of next pkt */ tx_buffer++; i++; -- cgit v1.2.3 From 7b292608db23ccbbfbfa50cdb155d01725d7a52e Mon Sep 17 00:00:00 2001 From: Vinicius Costa Gomes Date: Thu, 13 May 2021 17:31:04 -0700 Subject: igb: Fix use-after-free error during reset Cleans the next descriptor to watch (next_to_watch) when cleaning the TX ring. Failure to do so can cause invalid memory accesses. If igb_poll() runs while the controller is reset this can lead to the driver try to free a skb that was already freed. (The crash is harder to reproduce with the igb driver, but the same potential problem exists as the code is identical to igc) Fixes: 7cc6fd4c60f2 ("igb: Don't bother clearing Tx buffer_info in igb_clean_tx_ring") Signed-off-by: Vinicius Costa Gomes Reported-by: Erez Geva Tested-by: Tony Brelinski Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/igb/igb_main.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 7e6435dc7e80..a61e2e5e95c0 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -4835,6 +4835,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) DMA_TO_DEVICE); } + tx_buffer->next_to_watch = NULL; + /* move us one more past the eop_desc for start of next pkt */ tx_buffer++; i++; -- cgit v1.2.3 From 05682a0a61b6cbecd97a0f37f743b2cbfd516977 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Fri, 21 May 2021 12:50:19 -0700 Subject: igc: change default return of igc_read_phy_reg() Static analysis reports this problem igc_main.c:4944:20: warning: The left operand of '&' is a garbage value if (!(phy_data & SR_1000T_REMOTE_RX_STATUS) && ~~~~~~~~ ^ phy_data is set by the call to igc_read_phy_reg() only if there is a read_reg() op, else it is unset and a 0 is returned. Change the return to -EOPNOTSUPP. 
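The effect of the changed return value is easiest to see from the caller's side. Below is a small standalone sketch, not the actual igc code: the struct, helper and bit value are hypothetical stand-ins, but they mirror the pattern the static-analysis warning points at. When the helper reports success without writing *data, the caller ends up testing an uninitialized value; an error return makes the caller skip it.

/* Standalone sketch -- hypothetical types, names and bit values. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define SR_1000T_REMOTE_RX_STATUS 0x1000	/* illustrative bit only */

struct phy_ops {
	int (*read_reg)(uint32_t offset, uint16_t *data);
};

static int read_phy_reg(const struct phy_ops *ops, uint32_t offset, uint16_t *data)
{
	if (ops && ops->read_reg)
		return ops->read_reg(offset, data);

	/* Returning 0 here would claim success without ever writing *data;
	 * an error code tells callers to leave the (uninitialized) value alone.
	 */
	return -EOPNOTSUPP;
}

int main(void)
{
	struct phy_ops ops = { .read_reg = NULL };	/* device without a read_reg op */
	uint16_t phy_data;				/* intentionally left uninitialized */

	if (!read_phy_reg(&ops, 0x0a, &phy_data) &&
	    !(phy_data & SR_1000T_REMOTE_RX_STATUS))
		printf("old behaviour: branch decided by garbage phy_data\n");
	else
		printf("new behaviour: read unsupported, phy_data never inspected\n");

	return 0;
}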
Fixes: 208983f099d9 ("igc: Add watchdog") Signed-off-by: Tom Rix Tested-by: Dvora Fuxbrumer Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/igc/igc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 9e0bbb2e55e3..5901ed9fb545 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -578,7 +578,7 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data) if (hw->phy.ops.read_reg) return hw->phy.ops.read_reg(hw, offset, data); - return 0; + return -EOPNOTSUPP; } void igc_reinit_locked(struct igc_adapter *); -- cgit v1.2.3 From dd2aefcd5e37989ae5f90afdae44bbbf3a2990da Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 12 Jun 2021 15:46:09 +0200 Subject: ixgbe: Fix an error handling path in 'ixgbe_probe()' If an error occurs after a 'pci_enable_pcie_error_reporting()' call, it must be undone by a corresponding 'pci_disable_pcie_error_reporting()' call, as already done in the remove function. Fixes: 6fabd715e6d8 ("ixgbe: Implement PCIe AER support") Signed-off-by: Christophe JAILLET Tested-by: Tony Brelinski Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index ffff69efd78a..913253f8ecb4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -11067,6 +11067,7 @@ err_ioremap: disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); free_netdev(netdev); err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); pci_release_mem_regions(pdev); err_pci_reg: err_dma: -- cgit v1.2.3 From c6bc9e5ce5d37cb3e6b552f41b92a193db1806ab Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 12 Jun 2021 22:00:05 +0200 Subject: igc: Fix an error handling path in 'igc_probe()' If an error occurs after a 'pci_enable_pcie_error_reporting()' call, it must be undone by a corresponding 'pci_disable_pcie_error_reporting()' call, as already done in the remove function. Fixes: c9a11c23ceb6 ("igc: Add netdev") Signed-off-by: Christophe JAILLET Tested-by: Dvora Fuxbrumer Acked-by: Sasha Neftin Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/igc/igc_main.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 1d00a63eb935..e29aadbc6744 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -6056,6 +6056,7 @@ err_sw_init: err_ioremap: free_netdev(netdev); err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); pci_release_mem_regions(pdev); err_pci_reg: err_dma: -- cgit v1.2.3 From fea03b1cebd653cd095f2e9a58cfe1c85661c363 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 12 Jun 2021 22:08:33 +0200 Subject: igb: Fix an error handling path in 'igb_probe()' If an error occurs after a 'pci_enable_pcie_error_reporting()' call, it must be undone by a corresponding 'pci_disable_pcie_error_reporting()' call, as already done in the remove function. 
Fixes: 40a914fa72ab ("igb: Add support for pci-e Advanced Error Reporting") Signed-off-by: Christophe JAILLET Tested-by: Tony Brelinski Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/igb/igb_main.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index a61e2e5e95c0..abc239b736fb 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -3615,6 +3615,7 @@ err_sw_init: err_ioremap: free_netdev(netdev); err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); pci_release_mem_regions(pdev); err_pci_reg: err_dma: -- cgit v1.2.3 From e85e14d68f517ef12a5fb8123fff65526b35b6cd Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Wed, 16 Jun 2021 07:00:36 +0200 Subject: fm10k: Fix an error handling path in 'fm10k_probe()' If an error occurs after a 'pci_enable_pcie_error_reporting()' call, it must be undone by a corresponding 'pci_disable_pcie_error_reporting()' call, as already done in the remove function. Fixes: 19ae1b3fb99c ("fm10k: Add support for PCI power management and error handling") Signed-off-by: Christophe JAILLET Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c index dbcae92bb18d..adfa2768f024 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c @@ -2227,6 +2227,7 @@ err_sw_init: err_ioremap: free_netdev(netdev); err_alloc_netdev: + pci_disable_pcie_error_reporting(pdev); pci_release_mem_regions(pdev); err_pci_reg: err_dma: -- cgit v1.2.3 From 4589075608420bc49fcef6e98279324bf2bb91ae Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Wed, 16 Jun 2021 07:05:53 +0200 Subject: e1000e: Fix an error handling path in 'e1000_probe()' If an error occurs after a 'pci_enable_pcie_error_reporting()' call, it must be undone by a corresponding 'pci_disable_pcie_error_reporting()' call, as already done in the remove function. Fixes: 111b9dc5c981 ("e1000e: add aer support") Signed-off-by: Christophe JAILLET Acked-by: Sasha Neftin Tested-by: Dvora Fuxbrumer Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/e1000e/netdev.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index d150dade06cf..757a54c39eef 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -7664,6 +7664,7 @@ err_flashmap: err_ioremap: free_netdev(netdev); err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); pci_release_mem_regions(pdev); err_pci_reg: err_dma: -- cgit v1.2.3 From af30cbd2f4d6d66a9b6094e0aa32420bc8b20e08 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Wed, 16 Jun 2021 07:53:02 +0200 Subject: iavf: Fix an error handling path in 'iavf_probe()' If an error occurs after a 'pci_enable_pcie_error_reporting()' call, it must be undone by a corresponding 'pci_disable_pcie_error_reporting()' call, as already done in the remove function. 
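The ixgbe, igc, igb, fm10k, e1000e and iavf probe fixes in this series all restore the same unwind rule: every successful setup step needs a matching teardown on every later error path, mirrored from the remove function. The sketch below is generic and standalone, with made-up resources rather than the real PCI API, just to show the goto-label idiom the missing pci_disable_pcie_error_reporting() calls slot into.

#include <stdio.h>

/* Made-up setup steps; enable_c() is the step that fails after B succeeded. */
static int  enable_a(void)  { puts("A enabled");  return 0; }
static void disable_a(void) { puts("A disabled"); }
static int  enable_b(void)  { puts("B enabled");  return 0; }	/* think: error reporting */
static void disable_b(void) { puts("B disabled"); }		/* the call these fixes add */
static int  enable_c(void)  { puts("C failed");   return -1; }

static int probe(void)
{
	int err;

	err = enable_a();
	if (err)
		goto err_a;

	err = enable_b();
	if (err)
		goto err_b;

	err = enable_c();
	if (err)
		goto err_c;	/* any later failure must fall through disable_b() */

	return 0;

err_c:
	disable_b();		/* without this, B leaks whenever a later step fails */
err_b:
	disable_a();
err_a:
	return err;
}

int main(void)
{
	return probe() ? 1 : 0;
}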
Fixes: 5eae00c57f5e ("i40evf: main driver core") Signed-off-by: Christophe JAILLET Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/iavf/iavf_main.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index e612c24fa384..44bafedd09f2 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -3798,6 +3798,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err_ioremap: free_netdev(netdev); err_alloc_etherdev: + pci_disable_pcie_error_reporting(pdev); pci_release_regions(pdev); err_pci_reg: err_dma: -- cgit v1.2.3 From 6c19d772618fea40d9681f259368f284a330fd90 Mon Sep 17 00:00:00 2001 From: Aleksandr Loktionov Date: Thu, 22 Apr 2021 10:19:23 +0000 Subject: igb: Check if num of q_vectors is smaller than max before array access Ensure that the adapter->q_vector[MAX_Q_VECTORS] array isn't accessed beyond its size. It was fixed by using a local variable num_q_vectors as a limit for loop index, and ensure that num_q_vectors is not bigger than MAX_Q_VECTORS. Fixes: 047e0030f1e6 ("igb: add new data structure for handling interrupts and NAPI") Signed-off-by: Aleksandr Loktionov Reviewed-by: Grzegorz Siwik Reviewed-by: Arkadiusz Kubalewski Reviewed-by: Slawomir Laba Reviewed-by: Sylwester Dziedziuch Reviewed-by: Mateusz Palczewski Tested-by: Tony Brelinski Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/igb/igb_main.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index abc239b736fb..9470ba891483 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -931,6 +931,7 @@ static void igb_configure_msix(struct igb_adapter *adapter) **/ static int igb_request_msix(struct igb_adapter *adapter) { + unsigned int num_q_vectors = adapter->num_q_vectors; struct net_device *netdev = adapter->netdev; int i, err = 0, vector = 0, free_vector = 0; @@ -939,7 +940,13 @@ static int igb_request_msix(struct igb_adapter *adapter) if (err) goto err_out; - for (i = 0; i < adapter->num_q_vectors; i++) { + if (num_q_vectors > MAX_Q_VECTORS) { + num_q_vectors = MAX_Q_VECTORS; + dev_warn(&adapter->pdev->dev, + "The number of queue vectors (%d) is higher than max allowed (%d)\n", + adapter->num_q_vectors, MAX_Q_VECTORS); + } + for (i = 0; i < num_q_vectors; i++) { struct igb_q_vector *q_vector = adapter->q_vector[i]; vector++; -- cgit v1.2.3 From 382a7c20d9253bcd5715789b8179528d0f3de72c Mon Sep 17 00:00:00 2001 From: Jedrzej Jagielski Date: Fri, 11 Jun 2021 22:42:17 +0000 Subject: igb: Fix position of assignment to *ring Assignment to *ring should be done after correctness check of the argument queue. 
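This fix and the q_vectors bounds check above share one rule: validate or clamp an index against the array size before it is used to address the array. A minimal standalone sketch of that rule, with hypothetical names and sizes:

#include <stdio.h>

#define MAX_QUEUES 2

struct ring { int id; };

static struct ring tx_ring[MAX_QUEUES] = { { 0 }, { 1 } };

/* Reject out-of-range indices before touching the array. */
static struct ring *get_tx_ring(int queue)
{
	if (queue < 0 || queue >= MAX_QUEUES)
		return NULL;

	return &tx_ring[queue];
}

/* Clamp a requested count to the array size, warning when it is too large. */
static int clamp_vectors(int requested)
{
	if (requested > MAX_QUEUES) {
		fprintf(stderr, "requested %d vectors, clamping to %d\n",
			requested, MAX_QUEUES);
		return MAX_QUEUES;
	}

	return requested;
}

int main(void)
{
	printf("queue 5 -> %s\n", get_tx_ring(5) ? "valid" : "rejected");
	printf("vectors -> %d\n", clamp_vectors(8));
	return 0;
}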
Fixes: 91db364236c8 ("igb: Refactor igb_configure_cbs()") Signed-off-by: Jedrzej Jagielski Acked-by: Vinicius Costa Gomes Tested-by: Tony Brelinski Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/igb/igb_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 9470ba891483..171a7a629b20 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1685,14 +1685,15 @@ static bool is_any_txtime_enabled(struct igb_adapter *adapter) **/ static void igb_config_tx_modes(struct igb_adapter *adapter, int queue) { - struct igb_ring *ring = adapter->tx_ring[queue]; struct net_device *netdev = adapter->netdev; struct e1000_hw *hw = &adapter->hw; + struct igb_ring *ring; u32 tqavcc, tqavctrl; u16 value; WARN_ON(hw->mac.type != e1000_i210); WARN_ON(queue < 0 || queue > 1); + ring = adapter->tx_ring[queue]; /* If any of the Qav features is enabled, configure queues as SR and * with HIGH PRIO. If none is, then configure them with LOW PRIO and -- cgit v1.2.3 From a3609ac24c18947737f5bc1746b8735814c521d1 Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Tue, 29 Jun 2021 06:43:05 +0200 Subject: net: usb: asix: ax88772: suspend PHY on driver probe After probe/bind sequence is the PHY in active state, even if interface is stopped. As result, on some systems like Samsung Exynos5250 SoC based Arndale board, the ASIX PHY will be able to negotiate the link but fail to transmit the data. To handle it, suspend the PHY on probe. Fixes: e532a096be0e ("net: usb: asix: ax88772: add phylib support") Signed-off-by: Oleksij Rempel Reported-by: Marek Szyprowski Tested-by: Marek Szyprowski Signed-off-by: David S. Miller --- drivers/net/usb/asix_devices.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c index aec97b021a73..2c115216420a 100644 --- a/drivers/net/usb/asix_devices.c +++ b/drivers/net/usb/asix_devices.c @@ -701,6 +701,7 @@ static int ax88772_init_phy(struct usbnet *dev) return ret; } + phy_suspend(priv->phydev); priv->phydev->mac_managed_pm = 1; phy_attached_info(priv->phydev); -- cgit v1.2.3 From 873a1e3d207ae587a7a1cc1d84545146b449ea5d Mon Sep 17 00:00:00 2001 From: Harman Kalra Date: Tue, 29 Jun 2021 22:30:04 +0530 Subject: octeontx2-af: cn10k: Setting up lmtst map table Introducing a new mailbox to support updating lmt entries and common lmt base address scheme i.e. multiple pcifuncs can share lmt region to reduce L1 cache pressure for application. Parameters passed to mailbox includes the primary pcifunc value whose lmt regions will be shared by other secondary pcifuncs. Here secondary pcifunc will be the one who is calling the mailbox. For example: By default each pcifunc has its own LMT base address: PCIFUNC1 LMT_BASE_ADDR A PCIFUNC2 LMT_BASE_ADDR B PCIFUNC3 LMT_BASE_ADDR C PCIFUNC4 LMT_BASE_ADDR D Application will choose PCIFUNC1 as base/primary pcifunc and as and when other pcifunc(secondary pcifuncs) gets probed, this mailbox will be called and LMTST table will be updated as: PCIFUNC1 LMT_BASE_ADDR A PCIFUNC2 LMT_BASE_ADDR A PCIFUNC3 LMT_BASE_ADDR A PCIFUNC4 LMT_BASE_ADDR A On FLR lmtst map table gets resetted to the default lmt base addresses for all secondary pcifuncs. Signed-off-by: Harman Kalra Signed-off-by: Geetha sowjanya Signed-off-by: Sunil Goutham Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 7 ++ drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 1 + drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 4 + .../net/ethernet/marvell/octeontx2/af/rvu_cn10k.c | 140 +++++++++++++++++++++ .../net/ethernet/marvell/octeontx2/af/rvu_reg.h | 5 + .../net/ethernet/marvell/octeontx2/af/rvu_struct.h | 3 +- 6 files changed, 159 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 770d86262838..638db868125a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -134,6 +134,8 @@ M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \ M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \ M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \ M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \ +M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \ + msg_rsp) \ M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \ /* CGX mbox IDs (range 0x200 - 0x3FF) */ \ M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \ @@ -1278,6 +1280,11 @@ struct set_vf_perm { u64 flags; }; +struct lmtst_tbl_setup_req { + struct mbox_msghdr hdr; + u16 base_pcifunc; +}; + /* CPT mailbox error codes * Range 901 - 1000. */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 0b092949d7ac..10cddf1ac7b9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -2333,6 +2333,7 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc) rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW); rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO); rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA); + rvu_reset_lmt_map_tbl(rvu, pcifunc); rvu_detach_rsrcs(rvu, NULL, pcifunc); mutex_unlock(&rvu->flr_lock); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 9e5d9ba6f01e..3c0a7e981f72 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -243,6 +243,7 @@ struct rvu_pfvf { u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */ u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */ u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */ + u64 lmt_base_addr; /* Preseving the pcifunc's lmtst base addr*/ unsigned long flags; }; @@ -754,6 +755,9 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot); int rvu_set_channels_base(struct rvu *rvu); void rvu_program_channels(struct rvu *rvu); +/* CN10K RVU - LMT*/ +void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc); + #ifdef CONFIG_DEBUG_FS void rvu_dbg_init(struct rvu *rvu); void rvu_dbg_exit(struct rvu *rvu); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c index 7d9e71c6965f..87f56e1f32e3 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c @@ -10,6 +10,146 @@ #include "cgx.h" #include "rvu_reg.h" +/* RVU LMTST */ +#define LMT_TBL_OP_READ 0 +#define LMT_TBL_OP_WRITE 1 +#define LMT_MAP_TABLE_SIZE (128 * 1024) +#define LMT_MAPTBL_ENTRY_SIZE 16 + +/* Function to perform operations (read/write) on lmtst map table */ +static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, + int lmt_tbl_op) +{ + void __iomem *lmt_map_base; + 
u64 tbl_base; + + tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE); + + lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE); + if (!lmt_map_base) { + dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n"); + return -ENOMEM; + } + + if (lmt_tbl_op == LMT_TBL_OP_READ) { + *val = readq(lmt_map_base + index); + } else { + writeq((*val), (lmt_map_base + index)); + /* Flushing the AP interceptor cache to make APR_LMT_MAP_ENTRY_S + * changes effective. Write 1 for flush and read is being used as a + * barrier and sets up a data dependency. Write to 0 after a write + * to 1 to complete the flush. + */ + rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0)); + rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL); + rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00); + } + + iounmap(lmt_map_base); + return 0; +} + +static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc) +{ + return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) + + (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE; +} + +int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu, + struct lmtst_tbl_setup_req *req, + struct msg_rsp *rsp) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); + u32 pri_tbl_idx, sec_tbl_idx; + int err = 0; + u64 val; + + /* Reconfiguring lmtst map table in lmt region shared mode i.e. make + * multiple PF_FUNCs to share an LMTLINE region, so primary/base + * pcifunc (which is passed as an argument to mailbox) is the one + * whose lmt base address will be shared among other secondary + * pcifunc (will be the one who is calling this mailbox). + */ + if (req->base_pcifunc) { + /* Calculating the LMT table index equivalent to primary + * pcifunc. + */ + pri_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->base_pcifunc); + + /* Truncating secondary pcifunc to calculate the LMT table index + * equivalent to secondary pcifunc. + */ + sec_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->hdr.pcifunc); + /* Read the base lmt addr of the secondary pcifunc */ + err = lmtst_map_table_ops(rvu, sec_tbl_idx, &val, + LMT_TBL_OP_READ); + if (err) { + dev_err(rvu->dev, + "Failed to read LMT map table: index 0x%x err %d\n", + sec_tbl_idx, err); + goto error; + } + + /* Storing the seondary's lmt base address as this needs to be + * reverted in FLR. Also making sure this default value doesn't + * get overwritten on multiple calls to this mailbox. + */ + if (!pfvf->lmt_base_addr) + pfvf->lmt_base_addr = val; + + /* Read the base lmt addr of the primary pcifunc */ + err = lmtst_map_table_ops(rvu, pri_tbl_idx, &val, + LMT_TBL_OP_READ); + if (err) { + dev_err(rvu->dev, + "Failed to read LMT map table: index 0x%x err %d\n", + pri_tbl_idx, err); + goto error; + } + + /* Update the base lmt addr of secondary with primary's base + * lmt addr. + */ + err = lmtst_map_table_ops(rvu, sec_tbl_idx, &val, + LMT_TBL_OP_WRITE); + if (err) { + dev_err(rvu->dev, + "Failed to update LMT map table: index 0x%x err %d\n", + sec_tbl_idx, err); + goto error; + } + } + +error: + return err; +} + +/* Resetting the lmtst map table to original base addresses */ +void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + u32 tbl_idx; + int err; + + if (is_rvu_otx2(rvu)) + return; + + if (pfvf->lmt_base_addr) { + /* This corresponds to lmt map table index */ + tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc); + /* Reverting back original lmt base addr for respective + * pcifunc. 
+ */ + err = lmtst_map_table_ops(rvu, tbl_idx, &pfvf->lmt_base_addr, + LMT_TBL_OP_WRITE); + if (err) + dev_err(rvu->dev, + "Failed to update LMT map table: index 0x%x err %d\n", + tbl_idx, err); + pfvf->lmt_base_addr = 0; + } +} + int rvu_set_channels_base(struct rvu *rvu) { struct rvu_hwinfo *hw = rvu->hw; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index 76837d5e19c6..61bafe956aae 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -692,4 +692,9 @@ #define LBK_LINK_CFG_ID_MASK GENMASK_ULL(11, 6) #define LBK_LINK_CFG_BASE_MASK GENMASK_ULL(5, 0) +/* APR */ +#define APR_AF_LMT_CFG (0x000ull) +#define APR_AF_LMT_MAP_BASE (0x008ull) +#define APR_AF_LMT_CTL (0x010ull) + #endif /* RVU_REG_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h index 14aa8e37ea41..5bbe6727d11d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h @@ -35,7 +35,8 @@ enum rvu_block_addr_e { BLKADDR_NDC_NPA0 = 0xeULL, BLKADDR_NDC_NIX1_RX = 0x10ULL, BLKADDR_NDC_NIX1_TX = 0x11ULL, - BLK_COUNT = 0x12ULL, + BLKADDR_APR = 0x16ULL, + BLK_COUNT = 0x17ULL, }; /* RVU Block Type Enumeration */ -- cgit v1.2.3 From 893ae97214c385be02f8ec097298cc48c7f0d905 Mon Sep 17 00:00:00 2001 From: Geetha sowjanya Date: Tue, 29 Jun 2021 22:30:05 +0530 Subject: octeontx2-af: cn10k: Support configurable LMTST regions This patch extends the lmtst_tbl_setup_req mbox to support run time LMTST configuration. RVU PF/VF and DPDK/ODP allocates a LMT region, creates a translation entry for a device via VFIO IOCTLs. This IOVA is shared with AF through above mbox. AF then uses RVU_SMMU transulation Widget and gets PA for the IOVA and updates the LMTtable entry for that device. Signed-off-by: Geetha sowjanya Signed-off-by: Sunil Kovvuri Goutham Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 3 + .../net/ethernet/marvell/octeontx2/af/rvu_cn10k.c | 130 +++++++++++++++------ .../net/ethernet/marvell/octeontx2/af/rvu_reg.h | 5 + 3 files changed, 103 insertions(+), 35 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 638db868125a..9672cbf8a90a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -1283,6 +1283,9 @@ struct set_vf_perm { struct lmtst_tbl_setup_req { struct mbox_msghdr hdr; u16 base_pcifunc; + u8 use_local_lmt_region; + u64 lmt_iova; + u64 rsvd[4]; }; /* CPT mailbox error codes diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c index 87f56e1f32e3..8d48b64485c6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c @@ -55,14 +55,101 @@ static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc) (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE; } +static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc, + u64 iova, u64 *lmt_addr) +{ + u64 pa, val, pf; + int err; + + if (!iova) { + dev_err(rvu->dev, "%s Requested Null address for transulation\n", __func__); + return -EINVAL; + } + + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova); + pf = rvu_get_pf(pcifunc) & 0x1F; + val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 | + ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF); + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val); + + err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false); + if (err) { + dev_err(rvu->dev, "%s LMTLINE iova transulation failed\n", __func__); + return err; + } + val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS); + if (val & ~0x1ULL) { + dev_err(rvu->dev, "%s LMTLINE iova transulation failed err:%llx\n", __func__, val); + return -EIO; + } + /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT1[60:21] + * PA[11:0] = IOVA[11:0] + */ + pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT1) >> 21; + pa &= GENMASK_ULL(39, 0); + *lmt_addr = (pa << 12) | (iova & 0xFFF); + + return 0; +} + +static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + u32 tbl_idx; + int err = 0; + u64 val; + + /* Read the current lmt addr of pcifunc */ + tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc); + err = lmtst_map_table_ops(rvu, tbl_idx, &val, LMT_TBL_OP_READ); + if (err) { + dev_err(rvu->dev, + "Failed to read LMT map table: index 0x%x err %d\n", + tbl_idx, err); + return err; + } + + /* Storing the seondary's lmt base address as this needs to be + * reverted in FLR. Also making sure this default value doesn't + * get overwritten on multiple calls to this mailbox. 
+ */ + if (!pfvf->lmt_base_addr) + pfvf->lmt_base_addr = val; + + /* Update the LMT table with new addr */ + err = lmtst_map_table_ops(rvu, tbl_idx, &lmt_addr, LMT_TBL_OP_WRITE); + if (err) { + dev_err(rvu->dev, + "Failed to update LMT map table: index 0x%x err %d\n", + tbl_idx, err); + return err; + } + return 0; +} + int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu, struct lmtst_tbl_setup_req *req, struct msg_rsp *rsp) { - struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); - u32 pri_tbl_idx, sec_tbl_idx; + u64 lmt_addr, val; + u32 pri_tbl_idx; int err = 0; - u64 val; + + /* Check if PF_FUNC wants to use it's own local memory as LMTLINE + * region, if so, convert that IOVA to physical address and + * populate LMT table with that address + */ + if (req->use_local_lmt_region) { + err = rvu_get_lmtaddr(rvu, req->hdr.pcifunc, + req->lmt_iova, &lmt_addr); + if (err < 0) + return err; + + /* Update the lmt addr for this PFFUNC in the LMT table */ + err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, lmt_addr); + if (err) + return err; + } /* Reconfiguring lmtst map table in lmt region shared mode i.e. make * multiple PF_FUNCs to share an LMTLINE region, so primary/base @@ -76,27 +163,6 @@ int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu, */ pri_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->base_pcifunc); - /* Truncating secondary pcifunc to calculate the LMT table index - * equivalent to secondary pcifunc. - */ - sec_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->hdr.pcifunc); - /* Read the base lmt addr of the secondary pcifunc */ - err = lmtst_map_table_ops(rvu, sec_tbl_idx, &val, - LMT_TBL_OP_READ); - if (err) { - dev_err(rvu->dev, - "Failed to read LMT map table: index 0x%x err %d\n", - sec_tbl_idx, err); - goto error; - } - - /* Storing the seondary's lmt base address as this needs to be - * reverted in FLR. Also making sure this default value doesn't - * get overwritten on multiple calls to this mailbox. - */ - if (!pfvf->lmt_base_addr) - pfvf->lmt_base_addr = val; - /* Read the base lmt addr of the primary pcifunc */ err = lmtst_map_table_ops(rvu, pri_tbl_idx, &val, LMT_TBL_OP_READ); @@ -104,24 +170,18 @@ int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu, dev_err(rvu->dev, "Failed to read LMT map table: index 0x%x err %d\n", pri_tbl_idx, err); - goto error; + return err; } /* Update the base lmt addr of secondary with primary's base * lmt addr. 
*/ - err = lmtst_map_table_ops(rvu, sec_tbl_idx, &val, - LMT_TBL_OP_WRITE); - if (err) { - dev_err(rvu->dev, - "Failed to update LMT map table: index 0x%x err %d\n", - sec_tbl_idx, err); - goto error; - } + err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, val); + if (err) + return err; } -error: - return err; + return 0; } /* Resetting the lmtst map table to original base addresses */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index 61bafe956aae..8b01ef6e2c99 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -49,6 +49,11 @@ #define RVU_AF_PFX_VF_BAR4_ADDR (0x5400 | (a) << 4) #define RVU_AF_PFX_VF_BAR4_CFG (0x5600 | (a) << 4) #define RVU_AF_PFX_LMTLINE_ADDR (0x5800 | (a) << 4) +#define RVU_AF_SMMU_ADDR_REQ (0x6000) +#define RVU_AF_SMMU_TXN_REQ (0x6008) +#define RVU_AF_SMMU_ADDR_RSP_STS (0x6010) +#define RVU_AF_SMMU_ADDR_TLN (0x6018) +#define RVU_AF_SMMU_TLN_FLIT1 (0x6030) /* Admin function's privileged PF/VF registers */ #define RVU_PRIV_CONST (0x8000000) -- cgit v1.2.3 From 5c0512072f6517326d9fba083c4467f173ddd984 Mon Sep 17 00:00:00 2001 From: Geetha sowjanya Date: Tue, 29 Jun 2021 22:30:06 +0530 Subject: octeontx2-pf: cn10k: Use runtime allocated LMTLINE region The current driver uses static LMTST region allocated by firmware. This memory gets populated as PF/VF BAR2. RVU PF/VF driver ioremap the memory as device memory for NIX/NPA operation. Since the memory is mapped as device memory we see performance degration. To address this issue this patch implements runtime memory allocation. RVU PF/VF allocates memory during device probe and share the base address with RVU AF. RVU AF then configure the LMT MAP table accordingly. Signed-off-by: Geetha sowjanya Signed-off-by: Sunil Kovvuri Goutham Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c | 87 +++++++++------------- drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h | 3 +- .../ethernet/marvell/octeontx2/nic/otx2_common.h | 7 +- .../net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 17 ++--- .../net/ethernet/marvell/octeontx2/nic/otx2_txrx.h | 1 + .../net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 12 ++- 6 files changed, 54 insertions(+), 73 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index 1b08896b46d2..184de9466286 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -22,69 +22,52 @@ static struct dev_hw_ops cn10k_hw_ops = { .refill_pool_ptrs = cn10k_refill_pool_ptrs, }; -int cn10k_pf_lmtst_init(struct otx2_nic *pf) +int cn10k_lmtst_init(struct otx2_nic *pfvf) { - int size, num_lines; - u64 base; - if (!test_bit(CN10K_LMTST, &pf->hw.cap_flag)) { - pf->hw_ops = &otx2_hw_ops; + struct lmtst_tbl_setup_req *req; + int qcount, err; + + if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) { + pfvf->hw_ops = &otx2_hw_ops; return 0; } - pf->hw_ops = &cn10k_hw_ops; - base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) + - (MBOX_SIZE * (pf->total_vfs + 1)); - - size = pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM) - - (MBOX_SIZE * (pf->total_vfs + 1)); - - pf->hw.lmt_base = ioremap(base, size); + pfvf->hw_ops = &cn10k_hw_ops; + qcount = pfvf->hw.max_queues; + /* LMTST lines allocation + * qcount = num_online_cpus(); + * NPA = TX + RX + XDP. + * NIX = TX * 32 (For Burst SQE flush). 
+ */ + pfvf->tot_lmt_lines = (qcount * 3) + (qcount * 32); + pfvf->npa_lmt_lines = qcount * 3; + pfvf->nix_lmt_size = LMT_BURST_SIZE * LMT_LINE_SIZE; - if (!pf->hw.lmt_base) { - dev_err(pf->dev, "Unable to map PF LMTST region\n"); + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_lmtst_tbl_setup(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); return -ENOMEM; } - /* FIXME: Get the num of LMTST lines from LMT table */ - pf->tot_lmt_lines = size / LMT_LINE_SIZE; - num_lines = (pf->tot_lmt_lines - NIX_LMTID_BASE) / - pf->hw.tx_queues; - /* Number of LMT lines per SQ queues */ - pf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines; - - pf->nix_lmt_size = pf->nix_lmt_lines * LMT_LINE_SIZE; - return 0; -} + req->use_local_lmt_region = true; -int cn10k_vf_lmtst_init(struct otx2_nic *vf) -{ - int size, num_lines; - - if (!test_bit(CN10K_LMTST, &vf->hw.cap_flag)) { - vf->hw_ops = &otx2_hw_ops; - return 0; + err = qmem_alloc(pfvf->dev, &pfvf->dync_lmt, pfvf->tot_lmt_lines, + LMT_LINE_SIZE); + if (err) { + mutex_unlock(&pfvf->mbox.lock); + return err; } + pfvf->hw.lmt_base = (u64 *)pfvf->dync_lmt->base; + req->lmt_iova = (u64)pfvf->dync_lmt->iova; - vf->hw_ops = &cn10k_hw_ops; - size = pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM); - vf->hw.lmt_base = ioremap_wc(pci_resource_start(vf->pdev, - PCI_MBOX_BAR_NUM), - size); - if (!vf->hw.lmt_base) { - dev_err(vf->dev, "Unable to map VF LMTST region\n"); - return -ENOMEM; - } + err = otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); - vf->tot_lmt_lines = size / LMT_LINE_SIZE; - /* LMTST lines per SQ */ - num_lines = (vf->tot_lmt_lines - NIX_LMTID_BASE) / - vf->hw.tx_queues; - vf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines; - vf->nix_lmt_size = vf->nix_lmt_lines * LMT_LINE_SIZE; return 0; } -EXPORT_SYMBOL(cn10k_vf_lmtst_init); +EXPORT_SYMBOL(cn10k_lmtst_init); int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) { @@ -93,9 +76,11 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) struct otx2_snd_queue *sq; sq = &pfvf->qset.sq[qidx]; - sq->lmt_addr = (__force u64 *)((u64)pfvf->hw.nix_lmt_base + + sq->lmt_addr = (u64 *)((u64)pfvf->hw.nix_lmt_base + (qidx * pfvf->nix_lmt_size)); + sq->lmt_id = pfvf->npa_lmt_lines + (qidx * LMT_BURST_SIZE); + /* Get memory to put this msg */ aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox); if (!aq) @@ -158,15 +143,13 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq) void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx) { - struct otx2_nic *pfvf = dev; - int lmt_id = NIX_LMTID_BASE + (qidx * pfvf->nix_lmt_lines); u64 val = 0, tar_addr = 0; /* FIXME: val[0:10] LMT_ID. * [12:15] no of LMTST - 1 in the burst. * [19:63] data size of each LMTST in the burst except first. */ - val = (lmt_id & 0x7FF); + val = (sq->lmt_id & 0x7FF); /* Target address for LMTST flush tells HW how many 128bit * words are present. * tar_addr[6:4] size of first LMTST - 1 in units of 128b. 
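/*
 * Illustrative aside, not part of the patch above or below: with the runtime
 * allocation done in cn10k_lmtst_init(), the per-device LMT lines are laid out
 * as qcount * 3 NPA lines (TX + RX + XDP) followed by a 32-line burst per NIX
 * send queue, which is where sq->lmt_id comes from. A hypothetical helper
 * showing just that index math:
 */
static inline unsigned int example_nix_lmt_id(unsigned int qcount,
					      unsigned int qidx)
{
	unsigned int npa_lmt_lines = qcount * 3;	/* NPA: TX + RX + XDP */

	return npa_lmt_lines + qidx * 32;		/* LMT_BURST_SIZE lines per SQ */
}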
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h index 71292a4cf1f3..1a1ae334477d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h @@ -12,8 +12,7 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx); int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); -int cn10k_pf_lmtst_init(struct otx2_nic *pf); -int cn10k_vf_lmtst_init(struct otx2_nic *vf); +int cn10k_lmtst_init(struct otx2_nic *pfvf); int cn10k_free_all_ipolicers(struct otx2_nic *pfvf); int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf); int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 234b330f3183..20a9c69f020f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -218,8 +218,8 @@ struct otx2_hw { unsigned long cap_flag; #define LMT_LINE_SIZE 128 -#define NIX_LMTID_BASE 72 /* RX + TX + XDP */ - void __iomem *lmt_base; +#define LMT_BURST_SIZE 32 /* 32 LMTST lines for burst SQE flush */ + u64 *lmt_base; u64 *npa_lmt_base; u64 *nix_lmt_base; }; @@ -363,8 +363,9 @@ struct otx2_nic { /* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */ int nix_blkaddr; /* LMTST Lines info */ + struct qmem *dync_lmt; u16 tot_lmt_lines; - u16 nix_lmt_lines; + u16 npa_lmt_lines; u32 nix_lmt_size; struct otx2_ptp *ptp; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 59912f73417b..088c28df849d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1533,10 +1533,10 @@ int otx2_open(struct net_device *netdev) if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) { /* Reserve LMT lines for NPA AURA batch free */ - pf->hw.npa_lmt_base = (__force u64 *)pf->hw.lmt_base; + pf->hw.npa_lmt_base = pf->hw.lmt_base; /* Reserve LMT lines for NIX TX */ - pf->hw.nix_lmt_base = (__force u64 *)((u64)pf->hw.npa_lmt_base + - (NIX_LMTID_BASE * LMT_LINE_SIZE)); + pf->hw.nix_lmt_base = (u64 *)((u64)pf->hw.npa_lmt_base + + (pf->npa_lmt_lines * LMT_LINE_SIZE)); } err = otx2_init_hw_resources(pf); @@ -2526,7 +2526,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_detach_rsrc; - err = cn10k_pf_lmtst_init(pf); + err = cn10k_lmtst_init(pf); if (err) goto err_detach_rsrc; @@ -2630,8 +2630,8 @@ err_del_mcam_entries: err_ptp_destroy: otx2_ptp_destroy(pf); err_detach_rsrc: - if (hw->lmt_base) - iounmap(hw->lmt_base); + if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) + qmem_free(pf->dev, pf->dync_lmt); otx2_detach_resources(&pf->mbox); err_disable_mbox_intr: otx2_disable_mbox_intr(pf); @@ -2772,9 +2772,8 @@ static void otx2_remove(struct pci_dev *pdev) otx2_mcam_flow_del(pf); otx2_shutdown_tc(pf); otx2_detach_resources(&pf->mbox); - if (pf->hw.lmt_base) - iounmap(pf->hw.lmt_base); - + if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) + qmem_free(pf->dev, pf->dync_lmt); otx2_disable_mbox_intr(pf); otx2_pfaf_mbox_destroy(pf); pci_free_irq_vectors(pf->pdev); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index 52486c1f0973..2f144e2cf436 100644 --- 
a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -83,6 +83,7 @@ struct otx2_snd_queue { u16 num_sqbs; u16 sqe_thresh; u8 sqe_per_sqb; + u32 lmt_id; u64 io_addr; u64 *aura_fc_addr; u64 *lmt_addr; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index 13a908f75ba0..a8bee5aefec1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -609,7 +609,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_detach_rsrc; - err = cn10k_vf_lmtst_init(vf); + err = cn10k_lmtst_init(vf); if (err) goto err_detach_rsrc; @@ -667,8 +667,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) err_unreg_netdev: unregister_netdev(netdev); err_detach_rsrc: - if (hw->lmt_base) - iounmap(hw->lmt_base); + if (test_bit(CN10K_LMTST, &vf->hw.cap_flag)) + qmem_free(vf->dev, vf->dync_lmt); otx2_detach_resources(&vf->mbox); err_disable_mbox_intr: otx2vf_disable_mbox_intr(vf); @@ -700,10 +700,8 @@ static void otx2vf_remove(struct pci_dev *pdev) destroy_workqueue(vf->otx2_wq); otx2vf_disable_mbox_intr(vf); otx2_detach_resources(&vf->mbox); - - if (vf->hw.lmt_base) - iounmap(vf->hw.lmt_base); - + if (test_bit(CN10K_LMTST, &vf->hw.cap_flag)) + qmem_free(vf->dev, vf->dync_lmt); otx2vf_vfaf_mbox_destroy(vf); pci_free_irq_vectors(vf->pdev); pci_set_drvdata(pdev, NULL); -- cgit v1.2.3 From 5a3c680aa2c12c90c44af383fe6882a39875ab81 Mon Sep 17 00:00:00 2001 From: Doug Berger Date: Tue, 29 Jun 2021 17:14:19 -0700 Subject: net: bcmgenet: ensure EXT_ENERGY_DET_MASK is clear Setting the EXT_ENERGY_DET_MASK bit allows the port energy detection logic of the internal PHY to prevent the system from sleeping. Some internal PHYs will report that energy is detected when the network interface is closed which can prevent the system from going to sleep if WoL is enabled when the interface is brought down. Since the driver does not support waking the system on this logic, this commit clears the bit whenever the internal PHY is powered up and the other logic for manipulating the bit is removed since it serves no useful function. Fixes: 1c1008c793fa ("net: bcmgenet: add main driver file") Signed-off-by: Doug Berger Acked-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 17 ++--------------- drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 6 ------ 2 files changed, 2 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 41f7f078cd27..35e9956e930c 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1640,7 +1640,8 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv, switch (mode) { case GENET_POWER_PASSIVE: - reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS); + reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS | + EXT_ENERGY_DET_MASK); if (GENET_IS_V5(priv)) { reg &= ~(EXT_PWR_DOWN_PHY_EN | EXT_PWR_DOWN_PHY_RD | @@ -3292,7 +3293,6 @@ static int bcmgenet_open(struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); unsigned long dma_ctrl; - u32 reg; int ret; netif_dbg(priv, ifup, dev, "bcmgenet_open\n"); @@ -3318,12 +3318,6 @@ static int bcmgenet_open(struct net_device *dev) bcmgenet_set_hw_addr(priv, dev->dev_addr); - if (priv->internal_phy) { - reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); - reg |= EXT_ENERGY_DET_MASK; - bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); - } - /* Disable RX/TX DMA and flush TX queues */ dma_ctrl = bcmgenet_dma_disable(priv); @@ -4139,7 +4133,6 @@ static int bcmgenet_resume(struct device *d) struct bcmgenet_priv *priv = netdev_priv(dev); struct bcmgenet_rxnfc_rule *rule; unsigned long dma_ctrl; - u32 reg; int ret; if (!netif_running(dev)) @@ -4176,12 +4169,6 @@ static int bcmgenet_resume(struct device *d) if (rule->state != BCMGENET_RXNFC_STATE_UNUSED) bcmgenet_hfb_create_rxnfc_filter(priv, rule); - if (priv->internal_phy) { - reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); - reg |= EXT_ENERGY_DET_MASK; - bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); - } - /* Disable RX/TX DMA and flush TX queues */ dma_ctrl = bcmgenet_dma_disable(priv); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index facde824bcaa..e31a5a397f11 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -186,12 +186,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv, reg |= CMD_RX_EN; bcmgenet_umac_writel(priv, reg, UMAC_CMD); - if (priv->hw_params->flags & GENET_HAS_EXT) { - reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT); - reg &= ~EXT_ENERGY_DET_MASK; - bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT); - } - reg = UMAC_IRQ_MPD_R; if (hfb_enable) reg |= UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM; -- cgit v1.2.3 From 7da467d82d1ed4fb317aff836f99709169e73f10 Mon Sep 17 00:00:00 2001 From: Marek Behún Date: Thu, 1 Jul 2021 00:22:26 +0200 Subject: net: dsa: mv88e6xxx: enable .port_set_policy() on Topaz MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit f3a2cd326e44 ("net: dsa: mv88e6xxx: introduce .port_set_policy") introduced .port_set_policy() method with implementation for several models, but forgot to add Topaz, which can use the 6352 implementation. Use the 6352 implementation of .port_set_policy() on Topaz. Signed-off-by: Marek Behún Fixes: f3a2cd326e44 ("net: dsa: mv88e6xxx: introduce .port_set_policy") Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 961fa6b75cad..6bcee3e012d4 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3583,6 +3583,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .port_set_speed_duplex = mv88e6341_port_set_speed_duplex, .port_max_speed_mode = mv88e6341_port_max_speed_mode, .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_policy = mv88e6352_port_set_policy, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_ucast_flood = mv88e6352_port_set_ucast_flood, .port_set_mcast_flood = mv88e6352_port_set_mcast_flood, @@ -4383,6 +4384,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .port_set_speed_duplex = mv88e6341_port_set_speed_duplex, .port_max_speed_mode = mv88e6341_port_max_speed_mode, .port_tag_remap = mv88e6095_port_tag_remap, + .port_set_policy = mv88e6352_port_set_policy, .port_set_frame_mode = mv88e6351_port_set_frame_mode, .port_set_ucast_flood = mv88e6352_port_set_ucast_flood, .port_set_mcast_flood = mv88e6352_port_set_mcast_flood, -- cgit v1.2.3 From 11527f3c4725640e6c40a2b7654e303f45e82a6c Mon Sep 17 00:00:00 2001 From: Marek Behún Date: Thu, 1 Jul 2021 00:22:27 +0200 Subject: net: dsa: mv88e6xxx: use correct .stats_set_histogram() on Topaz MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 40cff8fca9e3 ("net: dsa: mv88e6xxx: Fix stats histogram mode") introduced wrong .stats_set_histogram() method for Topaz family. The Peridot method should be used instead. Signed-off-by: Marek Behún Fixes: 40cff8fca9e3 ("net: dsa: mv88e6xxx: Fix stats histogram mode") Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 6bcee3e012d4..b125d3227dbd 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3597,7 +3597,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .port_set_cmode = mv88e6341_port_set_cmode, .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6390_g1_stats_snapshot, - .stats_set_histogram = mv88e6095_g1_stats_set_histogram, + .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, .stats_get_strings = mv88e6320_stats_get_strings, .stats_get_stats = mv88e6390_stats_get_stats, @@ -4398,7 +4398,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .port_set_cmode = mv88e6341_port_set_cmode, .port_setup_message_port = mv88e6xxx_setup_message_port, .stats_snapshot = mv88e6390_g1_stats_snapshot, - .stats_set_histogram = mv88e6095_g1_stats_set_histogram, + .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, .stats_get_strings = mv88e6320_stats_get_strings, .stats_get_stats = mv88e6390_stats_get_stats, -- cgit v1.2.3 From 3709488790022c85720f991bff50d48ed5a36e6a Mon Sep 17 00:00:00 2001 From: Marek Behún Date: Thu, 1 Jul 2021 00:22:28 +0200 Subject: net: dsa: mv88e6xxx: enable .rmu_disable() on Topaz MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 9e5baf9b36367 ("net: dsa: mv88e6xxx: add RMU disable op") introduced .rmu_disable() method with implementation for several models, but forgot to add Topaz, which can use the Peridot implementation. Use the Peridot implementation of .rmu_disable() on Topaz. Signed-off-by: Marek Behún Fixes: 9e5baf9b36367 ("net: dsa: mv88e6xxx: add RMU disable op") Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'drivers') diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index b125d3227dbd..d4b05c10e5f2 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3607,6 +3607,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6390_g1_rmu_disable, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, @@ -4408,6 +4409,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, + .rmu_disable = mv88e6390_g1_rmu_disable, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, -- cgit v1.2.3 From c07fff3492acae41cedbabea395b644dd5872b8c Mon Sep 17 00:00:00 2001 From: Marek Behún Date: Thu, 1 Jul 2021 00:22:29 +0200 Subject: net: dsa: mv88e6xxx: enable devlink ATU hash param for Topaz MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 23e8b470c7788 ("net: dsa: mv88e6xxx: Add devlink param for ATU hash algorithm.") introduced ATU hash algorithm access via devlink, but did not enable it for Topaz. Enable this feature also for Topaz. 
Signed-off-by: Marek Behún Fixes: 23e8b470c7788 ("net: dsa: mv88e6xxx: Add devlink param for ATU hash algorithm.") Reviewed-by: Andrew Lunn Signed-off-by: David S. Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index d4b05c10e5f2..354ff0b84b7f 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3608,6 +3608,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6390_g1_rmu_disable, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, @@ -4410,6 +4412,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, .rmu_disable = mv88e6390_g1_rmu_disable, + .atu_get_hash = mv88e6165_g1_atu_get_hash, + .atu_set_hash = mv88e6165_g1_atu_set_hash, .vtu_getnext = mv88e6352_g1_vtu_getnext, .vtu_loadpurge = mv88e6352_g1_vtu_loadpurge, .serdes_power = mv88e6390_serdes_power, -- cgit v1.2.3 From a03b98d68367b18e5db6d6850e2cc18754fba94a Mon Sep 17 00:00:00 2001 From: Marek Behún Date: Thu, 1 Jul 2021 00:22:30 +0200 Subject: net: dsa: mv88e6xxx: enable SerDes RX stats for Topaz MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 0df952873636a ("mv88e6xxx: Add serdes Rx statistics") added support for RX statistics on SerDes ports for Peridot. This same implementation is also valid for Topaz, but was not enabled at the time. We need to use the generic .serdes_get_lane() method instead of the Peridot specific one in the stats methods so that on Topaz the proper one is used. Signed-off-by: Marek Behún Fixes: 0df952873636a ("mv88e6xxx: Add serdes Rx statistics") Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 6 ++++++ drivers/net/dsa/mv88e6xxx/serdes.c | 6 +++--- 2 files changed, 9 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 354ff0b84b7f..1e95a0facbd4 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3623,6 +3623,9 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .serdes_irq_enable = mv88e6390_serdes_irq_enable, .serdes_irq_status = mv88e6390_serdes_irq_status, .gpio_ops = &mv88e6352_gpio_ops, + .serdes_get_sset_count = mv88e6390_serdes_get_sset_count, + .serdes_get_strings = mv88e6390_serdes_get_strings, + .serdes_get_stats = mv88e6390_serdes_get_stats, .phylink_validate = mv88e6341_phylink_validate, }; @@ -4429,6 +4432,9 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, + .serdes_get_sset_count = mv88e6390_serdes_get_sset_count, + .serdes_get_strings = mv88e6390_serdes_get_strings, + .serdes_get_stats = mv88e6390_serdes_get_stats, .phylink_validate = mv88e6341_phylink_validate, }; diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index e4fbef81bc52..b1d46dd8eaab 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -722,7 +722,7 @@ static struct mv88e6390_serdes_hw_stat mv88e6390_serdes_hw_stats[] = { int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port) { - if (mv88e6390_serdes_get_lane(chip, port) < 0) + if (mv88e6xxx_serdes_get_lane(chip, port) < 0) return 0; return ARRAY_SIZE(mv88e6390_serdes_hw_stats); @@ -734,7 +734,7 @@ int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip, struct mv88e6390_serdes_hw_stat *stat; int i; - if (mv88e6390_serdes_get_lane(chip, port) < 0) + if (mv88e6xxx_serdes_get_lane(chip, port) < 0) return 0; for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_hw_stats); i++) { @@ -770,7 +770,7 @@ int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port, int lane; int i; - lane = mv88e6390_serdes_get_lane(chip, port); + lane = mv88e6xxx_serdes_get_lane(chip, port); if (lane < 0) return 0; -- cgit v1.2.3 From 953b0dcbe2e3f7bee98cc3bca2ec82c8298e9c16 Mon Sep 17 00:00:00 2001 From: Marek Behún Date: Thu, 1 Jul 2021 00:22:31 +0200 Subject: net: dsa: mv88e6xxx: enable SerDes PCS register dump via ethtool -d on Topaz MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit bf3504cea7d7e ("net: dsa: mv88e6xxx: Add 6390 family PCS registers to ethtool -d") added support for dumping SerDes PCS registers via ethtool -d for Peridot. The same implementation is also valid for Topaz, but was not enabled at the time. Signed-off-by: Marek Behún Fixes: bf3504cea7d7e ("net: dsa: mv88e6xxx: Add 6390 family PCS registers to ethtool -d") Reviewed-by: Andrew Lunn Signed-off-by: David S. 
Miller --- drivers/net/dsa/mv88e6xxx/chip.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 1e95a0facbd4..beb41572d04e 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3626,6 +3626,8 @@ static const struct mv88e6xxx_ops mv88e6141_ops = { .serdes_get_sset_count = mv88e6390_serdes_get_sset_count, .serdes_get_strings = mv88e6390_serdes_get_strings, .serdes_get_stats = mv88e6390_serdes_get_stats, + .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, + .serdes_get_regs = mv88e6390_serdes_get_regs, .phylink_validate = mv88e6341_phylink_validate, }; @@ -4435,6 +4437,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = { .serdes_get_sset_count = mv88e6390_serdes_get_sset_count, .serdes_get_strings = mv88e6390_serdes_get_strings, .serdes_get_stats = mv88e6390_serdes_get_stats, + .serdes_get_regs_len = mv88e6390_serdes_get_regs_len, + .serdes_get_regs = mv88e6390_serdes_get_regs, .phylink_validate = mv88e6341_phylink_validate, }; -- cgit v1.2.3 From 6f14078e3ee59ccc5806f7bff0f25f94a6d3ff80 Mon Sep 17 00:00:00 2001 From: Sunil Kumar Kori Date: Wed, 30 Jun 2021 15:40:57 +0530 Subject: octeontx2-af: DMAC filter support in MAC block MAC block supports 32 dmac filters which are logically divided among all attached LMACS. For example MAC block0 having one LMAC then maximum supported filters are 32 where as MAC block1 having 4 enabled LMACS them maximum supported filteres are 8 for each LMAC. This patch adds mbox handlers to add/delete/update mac entry in DMAC filter table. Signed-off-by: Sunil Kumar Kori Signed-off-by: Hariprasad Kelam Signed-off-by: Sunil Kovvuri Goutham Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 264 +++++++++++++++++++-- drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 7 + .../ethernet/marvell/octeontx2/af/lmac_common.h | 12 +- drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 48 +++- drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 2 + .../net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 109 +++++++++ .../net/ethernet/marvell/octeontx2/af/rvu_nix.c | 3 + 7 files changed, 425 insertions(+), 20 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index fac6474ad694..bc413f96b430 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -86,6 +86,22 @@ bool is_lmac_valid(struct cgx *cgx, int lmac_id) return test_bit(lmac_id, &cgx->lmac_bmap); } +/* Helper function to get sequential index + * given the enabled LMAC of a CGX + */ +static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id) +{ + int tmp, id = 0; + + for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) { + if (tmp == lmac_id) + break; + id++; + } + + return id; +} + struct mac_ops *get_mac_ops(void *cgxd) { if (!cgxd) @@ -211,37 +227,229 @@ static u64 mac2u64 (u8 *mac_addr) return mac; } +static void cfg2mac(u64 cfg, u8 *mac_addr) +{ + int i, index = 0; + + for (i = ETH_ALEN - 1; i >= 0; i--, index++) + mac_addr[i] = (cfg >> (8 * index)) & 0xFF; +} + int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr) { struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); struct mac_ops *mac_ops; + int index, id; u64 cfg; + /* access mac_ops to know csr_offset */ mac_ops = cgx_dev->mac_ops; + /* copy 6bytes from macaddr */ /* memcpy(&cfg, 
mac_addr, 6); */ cfg = mac2u64 (mac_addr); - cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)), + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max; + + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49)); cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); - cfg |= CGX_DMAC_CTL0_CAM_ENABLE; + cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE | + CGX_DMAC_MCAST_MODE); + cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); + + return 0; +} + +int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); + struct mac_ops *mac_ops; + int index, idx; + u64 cfg = 0; + int id; + + if (!lmac) + return -ENODEV; + + mac_ops = cgx_dev->mac_ops; + /* Get available index where entry is to be installed */ + idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap); + if (idx < 0) + return idx; + + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max + idx; + + cfg = mac2u64 (mac_addr); + cfg |= CGX_DMAC_CAM_ADDR_ENABLE; + cfg |= ((u64)lmac_id << 49); + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg); + + cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); + cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT); + + if (is_multicast_ether_addr(mac_addr)) { + cfg &= ~GENMASK_ULL(2, 1); + cfg |= CGX_DMAC_MCAST_MODE_CAM; + lmac->mcast_filters_count++; + } else if (!lmac->mcast_filters_count) { + cfg |= CGX_DMAC_MCAST_MODE; + } + + cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); + + return idx; +} + +int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); + struct mac_ops *mac_ops; + u8 index = 0, id; + u64 cfg; + + if (!lmac) + return -ENODEV; + + mac_ops = cgx_dev->mac_ops; + /* Restore index 0 to its default init value as done during + * cgx_lmac_init + */ + set_bit(0, lmac->mac_to_index_bmap.bmap); + + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max + index; + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0); + + /* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */ + cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); + cfg &= ~CGX_DMAC_CAM_ACCEPT; + cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE); cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); return 0; } +/* Allows caller to change macaddress associated with index + * in dmac filter table including index 0 reserved for + * interface mac address + */ +int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct mac_ops *mac_ops; + struct lmac *lmac; + u64 cfg; + int id; + + lmac = lmac_pdata(lmac_id, cgx_dev); + if (!lmac) + return -ENODEV; + + mac_ops = cgx_dev->mac_ops; + /* Validate the index */ + if (index >= lmac->mac_to_index_bmap.max) + return -EINVAL; + + /* ensure index is already set */ + if (!test_bit(index, lmac->mac_to_index_bmap.bmap)) + return -EINVAL; + + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max + index; + + cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8))); + cfg &= ~CGX_RX_DMAC_ADR_MASK; + cfg |= mac2u64 (mac_addr); + + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg); + return 0; +} + +int 
cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); + struct mac_ops *mac_ops; + u8 mac[ETH_ALEN]; + u64 cfg; + int id; + + if (!lmac) + return -ENODEV; + + mac_ops = cgx_dev->mac_ops; + /* Validate the index */ + if (index >= lmac->mac_to_index_bmap.max) + return -EINVAL; + + /* Skip deletion for reserved index i.e. index 0 */ + if (index == 0) + return 0; + + rvu_free_rsrc(&lmac->mac_to_index_bmap, index); + + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max + index; + + /* Read MAC address to check whether it is ucast or mcast */ + cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8))); + + cfg2mac(cfg, mac); + if (is_multicast_ether_addr(mac)) + lmac->mcast_filters_count--; + + if (!lmac->mcast_filters_count) { + cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); + cfg &= ~GENMASK_ULL(2, 1); + cfg |= CGX_DMAC_MCAST_MODE; + cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); + } + + cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0); + + return 0; +} + +int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id) +{ + struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); + + if (lmac) + return lmac->mac_to_index_bmap.max; + + return 0; +} + u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id) { struct cgx *cgx_dev = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev); struct mac_ops *mac_ops; + int index; u64 cfg; + int id; mac_ops = cgx_dev->mac_ops; - cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8); + id = get_sequence_id_of_lmac(cgx_dev, lmac_id); + + index = id * lmac->mac_to_index_bmap.max; + + cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8); return cfg & CGX_RX_DMAC_ADR_MASK; } @@ -297,35 +505,51 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable) void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable) { struct cgx *cgx = cgx_get_pdata(cgx_id); + struct lmac *lmac = lmac_pdata(lmac_id, cgx); + u16 max_dmac = lmac->mac_to_index_bmap.max; struct mac_ops *mac_ops; + int index, i; u64 cfg = 0; + int id; if (!cgx) return; + id = get_sequence_id_of_lmac(cgx, lmac_id); + mac_ops = cgx->mac_ops; if (enable) { /* Enable promiscuous mode on LMAC */ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); - cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE); - cfg |= CGX_DMAC_BCAST_MODE; + cfg &= ~CGX_DMAC_CAM_ACCEPT; + cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE); cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); - cfg = cgx_read(cgx, 0, - (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8)); - cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE; - cgx_write(cgx, 0, - (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg); + for (i = 0; i < max_dmac; i++) { + index = id * max_dmac + i; + cfg = cgx_read(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8)); + cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE; + cgx_write(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg); + } } else { /* Disable promiscuous mode */ cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE; cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg); - cfg = cgx_read(cgx, 0, - (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8)); - cfg |= CGX_DMAC_CAM_ADDR_ENABLE; - cgx_write(cgx, 0, - (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg); + for (i = 0; i < max_dmac; i++) { + index = id * max_dmac + i; + cfg = cgx_read(cgx, 0, + 
(CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8)); + if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) { + cfg |= CGX_DMAC_CAM_ADDR_ENABLE; + cgx_write(cgx, 0, + (CGXX_CMRX_RX_DMAC_CAM0 + + index * 0x8), + cfg); + } + } } } @@ -1234,6 +1458,15 @@ static int cgx_lmac_init(struct cgx *cgx) } lmac->cgx = cgx; + lmac->mac_to_index_bmap.max = + MAX_DMAC_ENTRIES_PER_CGX / cgx->lmac_count; + err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap); + if (err) + return err; + + /* Reserve first entry for default MAC address */ + set_bit(0, lmac->mac_to_index_bmap.bmap); + init_waitqueue_head(&lmac->wq_cmd_cmplt); mutex_init(&lmac->cmd_lock); spin_lock_init(&lmac->event_cb_lock); @@ -1274,6 +1507,7 @@ static int cgx_lmac_exit(struct cgx *cgx) continue; cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false); cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true); + kfree(lmac->mac_to_index_bmap.bmap); kfree(lmac->name); kfree(lmac); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h index 12521262164a..0c613f83a41c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h @@ -23,6 +23,7 @@ #define CGX_ID_MASK 0x7 #define MAX_LMAC_PER_CGX 4 +#define MAX_DMAC_ENTRIES_PER_CGX 32 #define CGX_FIFO_LEN 65536 /* 64K for both Rx & Tx */ #define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX) @@ -46,6 +47,7 @@ #define CGXX_CMRX_RX_DMAC_CTL0 (0x1F8 + mac_ops->csr_offset) #define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3) #define CGX_DMAC_CAM_ACCEPT BIT_ULL(3) +#define CGX_DMAC_MCAST_MODE_CAM BIT_ULL(2) #define CGX_DMAC_MCAST_MODE BIT_ULL(1) #define CGX_DMAC_BCAST_MODE BIT_ULL(0) #define CGXX_CMRX_RX_DMAC_CAM0 (0x200 + mac_ops->csr_offset) @@ -139,7 +141,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat); int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable); int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable); int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr); +int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id); u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id); +int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr); +int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index); +int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id); void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable); void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable); int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable); @@ -165,4 +171,5 @@ u8 cgx_get_lmacid(void *cgxd, u8 lmac_index); unsigned long cgx_get_lmac_bmap(void *cgxd); void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val); u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset); +int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index); #endif /* CGX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h index 45706fd87120..a8b7b1c7a1d5 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h @@ -10,17 +10,19 @@ #include "rvu.h" #include "cgx.h" /** - * struct lmac + * struct lmac - per lmac locks and properties * @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion * @cmd_lock: Lock to serialize the command interface * @resp: command response * @link_info: link related information + * @mac_to_index_bmap: Mac address to CGX table index mapping * @event_cb: callback for linkchange events * @event_cb_lock: lock 
for serializing callback with unregister - * @cmd_pend: flag set before new command is started - * flag cleared after command response is received * @cgx: parent cgx port + * @mcast_filters_count: Number of multicast filters installed * @lmac_id: lmac port id + * @cmd_pend: flag set before new command is started + * flag cleared after command response is received * @name: lmac port name */ struct lmac { @@ -29,12 +31,14 @@ struct lmac { struct mutex cmd_lock; u64 resp; struct cgx_link_user_info link_info; + struct rsrc_bmap mac_to_index_bmap; struct cgx_event_cb event_cb; /* lock for serializing callback with unregister */ spinlock_t event_cb_lock; - bool cmd_pend; struct cgx *cgx; + u8 mcast_filters_count; u8 lmac_id; + bool cmd_pend; char *name; }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 9672cbf8a90a..f5ec39de026a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -165,7 +165,15 @@ M(CGX_SET_LINK_MODE, 0x214, cgx_set_link_mode, cgx_set_link_mode_req,\ M(CGX_FEATURES_GET, 0x215, cgx_features_get, msg_req, \ cgx_features_info_msg) \ M(RPM_STATS, 0x216, rpm_stats, msg_req, rpm_stats_rsp) \ - /* NPA mbox IDs (range 0x400 - 0x5FF) */ \ +M(CGX_MAC_ADDR_ADD, 0x217, cgx_mac_addr_add, cgx_mac_addr_add_req, \ + cgx_mac_addr_add_rsp) \ +M(CGX_MAC_ADDR_DEL, 0x218, cgx_mac_addr_del, cgx_mac_addr_del_req, \ + msg_rsp) \ +M(CGX_MAC_MAX_ENTRIES_GET, 0x219, cgx_mac_max_entries_get, msg_req, \ + cgx_max_dmac_entries_get_rsp) \ +M(CGX_MAC_ADDR_RESET, 0x21A, cgx_mac_addr_reset, msg_req, msg_rsp) \ +M(CGX_MAC_ADDR_UPDATE, 0x21B, cgx_mac_addr_update, cgx_mac_addr_update_req, \ + msg_rsp) \ /* NPA mbox IDs (range 0x400 - 0x5FF) */ \ M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \ npa_lf_alloc_req, npa_lf_alloc_rsp) \ @@ -403,6 +411,38 @@ struct cgx_mac_addr_set_or_get { u8 mac_addr[ETH_ALEN]; }; +/* Structure for requesting the operation to + * add DMAC filter entry into CGX interface + */ +struct cgx_mac_addr_add_req { + struct mbox_msghdr hdr; + u8 mac_addr[ETH_ALEN]; +}; + +/* Structure for response against the operation to + * add DMAC filter entry into CGX interface + */ +struct cgx_mac_addr_add_rsp { + struct mbox_msghdr hdr; + u8 index; +}; + +/* Structure for requesting the operation to + * delete DMAC filter entry from CGX interface + */ +struct cgx_mac_addr_del_req { + struct mbox_msghdr hdr; + u8 index; +}; + +/* Structure for response against the operation to + * get maximum supported DMAC filter entries + */ +struct cgx_max_dmac_entries_get_rsp { + struct mbox_msghdr hdr; + u8 max_dmac_filters; +}; + struct cgx_link_user_info { uint64_t link_up:1; uint64_t full_duplex:1; @@ -501,6 +541,12 @@ struct cgx_set_link_mode_rsp { int status; }; +struct cgx_mac_addr_update_req { + struct mbox_msghdr hdr; + u8 mac_addr[ETH_ALEN]; + u8 index; +}; + #define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */ #define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precision time protocol */ #define RVU_MAC_VERSION BIT_ULL(2) diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 3c0a7e981f72..bc0d24507033 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -657,6 +657,8 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable); int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start); int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int 
lmac_id, int index, int rxtxflag, u64 *stat); +void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc); + /* NPA APIs */ int rvu_npa_init(struct rvu *rvu); void rvu_npa_freemem(struct rvu *rvu); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 6e2bf4fcd29c..9c6f4ba2d726 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -454,6 +454,31 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) return 0; } +void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc) +{ + int pf = rvu_get_pf(pcifunc); + int i = 0, lmac_count = 0; + u8 max_dmac_filters; + u8 cgx_id, lmac_id; + void *cgx_dev; + + if (!is_cgx_config_permitted(rvu, pcifunc)) + return; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + cgx_dev = cgx_get_pdata(cgx_id); + lmac_count = cgx_get_lmac_cnt(cgx_dev); + max_dmac_filters = MAX_DMAC_ENTRIES_PER_CGX / lmac_count; + + for (i = 0; i < max_dmac_filters; i++) + cgx_lmac_addr_del(cgx_id, lmac_id, i); + + /* As cgx_lmac_addr_del does not clear entry for index 0 + * so it needs to be done explicitly + */ + cgx_lmac_addr_reset(cgx_id, lmac_id); +} + int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { @@ -557,6 +582,63 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu, return 0; } +int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu, + struct cgx_mac_addr_add_req *req, + struct cgx_mac_addr_add_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + int rc = 0; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr); + if (rc >= 0) { + rsp->index = rc; + return 0; + } + + return rc; +} + +int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu, + struct cgx_mac_addr_del_req *req, + struct msg_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + return cgx_lmac_addr_del(cgx_id, lmac_id, req->index); +} + +int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu, + struct msg_req *req, + struct cgx_max_dmac_entries_get_rsp + *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + /* If msg is received from PFs(which are not mapped to CGX LMACs) + * or VF then no entries are allocated for DMAC filters at CGX level. + * So returning zero. 
+ */ + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) { + rsp->max_dmac_filters = 0; + return 0; + } + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id); + return 0; +} + int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, struct cgx_mac_addr_set_or_get *rsp) @@ -953,3 +1035,30 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu, rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac); return 0; } + +int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + return cgx_lmac_addr_reset(cgx_id, lmac_id); +} + +int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu, + struct cgx_mac_addr_update_req *req, + struct msg_rsp *rsp) +{ + int pf = rvu_get_pf(req->hdr.pcifunc); + u8 cgx_id, lmac_id; + + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index d6f8210652c5..aeae37704428 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -346,6 +346,9 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf) /* Free and disable any MCAM entries used by this NIX LF */ rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf); + + /* Disable DMAC filters used */ + rvu_cgx_disable_dmac_entries(rvu, pcifunc); } int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, -- cgit v1.2.3 From dbc52debf95f8f902f74309e7ae0de38e1ed4e4c Mon Sep 17 00:00:00 2001 From: Hariprasad Kelam Date: Wed, 30 Jun 2021 15:40:58 +0530 Subject: octeontx2-af: Debugfs support for DMAC filters Add debugfs support to display CGX/RPM DMAC filter table associated with pf. cat /sys/kernel/debug/octeontx2/cgx/cgx0/lmac0/mac_filter PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE 0002:02:00.0 PF2 ACCEPT ACCEPT UNICAST DMAC-INDEX ADDRESS 0 00:0f:b7:06:17:06 1 1a:1b:1c:1d:1e:01 2 1a:1b:1c:1d:1e:02 Signed-off-by: Hariprasad Kelam Signed-off-by: Sunil Kovvuri Goutham Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 28 +++++++ drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 3 + drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 1 + .../net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 2 +- .../ethernet/marvell/octeontx2/af/rvu_debugfs.c | 88 ++++++++++++++++++++-- 5 files changed, 113 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index bc413f96b430..9169849881bf 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -266,6 +266,34 @@ int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr) return 0; } +u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id) +{ + struct mac_ops *mac_ops; + struct cgx *cgx = cgxd; + + if (!cgxd || !is_lmac_valid(cgxd, lmac_id)) + return 0; + + cgx = cgxd; + /* Get mac_ops to know csr offset */ + mac_ops = cgx->mac_ops; + + return cgx_read(cgxd, lmac_id, CGXX_CMRX_RX_DMAC_CTL0); +} + +u64 cgx_read_dmac_entry(void *cgxd, int index) +{ + struct mac_ops *mac_ops; + struct cgx *cgx; + + if (!cgxd) + return 0; + + cgx = cgxd; + mac_ops = cgx->mac_ops; + return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8))); +} + int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr) { struct cgx *cgx_dev = cgx_get_pdata(cgx_id); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h index 0c613f83a41c..237ba2b56210 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h @@ -52,6 +52,7 @@ #define CGX_DMAC_BCAST_MODE BIT_ULL(0) #define CGXX_CMRX_RX_DMAC_CAM0 (0x200 + mac_ops->csr_offset) #define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48) +#define CGX_DMAC_CAM_ENTRY_LMACID GENMASK_ULL(50, 49) #define CGXX_CMRX_RX_DMAC_CAM1 0x400 #define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0) #define CGXX_CMRX_TX_STAT0 0x700 @@ -172,4 +173,6 @@ unsigned long cgx_get_lmac_bmap(void *cgxd); void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val); u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset); int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index); +u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id); +u64 cgx_read_dmac_entry(void *cgxd, int index); #endif /* CGX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index bc0d24507033..10e58a5d5861 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -744,6 +744,7 @@ void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature); u32 rvu_cgx_get_fifolen(struct rvu *rvu); void *rvu_first_cgx_pdata(struct rvu *rvu); +int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id); int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf, int type); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 9c6f4ba2d726..6cc8fbb7190c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -63,7 +63,7 @@ static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id) return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id]; } -static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id) +int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int 
lmac_id) { unsigned long pfmap; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 3cc3c6fd1d84..370d4ca1e5ed 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -1971,10 +1971,9 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id) return err; } -static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused) +static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id) { struct dentry *current_dir; - int err, lmac_id; char *buf; current_dir = filp->file->f_path.dentry->d_parent; @@ -1982,17 +1981,87 @@ static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused) if (!buf) return -EINVAL; - err = kstrtoint(buf + 1, 10, &lmac_id); - if (!err) { - err = cgx_print_stats(filp, lmac_id); - if (err) - return err; - } + return kstrtoint(buf + 1, 10, lmac_id); +} + +static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused) +{ + int lmac_id, err; + + err = rvu_dbg_derive_lmacid(filp, &lmac_id); + if (!err) + return cgx_print_stats(filp, lmac_id); + return err; } RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL); +static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id) +{ + struct pci_dev *pdev = NULL; + void *cgxd = s->private; + char *bcast, *mcast; + u16 index, domain; + u8 dmac[ETH_ALEN]; + struct rvu *rvu; + u64 cfg, mac; + int pf; + + rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM, + PCI_DEVID_OCTEONTX2_RVU_AF, NULL)); + if (!rvu) + return -ENODEV; + + pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id); + domain = 2; + + pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0); + if (!pdev) + return 0; + + cfg = cgx_read_dmac_ctrl(cgxd, lmac_id); + bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT"; + mcast = cfg & CGX_DMAC_MCAST_MODE ? 
"ACCEPT" : "REJECT"; + + seq_puts(s, + "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n"); + seq_printf(s, "%s PF%d %9s %9s", + dev_name(&pdev->dev), pf, bcast, mcast); + if (cfg & CGX_DMAC_CAM_ACCEPT) + seq_printf(s, "%12s\n\n", "UNICAST"); + else + seq_printf(s, "%16s\n\n", "PROMISCUOUS"); + + seq_puts(s, "\nDMAC-INDEX ADDRESS\n"); + + for (index = 0 ; index < 32 ; index++) { + cfg = cgx_read_dmac_entry(cgxd, index); + /* Display enabled dmac entries associated with current lmac */ + if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) && + FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) { + mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg); + u64_to_ether_addr(mac, dmac); + seq_printf(s, "%7d %pM\n", index, dmac); + } + } + + return 0; +} + +static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused) +{ + int err, lmac_id; + + err = rvu_dbg_derive_lmacid(filp, &lmac_id); + if (!err) + return cgx_print_dmac_flt(filp, lmac_id); + + return err; +} + +RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL); + static void rvu_dbg_cgx_init(struct rvu *rvu) { struct mac_ops *mac_ops; @@ -2029,6 +2098,9 @@ static void rvu_dbg_cgx_init(struct rvu *rvu) debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac, cgx, &rvu_dbg_cgx_stat_fops); + debugfs_create_file("mac_filter", 0600, + rvu->rvu_dbg.lmac, cgx, + &rvu_dbg_cgx_dmac_flt_fops); } } } -- cgit v1.2.3 From 79d2be385e9eabe4403eb85bcc7d3efc6b936a76 Mon Sep 17 00:00:00 2001 From: Hariprasad Kelam Date: Wed, 30 Jun 2021 15:40:59 +0530 Subject: octeontx2-pf: offload DMAC filters to CGX/RPM block DMAC filtering can be achieved by either NPC MCAM rules or CGX/RPM MAC filters. Currently we are achieving this by NPC MCAM rules. This patch offloads DMAC filters to CGX/RPM MAC filters instead of NPC MCAM rules. Offloading DMAC filter to CGX/RPM block helps in reducing traffic to NPC block and save MCAM rules Signed-off-by: Hariprasad Kelam Signed-off-by: Sunil Kovvuri Goutham Signed-off-by: David S. 
Miller --- .../net/ethernet/marvell/octeontx2/nic/Makefile | 2 +- .../ethernet/marvell/octeontx2/nic/otx2_common.c | 3 + .../ethernet/marvell/octeontx2/nic/otx2_common.h | 11 + .../ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c | 173 ++++++++++++++++ .../ethernet/marvell/octeontx2/nic/otx2_flows.c | 229 ++++++++++++++++++++- .../net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 9 + 6 files changed, 417 insertions(+), 10 deletions(-) create mode 100644 drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile index 457c94793e63..3254b02205ca 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -7,7 +7,7 @@ obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ - otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o + otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o rvu_nicvf-y := otx2_vf.o ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index cf7875d51d87..7cccd802c4ed 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -210,6 +210,9 @@ int otx2_set_mac_address(struct net_device *netdev, void *p) /* update dmac field in vlan offload rule */ if (pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) otx2_install_rxvlan_offload_flow(pfvf); + /* update dmac address in ntuple and DMAC filter list */ + if (pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT) + otx2_dmacflt_update_pfmac_flow(pfvf); } else { return -EPERM; } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 20a9c69f020f..8fd58cd07f50 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -288,6 +288,9 @@ struct otx2_flow_config { u16 tc_flower_offset; u16 ntuple_max_flows; u16 tc_max_flows; + u8 dmacflt_max_flows; + u8 *bmap_to_dmacindex; + unsigned long dmacflt_bmap; struct list_head flow_list; }; @@ -329,6 +332,7 @@ struct otx2_nic { #define OTX2_FLAG_TC_FLOWER_SUPPORT BIT_ULL(11) #define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12) #define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13) +#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14) u64 flags; struct otx2_qset qset; @@ -834,4 +838,11 @@ int otx2_init_tc(struct otx2_nic *nic); void otx2_shutdown_tc(struct otx2_nic *nic); int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data); +/* CGX/RPM DMAC filters support */ +int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf); +int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos); +int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos); +int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos); +void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf); +void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf); #endif /* OTX2_COMMON_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c new file mode 100644 index 000000000000..ffe3e94562d0 --- /dev/null +++ 
b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c @@ -0,0 +1,173 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Physcial Function ethernet driver + * + * Copyright (C) 2021 Marvell. + */ + +#include "otx2_common.h" + +static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac, + u8 *dmac_index) +{ + struct cgx_mac_addr_add_req *req; + struct cgx_mac_addr_add_rsp *rsp; + int err; + + mutex_lock(&pf->mbox.lock); + + req = otx2_mbox_alloc_msg_cgx_mac_addr_add(&pf->mbox); + if (!req) { + mutex_unlock(&pf->mbox.lock); + return -ENOMEM; + } + + ether_addr_copy(req->mac_addr, mac); + err = otx2_sync_mbox_msg(&pf->mbox); + + if (!err) { + rsp = (struct cgx_mac_addr_add_rsp *) + otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr); + *dmac_index = rsp->index; + } + + mutex_unlock(&pf->mbox.lock); + return err; +} + +static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf) +{ + struct cgx_mac_addr_set_or_get *req; + int err; + + mutex_lock(&pf->mbox.lock); + + req = otx2_mbox_alloc_msg_cgx_mac_addr_set(&pf->mbox); + if (!req) { + mutex_unlock(&pf->mbox.lock); + return -ENOMEM; + } + + ether_addr_copy(req->mac_addr, pf->netdev->dev_addr); + err = otx2_sync_mbox_msg(&pf->mbox); + + mutex_unlock(&pf->mbox.lock); + return err; +} + +int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos) +{ + u8 *dmacindex; + + /* Store dmacindex returned by CGX/RPM driver which will + * be used for macaddr update/remove + */ + dmacindex = &pf->flow_cfg->bmap_to_dmacindex[bit_pos]; + + if (ether_addr_equal(mac, pf->netdev->dev_addr)) + return otx2_dmacflt_add_pfmac(pf); + else + return otx2_dmacflt_do_add(pf, mac, dmacindex); +} + +static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac, + u8 dmac_index) +{ + struct cgx_mac_addr_del_req *req; + int err; + + mutex_lock(&pfvf->mbox.lock); + req = otx2_mbox_alloc_msg_cgx_mac_addr_del(&pfvf->mbox); + if (!req) { + mutex_unlock(&pfvf->mbox.lock); + return -ENOMEM; + } + + req->index = dmac_index; + + err = otx2_sync_mbox_msg(&pfvf->mbox); + mutex_unlock(&pfvf->mbox.lock); + + return err; +} + +static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf) +{ + struct msg_req *req; + int err; + + mutex_lock(&pf->mbox.lock); + req = otx2_mbox_alloc_msg_cgx_mac_addr_reset(&pf->mbox); + if (!req) { + mutex_unlock(&pf->mbox.lock); + return -ENOMEM; + } + + err = otx2_sync_mbox_msg(&pf->mbox); + + mutex_unlock(&pf->mbox.lock); + return err; +} + +int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, + u8 bit_pos) +{ + u8 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos]; + + if (ether_addr_equal(mac, pf->netdev->dev_addr)) + return otx2_dmacflt_remove_pfmac(pf); + else + return otx2_dmacflt_do_remove(pf, mac, dmacindex); +} + +/* CGX/RPM blocks support max unicast entries of 32. 
+ * on typical configuration MAC block associated + * with 4 lmacs, each lmac will have 8 dmac entries + */ +int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf) +{ + struct cgx_max_dmac_entries_get_rsp *rsp; + struct msg_req *msg; + int err; + + mutex_lock(&pf->mbox.lock); + msg = otx2_mbox_alloc_msg_cgx_mac_max_entries_get(&pf->mbox); + + if (!msg) { + mutex_unlock(&pf->mbox.lock); + return -ENOMEM; + } + + err = otx2_sync_mbox_msg(&pf->mbox); + if (err) + goto out; + + rsp = (struct cgx_max_dmac_entries_get_rsp *) + otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &msg->hdr); + pf->flow_cfg->dmacflt_max_flows = rsp->max_dmac_filters; + +out: + mutex_unlock(&pf->mbox.lock); + return err; +} + +int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos) +{ + struct cgx_mac_addr_update_req *req; + int rc; + + mutex_lock(&pf->mbox.lock); + + req = otx2_mbox_alloc_msg_cgx_mac_addr_update(&pf->mbox); + + if (!req) { + mutex_unlock(&pf->mbox.lock); + rc = -ENOMEM; + } + + ether_addr_copy(req->mac_addr, mac); + req->index = pf->flow_cfg->bmap_to_dmacindex[bit_pos]; + rc = otx2_sync_mbox_msg(&pf->mbox); + + mutex_unlock(&pf->mbox.lock); + return rc; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index 8c97106bdd1c..4d9de525802d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -18,6 +18,12 @@ struct otx2_flow { bool is_vf; u8 rss_ctx_id; int vf; + bool dmac_filter; +}; + +enum dmac_req { + DMAC_ADDR_UPDATE, + DMAC_ADDR_DEL }; static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg) @@ -219,6 +225,22 @@ int otx2_mcam_flow_init(struct otx2_nic *pf) if (!pf->mac_table) return -ENOMEM; + otx2_dmacflt_get_max_cnt(pf); + + /* DMAC filters are not allocated */ + if (!pf->flow_cfg->dmacflt_max_flows) + return 0; + + pf->flow_cfg->bmap_to_dmacindex = + devm_kzalloc(pf->dev, sizeof(u8) * + pf->flow_cfg->dmacflt_max_flows, + GFP_KERNEL); + + if (!pf->flow_cfg->bmap_to_dmacindex) + return -ENOMEM; + + pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT; + return 0; } @@ -280,6 +302,12 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac) { struct otx2_nic *pf = netdev_priv(netdev); + if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap, + pf->flow_cfg->dmacflt_max_flows)) + netdev_warn(netdev, + "Add %pM to CGX/RPM DMAC filters list as well\n", + mac); + return otx2_do_add_macfilter(pf, mac); } @@ -351,12 +379,22 @@ static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow) list_add(&flow->list, head); } +static int otx2_get_maxflows(struct otx2_flow_config *flow_cfg) +{ + if (flow_cfg->nr_flows == flow_cfg->ntuple_max_flows || + bitmap_weight(&flow_cfg->dmacflt_bmap, + flow_cfg->dmacflt_max_flows)) + return flow_cfg->ntuple_max_flows + flow_cfg->dmacflt_max_flows; + else + return flow_cfg->ntuple_max_flows; +} + int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc, u32 location) { struct otx2_flow *iter; - if (location >= pfvf->flow_cfg->ntuple_max_flows) + if (location >= otx2_get_maxflows(pfvf->flow_cfg)) return -EINVAL; list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) { @@ -378,7 +416,7 @@ int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc, int idx = 0; int err = 0; - nfc->data = pfvf->flow_cfg->ntuple_max_flows; + nfc->data = otx2_get_maxflows(pfvf->flow_cfg); while ((!err || err == -ENOENT) && idx < rule_cnt) { err = otx2_get_flow(pfvf, nfc, 
location); if (!err) @@ -760,6 +798,32 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp, return 0; } +static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf, + struct ethtool_rx_flow_spec *fsp) +{ + struct ethhdr *eth_mask = &fsp->m_u.ether_spec; + struct ethhdr *eth_hdr = &fsp->h_u.ether_spec; + u64 ring_cookie = fsp->ring_cookie; + u32 flow_type; + + if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)) + return false; + + flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS); + + /* CGX/RPM block dmac filtering configured for white listing + * check for action other than DROP + */ + if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC && + !ethtool_get_flow_spec_ring_vf(ring_cookie)) { + if (is_zero_ether_addr(eth_mask->h_dest) && + is_valid_ether_addr(eth_hdr->h_dest)) + return true; + } + + return false; +} + static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow) { u64 ring_cookie = flow->flow_spec.ring_cookie; @@ -818,14 +882,46 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow) return err; } +static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf, + struct otx2_flow *flow) +{ + struct otx2_flow *pf_mac; + struct ethhdr *eth_hdr; + + pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL); + if (!pf_mac) + return -ENOMEM; + + pf_mac->entry = 0; + pf_mac->dmac_filter = true; + pf_mac->location = pfvf->flow_cfg->ntuple_max_flows; + memcpy(&pf_mac->flow_spec, &flow->flow_spec, + sizeof(struct ethtool_rx_flow_spec)); + pf_mac->flow_spec.location = pf_mac->location; + + /* Copy PF mac address */ + eth_hdr = &pf_mac->flow_spec.h_u.ether_spec; + ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr); + + /* Install DMAC filter with PF mac address */ + otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0); + + otx2_add_flow_to_list(pfvf, pf_mac); + pfvf->flow_cfg->nr_flows++; + set_bit(0, &pfvf->flow_cfg->dmacflt_bmap); + + return 0; +} + int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct ethtool_rx_flow_spec *fsp = &nfc->fs; struct otx2_flow *flow; + struct ethhdr *eth_hdr; bool new = false; + int err = 0; u32 ring; - int err; ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT)) @@ -834,16 +930,15 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC) return -EINVAL; - if (fsp->location >= flow_cfg->ntuple_max_flows) + if (fsp->location >= otx2_get_maxflows(flow_cfg)) return -EINVAL; flow = otx2_find_flow(pfvf, fsp->location); if (!flow) { - flow = kzalloc(sizeof(*flow), GFP_ATOMIC); + flow = kzalloc(sizeof(*flow), GFP_KERNEL); if (!flow) return -ENOMEM; flow->location = fsp->location; - flow->entry = flow_cfg->flow_ent[flow->location]; new = true; } /* struct copy */ @@ -852,7 +947,54 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) if (fsp->flow_type & FLOW_RSS) flow->rss_ctx_id = nfc->rss_context; - err = otx2_add_flow_msg(pfvf, flow); + if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) { + eth_hdr = &flow->flow_spec.h_u.ether_spec; + + /* Sync dmac filter table with updated fields */ + if (flow->dmac_filter) + return otx2_dmacflt_update(pfvf, eth_hdr->h_dest, + flow->entry); + + if (bitmap_full(&flow_cfg->dmacflt_bmap, + flow_cfg->dmacflt_max_flows)) { + netdev_warn(pfvf->netdev, + "Can't insert the rule %d as max allowed dmac filters are %d\n", + flow->location + + 
flow_cfg->dmacflt_max_flows, + flow_cfg->dmacflt_max_flows); + err = -EINVAL; + if (new) + kfree(flow); + return err; + } + + /* Install PF mac address to DMAC filter list */ + if (!test_bit(0, &flow_cfg->dmacflt_bmap)) + otx2_add_flow_with_pfmac(pfvf, flow); + + flow->dmac_filter = true; + flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap, + flow_cfg->dmacflt_max_flows); + fsp->location = flow_cfg->ntuple_max_flows + flow->entry; + flow->flow_spec.location = fsp->location; + flow->location = fsp->location; + + set_bit(flow->entry, &flow_cfg->dmacflt_bmap); + otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry); + + } else { + if (flow->location >= pfvf->flow_cfg->ntuple_max_flows) { + netdev_warn(pfvf->netdev, + "Can't insert non dmac ntuple rule at %d, allowed range %d-0\n", + flow->location, + flow_cfg->ntuple_max_flows - 1); + err = -EINVAL; + } else { + flow->entry = flow_cfg->flow_ent[flow->location]; + err = otx2_add_flow_msg(pfvf, flow); + } + } + if (err) { if (new) kfree(flow); @@ -890,20 +1032,70 @@ static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all) return err; } +static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req) +{ + struct otx2_flow *iter; + struct ethhdr *eth_hdr; + bool found = false; + + list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) { + if (iter->dmac_filter && iter->entry == 0) { + eth_hdr = &iter->flow_spec.h_u.ether_spec; + if (req == DMAC_ADDR_DEL) { + otx2_dmacflt_remove(pfvf, eth_hdr->h_dest, + 0); + clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap); + found = true; + } else { + ether_addr_copy(eth_hdr->h_dest, + pfvf->netdev->dev_addr); + otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0); + } + break; + } + } + + if (found) { + list_del(&iter->list); + kfree(iter); + pfvf->flow_cfg->nr_flows--; + } +} + int otx2_remove_flow(struct otx2_nic *pfvf, u32 location) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct otx2_flow *flow; int err; - if (location >= flow_cfg->ntuple_max_flows) + if (location >= otx2_get_maxflows(flow_cfg)) return -EINVAL; flow = otx2_find_flow(pfvf, location); if (!flow) return -ENOENT; - err = otx2_remove_flow_msg(pfvf, flow->entry, false); + if (flow->dmac_filter) { + struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec; + + /* user not allowed to remove dmac filter with interface mac */ + if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest)) + return -EPERM; + + err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest, + flow->entry); + clear_bit(flow->entry, &flow_cfg->dmacflt_bmap); + /* If all dmac filters are removed delete macfilter with + * interface mac address and configure CGX/RPM block in + * promiscuous mode + */ + if (bitmap_weight(&flow_cfg->dmacflt_bmap, + flow_cfg->dmacflt_max_flows) == 1) + otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL); + } else { + err = otx2_remove_flow_msg(pfvf, flow->entry, false); + } + if (err) return err; @@ -1100,3 +1292,22 @@ int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable) mutex_unlock(&pf->mbox.lock); return rsp_hdr->rc; } + +void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf) +{ + struct otx2_flow *iter; + struct ethhdr *eth_hdr; + + list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) { + if (iter->dmac_filter) { + eth_hdr = &iter->flow_spec.h_u.ether_spec; + otx2_dmacflt_add(pf, eth_hdr->h_dest, + iter->entry); + } + } +} + +void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf) +{ + otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c 
b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 088c28df849d..f300b807a85b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1110,6 +1110,11 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable) struct msg_req *msg; int err; + if (enable && bitmap_weight(&pf->flow_cfg->dmacflt_bmap, + pf->flow_cfg->dmacflt_max_flows)) + netdev_warn(pf->netdev, + "CGX/RPM internal loopback might not work as DMAC filters are active\n"); + mutex_lock(&pf->mbox.lock); if (enable) msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox); @@ -1644,6 +1649,10 @@ int otx2_open(struct net_device *netdev) /* Restore pause frame settings */ otx2_config_pause_frm(pf); + /* Install DMAC Filters */ + if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT) + otx2_dmacflt_reinstall_flows(pf); + err = otx2_rxtx_enable(pf, true); if (err) goto err_tx_stop_queues; -- cgit v1.2.3 From 856a5c97268d474282360c8a3cf4f37f6036dbec Mon Sep 17 00:00:00 2001 From: M Chetan Kumar Date: Thu, 1 Jul 2021 20:37:06 +0530 Subject: net: wwan: iosm: fix uevent reporting Change uevent env variable name to IOSM_EVENT & correct reporting format to key=value pair. Signed-off-by: M Chetan Kumar Signed-off-by: David S. Miller --- drivers/net/wwan/iosm/iosm_ipc_uevent.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wwan/iosm/iosm_ipc_uevent.c b/drivers/net/wwan/iosm/iosm_ipc_uevent.c index 2229d752926c..d12188ffed7e 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_uevent.c +++ b/drivers/net/wwan/iosm/iosm_ipc_uevent.c @@ -37,7 +37,7 @@ void ipc_uevent_send(struct device *dev, char *uevent) /* Store the device and event information */ info->dev = dev; - snprintf(info->uevent, MAX_UEVENT_LEN, "%s: %s", dev_name(dev), uevent); + snprintf(info->uevent, MAX_UEVENT_LEN, "IOSM_EVENT=%s", uevent); /* Schedule uevent in process context using work queue */ schedule_work(&info->work); -- cgit v1.2.3 From 3bcfc0a2d3199d0a83d47ed67ad57a1c2f5a16d7 Mon Sep 17 00:00:00 2001 From: M Chetan Kumar Date: Thu, 1 Jul 2021 20:37:31 +0530 Subject: net: wwan: iosm: remove reduandant check Remove reduandant IP session id check since required checks are in place under caller. Signed-off-by: M Chetan Kumar Signed-off-by: David S. Miller --- drivers/net/wwan/iosm/iosm_ipc_imem_ops.c | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c index 46f76e8aae92..e4e9461b603e 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c +++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c @@ -24,15 +24,7 @@ int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id) return -EIO; } - /* check for the interafce id - * if if_id 1 to 8 then create IP MUX channel sessions. - * To start MUX session from 0 as network interface id would start - * from 1 so map it to if_id = if_id - 1 - */ - if (if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END) - return ipc_mux_open_session(ipc_imem->mux, if_id - 1); - - return -EINVAL; + return ipc_mux_open_session(ipc_imem->mux, if_id - 1); } /* Release a net link to CP. 
*/ @@ -83,13 +75,8 @@ int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem, goto out; } - if (if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END) - /* Route the UL packet through IP MUX Layer */ - ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, - if_id - 1, skb); - else - dev_err(ipc_imem->dev, - "invalid if_id %d: ", if_id); + /* Route the UL packet through IP MUX Layer */ + ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, if_id - 1, skb); out: return ret; } -- cgit v1.2.3 From 5bb4eea0c5f5b9383a543293966bdf20e54988aa Mon Sep 17 00:00:00 2001 From: M Chetan Kumar Date: Thu, 1 Jul 2021 20:37:45 +0530 Subject: net: wwan: iosm: correct link-id handling Link ID to be kept intact with MBIM session ID Ex: ID 0 should be associated to MBIM session ID 0. Reported-by: Loic Poulain Signed-off-by: M Chetan Kumar Signed-off-by: David S. Miller --- drivers/net/wwan/iosm/iosm_ipc_imem_ops.c | 6 +++--- drivers/net/wwan/iosm/iosm_ipc_imem_ops.h | 6 +++--- drivers/net/wwan/iosm/iosm_ipc_mux_codec.c | 2 +- drivers/net/wwan/iosm/iosm_ipc_wwan.c | 4 ++-- 4 files changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c index e4e9461b603e..0a472ce77370 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c +++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c @@ -24,7 +24,7 @@ int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id) return -EIO; } - return ipc_mux_open_session(ipc_imem->mux, if_id - 1); + return ipc_mux_open_session(ipc_imem->mux, if_id); } /* Release a net link to CP. */ @@ -33,7 +33,7 @@ void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id, { if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END) - ipc_mux_close_session(ipc_imem->mux, if_id - 1); + ipc_mux_close_session(ipc_imem->mux, if_id); } /* Tasklet call to do uplink transfer. */ @@ -76,7 +76,7 @@ int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem, } /* Route the UL packet through IP MUX Layer */ - ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, if_id - 1, skb); + ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, if_id, skb); out: return ret; } diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h index fd356dafbdd6..2007fe23e9a5 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h +++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h @@ -27,11 +27,11 @@ #define BOOT_CHECK_DEFAULT_TIMEOUT 400 /* IP MUX channel range */ -#define IP_MUX_SESSION_START 1 -#define IP_MUX_SESSION_END 8 +#define IP_MUX_SESSION_START 0 +#define IP_MUX_SESSION_END 7 /* Default IP MUX channel */ -#define IP_MUX_SESSION_DEFAULT 1 +#define IP_MUX_SESSION_DEFAULT 0 /** * ipc_imem_sys_port_open - Open a port link to CP. diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c index e634ffc6ec08..562de275797a 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c +++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c @@ -288,7 +288,7 @@ static int ipc_mux_net_receive(struct iosm_mux *ipc_mux, int if_id, /* Pass the packet to the netif layer. 
*/ dest_skb->priority = service_class; - return ipc_wwan_receive(wwan, dest_skb, false, if_id + 1); + return ipc_wwan_receive(wwan, dest_skb, false, if_id); } /* Decode Flow Credit Table in the block */ diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.c b/drivers/net/wwan/iosm/iosm_ipc_wwan.c index c999c64001f4..84e37c4b0f74 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_wwan.c +++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.c @@ -252,8 +252,8 @@ int ipc_wwan_receive(struct iosm_wwan *ipc_wwan, struct sk_buff *skb_arg, skb->pkt_type = PACKET_HOST; - if (if_id < (IP_MUX_SESSION_START - 1) || - if_id > (IP_MUX_SESSION_END - 1)) { + if (if_id < IP_MUX_SESSION_START || + if_id > IP_MUX_SESSION_END) { ret = -EINVAL; goto free; } -- cgit v1.2.3 From c302e3a1c86f78421d58ef564ba22519b0b039c0 Mon Sep 17 00:00:00 2001 From: M Chetan Kumar Date: Thu, 1 Jul 2021 20:39:17 +0530 Subject: net: wwan: iosm: fix netdev tx stats Update tx stats on successful packet consume, drop. Signed-off-by: M Chetan Kumar Signed-off-by: David S. Miller --- drivers/net/wwan/iosm/iosm_ipc_wwan.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.c b/drivers/net/wwan/iosm/iosm_ipc_wwan.c index 84e37c4b0f74..e0c19c59c5f6 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_wwan.c +++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.c @@ -107,6 +107,7 @@ static int ipc_wwan_link_transmit(struct sk_buff *skb, { struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev); struct iosm_wwan *ipc_wwan = priv->ipc_wwan; + unsigned int len = skb->len; int if_id = priv->if_id; int ret; @@ -123,6 +124,8 @@ static int ipc_wwan_link_transmit(struct sk_buff *skb, /* Return code of zero is success */ if (ret == 0) { + netdev->stats.tx_packets++; + netdev->stats.tx_bytes += len; ret = NETDEV_TX_OK; } else if (ret == -EBUSY) { ret = NETDEV_TX_BUSY; @@ -140,7 +143,8 @@ exit: ret); dev_kfree_skb_any(skb); - return ret; + netdev->stats.tx_dropped++; + return NETDEV_TX_OK; } /* Ops structure for wwan net link */ -- cgit v1.2.3 From d7340f46beae05227f5f4a1c8cb18e81e0c3fe0e Mon Sep 17 00:00:00 2001 From: M Chetan Kumar Date: Thu, 1 Jul 2021 20:39:34 +0530 Subject: net: wwan: iosm: set default mtu Set netdev default mtu size to 1500. Signed-off-by: M Chetan Kumar Signed-off-by: David S. Miller --- drivers/net/wwan/iosm/iosm_ipc_wwan.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.c b/drivers/net/wwan/iosm/iosm_ipc_wwan.c index e0c19c59c5f6..b2357ad5d517 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_wwan.c +++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.c @@ -162,6 +162,7 @@ static void ipc_wwan_setup(struct net_device *iosm_dev) iosm_dev->priv_flags |= IFF_NO_QUEUE; iosm_dev->type = ARPHRD_NONE; + iosm_dev->mtu = ETH_DATA_LEN; iosm_dev->min_mtu = ETH_MIN_MTU; iosm_dev->max_mtu = ETH_MAX_MTU; -- cgit v1.2.3 From 5d43f951b1ac797450bb4d230fdc960b739bea04 Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Wed, 30 Jun 2021 16:11:52 +0800 Subject: ptp: add ptp virtual clock driver framework This patch is to add ptp virtual clock driver framework utilizing timecounter/cyclecounter. The patch just exports two essential APIs for PTP driver. - ptp_vclock_register() - ptp_vclock_unregister() Signed-off-by: Yangbo Lu Signed-off-by: David S. 
Miller --- drivers/ptp/Makefile | 2 +- drivers/ptp/ptp_private.h | 15 ++++ drivers/ptp/ptp_vclock.c | 150 +++++++++++++++++++++++++++++++++++++++ include/linux/ptp_clock_kernel.h | 4 +- 4 files changed, 169 insertions(+), 2 deletions(-) create mode 100644 drivers/ptp/ptp_vclock.c (limited to 'drivers') diff --git a/drivers/ptp/Makefile b/drivers/ptp/Makefile index 8673d1743faa..28a6fe342d3e 100644 --- a/drivers/ptp/Makefile +++ b/drivers/ptp/Makefile @@ -3,7 +3,7 @@ # Makefile for PTP 1588 clock support. # -ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o +ptp-y := ptp_clock.o ptp_chardev.o ptp_sysfs.o ptp_vclock.o ptp_kvm-$(CONFIG_X86) := ptp_kvm_x86.o ptp_kvm_common.o ptp_kvm-$(CONFIG_HAVE_ARM_SMCCC) := ptp_kvm_arm.o ptp_kvm_common.o obj-$(CONFIG_PTP_1588_CLOCK) += ptp.o diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index 6b97155148f1..853b79b6b30e 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -48,6 +48,19 @@ struct ptp_clock { struct kthread_delayed_work aux_work; }; +#define info_to_vclock(d) container_of((d), struct ptp_vclock, info) +#define cc_to_vclock(d) container_of((d), struct ptp_vclock, cc) +#define dw_to_vclock(d) container_of((d), struct ptp_vclock, refresh_work) + +struct ptp_vclock { + struct ptp_clock *pclock; + struct ptp_clock_info info; + struct ptp_clock *clock; + struct cyclecounter cc; + struct timecounter tc; + spinlock_t lock; /* protects tc/cc */ +}; + /* * The function queue_cnt() is safe for readers to call without * holding q->lock. Readers use this function to verify that the queue @@ -89,4 +102,6 @@ extern const struct attribute_group *ptp_groups[]; int ptp_populate_pin_groups(struct ptp_clock *ptp); void ptp_cleanup_pin_groups(struct ptp_clock *ptp); +struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock); +void ptp_vclock_unregister(struct ptp_vclock *vclock); #endif diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c new file mode 100644 index 000000000000..fc9205cc504d --- /dev/null +++ b/drivers/ptp/ptp_vclock.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * PTP virtual clock driver + * + * Copyright 2021 NXP + */ +#include +#include "ptp_private.h" + +#define PTP_VCLOCK_CC_SHIFT 31 +#define PTP_VCLOCK_CC_MULT (1 << PTP_VCLOCK_CC_SHIFT) +#define PTP_VCLOCK_FADJ_SHIFT 9 +#define PTP_VCLOCK_FADJ_DENOMINATOR 15625ULL +#define PTP_VCLOCK_REFRESH_INTERVAL (HZ * 2) + +static int ptp_vclock_adjfine(struct ptp_clock_info *ptp, long scaled_ppm) +{ + struct ptp_vclock *vclock = info_to_vclock(ptp); + unsigned long flags; + s64 adj; + + adj = (s64)scaled_ppm << PTP_VCLOCK_FADJ_SHIFT; + adj = div_s64(adj, PTP_VCLOCK_FADJ_DENOMINATOR); + + spin_lock_irqsave(&vclock->lock, flags); + timecounter_read(&vclock->tc); + vclock->cc.mult = PTP_VCLOCK_CC_MULT + adj; + spin_unlock_irqrestore(&vclock->lock, flags); + + return 0; +} + +static int ptp_vclock_adjtime(struct ptp_clock_info *ptp, s64 delta) +{ + struct ptp_vclock *vclock = info_to_vclock(ptp); + unsigned long flags; + + spin_lock_irqsave(&vclock->lock, flags); + timecounter_adjtime(&vclock->tc, delta); + spin_unlock_irqrestore(&vclock->lock, flags); + + return 0; +} + +static int ptp_vclock_gettime(struct ptp_clock_info *ptp, + struct timespec64 *ts) +{ + struct ptp_vclock *vclock = info_to_vclock(ptp); + unsigned long flags; + u64 ns; + + spin_lock_irqsave(&vclock->lock, flags); + ns = timecounter_read(&vclock->tc); + spin_unlock_irqrestore(&vclock->lock, flags); + *ts = ns_to_timespec64(ns); + + return 0; +} + +static 
int ptp_vclock_settime(struct ptp_clock_info *ptp, + const struct timespec64 *ts) +{ + struct ptp_vclock *vclock = info_to_vclock(ptp); + u64 ns = timespec64_to_ns(ts); + unsigned long flags; + + spin_lock_irqsave(&vclock->lock, flags); + timecounter_init(&vclock->tc, &vclock->cc, ns); + spin_unlock_irqrestore(&vclock->lock, flags); + + return 0; +} + +static long ptp_vclock_refresh(struct ptp_clock_info *ptp) +{ + struct ptp_vclock *vclock = info_to_vclock(ptp); + struct timespec64 ts; + + ptp_vclock_gettime(&vclock->info, &ts); + + return PTP_VCLOCK_REFRESH_INTERVAL; +} + +static const struct ptp_clock_info ptp_vclock_info = { + .owner = THIS_MODULE, + .name = "ptp virtual clock", + /* The maximum ppb value that long scaled_ppm can support */ + .max_adj = 32767999, + .adjfine = ptp_vclock_adjfine, + .adjtime = ptp_vclock_adjtime, + .gettime64 = ptp_vclock_gettime, + .settime64 = ptp_vclock_settime, + .do_aux_work = ptp_vclock_refresh, +}; + +static u64 ptp_vclock_read(const struct cyclecounter *cc) +{ + struct ptp_vclock *vclock = cc_to_vclock(cc); + struct ptp_clock *ptp = vclock->pclock; + struct timespec64 ts = {}; + + if (ptp->info->gettimex64) + ptp->info->gettimex64(ptp->info, &ts, NULL); + else + ptp->info->gettime64(ptp->info, &ts); + + return timespec64_to_ns(&ts); +} + +static const struct cyclecounter ptp_vclock_cc = { + .read = ptp_vclock_read, + .mask = CYCLECOUNTER_MASK(32), + .mult = PTP_VCLOCK_CC_MULT, + .shift = PTP_VCLOCK_CC_SHIFT, +}; + +struct ptp_vclock *ptp_vclock_register(struct ptp_clock *pclock) +{ + struct ptp_vclock *vclock; + + vclock = kzalloc(sizeof(*vclock), GFP_KERNEL); + if (!vclock) + return NULL; + + vclock->pclock = pclock; + vclock->info = ptp_vclock_info; + vclock->cc = ptp_vclock_cc; + + snprintf(vclock->info.name, PTP_CLOCK_NAME_LEN, "ptp%d_virt", + pclock->index); + + spin_lock_init(&vclock->lock); + + vclock->clock = ptp_clock_register(&vclock->info, &pclock->dev); + if (IS_ERR_OR_NULL(vclock->clock)) { + kfree(vclock); + return NULL; + } + + timecounter_init(&vclock->tc, &vclock->cc, 0); + ptp_schedule_worker(vclock->clock, PTP_VCLOCK_REFRESH_INTERVAL); + + return vclock; +} + +void ptp_vclock_unregister(struct ptp_vclock *vclock) +{ + ptp_clock_unregister(vclock->clock); + kfree(vclock); +} diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index aba237c0b3a2..b6fb771ee524 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -11,7 +11,9 @@ #include #include #include +#include +#define PTP_CLOCK_NAME_LEN 32 /** * struct ptp_clock_request - request PTP clock event * @@ -134,7 +136,7 @@ struct ptp_system_timestamp { struct ptp_clock_info { struct module *owner; - char name[16]; + char name[PTP_CLOCK_NAME_LEN]; s32 max_adj; int n_alarm; int n_ext_ts; -- cgit v1.2.3 From 73f37068d540eba5f93ba3a0019bf479d35ebd76 Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Wed, 30 Jun 2021 16:11:53 +0800 Subject: ptp: support ptp physical/virtual clocks conversion Support ptp physical/virtual clocks conversion via sysfs. There will be a new attribute n_vclocks under ptp physical clock sysfs. - In default, the value is 0 meaning only ptp physical clock is in use. - Setting the value can create corresponding number of ptp virtual clocks to use. But current physical clock is guaranteed to stay free running. - Setting the value back to 0 can delete virtual clocks and back use physical clock again. Another new attribute max_vclocks control the maximum number of ptp vclocks. 
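As a minimal usage sketch (illustration only, not part of the patch, and assuming the physical clock is registered as ptp0 and exposes the two attributes described above), a root-run userspace program could exercise n_vclocks and max_vclocks roughly like this:

/*
 * Illustrative sketch (not from the patch): raise max_vclocks, then
 * create two virtual clocks on top of the physical clock ptp0. The
 * ptp0 index and the ptpN names of the resulting virtual clocks are
 * assumptions and depend on the running system.
 */
#include <stdio.h>
#include <stdlib.h>

static int write_sysfs(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	if (fputs(val, f) == EOF)
		perror(path);
	return fclose(f);
}

int main(void)
{
	/* Allow up to four vclocks, then instantiate two of them. */
	if (write_sysfs("/sys/class/ptp/ptp0/max_vclocks", "4\n"))
		return EXIT_FAILURE;
	if (write_sysfs("/sys/class/ptp/ptp0/n_vclocks", "2\n"))
		return EXIT_FAILURE;

	/*
	 * Writing "0" back to n_vclocks would delete the virtual clocks
	 * and return ptp0 to normal, adjustable operation.
	 */
	return EXIT_SUCCESS;
}

If the writes succeed, the new virtual clocks should appear as additional /dev/ptpN character devices; their exact indices depend on the system.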
Signed-off-by: Yangbo Lu Signed-off-by: David S. Miller --- Documentation/ABI/testing/sysfs-ptp | 20 ++++++ drivers/ptp/ptp_clock.c | 26 +++++++ drivers/ptp/ptp_private.h | 21 ++++++ drivers/ptp/ptp_sysfs.c | 138 ++++++++++++++++++++++++++++++++++++ 4 files changed, 205 insertions(+) (limited to 'drivers') diff --git a/Documentation/ABI/testing/sysfs-ptp b/Documentation/ABI/testing/sysfs-ptp index 2363ad810ddb..d378f57c1b73 100644 --- a/Documentation/ABI/testing/sysfs-ptp +++ b/Documentation/ABI/testing/sysfs-ptp @@ -33,6 +33,13 @@ Description: frequency adjustment value (a positive integer) in parts per billion. +What: /sys/class/ptp/ptpN/max_vclocks +Date: May 2021 +Contact: Yangbo Lu +Description: + This file contains the maximum number of ptp vclocks. + Write integer to re-configure it. + What: /sys/class/ptp/ptpN/n_alarms Date: September 2010 Contact: Richard Cochran @@ -61,6 +68,19 @@ Description: This file contains the number of programmable pins offered by the PTP hardware clock. +What: /sys/class/ptp/ptpN/n_vclocks +Date: May 2021 +Contact: Yangbo Lu +Description: + This file contains the number of virtual PTP clocks in + use. By default, the value is 0 meaning that only the + physical clock is in use. Setting the value creates + the corresponding number of virtual clocks and causes + the physical clock to become free running. Setting the + value back to 0 deletes the virtual clocks and + switches the physical clock back to normal, adjustable + operation. + What: /sys/class/ptp/ptpN/pins Date: March 2014 Contact: Richard Cochran diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index a23a37a4d5dc..7334f478dde7 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -76,6 +76,11 @@ static int ptp_clock_settime(struct posix_clock *pc, const struct timespec64 *tp { struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); + if (ptp_vclock_in_use(ptp)) { + pr_err("ptp: virtual clock in use\n"); + return -EBUSY; + } + return ptp->info->settime64(ptp->info, tp); } @@ -97,6 +102,11 @@ static int ptp_clock_adjtime(struct posix_clock *pc, struct __kernel_timex *tx) struct ptp_clock_info *ops; int err = -EOPNOTSUPP; + if (ptp_vclock_in_use(ptp)) { + pr_err("ptp: virtual clock in use\n"); + return -EBUSY; + } + ops = ptp->info; if (tx->modes & ADJ_SETOFFSET) { @@ -161,6 +171,7 @@ static void ptp_clock_release(struct device *dev) ptp_cleanup_pin_groups(ptp); mutex_destroy(&ptp->tsevq_mux); mutex_destroy(&ptp->pincfg_mux); + mutex_destroy(&ptp->n_vclocks_mux); ida_simple_remove(&ptp_clocks_map, ptp->index); kfree(ptp); } @@ -208,6 +219,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, spin_lock_init(&ptp->tsevq.lock); mutex_init(&ptp->tsevq_mux); mutex_init(&ptp->pincfg_mux); + mutex_init(&ptp->n_vclocks_mux); init_waitqueue_head(&ptp->tsev_wq); if (ptp->info->do_aux_work) { @@ -221,6 +233,14 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, ptp->pps_source->lookup_cookie = ptp; } + /* PTP virtual clock is being registered under physical clock */ + if (parent->class && parent->class->name && + strcmp(parent->class->name, "ptp") == 0) + ptp->is_virtual_clock = true; + + if (!ptp->is_virtual_clock) + ptp->max_vclocks = PTP_DEFAULT_MAX_VCLOCKS; + err = ptp_populate_pin_groups(ptp); if (err) goto no_pin_groups; @@ -270,6 +290,7 @@ no_pin_groups: kworker_err: mutex_destroy(&ptp->tsevq_mux); mutex_destroy(&ptp->pincfg_mux); + mutex_destroy(&ptp->n_vclocks_mux); ida_simple_remove(&ptp_clocks_map, index); no_slot: kfree(ptp); 
@@ -280,6 +301,11 @@ EXPORT_SYMBOL(ptp_clock_register); int ptp_clock_unregister(struct ptp_clock *ptp) { + if (ptp_vclock_in_use(ptp)) { + pr_err("ptp: virtual clock in use\n"); + return -EBUSY; + } + ptp->defunct = 1; wake_up_interruptible(&ptp->tsev_wq); diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index 853b79b6b30e..87cb55953b69 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -18,6 +18,7 @@ #define PTP_MAX_TIMESTAMPS 128 #define PTP_BUF_TIMESTAMPS 30 +#define PTP_DEFAULT_MAX_VCLOCKS 20 struct timestamp_event_queue { struct ptp_extts_event buf[PTP_MAX_TIMESTAMPS]; @@ -46,6 +47,10 @@ struct ptp_clock { const struct attribute_group *pin_attr_groups[2]; struct kthread_worker *kworker; struct kthread_delayed_work aux_work; + unsigned int max_vclocks; + unsigned int n_vclocks; + struct mutex n_vclocks_mux; /* protect concurrent n_vclocks access */ + bool is_virtual_clock; }; #define info_to_vclock(d) container_of((d), struct ptp_vclock, info) @@ -74,6 +79,22 @@ static inline int queue_cnt(struct timestamp_event_queue *q) return cnt < 0 ? PTP_MAX_TIMESTAMPS + cnt : cnt; } +/* Check if ptp virtual clock is in use */ +static inline bool ptp_vclock_in_use(struct ptp_clock *ptp) +{ + bool in_use = false; + + if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) + return true; + + if (!ptp->is_virtual_clock && ptp->n_vclocks) + in_use = true; + + mutex_unlock(&ptp->n_vclocks_mux); + + return in_use; +} + /* * see ptp_chardev.c */ diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c index be076a91e20e..0b05041783a5 100644 --- a/drivers/ptp/ptp_sysfs.c +++ b/drivers/ptp/ptp_sysfs.c @@ -3,6 +3,7 @@ * PTP 1588 clock support - sysfs interface. * * Copyright (C) 2010 OMICRON electronics GmbH + * Copyright 2021 NXP */ #include #include @@ -148,6 +149,137 @@ out: } static DEVICE_ATTR(pps_enable, 0220, NULL, pps_enable_store); +static int unregister_vclock(struct device *dev, void *data) +{ + struct ptp_clock *ptp = dev_get_drvdata(dev); + struct ptp_clock_info *info = ptp->info; + struct ptp_vclock *vclock; + u8 *num = data; + + vclock = info_to_vclock(info); + dev_info(dev->parent, "delete virtual clock ptp%d\n", + vclock->clock->index); + + ptp_vclock_unregister(vclock); + (*num)--; + + /* For break. Not error. 
*/ + if (*num == 0) + return -EINVAL; + + return 0; +} + +static ssize_t n_vclocks_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct ptp_clock *ptp = dev_get_drvdata(dev); + ssize_t size; + + if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) + return -ERESTARTSYS; + + size = snprintf(page, PAGE_SIZE - 1, "%d\n", ptp->n_vclocks); + + mutex_unlock(&ptp->n_vclocks_mux); + + return size; +} + +static ssize_t n_vclocks_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ptp_clock *ptp = dev_get_drvdata(dev); + struct ptp_vclock *vclock; + int err = -EINVAL; + u32 num, i; + + if (kstrtou32(buf, 0, &num)) + return err; + + if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) + return -ERESTARTSYS; + + if (num > ptp->max_vclocks) { + dev_err(dev, "max value is %d\n", ptp->max_vclocks); + goto out; + } + + /* Need to create more vclocks */ + if (num > ptp->n_vclocks) { + for (i = 0; i < num - ptp->n_vclocks; i++) { + vclock = ptp_vclock_register(ptp); + if (!vclock) + goto out; + + dev_info(dev, "new virtual clock ptp%d\n", + vclock->clock->index); + } + } + + /* Need to delete vclocks */ + if (num < ptp->n_vclocks) { + i = ptp->n_vclocks - num; + device_for_each_child_reverse(dev, &i, + unregister_vclock); + } + + if (num == 0) + dev_info(dev, "only physical clock in use now\n"); + else + dev_info(dev, "guarantee physical clock free running\n"); + + ptp->n_vclocks = num; + mutex_unlock(&ptp->n_vclocks_mux); + + return count; +out: + mutex_unlock(&ptp->n_vclocks_mux); + return err; +} +static DEVICE_ATTR_RW(n_vclocks); + +static ssize_t max_vclocks_show(struct device *dev, + struct device_attribute *attr, char *page) +{ + struct ptp_clock *ptp = dev_get_drvdata(dev); + ssize_t size; + + size = snprintf(page, PAGE_SIZE - 1, "%d\n", ptp->max_vclocks); + + return size; +} + +static ssize_t max_vclocks_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ptp_clock *ptp = dev_get_drvdata(dev); + u32 max; + + if (kstrtou32(buf, 0, &max) || max == 0) + return -EINVAL; + + if (max == ptp->max_vclocks) + return count; + + if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) + return -ERESTARTSYS; + + if (max < ptp->n_vclocks) { + mutex_unlock(&ptp->n_vclocks_mux); + return -EINVAL; + } + + ptp->max_vclocks = max; + + mutex_unlock(&ptp->n_vclocks_mux); + + return count; +} +static DEVICE_ATTR_RW(max_vclocks); + static struct attribute *ptp_attrs[] = { &dev_attr_clock_name.attr, @@ -162,6 +294,8 @@ static struct attribute *ptp_attrs[] = { &dev_attr_fifo.attr, &dev_attr_period.attr, &dev_attr_pps_enable.attr, + &dev_attr_n_vclocks.attr, + &dev_attr_max_vclocks.attr, NULL }; @@ -183,6 +317,10 @@ static umode_t ptp_is_attribute_visible(struct kobject *kobj, } else if (attr == &dev_attr_pps_enable.attr) { if (!info->pps) mode = 0; + } else if (attr == &dev_attr_n_vclocks.attr || + attr == &dev_attr_max_vclocks.attr) { + if (ptp->is_virtual_clock) + mode = 0; } return mode; -- cgit v1.2.3 From 44c494c8e30e35713c7d11ca3c5ab332cbfabacf Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Wed, 30 Jun 2021 16:11:54 +0800 Subject: ptp: track available ptp vclocks information Track available ptp vclocks information. Record index values of available ptp vclocks during registering and unregistering. This is preparation for supporting ptp vclocks info query through ethtool. Signed-off-by: Yangbo Lu Signed-off-by: David S. 
Miller --- drivers/ptp/ptp_clock.c | 15 ++++++++++++++- drivers/ptp/ptp_private.h | 1 + drivers/ptp/ptp_sysfs.c | 28 +++++++++++++++++++++++++--- 3 files changed, 40 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index 7334f478dde7..9205a9362a9d 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -196,6 +196,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, { struct ptp_clock *ptp; int err = 0, index, major = MAJOR(ptp_devt); + size_t size; if (info->n_alarm > PTP_MAX_ALARMS) return ERR_PTR(-EINVAL); @@ -238,9 +239,17 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, strcmp(parent->class->name, "ptp") == 0) ptp->is_virtual_clock = true; - if (!ptp->is_virtual_clock) + if (!ptp->is_virtual_clock) { ptp->max_vclocks = PTP_DEFAULT_MAX_VCLOCKS; + size = sizeof(int) * ptp->max_vclocks; + ptp->vclock_index = kzalloc(size, GFP_KERNEL); + if (!ptp->vclock_index) { + err = -ENOMEM; + goto no_mem_for_vclocks; + } + } + err = ptp_populate_pin_groups(ptp); if (err) goto no_pin_groups; @@ -285,6 +294,8 @@ no_clock: no_pps: ptp_cleanup_pin_groups(ptp); no_pin_groups: + kfree(ptp->vclock_index); +no_mem_for_vclocks: if (ptp->kworker) kthread_destroy_worker(ptp->kworker); kworker_err: @@ -309,6 +320,8 @@ int ptp_clock_unregister(struct ptp_clock *ptp) ptp->defunct = 1; wake_up_interruptible(&ptp->tsev_wq); + kfree(ptp->vclock_index); + if (ptp->kworker) { kthread_cancel_delayed_work_sync(&ptp->aux_work); kthread_destroy_worker(ptp->kworker); diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index 87cb55953b69..f75fadd9b244 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -49,6 +49,7 @@ struct ptp_clock { struct kthread_delayed_work aux_work; unsigned int max_vclocks; unsigned int n_vclocks; + int *vclock_index; struct mutex n_vclocks_mux; /* protect concurrent n_vclocks access */ bool is_virtual_clock; }; diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c index 0b05041783a5..6a36590ca77a 100644 --- a/drivers/ptp/ptp_sysfs.c +++ b/drivers/ptp/ptp_sysfs.c @@ -213,6 +213,9 @@ static ssize_t n_vclocks_store(struct device *dev, if (!vclock) goto out; + *(ptp->vclock_index + ptp->n_vclocks + i) = + vclock->clock->index; + dev_info(dev, "new virtual clock ptp%d\n", vclock->clock->index); } @@ -223,6 +226,9 @@ static ssize_t n_vclocks_store(struct device *dev, i = ptp->n_vclocks - num; device_for_each_child_reverse(dev, &i, unregister_vclock); + + for (i = 1; i <= ptp->n_vclocks - num; i++) + *(ptp->vclock_index + ptp->n_vclocks - i) = -1; } if (num == 0) @@ -256,6 +262,9 @@ static ssize_t max_vclocks_store(struct device *dev, const char *buf, size_t count) { struct ptp_clock *ptp = dev_get_drvdata(dev); + unsigned int *vclock_index; + int err = -EINVAL; + size_t size; u32 max; if (kstrtou32(buf, 0, &max) || max == 0) @@ -267,16 +276,29 @@ static ssize_t max_vclocks_store(struct device *dev, if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) return -ERESTARTSYS; - if (max < ptp->n_vclocks) { - mutex_unlock(&ptp->n_vclocks_mux); - return -EINVAL; + if (max < ptp->n_vclocks) + goto out; + + size = sizeof(int) * max; + vclock_index = kzalloc(size, GFP_KERNEL); + if (!vclock_index) { + err = -ENOMEM; + goto out; } + size = sizeof(int) * ptp->n_vclocks; + memcpy(vclock_index, ptp->vclock_index, size); + + kfree(ptp->vclock_index); + ptp->vclock_index = vclock_index; ptp->max_vclocks = max; mutex_unlock(&ptp->n_vclocks_mux); return count; 
+out: + mutex_unlock(&ptp->n_vclocks_mux); + return err; } static DEVICE_ATTR_RW(max_vclocks); -- cgit v1.2.3 From acb288e8047b7569fbc9af6fa6e9405315345103 Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Wed, 30 Jun 2021 16:11:55 +0800 Subject: ptp: add kernel API ptp_get_vclocks_index() Add kernel API ptp_get_vclocks_index() to get all ptp vclocks index on pclock. This is preparation for supporting ptp vclocks info query through ethtool. Signed-off-by: Yangbo Lu Signed-off-by: David S. Miller --- drivers/ptp/ptp_clock.c | 3 ++- drivers/ptp/ptp_private.h | 2 ++ drivers/ptp/ptp_vclock.c | 35 +++++++++++++++++++++++++++++++++++ include/linux/ptp_clock_kernel.h | 14 ++++++++++++++ 4 files changed, 53 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index 9205a9362a9d..f012fa581cf4 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -24,10 +24,11 @@ #define PTP_PPS_EVENT PPS_CAPTUREASSERT #define PTP_PPS_MODE (PTP_PPS_DEFAULTS | PPS_CANWAIT | PPS_TSFMT_TSPEC) +struct class *ptp_class; + /* private globals */ static dev_t ptp_devt; -static struct class *ptp_class; static DEFINE_IDA(ptp_clocks_map); diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h index f75fadd9b244..dba6be477067 100644 --- a/drivers/ptp/ptp_private.h +++ b/drivers/ptp/ptp_private.h @@ -96,6 +96,8 @@ static inline bool ptp_vclock_in_use(struct ptp_clock *ptp) return in_use; } +extern struct class *ptp_class; + /* * see ptp_chardev.c */ diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c index fc9205cc504d..cefab29a0592 100644 --- a/drivers/ptp/ptp_vclock.c +++ b/drivers/ptp/ptp_vclock.c @@ -148,3 +148,38 @@ void ptp_vclock_unregister(struct ptp_vclock *vclock) ptp_clock_unregister(vclock->clock); kfree(vclock); } + +int ptp_get_vclocks_index(int pclock_index, int **vclock_index) +{ + char name[PTP_CLOCK_NAME_LEN] = ""; + struct ptp_clock *ptp; + struct device *dev; + int num = 0; + + if (pclock_index < 0) + return num; + + snprintf(name, PTP_CLOCK_NAME_LEN, "ptp%d", pclock_index); + dev = class_find_device_by_name(ptp_class, name); + if (!dev) + return num; + + ptp = dev_get_drvdata(dev); + + if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) { + put_device(dev); + return num; + } + + *vclock_index = kzalloc(sizeof(int) * ptp->n_vclocks, GFP_KERNEL); + if (!(*vclock_index)) + goto out; + + memcpy(*vclock_index, ptp->vclock_index, sizeof(int) * ptp->n_vclocks); + num = ptp->n_vclocks; +out: + mutex_unlock(&ptp->n_vclocks_mux); + put_device(dev); + return num; +} +EXPORT_SYMBOL(ptp_get_vclocks_index); diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index b6fb771ee524..300a984fec87 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -306,6 +306,18 @@ int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay); */ void ptp_cancel_worker_sync(struct ptp_clock *ptp); +/** + * ptp_get_vclocks_index() - get all vclocks index on pclock, and + * caller is responsible to free memory + * of vclock_index + * + * @pclock_index: phc index of ptp pclock. + * @vclock_index: pointer to pointer of vclock index. + * + * return number of vclocks. 
+ */ +int ptp_get_vclocks_index(int pclock_index, int **vclock_index); + #else static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, struct device *parent) @@ -325,6 +337,8 @@ static inline int ptp_schedule_worker(struct ptp_clock *ptp, { return -EOPNOTSUPP; } static inline void ptp_cancel_worker_sync(struct ptp_clock *ptp) { } +static inline int ptp_get_vclocks_index(int pclock_index, int **vclock_index) +{ return 0; } #endif -- cgit v1.2.3 From 895487a3a10fb3a177e20dcde875515d46ccd4df Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Wed, 30 Jun 2021 16:11:57 +0800 Subject: ptp: add kernel API ptp_convert_timestamp() Add kernel API ptp_convert_timestamp() to convert raw hardware timestamp to a specified ptp vclock time. Signed-off-by: Yangbo Lu Signed-off-by: David S. Miller --- drivers/ptp/ptp_vclock.c | 34 ++++++++++++++++++++++++++++++++++ include/linux/ptp_clock_kernel.h | 13 +++++++++++++ 2 files changed, 47 insertions(+) (limited to 'drivers') diff --git a/drivers/ptp/ptp_vclock.c b/drivers/ptp/ptp_vclock.c index cefab29a0592..e0f87c57749a 100644 --- a/drivers/ptp/ptp_vclock.c +++ b/drivers/ptp/ptp_vclock.c @@ -183,3 +183,37 @@ out: return num; } EXPORT_SYMBOL(ptp_get_vclocks_index); + +void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps, + int vclock_index) +{ + char name[PTP_CLOCK_NAME_LEN] = ""; + struct ptp_vclock *vclock; + struct ptp_clock *ptp; + unsigned long flags; + struct device *dev; + u64 ns; + + snprintf(name, PTP_CLOCK_NAME_LEN, "ptp%d", vclock_index); + dev = class_find_device_by_name(ptp_class, name); + if (!dev) + return; + + ptp = dev_get_drvdata(dev); + if (!ptp->is_virtual_clock) { + put_device(dev); + return; + } + + vclock = info_to_vclock(ptp->info); + + ns = ktime_to_ns(hwtstamps->hwtstamp); + + spin_lock_irqsave(&vclock->lock, flags); + ns = timecounter_cyc2time(&vclock->tc, ns); + spin_unlock_irqrestore(&vclock->lock, flags); + + put_device(dev); + hwtstamps->hwtstamp = ns_to_ktime(ns); +} +EXPORT_SYMBOL(ptp_convert_timestamp); diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index 300a984fec87..71fac9237725 100644 --- a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -12,6 +12,7 @@ #include #include #include +#include #define PTP_CLOCK_NAME_LEN 32 /** @@ -318,6 +319,15 @@ void ptp_cancel_worker_sync(struct ptp_clock *ptp); */ int ptp_get_vclocks_index(int pclock_index, int **vclock_index); +/** + * ptp_convert_timestamp() - convert timestamp to a ptp vclock time + * + * @hwtstamps: skb_shared_hwtstamps structure pointer + * @vclock_index: phc index of ptp vclock. + */ +void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps, + int vclock_index); + #else static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, struct device *parent) @@ -339,6 +349,9 @@ static inline void ptp_cancel_worker_sync(struct ptp_clock *ptp) { } static inline int ptp_get_vclocks_index(int pclock_index, int **vclock_index) { return 0; } +static inline void ptp_convert_timestamp(struct skb_shared_hwtstamps *hwtstamps, + int vclock_index) +{ } #endif -- cgit v1.2.3 From 4f408e1fa6e10b6da72691233369172bac7d9e9b Mon Sep 17 00:00:00 2001 From: Sukadev Bhattiprolu Date: Wed, 30 Jun 2021 14:36:17 -0400 Subject: ibmvnic: retry reset if there are no other resets Normally, if a reset fails due to failover or other communication error there is another reset (eg: FAILOVER) in the queue and we would process that reset. 
But if we are unable to communicate with PHYP or VIOS after H_FREE_CRQ, there would be no other resets in the queue and the adapter would be in an undefined state even though it was in the OPEN state earlier. While starting the reset we set the carrier to off state so we won't even get the timeout resets. If the last queued reset fails, retry it as a hard reset (after the usual 60 second settling time). Signed-off-by: Sukadev Bhattiprolu Reviewed-by: Dany Madden Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 22 +++++++++++++++++++--- 1 file changed, 19 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 374a75d4faea..ed77191d19f4 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -2420,9 +2420,10 @@ out: static void __ibmvnic_reset(struct work_struct *work) { - struct ibmvnic_rwi *rwi; struct ibmvnic_adapter *adapter; bool saved_state = false; + struct ibmvnic_rwi *tmprwi; + struct ibmvnic_rwi *rwi; unsigned long flags; u32 reset_state; int rc = 0; @@ -2489,7 +2490,7 @@ static void __ibmvnic_reset(struct work_struct *work) } else { rc = do_reset(adapter, rwi, reset_state); } - kfree(rwi); + tmprwi = rwi; adapter->last_reset_time = jiffies; if (rc) @@ -2497,8 +2498,23 @@ static void __ibmvnic_reset(struct work_struct *work) rwi = get_next_rwi(adapter); + /* + * If there is another reset queued, free the previous rwi + * and process the new reset even if previous reset failed + * (the previous reset could have failed because of a fail + * over for instance, so process the fail over). + * + * If there are no resets queued and the previous reset failed, + * the adapter would be in an undefined state. So retry the + * previous reset as a hard reset. + */ + if (rwi) + kfree(tmprwi); + else if (rc) + rwi = tmprwi; + if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER || - rwi->reset_reason == VNIC_RESET_MOBILITY)) + rwi->reset_reason == VNIC_RESET_MOBILITY || rc)) adapter->force_reset_recovery = true; } -- cgit v1.2.3 From ca75bcf0a83b6cc7f53a593d98ec7121c4839b43 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 1 Jul 2021 10:15:09 +0200 Subject: net: remove the caif_hsi driver The caif_hsi driver relies on a cfhsi_get_ops symbol using symbol_get, but this symbol is not provided anywhere in the kernel tree. Remove this driver given that it is dead code. Signed-off-by: Christoph Hellwig Reviewed-by: Greg Kroah-Hartman Signed-off-by: David S. Miller --- drivers/net/caif/Kconfig | 9 - drivers/net/caif/Makefile | 3 - drivers/net/caif/caif_hsi.c | 1454 ------------------------------------------- include/net/caif/caif_hsi.h | 200 ------ 4 files changed, 1666 deletions(-) delete mode 100644 drivers/net/caif/caif_hsi.c delete mode 100644 include/net/caif/caif_hsi.h (limited to 'drivers') diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig index a77124bc1f4b..709660cb38f8 100644 --- a/drivers/net/caif/Kconfig +++ b/drivers/net/caif/Kconfig @@ -20,15 +20,6 @@ config CAIF_TTY identified as N_CAIF. When this ldisc is opened from user space it will redirect the TTY's traffic into the CAIF stack. -config CAIF_HSI - tristate "CAIF HSI transport driver" - depends on CAIF - default n - help - The CAIF low level driver for CAIF over HSI. - Be aware that if you enable this then you also need to - enable a low-level HSI driver. 
- config CAIF_VIRTIO tristate "CAIF virtio transport driver" depends on CAIF && HAS_DMA diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile index b1918c8c126c..97f664f8016c 100644 --- a/drivers/net/caif/Makefile +++ b/drivers/net/caif/Makefile @@ -4,8 +4,5 @@ ccflags-$(CONFIG_CAIF_DEBUG) := -DDEBUG # Serial interface obj-$(CONFIG_CAIF_TTY) += caif_serial.o -# HSI interface -obj-$(CONFIG_CAIF_HSI) += caif_hsi.o - # Virtio interface obj-$(CONFIG_CAIF_VIRTIO) += caif_virtio.o diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c deleted file mode 100644 index 3d63b15bbaa1..000000000000 --- a/drivers/net/caif/caif_hsi.c +++ /dev/null @@ -1,1454 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) ST-Ericsson AB 2010 - * Author: Daniel Martensson - * Dmitry.Tarnyagin / dmitry.tarnyagin@lockless.no - */ - -#define pr_fmt(fmt) KBUILD_MODNAME fmt - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Daniel Martensson"); -MODULE_DESCRIPTION("CAIF HSI driver"); - -/* Returns the number of padding bytes for alignment. */ -#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\ - (((pow)-((x)&((pow)-1))))) - -static const struct cfhsi_config hsi_default_config = { - - /* Inactivity timeout on HSI, ms */ - .inactivity_timeout = HZ, - - /* Aggregation timeout (ms) of zero means no aggregation is done*/ - .aggregation_timeout = 1, - - /* - * HSI link layer flow-control thresholds. - * Threshold values for the HSI packet queue. Flow-control will be - * asserted when the number of packets exceeds q_high_mark. It will - * not be de-asserted before the number of packets drops below - * q_low_mark. - * Warning: A high threshold value might increase throughput but it - * will at the same time prevent channel prioritization and increase - * the risk of flooding the modem. The high threshold should be above - * the low. - */ - .q_high_mark = 100, - .q_low_mark = 50, - - /* - * HSI padding options. - * Warning: must be a base of 2 (& operation used) and can not be zero ! - */ - .head_align = 4, - .tail_align = 4, -}; - -#define ON 1 -#define OFF 0 - -static LIST_HEAD(cfhsi_list); - -static void cfhsi_inactivity_tout(struct timer_list *t) -{ - struct cfhsi *cfhsi = from_timer(cfhsi, t, inactivity_timer); - - netdev_dbg(cfhsi->ndev, "%s.\n", - __func__); - - /* Schedule power down work queue. 
*/ - if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - queue_work(cfhsi->wq, &cfhsi->wake_down_work); -} - -static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi, - const struct sk_buff *skb, - int direction) -{ - struct caif_payload_info *info; - int hpad, tpad, len; - - info = (struct caif_payload_info *)&skb->cb; - hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align); - tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align); - len = skb->len + hpad + tpad; - - if (direction > 0) - cfhsi->aggregation_len += len; - else if (direction < 0) - cfhsi->aggregation_len -= len; -} - -static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi) -{ - int i; - - if (cfhsi->cfg.aggregation_timeout == 0) - return true; - - for (i = 0; i < CFHSI_PRIO_BEBK; ++i) { - if (cfhsi->qhead[i].qlen) - return true; - } - - /* TODO: Use aggregation_len instead */ - if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS) - return true; - - return false; -} - -static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi) -{ - struct sk_buff *skb; - int i; - - for (i = 0; i < CFHSI_PRIO_LAST; ++i) { - skb = skb_dequeue(&cfhsi->qhead[i]); - if (skb) - break; - } - - return skb; -} - -static int cfhsi_tx_queue_len(struct cfhsi *cfhsi) -{ - int i, len = 0; - for (i = 0; i < CFHSI_PRIO_LAST; ++i) - len += skb_queue_len(&cfhsi->qhead[i]); - return len; -} - -static void cfhsi_abort_tx(struct cfhsi *cfhsi) -{ - struct sk_buff *skb; - - for (;;) { - spin_lock_bh(&cfhsi->lock); - skb = cfhsi_dequeue(cfhsi); - if (!skb) - break; - - cfhsi->ndev->stats.tx_errors++; - cfhsi->ndev->stats.tx_dropped++; - cfhsi_update_aggregation_stats(cfhsi, skb, -1); - spin_unlock_bh(&cfhsi->lock); - kfree_skb(skb); - } - cfhsi->tx_state = CFHSI_TX_STATE_IDLE; - if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - mod_timer(&cfhsi->inactivity_timer, - jiffies + cfhsi->cfg.inactivity_timeout); - spin_unlock_bh(&cfhsi->lock); -} - -static int cfhsi_flush_fifo(struct cfhsi *cfhsi) -{ - char buffer[32]; /* Any reasonable value */ - size_t fifo_occupancy; - int ret; - - netdev_dbg(cfhsi->ndev, "%s.\n", - __func__); - - do { - ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops, - &fifo_occupancy); - if (ret) { - netdev_warn(cfhsi->ndev, - "%s: can't get FIFO occupancy: %d.\n", - __func__, ret); - break; - } else if (!fifo_occupancy) - /* No more data, exitting normally */ - break; - - fifo_occupancy = min(sizeof(buffer), fifo_occupancy); - set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits); - ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy, - cfhsi->ops); - if (ret) { - clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits); - netdev_warn(cfhsi->ndev, - "%s: can't read data: %d.\n", - __func__, ret); - break; - } - - ret = 5 * HZ; - ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait, - !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret); - - if (ret < 0) { - netdev_warn(cfhsi->ndev, - "%s: can't wait for flush complete: %d.\n", - __func__, ret); - break; - } else if (!ret) { - ret = -ETIMEDOUT; - netdev_warn(cfhsi->ndev, - "%s: timeout waiting for flush complete.\n", - __func__); - break; - } - } while (1); - - return ret; -} - -static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi) -{ - int nfrms = 0; - int pld_len = 0; - struct sk_buff *skb; - u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ; - - skb = cfhsi_dequeue(cfhsi); - if (!skb) - return 0; - - /* Clear offset. */ - desc->offset = 0; - - /* Check if we can embed a CAIF frame. 
*/ - if (skb->len < CFHSI_MAX_EMB_FRM_SZ) { - struct caif_payload_info *info; - int hpad; - int tpad; - - /* Calculate needed head alignment and tail alignment. */ - info = (struct caif_payload_info *)&skb->cb; - - hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align); - tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align); - - /* Check if frame still fits with added alignment. */ - if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) { - u8 *pemb = desc->emb_frm; - desc->offset = CFHSI_DESC_SHORT_SZ; - *pemb = (u8)(hpad - 1); - pemb += hpad; - - /* Update network statistics. */ - spin_lock_bh(&cfhsi->lock); - cfhsi->ndev->stats.tx_packets++; - cfhsi->ndev->stats.tx_bytes += skb->len; - cfhsi_update_aggregation_stats(cfhsi, skb, -1); - spin_unlock_bh(&cfhsi->lock); - - /* Copy in embedded CAIF frame. */ - skb_copy_bits(skb, 0, pemb, skb->len); - - /* Consume the SKB */ - consume_skb(skb); - skb = NULL; - } - } - - /* Create payload CAIF frames. */ - while (nfrms < CFHSI_MAX_PKTS) { - struct caif_payload_info *info; - int hpad; - int tpad; - - if (!skb) - skb = cfhsi_dequeue(cfhsi); - - if (!skb) - break; - - /* Calculate needed head alignment and tail alignment. */ - info = (struct caif_payload_info *)&skb->cb; - - hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align); - tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align); - - /* Fill in CAIF frame length in descriptor. */ - desc->cffrm_len[nfrms] = hpad + skb->len + tpad; - - /* Fill head padding information. */ - *pfrm = (u8)(hpad - 1); - pfrm += hpad; - - /* Update network statistics. */ - spin_lock_bh(&cfhsi->lock); - cfhsi->ndev->stats.tx_packets++; - cfhsi->ndev->stats.tx_bytes += skb->len; - cfhsi_update_aggregation_stats(cfhsi, skb, -1); - spin_unlock_bh(&cfhsi->lock); - - /* Copy in CAIF frame. */ - skb_copy_bits(skb, 0, pfrm, skb->len); - - /* Update payload length. */ - pld_len += desc->cffrm_len[nfrms]; - - /* Update frame pointer. */ - pfrm += skb->len + tpad; - - /* Consume the SKB */ - consume_skb(skb); - skb = NULL; - - /* Update number of frames. */ - nfrms++; - } - - /* Unused length fields should be zero-filled (according to SPEC). */ - while (nfrms < CFHSI_MAX_PKTS) { - desc->cffrm_len[nfrms] = 0x0000; - nfrms++; - } - - /* Check if we can piggy-back another descriptor. */ - if (cfhsi_can_send_aggregate(cfhsi)) - desc->header |= CFHSI_PIGGY_DESC; - else - desc->header &= ~CFHSI_PIGGY_DESC; - - return CFHSI_DESC_SZ + pld_len; -} - -static void cfhsi_start_tx(struct cfhsi *cfhsi) -{ - struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf; - int len, res; - - netdev_dbg(cfhsi->ndev, "%s.\n", __func__); - - if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - return; - - do { - /* Create HSI frame. */ - len = cfhsi_tx_frm(desc, cfhsi); - if (!len) { - spin_lock_bh(&cfhsi->lock); - if (unlikely(cfhsi_tx_queue_len(cfhsi))) { - spin_unlock_bh(&cfhsi->lock); - res = -EAGAIN; - continue; - } - cfhsi->tx_state = CFHSI_TX_STATE_IDLE; - /* Start inactivity timer. */ - mod_timer(&cfhsi->inactivity_timer, - jiffies + cfhsi->cfg.inactivity_timeout); - spin_unlock_bh(&cfhsi->lock); - break; - } - - /* Set up new transfer. 
*/ - res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops); - if (WARN_ON(res < 0)) - netdev_err(cfhsi->ndev, "%s: TX error %d.\n", - __func__, res); - } while (res < 0); -} - -static void cfhsi_tx_done(struct cfhsi *cfhsi) -{ - netdev_dbg(cfhsi->ndev, "%s.\n", __func__); - - if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - return; - - /* - * Send flow on if flow off has been previously signalled - * and number of packets is below low water mark. - */ - spin_lock_bh(&cfhsi->lock); - if (cfhsi->flow_off_sent && - cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark && - cfhsi->cfdev.flowctrl) { - - cfhsi->flow_off_sent = 0; - cfhsi->cfdev.flowctrl(cfhsi->ndev, ON); - } - - if (cfhsi_can_send_aggregate(cfhsi)) { - spin_unlock_bh(&cfhsi->lock); - cfhsi_start_tx(cfhsi); - } else { - mod_timer(&cfhsi->aggregation_timer, - jiffies + cfhsi->cfg.aggregation_timeout); - spin_unlock_bh(&cfhsi->lock); - } - - return; -} - -static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops) -{ - struct cfhsi *cfhsi; - - cfhsi = container_of(cb_ops, struct cfhsi, cb_ops); - netdev_dbg(cfhsi->ndev, "%s.\n", - __func__); - - if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - return; - cfhsi_tx_done(cfhsi); -} - -static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi) -{ - int xfer_sz = 0; - int nfrms = 0; - u16 *plen = NULL; - u8 *pfrm = NULL; - - if ((desc->header & ~CFHSI_PIGGY_DESC) || - (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) { - netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n", - __func__); - return -EPROTO; - } - - /* Check for embedded CAIF frame. */ - if (desc->offset) { - struct sk_buff *skb; - int len = 0; - pfrm = ((u8 *)desc) + desc->offset; - - /* Remove offset padding. */ - pfrm += *pfrm + 1; - - /* Read length of CAIF frame (little endian). */ - len = *pfrm; - len |= ((*(pfrm+1)) << 8) & 0xFF00; - len += 2; /* Add FCS fields. */ - - /* Sanity check length of CAIF frame. */ - if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) { - netdev_err(cfhsi->ndev, "%s: Invalid length.\n", - __func__); - return -EPROTO; - } - - /* Allocate SKB (OK even in IRQ context). */ - skb = alloc_skb(len + 1, GFP_ATOMIC); - if (!skb) { - netdev_err(cfhsi->ndev, "%s: Out of memory !\n", - __func__); - return -ENOMEM; - } - caif_assert(skb != NULL); - - skb_put_data(skb, pfrm, len); - - skb->protocol = htons(ETH_P_CAIF); - skb_reset_mac_header(skb); - skb->dev = cfhsi->ndev; - - netif_rx_any_context(skb); - - /* Update network statistics. */ - cfhsi->ndev->stats.rx_packets++; - cfhsi->ndev->stats.rx_bytes += len; - } - - /* Calculate transfer length. */ - plen = desc->cffrm_len; - while (nfrms < CFHSI_MAX_PKTS && *plen) { - xfer_sz += *plen; - plen++; - nfrms++; - } - - /* Check for piggy-backed descriptor. */ - if (desc->header & CFHSI_PIGGY_DESC) - xfer_sz += CFHSI_DESC_SZ; - - if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) { - netdev_err(cfhsi->ndev, - "%s: Invalid payload len: %d, ignored.\n", - __func__, xfer_sz); - return -EPROTO; - } - return xfer_sz; -} - -static int cfhsi_rx_desc_len(struct cfhsi_desc *desc) -{ - int xfer_sz = 0; - int nfrms = 0; - u16 *plen; - - if ((desc->header & ~CFHSI_PIGGY_DESC) || - (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) { - - pr_err("Invalid descriptor. %x %x\n", desc->header, - desc->offset); - return -EPROTO; - } - - /* Calculate transfer length. 
*/ - plen = desc->cffrm_len; - while (nfrms < CFHSI_MAX_PKTS && *plen) { - xfer_sz += *plen; - plen++; - nfrms++; - } - - if (xfer_sz % 4) { - pr_err("Invalid payload len: %d, ignored.\n", xfer_sz); - return -EPROTO; - } - return xfer_sz; -} - -static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi) -{ - int rx_sz = 0; - int nfrms = 0; - u16 *plen = NULL; - u8 *pfrm = NULL; - - /* Sanity check header and offset. */ - if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) || - (desc->offset > CFHSI_MAX_EMB_FRM_SZ))) { - netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n", - __func__); - return -EPROTO; - } - - /* Set frame pointer to start of payload. */ - pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ; - plen = desc->cffrm_len; - - /* Skip already processed frames. */ - while (nfrms < cfhsi->rx_state.nfrms) { - pfrm += *plen; - rx_sz += *plen; - plen++; - nfrms++; - } - - /* Parse payload. */ - while (nfrms < CFHSI_MAX_PKTS && *plen) { - struct sk_buff *skb; - u8 *pcffrm = NULL; - int len; - - /* CAIF frame starts after head padding. */ - pcffrm = pfrm + *pfrm + 1; - - /* Read length of CAIF frame (little endian). */ - len = *pcffrm; - len |= ((*(pcffrm + 1)) << 8) & 0xFF00; - len += 2; /* Add FCS fields. */ - - /* Sanity check length of CAIF frames. */ - if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) { - netdev_err(cfhsi->ndev, "%s: Invalid length.\n", - __func__); - return -EPROTO; - } - - /* Allocate SKB (OK even in IRQ context). */ - skb = alloc_skb(len + 1, GFP_ATOMIC); - if (!skb) { - netdev_err(cfhsi->ndev, "%s: Out of memory !\n", - __func__); - cfhsi->rx_state.nfrms = nfrms; - return -ENOMEM; - } - caif_assert(skb != NULL); - - skb_put_data(skb, pcffrm, len); - - skb->protocol = htons(ETH_P_CAIF); - skb_reset_mac_header(skb); - skb->dev = cfhsi->ndev; - - netif_rx_any_context(skb); - - /* Update network statistics. */ - cfhsi->ndev->stats.rx_packets++; - cfhsi->ndev->stats.rx_bytes += len; - - pfrm += *plen; - rx_sz += *plen; - plen++; - nfrms++; - } - - return rx_sz; -} - -static void cfhsi_rx_done(struct cfhsi *cfhsi) -{ - int res; - int desc_pld_len = 0, rx_len, rx_state; - struct cfhsi_desc *desc = NULL; - u8 *rx_ptr, *rx_buf; - struct cfhsi_desc *piggy_desc = NULL; - - desc = (struct cfhsi_desc *)cfhsi->rx_buf; - - netdev_dbg(cfhsi->ndev, "%s\n", __func__); - - if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - return; - - /* Update inactivity timer if pending. */ - spin_lock_bh(&cfhsi->lock); - mod_timer_pending(&cfhsi->inactivity_timer, - jiffies + cfhsi->cfg.inactivity_timeout); - spin_unlock_bh(&cfhsi->lock); - - if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) { - desc_pld_len = cfhsi_rx_desc_len(desc); - - if (desc_pld_len < 0) - goto out_of_sync; - - rx_buf = cfhsi->rx_buf; - rx_len = desc_pld_len; - if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC)) - rx_len += CFHSI_DESC_SZ; - if (desc_pld_len == 0) - rx_buf = cfhsi->rx_flip_buf; - } else { - rx_buf = cfhsi->rx_flip_buf; - - rx_len = CFHSI_DESC_SZ; - if (cfhsi->rx_state.pld_len > 0 && - (desc->header & CFHSI_PIGGY_DESC)) { - - piggy_desc = (struct cfhsi_desc *) - (desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ + - cfhsi->rx_state.pld_len); - - cfhsi->rx_state.piggy_desc = true; - - /* Extract payload len from piggy-backed descriptor. 
*/ - desc_pld_len = cfhsi_rx_desc_len(piggy_desc); - if (desc_pld_len < 0) - goto out_of_sync; - - if (desc_pld_len > 0) { - rx_len = desc_pld_len; - if (piggy_desc->header & CFHSI_PIGGY_DESC) - rx_len += CFHSI_DESC_SZ; - } - - /* - * Copy needed information from the piggy-backed - * descriptor to the descriptor in the start. - */ - memcpy(rx_buf, (u8 *)piggy_desc, - CFHSI_DESC_SHORT_SZ); - } - } - - if (desc_pld_len) { - rx_state = CFHSI_RX_STATE_PAYLOAD; - rx_ptr = rx_buf + CFHSI_DESC_SZ; - } else { - rx_state = CFHSI_RX_STATE_DESC; - rx_ptr = rx_buf; - rx_len = CFHSI_DESC_SZ; - } - - /* Initiate next read */ - if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) { - /* Set up new transfer. */ - netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", - __func__); - - res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len, - cfhsi->ops); - if (WARN_ON(res < 0)) { - netdev_err(cfhsi->ndev, "%s: RX error %d.\n", - __func__, res); - cfhsi->ndev->stats.rx_errors++; - cfhsi->ndev->stats.rx_dropped++; - } - } - - if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) { - /* Extract payload from descriptor */ - if (cfhsi_rx_desc(desc, cfhsi) < 0) - goto out_of_sync; - } else { - /* Extract payload */ - if (cfhsi_rx_pld(desc, cfhsi) < 0) - goto out_of_sync; - if (piggy_desc) { - /* Extract any payload in piggyback descriptor. */ - if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0) - goto out_of_sync; - /* Mark no embedded frame after extracting it */ - piggy_desc->offset = 0; - } - } - - /* Update state info */ - memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state)); - cfhsi->rx_state.state = rx_state; - cfhsi->rx_ptr = rx_ptr; - cfhsi->rx_len = rx_len; - cfhsi->rx_state.pld_len = desc_pld_len; - cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC; - - if (rx_buf != cfhsi->rx_buf) - swap(cfhsi->rx_buf, cfhsi->rx_flip_buf); - return; - -out_of_sync: - netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__); - print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE, - cfhsi->rx_buf, CFHSI_DESC_SZ); - schedule_work(&cfhsi->out_of_sync_work); -} - -static void cfhsi_rx_slowpath(struct timer_list *t) -{ - struct cfhsi *cfhsi = from_timer(cfhsi, t, rx_slowpath_timer); - - netdev_dbg(cfhsi->ndev, "%s.\n", - __func__); - - cfhsi_rx_done(cfhsi); -} - -static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops) -{ - struct cfhsi *cfhsi; - - cfhsi = container_of(cb_ops, struct cfhsi, cb_ops); - netdev_dbg(cfhsi->ndev, "%s.\n", - __func__); - - if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - return; - - if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits)) - wake_up_interruptible(&cfhsi->flush_fifo_wait); - else - cfhsi_rx_done(cfhsi); -} - -static void cfhsi_wake_up(struct work_struct *work) -{ - struct cfhsi *cfhsi = NULL; - int res; - int len; - long ret; - - cfhsi = container_of(work, struct cfhsi, wake_up_work); - - if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - return; - - if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) { - /* It happenes when wakeup is requested by - * both ends at the same time. */ - clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); - clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); - return; - } - - /* Activate wake line. */ - cfhsi->ops->cfhsi_wake_up(cfhsi->ops); - - netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n", - __func__); - - /* Wait for acknowledge. */ - ret = CFHSI_WAKE_TOUT; - ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait, - test_and_clear_bit(CFHSI_WAKE_UP_ACK, - &cfhsi->bits), ret); - if (unlikely(ret < 0)) { - /* Interrupted by signal. 
*/ - netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n", - __func__, ret); - - clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); - cfhsi->ops->cfhsi_wake_down(cfhsi->ops); - return; - } else if (!ret) { - bool ca_wake = false; - size_t fifo_occupancy = 0; - - /* Wakeup timeout */ - netdev_dbg(cfhsi->ndev, "%s: Timeout.\n", - __func__); - - /* Check FIFO to check if modem has sent something. */ - WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops, - &fifo_occupancy)); - - netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n", - __func__, (unsigned) fifo_occupancy); - - /* Check if we misssed the interrupt. */ - WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops, - &ca_wake)); - - if (ca_wake) { - netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n", - __func__); - - /* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */ - clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); - - /* Continue execution. */ - goto wake_ack; - } - - clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); - cfhsi->ops->cfhsi_wake_down(cfhsi->ops); - return; - } -wake_ack: - netdev_dbg(cfhsi->ndev, "%s: Woken.\n", - __func__); - - /* Clear power up bit. */ - set_bit(CFHSI_AWAKE, &cfhsi->bits); - clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); - - /* Resume read operation. */ - netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__); - res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops); - - if (WARN_ON(res < 0)) - netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res); - - /* Clear power up acknowledment. */ - clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); - - spin_lock_bh(&cfhsi->lock); - - /* Resume transmit if queues are not empty. */ - if (!cfhsi_tx_queue_len(cfhsi)) { - netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n", - __func__); - /* Start inactivity timer. */ - mod_timer(&cfhsi->inactivity_timer, - jiffies + cfhsi->cfg.inactivity_timeout); - spin_unlock_bh(&cfhsi->lock); - return; - } - - netdev_dbg(cfhsi->ndev, "%s: Host wake.\n", - __func__); - - spin_unlock_bh(&cfhsi->lock); - - /* Create HSI frame. */ - len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi); - - if (likely(len > 0)) { - /* Set up new transfer. */ - res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops); - if (WARN_ON(res < 0)) { - netdev_err(cfhsi->ndev, "%s: TX error %d.\n", - __func__, res); - cfhsi_abort_tx(cfhsi); - } - } else { - netdev_err(cfhsi->ndev, - "%s: Failed to create HSI frame: %d.\n", - __func__, len); - } -} - -static void cfhsi_wake_down(struct work_struct *work) -{ - long ret; - struct cfhsi *cfhsi = NULL; - size_t fifo_occupancy = 0; - int retry = CFHSI_WAKE_TOUT; - - cfhsi = container_of(work, struct cfhsi, wake_down_work); - netdev_dbg(cfhsi->ndev, "%s.\n", __func__); - - if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - return; - - /* Deactivate wake line. */ - cfhsi->ops->cfhsi_wake_down(cfhsi->ops); - - /* Wait for acknowledge. */ - ret = CFHSI_WAKE_TOUT; - ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait, - test_and_clear_bit(CFHSI_WAKE_DOWN_ACK, - &cfhsi->bits), ret); - if (ret < 0) { - /* Interrupted by signal. */ - netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n", - __func__, ret); - return; - } else if (!ret) { - bool ca_wake = true; - - /* Timeout */ - netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__); - - /* Check if we misssed the interrupt. */ - WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops, - &ca_wake)); - if (!ca_wake) - netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n", - __func__); - } - - /* Check FIFO occupancy. 
*/ - while (retry) { - WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops, - &fifo_occupancy)); - - if (!fifo_occupancy) - break; - - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(1); - retry--; - } - - if (!retry) - netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__); - - /* Clear AWAKE condition. */ - clear_bit(CFHSI_AWAKE, &cfhsi->bits); - - /* Cancel pending RX requests. */ - cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops); -} - -static void cfhsi_out_of_sync(struct work_struct *work) -{ - struct cfhsi *cfhsi = NULL; - - cfhsi = container_of(work, struct cfhsi, out_of_sync_work); - - rtnl_lock(); - dev_close(cfhsi->ndev); - rtnl_unlock(); -} - -static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops) -{ - struct cfhsi *cfhsi = NULL; - - cfhsi = container_of(cb_ops, struct cfhsi, cb_ops); - netdev_dbg(cfhsi->ndev, "%s.\n", - __func__); - - set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); - wake_up_interruptible(&cfhsi->wake_up_wait); - - if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits)) - return; - - /* Schedule wake up work queue if the peer initiates. */ - if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits)) - queue_work(cfhsi->wq, &cfhsi->wake_up_work); -} - -static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops) -{ - struct cfhsi *cfhsi = NULL; - - cfhsi = container_of(cb_ops, struct cfhsi, cb_ops); - netdev_dbg(cfhsi->ndev, "%s.\n", - __func__); - - /* Initiating low power is only permitted by the host (us). */ - set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits); - wake_up_interruptible(&cfhsi->wake_down_wait); -} - -static void cfhsi_aggregation_tout(struct timer_list *t) -{ - struct cfhsi *cfhsi = from_timer(cfhsi, t, aggregation_timer); - - netdev_dbg(cfhsi->ndev, "%s.\n", - __func__); - - cfhsi_start_tx(cfhsi); -} - -static netdev_tx_t cfhsi_xmit(struct sk_buff *skb, struct net_device *dev) -{ - struct cfhsi *cfhsi = NULL; - int start_xfer = 0; - int timer_active; - int prio; - - if (!dev) - return -EINVAL; - - cfhsi = netdev_priv(dev); - - switch (skb->priority) { - case TC_PRIO_BESTEFFORT: - case TC_PRIO_FILLER: - case TC_PRIO_BULK: - prio = CFHSI_PRIO_BEBK; - break; - case TC_PRIO_INTERACTIVE_BULK: - prio = CFHSI_PRIO_VI; - break; - case TC_PRIO_INTERACTIVE: - prio = CFHSI_PRIO_VO; - break; - case TC_PRIO_CONTROL: - default: - prio = CFHSI_PRIO_CTL; - break; - } - - spin_lock_bh(&cfhsi->lock); - - /* Update aggregation statistics */ - cfhsi_update_aggregation_stats(cfhsi, skb, 1); - - /* Queue the SKB */ - skb_queue_tail(&cfhsi->qhead[prio], skb); - - /* Sanity check; xmit should not be called after unregister_netdev */ - if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) { - spin_unlock_bh(&cfhsi->lock); - cfhsi_abort_tx(cfhsi); - return -EINVAL; - } - - /* Send flow off if number of packets is above high water mark. */ - if (!cfhsi->flow_off_sent && - cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark && - cfhsi->cfdev.flowctrl) { - cfhsi->flow_off_sent = 1; - cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF); - } - - if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) { - cfhsi->tx_state = CFHSI_TX_STATE_XFER; - start_xfer = 1; - } - - if (!start_xfer) { - /* Send aggregate if it is possible */ - bool aggregate_ready = - cfhsi_can_send_aggregate(cfhsi) && - del_timer(&cfhsi->aggregation_timer) > 0; - spin_unlock_bh(&cfhsi->lock); - if (aggregate_ready) - cfhsi_start_tx(cfhsi); - return NETDEV_TX_OK; - } - - /* Delete inactivity timer if started. 
*/ - timer_active = del_timer_sync(&cfhsi->inactivity_timer); - - spin_unlock_bh(&cfhsi->lock); - - if (timer_active) { - struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf; - int len; - int res; - - /* Create HSI frame. */ - len = cfhsi_tx_frm(desc, cfhsi); - WARN_ON(!len); - - /* Set up new transfer. */ - res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops); - if (WARN_ON(res < 0)) { - netdev_err(cfhsi->ndev, "%s: TX error %d.\n", - __func__, res); - cfhsi_abort_tx(cfhsi); - } - } else { - /* Schedule wake up work queue if the we initiate. */ - if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits)) - queue_work(cfhsi->wq, &cfhsi->wake_up_work); - } - - return NETDEV_TX_OK; -} - -static const struct net_device_ops cfhsi_netdevops; - -static void cfhsi_setup(struct net_device *dev) -{ - int i; - struct cfhsi *cfhsi = netdev_priv(dev); - dev->features = 0; - dev->type = ARPHRD_CAIF; - dev->flags = IFF_POINTOPOINT | IFF_NOARP; - dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ; - dev->priv_flags |= IFF_NO_QUEUE; - dev->needs_free_netdev = true; - dev->netdev_ops = &cfhsi_netdevops; - for (i = 0; i < CFHSI_PRIO_LAST; ++i) - skb_queue_head_init(&cfhsi->qhead[i]); - cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW; - cfhsi->cfdev.use_frag = false; - cfhsi->cfdev.use_stx = false; - cfhsi->cfdev.use_fcs = false; - cfhsi->ndev = dev; - cfhsi->cfg = hsi_default_config; -} - -static int cfhsi_open(struct net_device *ndev) -{ - struct cfhsi *cfhsi = netdev_priv(ndev); - int res; - - clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits); - - /* Initialize state vaiables. */ - cfhsi->tx_state = CFHSI_TX_STATE_IDLE; - cfhsi->rx_state.state = CFHSI_RX_STATE_DESC; - - /* Set flow info */ - cfhsi->flow_off_sent = 0; - - /* - * Allocate a TX buffer with the size of a HSI packet descriptors - * and the necessary room for CAIF payload frames. - */ - cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL); - if (!cfhsi->tx_buf) { - res = -ENODEV; - goto err_alloc_tx; - } - - /* - * Allocate a RX buffer with the size of two HSI packet descriptors and - * the necessary room for CAIF payload frames. - */ - cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL); - if (!cfhsi->rx_buf) { - res = -ENODEV; - goto err_alloc_rx; - } - - cfhsi->rx_flip_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL); - if (!cfhsi->rx_flip_buf) { - res = -ENODEV; - goto err_alloc_rx_flip; - } - - /* Initialize aggregation timeout */ - cfhsi->cfg.aggregation_timeout = hsi_default_config.aggregation_timeout; - - /* Initialize recieve vaiables. */ - cfhsi->rx_ptr = cfhsi->rx_buf; - cfhsi->rx_len = CFHSI_DESC_SZ; - - /* Initialize spin locks. */ - spin_lock_init(&cfhsi->lock); - - /* Set up the driver. */ - cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb; - cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb; - cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb; - cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb; - - /* Initialize the work queues. */ - INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up); - INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down); - INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync); - - /* Clear all bit fields. */ - clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits); - clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits); - clear_bit(CFHSI_WAKE_UP, &cfhsi->bits); - clear_bit(CFHSI_AWAKE, &cfhsi->bits); - - /* Create work thread. */ - cfhsi->wq = alloc_ordered_workqueue(cfhsi->ndev->name, WQ_MEM_RECLAIM); - if (!cfhsi->wq) { - netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n", - __func__); - res = -ENODEV; - goto err_create_wq; - } - - /* Initialize wait queues. 
*/ - init_waitqueue_head(&cfhsi->wake_up_wait); - init_waitqueue_head(&cfhsi->wake_down_wait); - init_waitqueue_head(&cfhsi->flush_fifo_wait); - - /* Setup the inactivity timer. */ - timer_setup(&cfhsi->inactivity_timer, cfhsi_inactivity_tout, 0); - /* Setup the slowpath RX timer. */ - timer_setup(&cfhsi->rx_slowpath_timer, cfhsi_rx_slowpath, 0); - /* Setup the aggregation timer. */ - timer_setup(&cfhsi->aggregation_timer, cfhsi_aggregation_tout, 0); - - /* Activate HSI interface. */ - res = cfhsi->ops->cfhsi_up(cfhsi->ops); - if (res) { - netdev_err(cfhsi->ndev, - "%s: can't activate HSI interface: %d.\n", - __func__, res); - goto err_activate; - } - - /* Flush FIFO */ - res = cfhsi_flush_fifo(cfhsi); - if (res) { - netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n", - __func__, res); - goto err_net_reg; - } - return res; - - err_net_reg: - cfhsi->ops->cfhsi_down(cfhsi->ops); - err_activate: - destroy_workqueue(cfhsi->wq); - err_create_wq: - kfree(cfhsi->rx_flip_buf); - err_alloc_rx_flip: - kfree(cfhsi->rx_buf); - err_alloc_rx: - kfree(cfhsi->tx_buf); - err_alloc_tx: - return res; -} - -static int cfhsi_close(struct net_device *ndev) -{ - struct cfhsi *cfhsi = netdev_priv(ndev); - u8 *tx_buf, *rx_buf, *flip_buf; - - /* going to shutdown driver */ - set_bit(CFHSI_SHUTDOWN, &cfhsi->bits); - - /* Delete timers if pending */ - del_timer_sync(&cfhsi->inactivity_timer); - del_timer_sync(&cfhsi->rx_slowpath_timer); - del_timer_sync(&cfhsi->aggregation_timer); - - /* Cancel pending RX request (if any) */ - cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops); - - /* Destroy workqueue */ - destroy_workqueue(cfhsi->wq); - - /* Store bufferes: will be freed later. */ - tx_buf = cfhsi->tx_buf; - rx_buf = cfhsi->rx_buf; - flip_buf = cfhsi->rx_flip_buf; - /* Flush transmit queues. */ - cfhsi_abort_tx(cfhsi); - - /* Deactivate interface */ - cfhsi->ops->cfhsi_down(cfhsi->ops); - - /* Free buffers. */ - kfree(tx_buf); - kfree(rx_buf); - kfree(flip_buf); - return 0; -} - -static void cfhsi_uninit(struct net_device *dev) -{ - struct cfhsi *cfhsi = netdev_priv(dev); - ASSERT_RTNL(); - symbol_put(cfhsi_get_device); - list_del(&cfhsi->list); -} - -static const struct net_device_ops cfhsi_netdevops = { - .ndo_uninit = cfhsi_uninit, - .ndo_open = cfhsi_open, - .ndo_stop = cfhsi_close, - .ndo_start_xmit = cfhsi_xmit -}; - -static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi) -{ - int i; - - if (!data) { - pr_debug("no params data found\n"); - return; - } - - i = __IFLA_CAIF_HSI_INACTIVITY_TOUT; - /* - * Inactivity timeout in millisecs. Lowest possible value is 1, - * and highest possible is NEXT_TIMER_MAX_DELTA. - */ - if (data[i]) { - u32 inactivity_timeout = nla_get_u32(data[i]); - /* Pre-calculate inactivity timeout. 
*/ - cfhsi->cfg.inactivity_timeout = inactivity_timeout * HZ / 1000; - if (cfhsi->cfg.inactivity_timeout == 0) - cfhsi->cfg.inactivity_timeout = 1; - else if (cfhsi->cfg.inactivity_timeout > NEXT_TIMER_MAX_DELTA) - cfhsi->cfg.inactivity_timeout = NEXT_TIMER_MAX_DELTA; - } - - i = __IFLA_CAIF_HSI_AGGREGATION_TOUT; - if (data[i]) - cfhsi->cfg.aggregation_timeout = nla_get_u32(data[i]); - - i = __IFLA_CAIF_HSI_HEAD_ALIGN; - if (data[i]) - cfhsi->cfg.head_align = nla_get_u32(data[i]); - - i = __IFLA_CAIF_HSI_TAIL_ALIGN; - if (data[i]) - cfhsi->cfg.tail_align = nla_get_u32(data[i]); - - i = __IFLA_CAIF_HSI_QHIGH_WATERMARK; - if (data[i]) - cfhsi->cfg.q_high_mark = nla_get_u32(data[i]); - - i = __IFLA_CAIF_HSI_QLOW_WATERMARK; - if (data[i]) - cfhsi->cfg.q_low_mark = nla_get_u32(data[i]); -} - -static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[], - struct nlattr *data[], - struct netlink_ext_ack *extack) -{ - cfhsi_netlink_parms(data, netdev_priv(dev)); - netdev_state_change(dev); - return 0; -} - -static const struct nla_policy caif_hsi_policy[__IFLA_CAIF_HSI_MAX + 1] = { - [__IFLA_CAIF_HSI_INACTIVITY_TOUT] = { .type = NLA_U32, .len = 4 }, - [__IFLA_CAIF_HSI_AGGREGATION_TOUT] = { .type = NLA_U32, .len = 4 }, - [__IFLA_CAIF_HSI_HEAD_ALIGN] = { .type = NLA_U32, .len = 4 }, - [__IFLA_CAIF_HSI_TAIL_ALIGN] = { .type = NLA_U32, .len = 4 }, - [__IFLA_CAIF_HSI_QHIGH_WATERMARK] = { .type = NLA_U32, .len = 4 }, - [__IFLA_CAIF_HSI_QLOW_WATERMARK] = { .type = NLA_U32, .len = 4 }, -}; - -static size_t caif_hsi_get_size(const struct net_device *dev) -{ - int i; - size_t s = 0; - for (i = __IFLA_CAIF_HSI_UNSPEC + 1; i < __IFLA_CAIF_HSI_MAX; i++) - s += nla_total_size(caif_hsi_policy[i].len); - return s; -} - -static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev) -{ - struct cfhsi *cfhsi = netdev_priv(dev); - - if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT, - cfhsi->cfg.inactivity_timeout) || - nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT, - cfhsi->cfg.aggregation_timeout) || - nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN, - cfhsi->cfg.head_align) || - nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN, - cfhsi->cfg.tail_align) || - nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK, - cfhsi->cfg.q_high_mark) || - nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK, - cfhsi->cfg.q_low_mark)) - return -EMSGSIZE; - - return 0; -} - -static int caif_hsi_newlink(struct net *src_net, struct net_device *dev, - struct nlattr *tb[], struct nlattr *data[], - struct netlink_ext_ack *extack) -{ - struct cfhsi *cfhsi = NULL; - struct cfhsi_ops *(*get_ops)(void); - - ASSERT_RTNL(); - - cfhsi = netdev_priv(dev); - cfhsi_netlink_parms(data, cfhsi); - - get_ops = symbol_get(cfhsi_get_ops); - if (!get_ops) { - pr_err("%s: failed to get the cfhsi_ops\n", __func__); - return -ENODEV; - } - - /* Assign the HSI device. */ - cfhsi->ops = (*get_ops)(); - if (!cfhsi->ops) { - pr_err("%s: failed to get the cfhsi_ops\n", __func__); - goto err; - } - - /* Assign the driver to this HSI device. */ - cfhsi->ops->cb_ops = &cfhsi->cb_ops; - if (register_netdevice(dev)) { - pr_warn("%s: caif_hsi device registration failed\n", __func__); - goto err; - } - /* Add CAIF HSI device to list. 
*/ - list_add_tail(&cfhsi->list, &cfhsi_list); - - return 0; -err: - symbol_put(cfhsi_get_ops); - return -ENODEV; -} - -static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = { - .kind = "cfhsi", - .priv_size = sizeof(struct cfhsi), - .setup = cfhsi_setup, - .maxtype = __IFLA_CAIF_HSI_MAX, - .policy = caif_hsi_policy, - .newlink = caif_hsi_newlink, - .changelink = caif_hsi_changelink, - .get_size = caif_hsi_get_size, - .fill_info = caif_hsi_fill_info, -}; - -static void __exit cfhsi_exit_module(void) -{ - struct list_head *list_node; - struct list_head *n; - struct cfhsi *cfhsi; - - rtnl_link_unregister(&caif_hsi_link_ops); - - rtnl_lock(); - list_for_each_safe(list_node, n, &cfhsi_list) { - cfhsi = list_entry(list_node, struct cfhsi, list); - unregister_netdevice(cfhsi->ndev); - } - rtnl_unlock(); -} - -static int __init cfhsi_init_module(void) -{ - return rtnl_link_register(&caif_hsi_link_ops); -} - -module_init(cfhsi_init_module); -module_exit(cfhsi_exit_module); diff --git a/include/net/caif/caif_hsi.h b/include/net/caif/caif_hsi.h deleted file mode 100644 index 552cf68d28d2..000000000000 --- a/include/net/caif/caif_hsi.h +++ /dev/null @@ -1,200 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) ST-Ericsson AB 2010 - * Author: Daniel Martensson / daniel.martensson@stericsson.com - * Dmitry.Tarnyagin / dmitry.tarnyagin@stericsson.com - */ - -#ifndef CAIF_HSI_H_ -#define CAIF_HSI_H_ - -#include -#include -#include - -/* - * Maximum number of CAIF frames that can reside in the same HSI frame. - */ -#define CFHSI_MAX_PKTS 15 - -/* - * Maximum number of bytes used for the frame that can be embedded in the - * HSI descriptor. - */ -#define CFHSI_MAX_EMB_FRM_SZ 96 - -/* - * Decides if HSI buffers should be prefilled with 0xFF pattern for easier - * debugging. Both TX and RX buffers will be filled before the transfer. - */ -#define CFHSI_DBG_PREFILL 0 - -/* Structure describing a HSI packet descriptor. */ -#pragma pack(1) /* Byte alignment. */ -struct cfhsi_desc { - u8 header; - u8 offset; - u16 cffrm_len[CFHSI_MAX_PKTS]; - u8 emb_frm[CFHSI_MAX_EMB_FRM_SZ]; -}; -#pragma pack() /* Default alignment. */ - -/* Size of the complete HSI packet descriptor. */ -#define CFHSI_DESC_SZ (sizeof(struct cfhsi_desc)) - -/* - * Size of the complete HSI packet descriptor excluding the optional embedded - * CAIF frame. - */ -#define CFHSI_DESC_SHORT_SZ (CFHSI_DESC_SZ - CFHSI_MAX_EMB_FRM_SZ) - -/* - * Maximum bytes transferred in one transfer. - */ -#define CFHSI_MAX_CAIF_FRAME_SZ 4096 - -#define CFHSI_MAX_PAYLOAD_SZ (CFHSI_MAX_PKTS * CFHSI_MAX_CAIF_FRAME_SZ) - -/* Size of the complete HSI TX buffer. */ -#define CFHSI_BUF_SZ_TX (CFHSI_DESC_SZ + CFHSI_MAX_PAYLOAD_SZ) - -/* Size of the complete HSI RX buffer. */ -#define CFHSI_BUF_SZ_RX ((2 * CFHSI_DESC_SZ) + CFHSI_MAX_PAYLOAD_SZ) - -/* Bitmasks for the HSI descriptor. */ -#define CFHSI_PIGGY_DESC (0x01 << 7) - -#define CFHSI_TX_STATE_IDLE 0 -#define CFHSI_TX_STATE_XFER 1 - -#define CFHSI_RX_STATE_DESC 0 -#define CFHSI_RX_STATE_PAYLOAD 1 - -/* Bitmasks for power management. 
*/ -#define CFHSI_WAKE_UP 0 -#define CFHSI_WAKE_UP_ACK 1 -#define CFHSI_WAKE_DOWN_ACK 2 -#define CFHSI_AWAKE 3 -#define CFHSI_WAKELOCK_HELD 4 -#define CFHSI_SHUTDOWN 5 -#define CFHSI_FLUSH_FIFO 6 - -#ifndef CFHSI_INACTIVITY_TOUT -#define CFHSI_INACTIVITY_TOUT (1 * HZ) -#endif /* CFHSI_INACTIVITY_TOUT */ - -#ifndef CFHSI_WAKE_TOUT -#define CFHSI_WAKE_TOUT (3 * HZ) -#endif /* CFHSI_WAKE_TOUT */ - -#ifndef CFHSI_MAX_RX_RETRIES -#define CFHSI_MAX_RX_RETRIES (10 * HZ) -#endif - -/* Structure implemented by the CAIF HSI driver. */ -struct cfhsi_cb_ops { - void (*tx_done_cb) (struct cfhsi_cb_ops *drv); - void (*rx_done_cb) (struct cfhsi_cb_ops *drv); - void (*wake_up_cb) (struct cfhsi_cb_ops *drv); - void (*wake_down_cb) (struct cfhsi_cb_ops *drv); -}; - -/* Structure implemented by HSI device. */ -struct cfhsi_ops { - int (*cfhsi_up) (struct cfhsi_ops *dev); - int (*cfhsi_down) (struct cfhsi_ops *dev); - int (*cfhsi_tx) (u8 *ptr, int len, struct cfhsi_ops *dev); - int (*cfhsi_rx) (u8 *ptr, int len, struct cfhsi_ops *dev); - int (*cfhsi_wake_up) (struct cfhsi_ops *dev); - int (*cfhsi_wake_down) (struct cfhsi_ops *dev); - int (*cfhsi_get_peer_wake) (struct cfhsi_ops *dev, bool *status); - int (*cfhsi_fifo_occupancy) (struct cfhsi_ops *dev, size_t *occupancy); - int (*cfhsi_rx_cancel)(struct cfhsi_ops *dev); - struct cfhsi_cb_ops *cb_ops; -}; - -/* Structure holds status of received CAIF frames processing */ -struct cfhsi_rx_state { - int state; - int nfrms; - int pld_len; - int retries; - bool piggy_desc; -}; - -/* Priority mapping */ -enum { - CFHSI_PRIO_CTL = 0, - CFHSI_PRIO_VI, - CFHSI_PRIO_VO, - CFHSI_PRIO_BEBK, - CFHSI_PRIO_LAST, -}; - -struct cfhsi_config { - u32 inactivity_timeout; - u32 aggregation_timeout; - u32 head_align; - u32 tail_align; - u32 q_high_mark; - u32 q_low_mark; -}; - -/* Structure implemented by CAIF HSI drivers. */ -struct cfhsi { - struct caif_dev_common cfdev; - struct net_device *ndev; - struct platform_device *pdev; - struct sk_buff_head qhead[CFHSI_PRIO_LAST]; - struct cfhsi_cb_ops cb_ops; - struct cfhsi_ops *ops; - int tx_state; - struct cfhsi_rx_state rx_state; - struct cfhsi_config cfg; - int rx_len; - u8 *rx_ptr; - u8 *tx_buf; - u8 *rx_buf; - u8 *rx_flip_buf; - spinlock_t lock; - int flow_off_sent; - struct list_head list; - struct work_struct wake_up_work; - struct work_struct wake_down_work; - struct work_struct out_of_sync_work; - struct workqueue_struct *wq; - wait_queue_head_t wake_up_wait; - wait_queue_head_t wake_down_wait; - wait_queue_head_t flush_fifo_wait; - struct timer_list inactivity_timer; - struct timer_list rx_slowpath_timer; - - /* TX aggregation */ - int aggregation_len; - struct timer_list aggregation_timer; - - unsigned long bits; -}; -extern struct platform_driver cfhsi_driver; - -/** - * enum ifla_caif_hsi - CAIF HSI NetlinkRT parameters. - * @IFLA_CAIF_HSI_INACTIVITY_TOUT: Inactivity timeout before - * taking the HSI wakeline down, in milliseconds. - * When using RT Netlink to create, destroy or configure a CAIF HSI interface, - * enum ifla_caif_hsi is used to specify the configuration attributes. 
- */ -enum ifla_caif_hsi { - __IFLA_CAIF_HSI_UNSPEC, - __IFLA_CAIF_HSI_INACTIVITY_TOUT, - __IFLA_CAIF_HSI_AGGREGATION_TOUT, - __IFLA_CAIF_HSI_HEAD_ALIGN, - __IFLA_CAIF_HSI_TAIL_ALIGN, - __IFLA_CAIF_HSI_QHIGH_WATERMARK, - __IFLA_CAIF_HSI_QLOW_WATERMARK, - __IFLA_CAIF_HSI_MAX -}; - -struct cfhsi_ops *cfhsi_get_ops(void); - -#endif /* CAIF_HSI_H_ */ -- cgit v1.2.3 From 6b28a86d6c0bb02119f386ec2f56efde909e9bcb Mon Sep 17 00:00:00 2001 From: Mohammad Athari Bin Ismail Date: Wed, 30 Jun 2021 17:59:35 +0800 Subject: net: stmmac: Terminate FPE workqueue in suspend Add stmmac_fpe_stop_wq() in stmmac_suspend() to terminate FPE workqueue during suspend. So, in suspend mode, there will be no FPE workqueue available. Without this fix, new additional FPE workqueue will be created in every suspend->resume cycle. Fixes: 5a5586112b92 ("net: stmmac: support FPE link partner hand-shaking procedure") Signed-off-by: Mohammad Athari Bin Ismail Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 8d9d6ecf8c63..7b8404a21544 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -7171,6 +7171,7 @@ int stmmac_suspend(struct device *dev) priv->plat->rx_queues_to_use, false); stmmac_fpe_handshake(priv, false); + stmmac_fpe_stop_wq(priv); } priv->speed = SPEED_UNKNOWN; -- cgit v1.2.3 From 2342ae10d1272d411a468a85a67647dd115b344f Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Thu, 1 Jul 2021 22:18:24 +0200 Subject: gve: Fix an error handling path in 'gve_probe()' If the 'register_netdev() call fails, we must release the resources allocated by the previous 'gve_init_priv()' call, as already done in the remove function. Add a new label and the missing 'gve_teardown_priv_resources()' in the error handling path. Fixes: 893ce44df565 ("gve: Add basic driver framework for Compute Engine Virtual NIC") Signed-off-by: Christophe JAILLET Reviewed-by: Catherine Sullivan Signed-off-by: David S. Miller --- drivers/net/ethernet/google/gve/gve_main.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index 867e87af3432..44262c9f9ec2 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -1565,7 +1565,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = register_netdev(dev); if (err) - goto abort_with_wq; + goto abort_with_gve_init; dev_info(&pdev->dev, "GVE version %s\n", gve_version_str); dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format); @@ -1573,6 +1573,9 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) queue_work(priv->gve_wq, &priv->service_task); return 0; +abort_with_gve_init: + gve_teardown_priv_resources(priv); + abort_with_wq: destroy_workqueue(priv->gve_wq); -- cgit v1.2.3 From 6dce38b4b7ffb39539b49feca2b3ce34dbaadf02 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Thu, 1 Jul 2021 22:18:37 +0200 Subject: gve: Propagate error codes to caller If 'gve_probe()' fails, we should propagate the error code, instead of hard coding a -ENXIO value. Make sure that all error handling paths set a correct value for 'err'. 
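For illustration only, a minimal sketch of the pattern being described (stand-in names, not the driver's actual functions): each setup step stores its result in err, every failure path jumps to the matching unwind label, and the function returns err rather than a hard-coded -ENXIO.

#include <errno.h>

/* Stand-in setup/teardown steps; purely illustrative. */
static int fake_enable(void)   { return 0; }
static int fake_register(void) { return -ENOMEM; }
static void fake_disable(void) { }

static int example_probe(void)
{
        int err;

        err = fake_enable();
        if (err)
                return err;             /* propagate the real cause */

        err = fake_register();
        if (err)
                goto abort_with_enable; /* err already set by the failing step */

        return 0;

abort_with_enable:
        fake_disable();
        return err;                     /* not a hard-coded -ENXIO */
}

int main(void) { return example_probe() ? 1 : 0; }
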
Signed-off-by: Christophe JAILLET Reviewed-by: Catherine Sullivan Signed-off-by: David S. Miller --- drivers/net/ethernet/google/gve/gve_main.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index 44262c9f9ec2..c03984b26db4 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -1469,7 +1469,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) err = pci_enable_device(pdev); if (err) - return -ENXIO; + return err; err = pci_request_regions(pdev, "gvnic-cfg"); if (err) @@ -1512,6 +1512,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues); if (!dev) { dev_err(&pdev->dev, "could not allocate netdev\n"); + err = -ENOMEM; goto abort_with_db_bar; } SET_NETDEV_DEV(dev, &pdev->dev); @@ -1593,7 +1594,7 @@ abort_with_pci_region: abort_with_enabled: pci_disable_device(pdev); - return -ENXIO; + return err; } static void gve_remove(struct pci_dev *pdev) -- cgit v1.2.3 From bde3c8ffdd4153a3e9f0b0d51d972b30113b35ac Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Thu, 1 Jul 2021 22:41:19 +0200 Subject: gve: Simplify code and axe the use of a deprecated API The wrappers in include/linux/pci-dma-compat.h should go away. Replace 'pci_set_dma_mask/pci_set_consistent_dma_mask' by an equivalent and less verbose 'dma_set_mask_and_coherent()' call. Signed-off-by: Christophe JAILLET Reviewed-by: Catherine Sullivan Signed-off-by: David S. Miller --- drivers/net/ethernet/google/gve/gve_main.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index c03984b26db4..099a2bc5ae67 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -1477,19 +1477,12 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent) pci_set_master(pdev); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err); goto abort_with_pci_region; } - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_err(&pdev->dev, - "Failed to set consistent dma mask: err=%d\n", err); - goto abort_with_pci_region; - } - reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0); if (!reg_bar) { dev_err(&pdev->dev, "Failed to map pci bar!\n"); -- cgit v1.2.3 From 1bfa4d0cb5adf954e0f4870a3ecb7cb19506320c Mon Sep 17 00:00:00 2001 From: Bailey Forrest Date: Thu, 1 Jul 2021 20:13:36 -0700 Subject: gve: DQO: Remove incorrect prefetch The prefetch is incorrectly using the dma address instead of the virtual address. It's supposed to be: prefetch((char *)buf_state->page_info.page_address + buf_state->page_info.page_offset) However, after correcting this mistake, there is no evidence of performance improvement. Fixes: 9b8dd5e5ea48 ("gve: DQO: Add RX path") Signed-off-by: Bailey Forrest Signed-off-by: David S. 
Miller --- drivers/net/ethernet/google/gve/gve_rx_dqo.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c index 77bb8227f89b..8500621b2cd4 100644 --- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c @@ -566,13 +566,6 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx, return 0; } - /* Prefetch the payload header. */ - prefetch((char *)buf_state->addr + buf_state->page_info.page_offset); -#if L1_CACHE_BYTES < 128 - prefetch((char *)buf_state->addr + buf_state->page_info.page_offset + - L1_CACHE_BYTES); -#endif - if (eop && buf_len <= priv->rx_copybreak) { rx->skb_head = gve_rx_copy(priv->dev, napi, &buf_state->page_info, buf_len, 0); -- cgit v1.2.3 From 7cc93d888df764a13f196e3d4aef38869f7dd217 Mon Sep 17 00:00:00 2001 From: Louis Peens Date: Fri, 2 Jul 2021 11:21:39 +0200 Subject: nfp: flower-ct: remove callback delete deadlock The current location of the callback delete can lead to a race condition where deleting the callback requires a write_lock on the nf_table, but at the same time another thread from netfilter could have taken a read lock on the table before trying to offload. Since the driver is taking a rtnl_lock this can lead into a deadlock situation, where the netfilter offload will wait for the cls_flower rtnl_lock to be released, but this cannot happen since this is waiting for the nf_table read_lock to be released before it can delete the callback. Solve this by completely removing the nf_flow_table_offload_del_cb call, as this will now be cleaned up by act_ct itself when cleaning up the specific nf_table. Fixes: 62268e78145f ("nfp: flower-ct: add nft callback stubs") Signed-off-by: Louis Peens Signed-off-by: Yinjun Zhang Signed-off-by: Simon Horman Signed-off-by: David S. Miller --- drivers/net/ethernet/netronome/nfp/flower/conntrack.c | 13 ------------- 1 file changed, 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c index 273d529d43c2..128020b1573e 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c +++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c @@ -1141,20 +1141,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent) nfp_fl_ct_clean_flow_entry(ct_entry); kfree(ct_map_ent); - /* If this is the last pre_ct_rule it means that it is - * very likely that the nft table will be cleaned up next, - * as this happens on the removal of the last act_ct flow. - * However we cannot deregister the callback on the removal - * of the last nft flow as this runs into a deadlock situation. - * So deregister the callback on removal of the last pre_ct flow - * and remove any remaining nft flow entries. We also cannot - * save this state and delete the callback later since the - * nft table would already have been freed at that time. - */ if (!zt->pre_ct_count) { - nf_flow_table_offload_del_cb(zt->nft, - nfp_fl_ct_handle_nft_flow, - zt); zt->nft = NULL; nfp_fl_ct_clean_nft_entries(zt); } -- cgit v1.2.3 From b22580233d473dbf7bbfa4f6549c09e2c80e9e64 Mon Sep 17 00:00:00 2001 From: Ronak Doshi Date: Thu, 1 Jul 2021 23:44:27 -0700 Subject: vmxnet3: fix cksum offload issues for tunnels with non-default udp ports Commit dacce2be3312 ("vmxnet3: add geneve and vxlan tunnel offload support") added support for encapsulation offload. 
However, the inner offload capability is to be restricted to UDP tunnels with default Vxlan and Geneve ports. This patch fixes the issue for tunnels with non-default ports using features check capability and filtering appropriate features for such tunnels. Fixes: dacce2be3312 ("vmxnet3: add geneve and vxlan tunnel offload support") Signed-off-by: Ronak Doshi Acked-by: Guolin Yang Signed-off-by: David S. Miller --- drivers/net/vmxnet3/vmxnet3_ethtool.c | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index c0bd9cbc43b1..1b483cf2b1ca 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -1,7 +1,7 @@ /* * Linux driver for VMware's vmxnet3 ethernet NIC. * - * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved. + * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -26,6 +26,10 @@ #include "vmxnet3_int.h" +#include +#include + +#define VXLAN_UDP_PORT 8472 struct vmxnet3_stat_desc { char desc[ETH_GSTRING_LEN]; @@ -262,6 +266,8 @@ netdev_features_t vmxnet3_features_check(struct sk_buff *skb, if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL) { u8 l4_proto = 0; + u16 port; + struct udphdr *udph; switch (vlan_get_protocol(skb)) { case htons(ETH_P_IP): @@ -274,8 +280,20 @@ netdev_features_t vmxnet3_features_check(struct sk_buff *skb, return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } - if (l4_proto != IPPROTO_UDP) + switch (l4_proto) { + case IPPROTO_UDP: + udph = udp_hdr(skb); + port = be16_to_cpu(udph->dest); + /* Check if offloaded port is supported */ + if (port != GENEVE_UDP_PORT && + port != IANA_VXLAN_UDP_PORT && + port != VXLAN_UDP_PORT) { + return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); + } + break; + default: return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); + } } return features; } -- cgit v1.2.3 From 6ff63a150b5556012589ae59efac1b5eeb7d32c3 Mon Sep 17 00:00:00 2001 From: Lorenzo Bianconi Date: Sat, 3 Jul 2021 21:17:27 +0200 Subject: net: marvell: always set skb_shared_info in mvneta_swbm_add_rx_fragment Always set skb_shared_info data structure in mvneta_swbm_add_rx_fragment routine even if the fragment contains only the ethernet FCS. Fixes: 039fbc47f9f1 ("net: mvneta: alloc skb_shared_info on the mvneta_rx_swbm stack") Signed-off-by: Lorenzo Bianconi Signed-off-by: David S. 
Miller --- drivers/net/ethernet/marvell/mvneta.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 361bc4fbe20b..76a7777c746d 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2299,19 +2299,19 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp, skb_frag_off_set(frag, pp->rx_offset_correction); skb_frag_size_set(frag, data_len); __skb_frag_set_page(frag, page); - - /* last fragment */ - if (len == *size) { - struct skb_shared_info *sinfo; - - sinfo = xdp_get_shared_info_from_buff(xdp); - sinfo->nr_frags = xdp_sinfo->nr_frags; - memcpy(sinfo->frags, xdp_sinfo->frags, - sinfo->nr_frags * sizeof(skb_frag_t)); - } } else { page_pool_put_full_page(rxq->page_pool, page, true); } + + /* last fragment */ + if (len == *size) { + struct skb_shared_info *sinfo; + + sinfo = xdp_get_shared_info_from_buff(xdp); + sinfo->nr_frags = xdp_sinfo->nr_frags; + memcpy(sinfo->frags, xdp_sinfo->frags, + sinfo->nr_frags * sizeof(skb_frag_t)); + } *size -= len; } -- cgit v1.2.3 From 55eac20617ca84129273ab248f4d7bfe456967de Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Mon, 5 Jul 2021 16:53:06 +0800 Subject: ptp: fix NULL pointer dereference in ptp_clock_register Fix NULL pointer dereference in ptp_clock_register. The argument "parent" of ptp_clock_register may be NULL pointer. Fixes: 73f37068d540 ("ptp: support ptp physical/virtual clocks conversion") Reported-by: kernel test robot Signed-off-by: Yangbo Lu Signed-off-by: David S. Miller --- drivers/ptp/ptp_clock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index f012fa581cf4..ce6d9fc85607 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -236,7 +236,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, } /* PTP virtual clock is being registered under physical clock */ - if (parent->class && parent->class->name && + if (parent && parent->class && parent->class->name && strcmp(parent->class->name, "ptp") == 0) ptp->is_virtual_clock = true; -- cgit v1.2.3 From f6a175cfcc8df578adfdf06b05c82b3b8c8b5cfd Mon Sep 17 00:00:00 2001 From: Yangbo Lu Date: Mon, 5 Jul 2021 17:46:17 +0800 Subject: ptp: fix format string mismatch in ptp_sysfs.c Fix format string mismatch in ptp_sysfs.c. Use %u for unsigned int. Fixes: 73f37068d540 ("ptp: support ptp physical/virtual clocks conversion") Reported-by: kernel test robot Signed-off-by: Yangbo Lu Signed-off-by: David S. 
Miller --- drivers/ptp/ptp_sysfs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c index 6a36590ca77a..b3d96b747292 100644 --- a/drivers/ptp/ptp_sysfs.c +++ b/drivers/ptp/ptp_sysfs.c @@ -179,7 +179,7 @@ static ssize_t n_vclocks_show(struct device *dev, if (mutex_lock_interruptible(&ptp->n_vclocks_mux)) return -ERESTARTSYS; - size = snprintf(page, PAGE_SIZE - 1, "%d\n", ptp->n_vclocks); + size = snprintf(page, PAGE_SIZE - 1, "%u\n", ptp->n_vclocks); mutex_unlock(&ptp->n_vclocks_mux); @@ -252,7 +252,7 @@ static ssize_t max_vclocks_show(struct device *dev, struct ptp_clock *ptp = dev_get_drvdata(dev); ssize_t size; - size = snprintf(page, PAGE_SIZE - 1, "%d\n", ptp->max_vclocks); + size = snprintf(page, PAGE_SIZE - 1, "%u\n", ptp->max_vclocks); return size; } -- cgit v1.2.3 From 81c52c42afd92b741289208c65e5063b9e23ffb4 Mon Sep 17 00:00:00 2001 From: Xiaoliang Yang Date: Mon, 5 Jul 2021 18:26:53 +0800 Subject: net: stmmac: separate the tas basetime calculation function Separate the TAS basetime calculation function so that it can be called by other functions. Signed-off-by: Xiaoliang Yang Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 3 ++ drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 38 ++++++++++++++++--------- 2 files changed, 28 insertions(+), 13 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index e735134e8487..fcdb1d20389b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -349,6 +349,9 @@ void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue); void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue); void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue); int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags); +struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time, + ktime_t current_time, + u64 cycle_time); #if IS_ENABLED(CONFIG_STMMAC_SELFTESTS) void stmmac_selftest_run(struct net_device *dev, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 92dab609d4f8..596626c71189 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -711,6 +711,29 @@ static int tc_setup_cls(struct stmmac_priv *priv, return ret; } +struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time, + ktime_t current_time, + u64 cycle_time) +{ + struct timespec64 time; + + if (ktime_after(old_base_time, current_time)) { + time = ktime_to_timespec64(old_base_time); + } else { + s64 n; + ktime_t base_time; + + n = div64_s64(ktime_sub_ns(current_time, old_base_time), + cycle_time); + base_time = ktime_add_ns(old_base_time, + (n + 1) * cycle_time); + + time = ktime_to_timespec64(base_time); + } + + return time; +} + static int tc_setup_taprio(struct stmmac_priv *priv, struct tc_taprio_qopt_offload *qopt) { @@ -814,19 +837,8 @@ static int tc_setup_taprio(struct stmmac_priv *priv, /* Adjust for real system time */ priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, ¤t_time); current_time_ns = timespec64_to_ktime(current_time); - if (ktime_after(qopt->base_time, current_time_ns)) { - time = ktime_to_timespec64(qopt->base_time); - } else { - ktime_t base_time; - s64 n; - - n = div64_s64(ktime_sub_ns(current_time_ns, qopt->base_time), - qopt->cycle_time); - 
base_time = ktime_add_ns(qopt->base_time, - (n + 1) * qopt->cycle_time); - - time = ktime_to_timespec64(base_time); - } + time = stmmac_calc_tas_basetime(qopt->base_time, current_time_ns, + qopt->cycle_time); priv->plat->est->btr[0] = (u32)time.tv_nsec; priv->plat->est->btr[1] = (u32)time.tv_sec; -- cgit v1.2.3 From b2aae654a4794ef898ad33a179f341eb610f6b85 Mon Sep 17 00:00:00 2001 From: Xiaoliang Yang Date: Mon, 5 Jul 2021 18:26:54 +0800 Subject: net: stmmac: add mutex lock to protect est parameters Add a mutex lock to protect est structure parameters so that the EST parameters can be updated by other threads. Signed-off-by: Xiaoliang Yang Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 12 +++++++++++- include/linux/stmmac.h | 1 + 2 files changed, 12 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 596626c71189..2e3cdf540168 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -796,14 +796,18 @@ static int tc_setup_taprio(struct stmmac_priv *priv, GFP_KERNEL); if (!plat->est) return -ENOMEM; + + mutex_init(&priv->plat->est->lock); } else { memset(plat->est, 0, sizeof(*plat->est)); } size = qopt->num_entries; + mutex_lock(&priv->plat->est->lock); priv->plat->est->gcl_size = size; priv->plat->est->enable = qopt->enable; + mutex_unlock(&priv->plat->est->lock); for (i = 0; i < size; i++) { s64 delta_ns = qopt->entries[i].interval; @@ -834,6 +838,7 @@ static int tc_setup_taprio(struct stmmac_priv *priv, priv->plat->est->gcl[i] = delta_ns | (gates << wid); } + mutex_lock(&priv->plat->est->lock); /* Adjust for real system time */ priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, ¤t_time); current_time_ns = timespec64_to_ktime(current_time); @@ -847,8 +852,10 @@ static int tc_setup_taprio(struct stmmac_priv *priv, priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC); priv->plat->est->ctr[1] = (u32)ctr; - if (fpe && !priv->dma_cap.fpesel) + if (fpe && !priv->dma_cap.fpesel) { + mutex_unlock(&priv->plat->est->lock); return -EOPNOTSUPP; + } /* Actual FPE register configuration will be done after FPE handshake * is success. @@ -857,6 +864,7 @@ static int tc_setup_taprio(struct stmmac_priv *priv, ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, priv->plat->clk_ptp_rate); + mutex_unlock(&priv->plat->est->lock); if (ret) { netdev_err(priv->dev, "failed to configure EST\n"); goto disable; @@ -872,9 +880,11 @@ static int tc_setup_taprio(struct stmmac_priv *priv, return 0; disable: + mutex_lock(&priv->plat->est->lock); priv->plat->est->enable = false; stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, priv->plat->clk_ptp_rate); + mutex_unlock(&priv->plat->est->lock); priv->plat->fpe_cfg->enable = false; stmmac_fpe_configure(priv, priv->ioaddr, diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index d5ae621d66ba..09157b8a5810 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -115,6 +115,7 @@ struct stmmac_axi { #define EST_GCL 1024 struct stmmac_est { + struct mutex lock; int enable; u32 btr_offset[2]; u32 btr[2]; -- cgit v1.2.3 From e9e3720002f61cd637a49ecafae77cac230eefae Mon Sep 17 00:00:00 2001 From: Xiaoliang Yang Date: Mon, 5 Jul 2021 18:26:55 +0800 Subject: net: stmmac: ptp: update tas basetime after ptp adjust After adjusting the ptp time, the Qbv base time may be the past time of the new current time. 
dwmac5 hardware limited the base time cannot be set as past time. This patch add a btr_reserve to store the base time get from qopt, then calculate the base time and reset the Qbv configuration after ptp time adjust. Signed-off-by: Xiaoliang Yang Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 41 +++++++++++++++++++++++- drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 6 +++- include/linux/stmmac.h | 1 + 3 files changed, 46 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c index 4e86cdf2bc9f..580cc035536b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c @@ -62,7 +62,8 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) u32 sec, nsec; u32 quotient, reminder; int neg_adj = 0; - bool xmac; + bool xmac, est_rst = false; + int ret; xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; @@ -75,10 +76,48 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta) sec = quotient; nsec = reminder; + /* If EST is enabled, disabled it before adjust ptp time. */ + if (priv->plat->est && priv->plat->est->enable) { + est_rst = true; + mutex_lock(&priv->plat->est->lock); + priv->plat->est->enable = false; + stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, + priv->plat->clk_ptp_rate); + mutex_unlock(&priv->plat->est->lock); + } + spin_lock_irqsave(&priv->ptp_lock, flags); stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj, xmac); spin_unlock_irqrestore(&priv->ptp_lock, flags); + /* Caculate new basetime and re-configured EST after PTP time adjust. */ + if (est_rst) { + struct timespec64 current_time, time; + ktime_t current_time_ns, basetime; + u64 cycle_time; + + mutex_lock(&priv->plat->est->lock); + priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, ¤t_time); + current_time_ns = timespec64_to_ktime(current_time); + time.tv_nsec = priv->plat->est->btr_reserve[0]; + time.tv_sec = priv->plat->est->btr_reserve[1]; + basetime = timespec64_to_ktime(time); + cycle_time = priv->plat->est->ctr[1] * NSEC_PER_SEC + + priv->plat->est->ctr[0]; + time = stmmac_calc_tas_basetime(basetime, + current_time_ns, + cycle_time); + + priv->plat->est->btr[0] = (u32)time.tv_nsec; + priv->plat->est->btr[1] = (u32)time.tv_sec; + priv->plat->est->enable = true; + ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, + priv->plat->clk_ptp_rate); + mutex_unlock(&priv->plat->est->lock); + if (ret) + netdev_err(priv->dev, "failed to configure EST\n"); + } + return 0; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 2e3cdf540168..4f3b6437b114 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -739,7 +739,7 @@ static int tc_setup_taprio(struct stmmac_priv *priv, { u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep; struct plat_stmmacenet_data *plat = priv->plat; - struct timespec64 time, current_time; + struct timespec64 time, current_time, qopt_time; ktime_t current_time_ns; bool fpe = false; int i, ret = 0; @@ -848,6 +848,10 @@ static int tc_setup_taprio(struct stmmac_priv *priv, priv->plat->est->btr[0] = (u32)time.tv_nsec; priv->plat->est->btr[1] = (u32)time.tv_sec; + qopt_time = ktime_to_timespec64(qopt->base_time); + priv->plat->est->btr_reserve[0] = (u32)qopt_time.tv_nsec; + 
priv->plat->est->btr_reserve[1] = (u32)qopt_time.tv_sec; + ctr = qopt->cycle_time; priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC); priv->plat->est->ctr[1] = (u32)ctr; diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 09157b8a5810..a6f03b36fc4f 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -117,6 +117,7 @@ struct stmmac_axi { struct stmmac_est { struct mutex lock; int enable; + u32 btr_reserve[2]; u32 btr_offset[2]; u32 btr[2]; u32 ctr[2]; -- cgit v1.2.3 From b648eba4c69e5819880b4907e7fcb2bb576069ab Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Mon, 5 Jul 2021 15:38:06 +0000 Subject: bonding: fix suspicious RCU usage in bond_ipsec_add_sa() To dereference bond->curr_active_slave, it uses rcu_dereference(). But it and the caller doesn't acquire RCU so a warning occurs. So add rcu_read_lock(). Test commands: ip link add dummy0 type dummy ip link add bond0 type bond ip link set dummy0 master bond0 ip link set dummy0 up ip link set bond0 up ip x s add proto esp dst 14.1.1.1 src 15.1.1.1 spi 0x07 \ mode transport \ reqid 0x07 replay-window 32 aead 'rfc4106(gcm(aes))' \ 0x44434241343332312423222114131211f4f3f2f1 128 sel \ src 14.0.0.52/24 dst 14.0.0.70/24 proto tcp offload \ dev bond0 dir in Splat looks like: ============================= WARNING: suspicious RCU usage 5.13.0-rc3+ #1168 Not tainted ----------------------------- drivers/net/bonding/bond_main.c:411 suspicious rcu_dereference_check() usage! other info that might help us debug this: rcu_scheduler_active = 2, debug_locks = 1 1 lock held by ip/684: #0: ffffffff9a2757c0 (&net->xfrm.xfrm_cfg_mutex){+.+.}-{3:3}, at: xfrm_netlink_rcv+0x59/0x80 [xfrm_user] 55.191733][ T684] stack backtrace: CPU: 0 PID: 684 Comm: ip Not tainted 5.13.0-rc3+ #1168 Call Trace: dump_stack+0xa4/0xe5 bond_ipsec_add_sa+0x18c/0x1f0 [bonding] xfrm_dev_state_add+0x2a9/0x770 ? memcpy+0x38/0x60 xfrm_add_sa+0x2278/0x3b10 [xfrm_user] ? xfrm_get_policy+0xaa0/0xaa0 [xfrm_user] ? register_lock_class+0x1750/0x1750 xfrm_user_rcv_msg+0x331/0x660 [xfrm_user] ? rcu_read_lock_sched_held+0x91/0xc0 ? xfrm_user_state_lookup.constprop.39+0x320/0x320 [xfrm_user] ? find_held_lock+0x3a/0x1c0 ? mutex_lock_io_nested+0x1210/0x1210 ? sched_clock_cpu+0x18/0x170 netlink_rcv_skb+0x121/0x350 ? xfrm_user_state_lookup.constprop.39+0x320/0x320 [xfrm_user] ? netlink_ack+0x9d0/0x9d0 ? netlink_deliver_tap+0x17c/0xa50 xfrm_netlink_rcv+0x68/0x80 [xfrm_user] netlink_unicast+0x41c/0x610 ? netlink_attachskb+0x710/0x710 netlink_sendmsg+0x6b9/0xb70 [ ... ] Fixes: 18cb261afd7b ("bonding: support hardware encryption offload to slaves") Signed-off-by: Taehee Yoo Signed-off-by: David S. 
Miller --- drivers/net/bonding/bond_main.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 0ff7567bd04f..d4d718e04dcc 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -403,10 +403,12 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs) struct net_device *bond_dev = xs->xso.dev; struct bonding *bond; struct slave *slave; + int err; if (!bond_dev) return -EINVAL; + rcu_read_lock(); bond = netdev_priv(bond_dev); slave = rcu_dereference(bond->curr_active_slave); xs->xso.real_dev = slave->dev; @@ -415,10 +417,13 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs) if (!(slave->dev->xfrmdev_ops && slave->dev->xfrmdev_ops->xdo_dev_state_add)) { slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n"); + rcu_read_unlock(); return -EINVAL; } - return slave->dev->xfrmdev_ops->xdo_dev_state_add(xs); + err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs); + rcu_read_unlock(); + return err; } /** -- cgit v1.2.3 From 105cd17a866017b45f3c45901b394c711c97bf40 Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Mon, 5 Jul 2021 15:38:07 +0000 Subject: bonding: fix null dereference in bond_ipsec_add_sa() If bond doesn't have real device, bond->curr_active_slave is null. But bond_ipsec_add_sa() dereferences bond->curr_active_slave without null checking. So, null-ptr-deref would occur. Test commands: ip link add bond0 type bond ip link set bond0 up ip x s add proto esp dst 14.1.1.1 src 15.1.1.1 spi \ 0x07 mode transport reqid 0x07 replay-window 32 aead 'rfc4106(gcm(aes))' \ 0x44434241343332312423222114131211f4f3f2f1 128 sel src 14.0.0.52/24 \ dst 14.0.0.70/24 proto tcp offload dev bond0 dir in Splat looks like: KASAN: null-ptr-deref in range [0x0000000000000000-0x0000000000000007] CPU: 4 PID: 680 Comm: ip Not tainted 5.13.0-rc3+ #1168 RIP: 0010:bond_ipsec_add_sa+0xc4/0x2e0 [bonding] Code: 85 21 02 00 00 4d 8b a6 48 0c 00 00 e8 75 58 44 ce 85 c0 0f 85 14 01 00 00 48 b8 00 00 00 00 00 fc ff df 4c 89 e2 48 c1 ea 03 <80> 3c 02 00 0f 85 fc 01 00 00 48 8d bb e0 02 00 00 4d 8b 2c 24 48 RSP: 0018:ffff88810946f508 EFLAGS: 00010246 RAX: dffffc0000000000 RBX: ffff88810b4e8040 RCX: 0000000000000001 RDX: 0000000000000000 RSI: ffffffff8fe34280 RDI: ffff888115abe100 RBP: ffff88810946f528 R08: 0000000000000003 R09: fffffbfff2287e11 R10: 0000000000000001 R11: ffff888115abe0c8 R12: 0000000000000000 R13: ffffffffc0aea9a0 R14: ffff88800d7d2000 R15: ffff88810b4e8330 FS: 00007efc5552e680(0000) GS:ffff888119c00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 000055c2530dbf40 CR3: 0000000103056004 CR4: 00000000003706e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: xfrm_dev_state_add+0x2a9/0x770 ? memcpy+0x38/0x60 xfrm_add_sa+0x2278/0x3b10 [xfrm_user] ? xfrm_get_policy+0xaa0/0xaa0 [xfrm_user] ? register_lock_class+0x1750/0x1750 xfrm_user_rcv_msg+0x331/0x660 [xfrm_user] ? rcu_read_lock_sched_held+0x91/0xc0 ? xfrm_user_state_lookup.constprop.39+0x320/0x320 [xfrm_user] ? find_held_lock+0x3a/0x1c0 ? mutex_lock_io_nested+0x1210/0x1210 ? sched_clock_cpu+0x18/0x170 netlink_rcv_skb+0x121/0x350 ? xfrm_user_state_lookup.constprop.39+0x320/0x320 [xfrm_user] ? netlink_ack+0x9d0/0x9d0 ? netlink_deliver_tap+0x17c/0xa50 xfrm_netlink_rcv+0x68/0x80 [xfrm_user] netlink_unicast+0x41c/0x610 ? 
netlink_attachskb+0x710/0x710 netlink_sendmsg+0x6b9/0xb70 [ ...] Fixes: 18cb261afd7b ("bonding: support hardware encryption offload to slaves") Signed-off-by: Taehee Yoo Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index d4d718e04dcc..5466b24ceab6 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -411,6 +411,11 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs) rcu_read_lock(); bond = netdev_priv(bond_dev); slave = rcu_dereference(bond->curr_active_slave); + if (!slave) { + rcu_read_unlock(); + return -ENODEV; + } + xs->xso.real_dev = slave->dev; bond->xs = xs; -- cgit v1.2.3 From 09adf7566d436322ced595b166dea48b06852efe Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Mon, 5 Jul 2021 15:38:08 +0000 Subject: net: netdevsim: use xso.real_dev instead of xso.dev in callback functions of struct xfrmdev_ops There are two pointers in struct xfrm_state_offload, *dev, *real_dev. These are used in callback functions of struct xfrmdev_ops. The *dev points whether bonding interface or real interface. If bonding ipsec offload is used, it points bonding interface If not, it points real interface. And real_dev always points real interface. So, netdevsim should always use real_dev instead of dev. Of course, real_dev always not be null. Test commands: ip netns add A ip netns exec A bash modprobe netdevsim echo "1 1" > /sys/bus/netdevsim/new_device ip link add bond0 type bond mode active-backup ip link set eth0 master bond0 ip link set eth0 up ip link set bond0 up ip x s add proto esp dst 14.1.1.1 src 15.1.1.1 spi 0x07 mode \ transport reqid 0x07 replay-window 32 aead 'rfc4106(gcm(aes))' \ 0x44434241343332312423222114131211f4f3f2f1 128 sel src 14.0.0.52/24 \ dst 14.0.0.70/24 proto tcp offload dev bond0 dir in Splat looks like: BUG: spinlock bad magic on CPU#5, kworker/5:1/53 lock: 0xffff8881068c2cc8, .magic: 11121314, .owner: /-1, .owner_cpu: -235736076 CPU: 5 PID: 53 Comm: kworker/5:1 Not tainted 5.13.0-rc3+ #1168 Workqueue: events linkwatch_event Call Trace: dump_stack+0xa4/0xe5 do_raw_spin_lock+0x20b/0x270 ? rwlock_bug.part.1+0x90/0x90 _raw_spin_lock_nested+0x5f/0x70 bond_get_stats+0xe4/0x4c0 [bonding] ? rcu_read_lock_sched_held+0xc0/0xc0 ? bond_neigh_init+0x2c0/0x2c0 [bonding] ? dev_get_alias+0xe2/0x190 ? dev_get_port_parent_id+0x14a/0x360 ? rtnl_unregister+0x190/0x190 ? dev_get_phys_port_name+0xa0/0xa0 ? memset+0x1f/0x40 ? memcpy+0x38/0x60 ? rtnl_phys_switch_id_fill+0x91/0x100 dev_get_stats+0x8c/0x270 rtnl_fill_stats+0x44/0xbe0 ? nla_put+0xbe/0x140 rtnl_fill_ifinfo+0x1054/0x3ad0 [ ... ] Fixes: 272c2330adc9 ("xfrm: bail early on slave pass over skb") Signed-off-by: Taehee Yoo Signed-off-by: David S. 
Miller --- drivers/net/netdevsim/ipsec.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c index 3811f1bde84e..b80ed2ffd45e 100644 --- a/drivers/net/netdevsim/ipsec.c +++ b/drivers/net/netdevsim/ipsec.c @@ -85,7 +85,7 @@ static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs, u32 *mykey, u32 *mysalt) { const char aes_gcm_name[] = "rfc4106(gcm(aes))"; - struct net_device *dev = xs->xso.dev; + struct net_device *dev = xs->xso.real_dev; unsigned char *key_data; char *alg_name = NULL; int key_len; @@ -134,7 +134,7 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs) u16 sa_idx; int ret; - dev = xs->xso.dev; + dev = xs->xso.real_dev; ns = netdev_priv(dev); ipsec = &ns->ipsec; @@ -194,7 +194,7 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs) static void nsim_ipsec_del_sa(struct xfrm_state *xs) { - struct netdevsim *ns = netdev_priv(xs->xso.dev); + struct netdevsim *ns = netdev_priv(xs->xso.real_dev); struct nsim_ipsec *ipsec = &ns->ipsec; u16 sa_idx; @@ -211,7 +211,7 @@ static void nsim_ipsec_del_sa(struct xfrm_state *xs) static bool nsim_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) { - struct netdevsim *ns = netdev_priv(xs->xso.dev); + struct netdevsim *ns = netdev_priv(xs->xso.real_dev); struct nsim_ipsec *ipsec = &ns->ipsec; ipsec->ok++; -- cgit v1.2.3 From 2de7e4f67599affc97132bd07e30e3bd59d0b777 Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Mon, 5 Jul 2021 15:38:09 +0000 Subject: ixgbevf: use xso.real_dev instead of xso.dev in callback functions of struct xfrmdev_ops There are two pointers in struct xfrm_state_offload, *dev, *real_dev. These are used in callback functions of struct xfrmdev_ops. The *dev points whether bonding interface or real interface. If bonding ipsec offload is used, it points bonding interface If not, it points real interface. And real_dev always points real interface. So, ixgbevf should always use real_dev instead of dev. Of course, real_dev always not be null. 
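As a rough illustration of that rule (hypothetical types and names, not the kernel's definitions), an offload callback should resolve its device state from xso.real_dev, which always refers to the underlying real device, never from xso.dev, which may be the bonding master:

#include <stdio.h>

/* Hypothetical stand-ins for struct net_device / xfrm_state_offload. */
struct net_device { const char *name; };

struct xso_like {
        struct net_device *dev;      /* bond master or real device */
        struct net_device *real_dev; /* always the real device */
};

static void example_del_sa(const struct xso_like *xso)
{
        /* The SA lives on the real device, so act on it there. */
        printf("deleting offloaded SA on %s\n", xso->real_dev->name);
}

int main(void)
{
        struct net_device bond = { "bond0" }, eth = { "eth0" };
        struct xso_like xso = { &bond, &eth };

        example_del_sa(&xso);   /* acts on eth0 even though xso.dev is bond0 */
        return 0;
}
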
Test commands: ip link add bond0 type bond #eth0 is ixgbevf interface ip link set eth0 master bond0 ip link set bond0 up ip x s add proto esp dst 14.1.1.1 src 15.1.1.1 spi 0x07 mode \ transport reqid 0x07 replay-window 32 aead 'rfc4106(gcm(aes))' \ 0x44434241343332312423222114131211f4f3f2f1 128 sel src 14.0.0.52/24 \ dst 14.0.0.70/24 proto tcp offload dev bond0 dir in Splat looks like: KASAN: null-ptr-deref in range [0x0000000000000000-0x0000000000000007] CPU: 6 PID: 688 Comm: ip Not tainted 5.13.0-rc3+ #1168 RIP: 0010:ixgbevf_ipsec_find_empty_idx+0x28/0x1b0 [ixgbevf] Code: 00 00 0f 1f 44 00 00 55 53 48 89 fb 48 83 ec 08 40 84 f6 0f 84 9c 00 00 00 48 b8 00 00 00 00 00 fc ff df 48 89 fa 48 c1 ea 03 <0f> b6 04 02 84 c0 74 08 3c 01 0f 8e 4c 01 00 00 66 81 3b 00 04 0f RSP: 0018:ffff8880089af390 EFLAGS: 00010246 RAX: dffffc0000000000 RBX: 0000000000000000 RCX: 0000000000000001 RDX: 0000000000000000 RSI: 0000000000000001 RDI: 0000000000000000 RBP: ffff8880089af4f8 R08: 0000000000000003 R09: fffffbfff4287e11 R10: 0000000000000001 R11: ffff888005de8908 R12: 0000000000000000 R13: ffff88810936a000 R14: ffff88810936a000 R15: ffff888004d78040 FS: 00007fdf9883a680(0000) GS:ffff88811a400000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 000055bc14adbf40 CR3: 000000000b87c005 CR4: 00000000003706e0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: ixgbevf_ipsec_add_sa+0x1bf/0x9c0 [ixgbevf] ? rcu_read_lock_sched_held+0x91/0xc0 ? ixgbevf_ipsec_parse_proto_keys.isra.9+0x280/0x280 [ixgbevf] ? lock_acquire+0x191/0x720 ? bond_ipsec_add_sa+0x48/0x350 [bonding] ? lockdep_hardirqs_on_prepare+0x3e0/0x3e0 ? rcu_read_lock_held+0x91/0xa0 ? rcu_read_lock_sched_held+0xc0/0xc0 bond_ipsec_add_sa+0x193/0x350 [bonding] xfrm_dev_state_add+0x2a9/0x770 ? memcpy+0x38/0x60 xfrm_add_sa+0x2278/0x3b10 [xfrm_user] ? xfrm_get_policy+0xaa0/0xaa0 [xfrm_user] ? register_lock_class+0x1750/0x1750 xfrm_user_rcv_msg+0x331/0x660 [xfrm_user] ? rcu_read_lock_sched_held+0x91/0xc0 ? xfrm_user_state_lookup.constprop.39+0x320/0x320 [xfrm_user] ? find_held_lock+0x3a/0x1c0 ? mutex_lock_io_nested+0x1210/0x1210 ? sched_clock_cpu+0x18/0x170 netlink_rcv_skb+0x121/0x350 [ ... ] Fixes: 272c2330adc9 ("xfrm: bail early on slave pass over skb") Signed-off-by: Taehee Yoo Signed-off-by: David S. 
Miller --- drivers/net/ethernet/intel/ixgbevf/ipsec.c | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c index caaea2c920a6..e3e4676af9e4 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c +++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c @@ -211,7 +211,7 @@ struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec, static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs, u32 *mykey, u32 *mysalt) { - struct net_device *dev = xs->xso.dev; + struct net_device *dev = xs->xso.real_dev; unsigned char *key_data; char *alg_name = NULL; int key_len; @@ -260,12 +260,15 @@ static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs, **/ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs) { - struct net_device *dev = xs->xso.dev; - struct ixgbevf_adapter *adapter = netdev_priv(dev); - struct ixgbevf_ipsec *ipsec = adapter->ipsec; + struct net_device *dev = xs->xso.real_dev; + struct ixgbevf_adapter *adapter; + struct ixgbevf_ipsec *ipsec; u16 sa_idx; int ret; + adapter = netdev_priv(dev); + ipsec = adapter->ipsec; + if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) { netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n", xs->id.proto); @@ -383,11 +386,14 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs) **/ static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs) { - struct net_device *dev = xs->xso.dev; - struct ixgbevf_adapter *adapter = netdev_priv(dev); - struct ixgbevf_ipsec *ipsec = adapter->ipsec; + struct net_device *dev = xs->xso.real_dev; + struct ixgbevf_adapter *adapter; + struct ixgbevf_ipsec *ipsec; u16 sa_idx; + adapter = netdev_priv(dev); + ipsec = adapter->ipsec; + if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) { sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX; -- cgit v1.2.3 From a22c39b831a081da9b2c488bd970a4412d926f30 Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Mon, 5 Jul 2021 15:38:10 +0000 Subject: bonding: fix suspicious RCU usage in bond_ipsec_del_sa() To dereference bond->curr_active_slave, it uses rcu_dereference(). But it and the caller doesn't acquire RCU so a warning occurs. So add rcu_read_lock(). Test commands: ip netns add A ip netns exec A bash modprobe netdevsim echo "1 1" > /sys/bus/netdevsim/new_device ip link add bond0 type bond ip link set eth0 master bond0 ip link set eth0 up ip link set bond0 up ip x s add proto esp dst 14.1.1.1 src 15.1.1.1 spi 0x07 mode \ transport reqid 0x07 replay-window 32 aead 'rfc4106(gcm(aes))' \ 0x44434241343332312423222114131211f4f3f2f1 128 sel src 14.0.0.52/24 \ dst 14.0.0.70/24 proto tcp offload dev bond0 dir in ip x s f Splat looks like: ============================= WARNING: suspicious RCU usage 5.13.0-rc3+ #1168 Not tainted ----------------------------- drivers/net/bonding/bond_main.c:448 suspicious rcu_dereference_check() usage! other info that might help us debug this: rcu_scheduler_active = 2, debug_locks = 1 2 locks held by ip/705: #0: ffff888106701780 (&net->xfrm.xfrm_cfg_mutex){+.+.}-{3:3}, at: xfrm_netlink_rcv+0x59/0x80 [xfrm_user] #1: ffff8880075b0098 (&x->lock){+.-.}-{2:2}, at: xfrm_state_delete+0x16/0x30 stack backtrace: CPU: 6 PID: 705 Comm: ip Not tainted 5.13.0-rc3+ #1168 Call Trace: dump_stack+0xa4/0xe5 bond_ipsec_del_sa+0x16a/0x1c0 [bonding] __xfrm_state_delete+0x51f/0x730 xfrm_state_delete+0x1e/0x30 xfrm_state_flush+0x22f/0x390 xfrm_flush_sa+0xd8/0x260 [xfrm_user] ? 
xfrm_flush_policy+0x290/0x290 [xfrm_user] xfrm_user_rcv_msg+0x331/0x660 [xfrm_user] ? rcu_read_lock_sched_held+0x91/0xc0 ? xfrm_user_state_lookup.constprop.39+0x320/0x320 [xfrm_user] ? find_held_lock+0x3a/0x1c0 ? mutex_lock_io_nested+0x1210/0x1210 ? sched_clock_cpu+0x18/0x170 netlink_rcv_skb+0x121/0x350 [ ... ] Fixes: 18cb261afd7b ("bonding: support hardware encryption offload to slaves") Signed-off-by: Taehee Yoo Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 5466b24ceab6..aa9c469ebbb5 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -444,21 +444,24 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs) if (!bond_dev) return; + rcu_read_lock(); bond = netdev_priv(bond_dev); slave = rcu_dereference(bond->curr_active_slave); if (!slave) - return; + goto out; xs->xso.real_dev = slave->dev; if (!(slave->dev->xfrmdev_ops && slave->dev->xfrmdev_ops->xdo_dev_state_delete)) { slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__); - return; + goto out; } slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs); +out: + rcu_read_unlock(); } /** -- cgit v1.2.3 From b121693381b112b78c076dea171ee113e237c0e4 Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Mon, 5 Jul 2021 15:38:11 +0000 Subject: bonding: disallow setting nested bonding + ipsec offload bonding interface can be nested and it supports ipsec offload. So, it allows setting the nested bonding + ipsec scenario. But code does not support this scenario. So, it should be disallowed. interface graph: bond2 | bond1 | eth0 The nested bonding + ipsec offload may not a real usecase. So, disallowing this scenario is fine. Fixes: 18cb261afd7b ("bonding: support hardware encryption offload to slaves") Signed-off-by: Taehee Yoo Signed-off-by: David S. 
Miller --- drivers/net/bonding/bond_main.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index aa9c469ebbb5..f7b89743fab9 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -419,8 +419,9 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs) xs->xso.real_dev = slave->dev; bond->xs = xs; - if (!(slave->dev->xfrmdev_ops - && slave->dev->xfrmdev_ops->xdo_dev_state_add)) { + if (!slave->dev->xfrmdev_ops || + !slave->dev->xfrmdev_ops->xdo_dev_state_add || + netif_is_bond_master(slave->dev)) { slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n"); rcu_read_unlock(); return -EINVAL; @@ -453,8 +454,9 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs) xs->xso.real_dev = slave->dev; - if (!(slave->dev->xfrmdev_ops - && slave->dev->xfrmdev_ops->xdo_dev_state_delete)) { + if (!slave->dev->xfrmdev_ops || + !slave->dev->xfrmdev_ops->xdo_dev_state_delete || + netif_is_bond_master(slave->dev)) { slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__); goto out; } @@ -479,8 +481,9 @@ static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) return true; - if (!(slave_dev->xfrmdev_ops - && slave_dev->xfrmdev_ops->xdo_dev_offload_ok)) { + if (!slave_dev->xfrmdev_ops || + !slave_dev->xfrmdev_ops->xdo_dev_offload_ok || + netif_is_bond_master(slave_dev)) { slave_warn(bond_dev, slave_dev, "%s: no slave xdo_dev_offload_ok\n", __func__); return false; } -- cgit v1.2.3 From 9a5605505d9c7dbfdb89cc29a8f5fc5cf9fd2334 Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Mon, 5 Jul 2021 15:38:12 +0000 Subject: bonding: Add struct bond_ipesc to manage SA bonding has been supporting ipsec offload. When SA is added, bonding just passes SA to its own active real interface. But it doesn't manage SA. So, when events(add/del real interface, active real interface change, etc) occur, bonding can't handle that well because It doesn't manage SA. So some problems(panic, UAF, refcnt leak)occur. In order to make it stable, it should manage SA. That's the reason why struct bond_ipsec is added. When a new SA is added to bonding interface, it is stored in the bond_ipsec list. And the SA is passed to a current active real interface. If events occur, it uses bond_ipsec data to handle these events. bond->ipsec_list is protected by bond->ipsec_lock. If a current active real interface is changed, the following logic works. 1. delete all SAs from old active real interface 2. Add all SAs to the new active real interface. 3. If a new active real interface doesn't support ipsec offload or SA's option, it sets real_dev to NULL. Fixes: 18cb261afd7b ("bonding: support hardware encryption offload to slaves") Signed-off-by: Taehee Yoo Signed-off-by: David S. 
Miller --- drivers/net/bonding/bond_main.c | 139 ++++++++++++++++++++++++++++++++++------ include/net/bonding.h | 9 ++- 2 files changed, 127 insertions(+), 21 deletions(-) (limited to 'drivers') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index f7b89743fab9..165fa55cfb38 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -401,6 +401,7 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev, static int bond_ipsec_add_sa(struct xfrm_state *xs) { struct net_device *bond_dev = xs->xso.dev; + struct bond_ipsec *ipsec; struct bonding *bond; struct slave *slave; int err; @@ -416,9 +417,6 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs) return -ENODEV; } - xs->xso.real_dev = slave->dev; - bond->xs = xs; - if (!slave->dev->xfrmdev_ops || !slave->dev->xfrmdev_ops->xdo_dev_state_add || netif_is_bond_master(slave->dev)) { @@ -427,11 +425,63 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs) return -EINVAL; } + ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC); + if (!ipsec) { + rcu_read_unlock(); + return -ENOMEM; + } + xs->xso.real_dev = slave->dev; + err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs); + if (!err) { + ipsec->xs = xs; + INIT_LIST_HEAD(&ipsec->list); + spin_lock_bh(&bond->ipsec_lock); + list_add(&ipsec->list, &bond->ipsec_list); + spin_unlock_bh(&bond->ipsec_lock); + } else { + kfree(ipsec); + } rcu_read_unlock(); return err; } +static void bond_ipsec_add_sa_all(struct bonding *bond) +{ + struct net_device *bond_dev = bond->dev; + struct bond_ipsec *ipsec; + struct slave *slave; + + rcu_read_lock(); + slave = rcu_dereference(bond->curr_active_slave); + if (!slave) + goto out; + + if (!slave->dev->xfrmdev_ops || + !slave->dev->xfrmdev_ops->xdo_dev_state_add || + netif_is_bond_master(slave->dev)) { + spin_lock_bh(&bond->ipsec_lock); + if (!list_empty(&bond->ipsec_list)) + slave_warn(bond_dev, slave->dev, + "%s: no slave xdo_dev_state_add\n", + __func__); + spin_unlock_bh(&bond->ipsec_lock); + goto out; + } + + spin_lock_bh(&bond->ipsec_lock); + list_for_each_entry(ipsec, &bond->ipsec_list, list) { + ipsec->xs->xso.real_dev = slave->dev; + if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs)) { + slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__); + ipsec->xs->xso.real_dev = NULL; + } + } + spin_unlock_bh(&bond->ipsec_lock); +out: + rcu_read_unlock(); +} + /** * bond_ipsec_del_sa - clear out this specific SA * @xs: pointer to transformer state struct @@ -439,6 +489,7 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs) static void bond_ipsec_del_sa(struct xfrm_state *xs) { struct net_device *bond_dev = xs->xso.dev; + struct bond_ipsec *ipsec; struct bonding *bond; struct slave *slave; @@ -452,7 +503,10 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs) if (!slave) goto out; - xs->xso.real_dev = slave->dev; + if (!xs->xso.real_dev) + goto out; + + WARN_ON(xs->xso.real_dev != slave->dev); if (!slave->dev->xfrmdev_ops || !slave->dev->xfrmdev_ops->xdo_dev_state_delete || @@ -463,6 +517,48 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs) slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs); out: + spin_lock_bh(&bond->ipsec_lock); + list_for_each_entry(ipsec, &bond->ipsec_list, list) { + if (ipsec->xs == xs) { + list_del(&ipsec->list); + kfree(ipsec); + break; + } + } + spin_unlock_bh(&bond->ipsec_lock); + rcu_read_unlock(); +} + +static void bond_ipsec_del_sa_all(struct bonding *bond) +{ + struct net_device *bond_dev = bond->dev; + struct bond_ipsec *ipsec; + struct slave 
*slave; + + rcu_read_lock(); + slave = rcu_dereference(bond->curr_active_slave); + if (!slave) { + rcu_read_unlock(); + return; + } + + spin_lock_bh(&bond->ipsec_lock); + list_for_each_entry(ipsec, &bond->ipsec_list, list) { + if (!ipsec->xs->xso.real_dev) + continue; + + if (!slave->dev->xfrmdev_ops || + !slave->dev->xfrmdev_ops->xdo_dev_state_delete || + netif_is_bond_master(slave->dev)) { + slave_warn(bond_dev, slave->dev, + "%s: no slave xdo_dev_state_delete\n", + __func__); + } else { + slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs); + } + ipsec->xs->xso.real_dev = NULL; + } + spin_unlock_bh(&bond->ipsec_lock); rcu_read_unlock(); } @@ -474,22 +570,27 @@ out: static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) { struct net_device *bond_dev = xs->xso.dev; - struct bonding *bond = netdev_priv(bond_dev); - struct slave *curr_active = rcu_dereference(bond->curr_active_slave); - struct net_device *slave_dev = curr_active->dev; + struct net_device *real_dev; + struct slave *curr_active; + struct bonding *bond; + + bond = netdev_priv(bond_dev); + curr_active = rcu_dereference(bond->curr_active_slave); + real_dev = curr_active->dev; if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) return true; - if (!slave_dev->xfrmdev_ops || - !slave_dev->xfrmdev_ops->xdo_dev_offload_ok || - netif_is_bond_master(slave_dev)) { - slave_warn(bond_dev, slave_dev, "%s: no slave xdo_dev_offload_ok\n", __func__); + if (!xs->xso.real_dev) + return false; + + if (!real_dev->xfrmdev_ops || + !real_dev->xfrmdev_ops->xdo_dev_offload_ok || + netif_is_bond_master(real_dev)) { return false; } - xs->xso.real_dev = slave_dev; - return slave_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs); + return real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs); } static const struct xfrmdev_ops bond_xfrmdev_ops = { @@ -1006,8 +1107,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) return; #ifdef CONFIG_XFRM_OFFLOAD - if (old_active && bond->xs) - bond_ipsec_del_sa(bond->xs); + bond_ipsec_del_sa_all(bond); #endif /* CONFIG_XFRM_OFFLOAD */ if (new_active) { @@ -1082,10 +1182,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active) } #ifdef CONFIG_XFRM_OFFLOAD - if (new_active && bond->xs) { - xfrm_dev_state_flush(dev_net(bond->dev), bond->dev, true); - bond_ipsec_add_sa(bond->xs); - } + bond_ipsec_add_sa_all(bond); #endif /* CONFIG_XFRM_OFFLOAD */ /* resend IGMP joins since active slave has changed or @@ -3343,6 +3440,7 @@ static int bond_master_netdev_event(unsigned long event, return bond_event_changename(event_bond); case NETDEV_UNREGISTER: bond_remove_proc_entry(event_bond); + xfrm_dev_state_flush(dev_net(bond_dev), bond_dev, true); break; case NETDEV_REGISTER: bond_create_proc_entry(event_bond); @@ -4910,7 +5008,8 @@ void bond_setup(struct net_device *bond_dev) #ifdef CONFIG_XFRM_OFFLOAD /* set up xfrm device ops (only supported in active-backup right now) */ bond_dev->xfrmdev_ops = &bond_xfrmdev_ops; - bond->xs = NULL; + INIT_LIST_HEAD(&bond->ipsec_list); + spin_lock_init(&bond->ipsec_lock); #endif /* CONFIG_XFRM_OFFLOAD */ /* don't acquire bond device's netif_tx_lock when transmitting */ diff --git a/include/net/bonding.h b/include/net/bonding.h index 15335732e166..625d9c72dee3 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -201,6 +201,11 @@ struct bond_up_slave { */ #define BOND_LINK_NOCHANGE -1 +struct bond_ipsec { + struct list_head list; + struct xfrm_state *xs; +}; + /* * Here are the locking policies for the two bonding 
locks: * Get rcu_read_lock when reading or RTNL when writing slave list. @@ -249,7 +254,9 @@ struct bonding { #endif /* CONFIG_DEBUG_FS */ struct rtnl_link_stats64 bond_stats; #ifdef CONFIG_XFRM_OFFLOAD - struct xfrm_state *xs; + struct list_head ipsec_list; + /* protecting ipsec_list */ + spinlock_t ipsec_lock; #endif /* CONFIG_XFRM_OFFLOAD */ }; -- cgit v1.2.3 From 955b785ec6b3b2f9b91914d6eeac8ee66ee29239 Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Mon, 5 Jul 2021 15:38:13 +0000 Subject: bonding: fix suspicious RCU usage in bond_ipsec_offload_ok() To dereference bond->curr_active_slave, it uses rcu_dereference(). But it and the caller doesn't acquire RCU so a warning occurs. So add rcu_read_lock(). Splat looks like: WARNING: suspicious RCU usage 5.13.0-rc6+ #1179 Not tainted drivers/net/bonding/bond_main.c:571 suspicious rcu_dereference_check() usage! other info that might help us debug this: rcu_scheduler_active = 2, debug_locks = 1 1 lock held by ping/974: #0: ffff888109e7db70 (sk_lock-AF_INET){+.+.}-{0:0}, at: raw_sendmsg+0x1303/0x2cb0 stack backtrace: CPU: 2 PID: 974 Comm: ping Not tainted 5.13.0-rc6+ #1179 Call Trace: dump_stack+0xa4/0xe5 bond_ipsec_offload_ok+0x1f4/0x260 [bonding] xfrm_output+0x179/0x890 xfrm4_output+0xfa/0x410 ? __xfrm4_output+0x4b0/0x4b0 ? __ip_make_skb+0xecc/0x2030 ? xfrm4_udp_encap_rcv+0x800/0x800 ? ip_local_out+0x21/0x3a0 ip_send_skb+0x37/0xa0 raw_sendmsg+0x1bfd/0x2cb0 Fixes: 18cb261afd7b ("bonding: support hardware encryption offload to slaves") Signed-off-by: Taehee Yoo Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) (limited to 'drivers') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 165fa55cfb38..780f87869e36 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -573,24 +573,34 @@ static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) struct net_device *real_dev; struct slave *curr_active; struct bonding *bond; + int err; bond = netdev_priv(bond_dev); + rcu_read_lock(); curr_active = rcu_dereference(bond->curr_active_slave); real_dev = curr_active->dev; - if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) - return true; + if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { + err = true; + goto out; + } - if (!xs->xso.real_dev) - return false; + if (!xs->xso.real_dev) { + err = false; + goto out; + } if (!real_dev->xfrmdev_ops || !real_dev->xfrmdev_ops->xdo_dev_offload_ok || netif_is_bond_master(real_dev)) { - return false; + err = false; + goto out; } - return real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs); + err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs); +out: + rcu_read_unlock(); + return err; } static const struct xfrmdev_ops bond_xfrmdev_ops = { -- cgit v1.2.3 From 168e696a36792a4a3b2525a06249e7472ef90186 Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Mon, 5 Jul 2021 15:38:14 +0000 Subject: bonding: fix incorrect return value of bond_ipsec_offload_ok() bond_ipsec_offload_ok() is called to check whether the interface supports ipsec offload or not. bonding interface support ipsec offload only in active-backup mode. So, if a bond interface is not in active-backup mode, it should return false but it returns true. Fixes: a3b658cfb664 ("bonding: allow xfrm offload setup post-module-load") Signed-off-by: Taehee Yoo Signed-off-by: David S. 
Miller --- drivers/net/bonding/bond_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 780f87869e36..d22d78303311 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -581,7 +581,7 @@ static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs) real_dev = curr_active->dev; if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) { - err = true; + err = false; goto out; } -- cgit v1.2.3 From ad1f37970875eef98eeaf478f55045f388b794a5 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 6 Jul 2021 12:18:02 +0100 Subject: octeontx2-pf: Fix assigned error return value that is never used Currently when the call to otx2_mbox_alloc_msg_cgx_mac_addr_update fails the error return variable rc is being assigned -ENOMEM and does not return early. rc is then re-assigned and the error case is not handled correctly. Fix this by returning -ENOMEM rather than assigning rc. Addresses-Coverity: ("Unused value") Fixes: 79d2be385e9e ("octeontx2-pf: offload DMAC filters to CGX/RPM block") Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c index ffe3e94562d0..383a6b5cb698 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c @@ -161,7 +161,7 @@ int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos) if (!req) { mutex_unlock(&pf->mbox.lock); - rc = -ENOMEM; + return -ENOMEM; } ether_addr_copy(req->mac_addr, mac); -- cgit v1.2.3 From 0d472c69c6a5e22cef9e5809e2f6d0ccd5934f4a Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 7 Jul 2021 15:50:57 +0800 Subject: stmmac: dwmac-loongson: Fix unsigned comparison to zero plat->phy_interface is unsigned integer, so the condition can't be less than zero and the warning will never printed. Fixes: 30bba69d7db4 ("stmmac: pci: Add dwmac support for Loongson") Signed-off-by: YueHaibing Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index e108b0d2bd28..4c9a37dd0d3f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -49,9 +49,9 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id { struct plat_stmmacenet_data *plat; struct stmmac_resources res; - bool mdio = false; - int ret, i; struct device_node *np; + int ret, i, phy_mode; + bool mdio = false; np = dev_of_node(&pdev->dev); @@ -108,10 +108,11 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id if (plat->bus_id < 0) plat->bus_id = pci_dev_id(pdev); - plat->phy_interface = device_get_phy_mode(&pdev->dev); - if (plat->phy_interface < 0) + phy_mode = device_get_phy_mode(&pdev->dev); + if (phy_mode < 0) dev_err(&pdev->dev, "phy_mode not found\n"); + plat->phy_interface = phy_mode; plat->interface = PHY_INTERFACE_MODE_GMII; pci_set_master(pdev); -- cgit v1.2.3 From eca81f09145d765c21dd8fb1ba5d874ca255c32c Mon Sep 17 00:00:00 2001 From: YueHaibing Date: Wed, 7 Jul 2021 15:53:35 +0800 Subject: stmmac: platform: Fix signedness bug in stmmac_probe_config_dt() The "plat->phy_interface" variable is an enum and in this context GCC will treat it as an unsigned int so the error handling is never triggered. Fixes: b9f0b2f634c0 ("net: stmmac: platform: fix probe for ACPI devices") Signed-off-by: YueHaibing Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 072eff8079d0..5ca710844cc1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -397,6 +397,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) struct device_node *np = pdev->dev.of_node; struct plat_stmmacenet_data *plat; struct stmmac_dma_cfg *dma_cfg; + int phy_mode; void *ret; int rc; @@ -412,10 +413,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac) eth_zero_addr(mac); } - plat->phy_interface = device_get_phy_mode(&pdev->dev); - if (plat->phy_interface < 0) - return ERR_PTR(plat->phy_interface); + phy_mode = device_get_phy_mode(&pdev->dev); + if (phy_mode < 0) + return ERR_PTR(phy_mode); + plat->phy_interface = phy_mode; plat->interface = stmmac_of_get_mac_mode(np); if (plat->interface < 0) plat->interface = plat->phy_interface; -- cgit v1.2.3 From debdd8e31895fdd1e2cfeb7a5aff1c83e49a91ba Mon Sep 17 00:00:00 2001 From: Jonathan Lemon Date: Thu, 8 Jul 2021 11:04:08 -0700 Subject: ptp: Relocate lookup cookie to correct block. An earlier commit set the pps_lookup cookie, but the line was somehow added to the wrong code block. Correct this. Fixes: 8602e40fc813 ("ptp: Set lookup cookie when creating a PTP PPS source.") Signed-off-by: Jonathan Lemon Signed-off-by: Dario Binacchi Acked-by: Richard Cochran Signed-off-by: David S. 
Miller --- drivers/ptp/ptp_clock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c index ce6d9fc85607..4dfc52e06704 100644 --- a/drivers/ptp/ptp_clock.c +++ b/drivers/ptp/ptp_clock.c @@ -232,7 +232,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, pr_err("failed to create ptp aux_worker %d\n", err); goto kworker_err; } - ptp->pps_source->lookup_cookie = ptp; } /* PTP virtual clock is being registered under physical clock */ @@ -268,6 +267,7 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info, pr_err("failed to register pps source\n"); goto no_pps; } + ptp->pps_source->lookup_cookie = ptp; } /* Initialize a new device of our class in our clock structure. */ -- cgit v1.2.3 From b9d233ea21f192702f8bbf3f5f640e2dde308b25 Mon Sep 17 00:00:00 2001 From: Gatis Peisenieks Date: Thu, 8 Jul 2021 12:49:04 +0300 Subject: atl1c: fix Mikrotik 10/25G NIC detection Since Mikrotik 10/25G NIC MDIO op emulation is not 100% reliable, on rare occasions it can happen that some physical functions of the NIC do not get initialized due to timeouted early MDIO op. This changes the atl1c probe on Mikrotik 10/25G NIC not to depend on MDIO op emulation. Signed-off-by: Gatis Peisenieks Signed-off-by: David S. Miller --- drivers/net/ethernet/atheros/atl1c/atl1c_hw.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c index 7dff20350865..f19370c33444 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c @@ -594,6 +594,11 @@ int atl1c_phy_init(struct atl1c_hw *hw) int ret_val; u16 mii_bmcr_data = BMCR_RESET; + if (hw->nic_type == athr_mt) { + hw->phy_configured = true; + return 0; + } + if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id1) != 0) || (atl1c_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id2) != 0)) { dev_err(&pdev->dev, "Error get phy ID\n"); -- cgit v1.2.3 From c34269041185dad1bab7a34f42ef9fab967a1684 Mon Sep 17 00:00:00 2001 From: Aaron Ma Date: Thu, 8 Jul 2021 21:17:10 +0800 Subject: mt76: mt7921: continue to probe driver when fw already downloaded When reboot system, no power cycles, firmware is already downloaded, return -EIO will break driver as error: mt7921e: probe of 0000:03:00.0 failed with error -5 Skip firmware download and continue to probe. Signed-off-by: Aaron Ma Fixes: 1c099ab44727c ("mt76: mt7921: add MCU support") Signed-off-by: David S. 
Miller --- drivers/net/wireless/mediatek/mt76/mt7921/mcu.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c index c2c4dc196802..cd690c64f65b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c @@ -931,7 +931,7 @@ static int mt7921_load_firmware(struct mt7921_dev *dev) ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY); if (ret) { dev_dbg(dev->mt76.dev, "Firmware is already download\n"); - return -EIO; + goto fw_loaded; } ret = mt7921_load_patch(dev); @@ -949,6 +949,7 @@ static int mt7921_load_firmware(struct mt7921_dev *dev) return -EIO; } +fw_loaded: mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false); #ifdef CONFIG_PM -- cgit v1.2.3 From 015fe6fd29c4b9ac0f61b8c4455ef88e6018b9cc Mon Sep 17 00:00:00 2001 From: Shahjada Abul Husain Date: Thu, 8 Jul 2021 21:51:56 +0530 Subject: cxgb4: fix IRQ free race during driver unload IRQs are requested during driver's ndo_open() and then later freed up in disable_interrupts() during driver unload. A race exists where driver can set the CXGB4_FULL_INIT_DONE flag in ndo_open() after the disable_interrupts() in driver unload path checks it, and hence misses calling free_irq(). Fix by unregistering netdevice first and sync with driver's ndo_open(). This ensures disable_interrupts() checks the flag correctly and frees up the IRQs properly. Fixes: b37987e8db5f ("cxgb4: Disable interrupts and napi before unregistering netdev") Signed-off-by: Shahjada Abul Husain Signed-off-by: Raju Rangoju Signed-off-by: David S. Miller --- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 18 ++++++++++-------- drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 3 +++ 2 files changed, 13 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 9a2b166d651e..dbf9a0e6601d 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2643,6 +2643,9 @@ static void detach_ulds(struct adapter *adap) { unsigned int i; + if (!is_uld(adap)) + return; + mutex_lock(&uld_mutex); list_del(&adap->list_node); @@ -7141,10 +7144,13 @@ static void remove_one(struct pci_dev *pdev) */ destroy_workqueue(adapter->workq); - if (is_uld(adapter)) { - detach_ulds(adapter); - t4_uld_clean_up(adapter); - } + detach_ulds(adapter); + + for_each_port(adapter, i) + if (adapter->port[i]->reg_state == NETREG_REGISTERED) + unregister_netdev(adapter->port[i]); + + t4_uld_clean_up(adapter); adap_free_hma_mem(adapter); @@ -7152,10 +7158,6 @@ static void remove_one(struct pci_dev *pdev) cxgb4_free_mps_ref_entries(adapter); - for_each_port(adapter, i) - if (adapter->port[i]->reg_state == NETREG_REGISTERED) - unregister_netdev(adapter->port[i]); - debugfs_remove_recursive(adapter->debugfs_root); if (!is_t4(adapter->params.chip)) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index 743af9e654aa..17faac715882 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -581,6 +581,9 @@ void t4_uld_clean_up(struct adapter *adap) { unsigned int i; + if (!is_uld(adap)) + return; + mutex_lock(&uld_mutex); for (i = 0; i < CXGB4_ULD_MAX; i++) { if (!adap->uld[i].handle) -- cgit v1.2.3 From 
96248d6da65744e1baaa29e5c4e5dc233e29838b Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Thu, 8 Jul 2021 10:33:10 -0700 Subject: net: microchip: sparx5: fix kconfig warning PHY_SPARX5_SERDES depends on OF so SPARX5_SWITCH should also depend on OF since 'select' does not follow any dependencies. WARNING: unmet direct dependencies detected for PHY_SPARX5_SERDES Depends on [n]: (ARCH_SPARX5 || COMPILE_TEST [=n]) && OF [=n] && HAS_IOMEM [=y] Selected by [y]: - SPARX5_SWITCH [=y] && NETDEVICES [=y] && ETHERNET [=y] && NET_VENDOR_MICROCHIP [=y] && NET_SWITCHDEV [=y] && HAS_IOMEM [=y] Fixes: 3cfa11bac9bb ("net: sparx5: add the basic sparx5 driver") Signed-off-by: Randy Dunlap Cc: Lars Povlsen Cc: Steen Hegelund Cc: UNGLinuxDriver@microchip.com Cc: linux-arm-kernel@lists.infradead.org Cc: "David S. Miller" Cc: Jakub Kicinski Cc: netdev@vger.kernel.org Signed-off-by: David S. Miller --- drivers/net/ethernet/microchip/sparx5/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig index a80419d8d4b5..ac403d43c74c 100644 --- a/drivers/net/ethernet/microchip/sparx5/Kconfig +++ b/drivers/net/ethernet/microchip/sparx5/Kconfig @@ -2,6 +2,7 @@ config SPARX5_SWITCH tristate "Sparx5 switch driver" depends on NET_SWITCHDEV depends on HAS_IOMEM + depends on OF select PHYLINK select PHY_SPARX5_SERDES select RESET_CONTROLLER -- cgit v1.2.3 From 2b452550a203d88112eaf0ba9fc4b750a000b496 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 8 Jul 2021 18:55:32 -0700 Subject: net: bcmgenet: Ensure all TX/RX queues DMAs are disabled Make sure that we disable each of the TX and RX queues in the TDMA and RDMA control registers. This is a correctness change to be symmetrical with the code that enables the TX and RX queues. Tested-by: Maxime Ripard Fixes: 1c1008c793fa ("net: bcmgenet: add main driver file") Signed-off-by: Florian Fainelli Signed-off-by: David S. 
Miller --- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 35e9956e930c..db74241935ab 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -3238,15 +3238,21 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv, /* Returns a reusable dma control register value */ static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv) { + unsigned int i; u32 reg; u32 dma_ctrl; /* disable DMA */ dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; + for (i = 0; i < priv->hw_params->tx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); reg = bcmgenet_tdma_readl(priv, DMA_CTRL); reg &= ~dma_ctrl; bcmgenet_tdma_writel(priv, reg, DMA_CTRL); + dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN; + for (i = 0; i < priv->hw_params->rx_queues; i++) + dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT)); reg = bcmgenet_rdma_readl(priv, DMA_CTRL); reg &= ~dma_ctrl; bcmgenet_rdma_writel(priv, reg, DMA_CTRL); -- cgit v1.2.3 From c78eaeebe855fd93f2e77142ffd0404a54070d84 Mon Sep 17 00:00:00 2001 From: Pavel Skripkin Date: Fri, 9 Jul 2021 17:09:53 +0300 Subject: net: moxa: fix UAF in moxart_mac_probe In case of netdev registration failure the code path will jump to init_fail label: init_fail: netdev_err(ndev, "init failed\n"); moxart_mac_free_memory(ndev); irq_map_fail: free_netdev(ndev); return ret; So, there is no need to call free_netdev() before jumping to error handling path, since it can cause UAF or double-free bug. Fixes: 6c821bd9edc9 ("net: Add MOXA ART SoCs ethernet driver") Signed-off-by: Pavel Skripkin Signed-off-by: David S. Miller --- drivers/net/ethernet/moxa/moxart_ether.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c index 5249b64f4fc5..49def6934cad 100644 --- a/drivers/net/ethernet/moxa/moxart_ether.c +++ b/drivers/net/ethernet/moxa/moxart_ether.c @@ -540,10 +540,8 @@ static int moxart_mac_probe(struct platform_device *pdev) SET_NETDEV_DEV(ndev, &pdev->dev); ret = register_netdev(ndev); - if (ret) { - free_netdev(ndev); + if (ret) goto init_fail; - } netdev_dbg(ndev, "%s: IRQ=%d address=%pM\n", __func__, ndev->irq, ndev->dev_addr); -- cgit v1.2.3 From ad297cd2db8953e2202970e9504cab247b6c7cb4 Mon Sep 17 00:00:00 2001 From: Pavel Skripkin Date: Fri, 9 Jul 2021 17:24:18 +0300 Subject: net: qcom/emac: fix UAF in emac_remove adpt is netdev private data and it cannot be used after free_netdev() call. Using adpt after free_netdev() can cause UAF bug. Fix it by moving free_netdev() at the end of the function. Fixes: 54e19bc74f33 ("net: qcom/emac: do not use devm on internal phy pdev") Signed-off-by: Pavel Skripkin Signed-off-by: David S. 
Miller --- drivers/net/ethernet/qualcomm/emac/emac.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 8543bf3c3484..ad655f0a4965 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -735,12 +735,13 @@ static int emac_remove(struct platform_device *pdev) put_device(&adpt->phydev->mdio.dev); mdiobus_unregister(adpt->mii_bus); - free_netdev(netdev); if (adpt->phy.digital) iounmap(adpt->phy.digital); iounmap(adpt->phy.base); + free_netdev(netdev); + return 0; } -- cgit v1.2.3 From 0336f8ffece62f882ab3012820965a786a983f70 Mon Sep 17 00:00:00 2001 From: Pavel Skripkin Date: Fri, 9 Jul 2021 17:58:29 +0300 Subject: net: ti: fix UAF in tlan_remove_one priv is netdev private data and it cannot be used after free_netdev() call. Using priv after free_netdev() can cause UAF bug. Fix it by moving free_netdev() at the end of the function. Fixes: 1e0a8b13d355 ("tlan: cancel work at remove path") Signed-off-by: Pavel Skripkin Signed-off-by: David S. Miller --- drivers/net/ethernet/ti/tlan.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 0b2ce4bdc2c3..e0cb713193ea 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -313,9 +313,8 @@ static void tlan_remove_one(struct pci_dev *pdev) pci_release_regions(pdev); #endif - free_netdev(dev); - cancel_work_sync(&priv->tlan_tqueue); + free_netdev(dev); } static void tlan_start(struct net_device *dev) -- cgit v1.2.3 From 222722bc6ebfabf5d54467070f05cf9c0a55ea8c Mon Sep 17 00:00:00 2001 From: Yunjian Wang Date: Sat, 10 Jul 2021 11:32:49 +0800 Subject: virtio_net: check virtqueue_add_sgs() return value As virtqueue_add_sgs() can fail, we should check the return value. Addresses-Coverity-ID: 1464439 ("Unchecked return value") Signed-off-by: Yunjian Wang Signed-off-by: David S. Miller --- drivers/net/virtio_net.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index b0b81458ca94..13952e2dba5e 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -1743,6 +1743,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, { struct scatterlist *sgs[4], hdr, stat; unsigned out_num = 0, tmp; + int ret; /* Caller should know better */ BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)); @@ -1762,7 +1763,12 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd, sgs[out_num] = &stat; BUG_ON(out_num + 1 > ARRAY_SIZE(sgs)); - virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); + ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC); + if (ret < 0) { + dev_warn(&vi->vdev->dev, + "Failed to add sgs for command vq: %d\n.", ret); + return false; + } if (unlikely(!virtqueue_kick(vi->cvq))) return vi->ctrl->status == VIRTIO_NET_OK; -- cgit v1.2.3 From 84f7e0bb4809f4497124b6b6904c07c8a0c73c58 Mon Sep 17 00:00:00 2001 From: kernel test robot Date: Sun, 11 Jul 2021 18:12:56 +0200 Subject: dsa: fix for_each_child.cocci warnings For_each_available_child_of_node should have of_node_put() before return around line 423. 
Generated by: scripts/coccinelle/iterators/for_each_child.cocci CC: Alexander Lobakin Reported-by: kernel test robot Signed-off-by: kernel test robot Signed-off-by: Julia Lawall Signed-off-by: David S. Miller --- drivers/net/dsa/microchip/ksz_common.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index a7e5ac60baef..1542bfb8b5e5 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -419,8 +419,10 @@ int ksz_switch_register(struct ksz_device *dev, if (of_property_read_u32(port, "reg", &port_num)) continue; - if (!(dev->port_mask & BIT(port_num))) + if (!(dev->port_mask & BIT(port_num))) { + of_node_put(port); return -EINVAL; + } of_get_phy_mode(port, &dev->ports[port_num].interface); } -- cgit v1.2.3 From a5de4be0aaaa66a2fa98e8a33bdbed3bd0682804 Mon Sep 17 00:00:00 2001 From: Marek Behún Date: Sun, 11 Jul 2021 18:38:15 +0200 Subject: net: phy: marvell10g: fix differentiation of 88X3310 from 88X3340 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It seems that we cannot differentiate 88X3310 from 88X3340 by simply looking at bit 3 of revision ID. This only works on revisions A0 and A1. On revision B0, this bit is always 1. Instead use the 3.d00d register for differentiation, since this register contains information about number of ports on the device. Fixes: 9885d016ffa9 ("net: phy: marvell10g: add separate structure for 88X3340") Signed-off-by: Marek Behún Reported-by: Matteo Croce Tested-by: Matteo Croce Signed-off-by: David S. Miller --- drivers/net/phy/marvell10g.c | 40 +++++++++++++++++++++++++++++++++++----- include/linux/marvell_phy.h | 6 +----- 2 files changed, 36 insertions(+), 10 deletions(-) (limited to 'drivers') diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index bbbc6ac8fa82..53a433442803 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -78,6 +78,11 @@ enum { /* Temperature read register (88E2110 only) */ MV_PCS_TEMP = 0x8042, + /* Number of ports on the device */ + MV_PCS_PORT_INFO = 0xd00d, + MV_PCS_PORT_INFO_NPORTS_MASK = 0x0380, + MV_PCS_PORT_INFO_NPORTS_SHIFT = 7, + /* These registers appear at 0x800X and 0xa00X - the 0xa00X control * registers appear to set themselves to the 0x800X when AN is * restarted, but status registers appear readable from either. 
@@ -966,6 +971,30 @@ static const struct mv3310_chip mv2111_type = { #endif }; +static int mv3310_get_number_of_ports(struct phy_device *phydev) +{ + int ret; + + ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_PORT_INFO); + if (ret < 0) + return ret; + + ret &= MV_PCS_PORT_INFO_NPORTS_MASK; + ret >>= MV_PCS_PORT_INFO_NPORTS_SHIFT; + + return ret + 1; +} + +static int mv3310_match_phy_device(struct phy_device *phydev) +{ + return mv3310_get_number_of_ports(phydev) == 1; +} + +static int mv3340_match_phy_device(struct phy_device *phydev) +{ + return mv3310_get_number_of_ports(phydev) == 4; +} + static int mv211x_match_phy_device(struct phy_device *phydev, bool has_5g) { int val; @@ -994,7 +1023,8 @@ static int mv2111_match_phy_device(struct phy_device *phydev) static struct phy_driver mv3310_drivers[] = { { .phy_id = MARVELL_PHY_ID_88X3310, - .phy_id_mask = MARVELL_PHY_ID_88X33X0_MASK, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .match_phy_device = mv3310_match_phy_device, .name = "mv88x3310", .driver_data = &mv3310_type, .get_features = mv3310_get_features, @@ -1011,8 +1041,9 @@ static struct phy_driver mv3310_drivers[] = { .set_loopback = genphy_c45_loopback, }, { - .phy_id = MARVELL_PHY_ID_88X3340, - .phy_id_mask = MARVELL_PHY_ID_88X33X0_MASK, + .phy_id = MARVELL_PHY_ID_88X3310, + .phy_id_mask = MARVELL_PHY_ID_MASK, + .match_phy_device = mv3340_match_phy_device, .name = "mv88x3340", .driver_data = &mv3340_type, .get_features = mv3310_get_features, @@ -1069,8 +1100,7 @@ static struct phy_driver mv3310_drivers[] = { module_phy_driver(mv3310_drivers); static struct mdio_device_id __maybe_unused mv3310_tbl[] = { - { MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_88X33X0_MASK }, - { MARVELL_PHY_ID_88X3340, MARVELL_PHY_ID_88X33X0_MASK }, + { MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_MASK }, { MARVELL_PHY_ID_88E2110, MARVELL_PHY_ID_MASK }, { }, }; diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index acee44b9db26..0f06c2287b52 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h @@ -22,14 +22,10 @@ #define MARVELL_PHY_ID_88E1545 0x01410ea0 #define MARVELL_PHY_ID_88E1548P 0x01410ec0 #define MARVELL_PHY_ID_88E3016 0x01410e60 +#define MARVELL_PHY_ID_88X3310 0x002b09a0 #define MARVELL_PHY_ID_88E2110 0x002b09b0 #define MARVELL_PHY_ID_88X2222 0x01410f10 -/* PHY IDs and mask for Alaska 10G PHYs */ -#define MARVELL_PHY_ID_88X33X0_MASK 0xfffffff8 -#define MARVELL_PHY_ID_88X3310 0x002b09a0 -#define MARVELL_PHY_ID_88X3340 0x002b09a8 - /* Marvel 88E1111 in Finisar SFP module with modified PHY ID */ #define MARVELL_PHY_ID_88E1111_FINISAR 0x01ff0cc0 -- cgit v1.2.3 From a1739c307a072e46473a2ba239eb60e6d711c96c Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 11 Jul 2021 15:31:47 -0700 Subject: net: hdlc: rename 'mod_init' & 'mod_exit' functions to be module-specific Rename module_init & module_exit functions that are named "mod_init" and "mod_exit" so that they are unique in both the System.map file and in initcall_debug output instead of showing up as almost anonymous "mod_init". This is helpful for debugging and in determining how long certain module_init calls take to execute. Signed-off-by: Randy Dunlap Cc: Krzysztof Halasa Cc: netdev@vger.kernel.org Cc: "David S. Miller" Cc: Jakub Kicinski Cc: Martin Schiller Cc: linux-x25@vger.kernel.org Signed-off-by: David S. 
Miller --- drivers/net/wan/hdlc_cisco.c | 8 ++++---- drivers/net/wan/hdlc_fr.c | 8 ++++---- drivers/net/wan/hdlc_ppp.c | 8 ++++---- drivers/net/wan/hdlc_raw.c | 8 ++++---- drivers/net/wan/hdlc_raw_eth.c | 8 ++++---- drivers/net/wan/hdlc_x25.c | 8 ++++---- 6 files changed, 24 insertions(+), 24 deletions(-) (limited to 'drivers') diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index 349ca18088e8..c54fdae950fb 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c @@ -364,19 +364,19 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr) return -EINVAL; } -static int __init mod_init(void) +static int __init hdlc_cisco_init(void) { register_hdlc_protocol(&proto); return 0; } -static void __exit mod_exit(void) +static void __exit hdlc_cisco_exit(void) { unregister_hdlc_protocol(&proto); } -module_init(mod_init); -module_exit(mod_exit); +module_init(hdlc_cisco_init); +module_exit(hdlc_cisco_exit); MODULE_AUTHOR("Krzysztof Halasa "); MODULE_DESCRIPTION("Cisco HDLC protocol support for generic HDLC"); diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 72250fe0a1df..25e3564ce118 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -1279,19 +1279,19 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr) return -EINVAL; } -static int __init mod_init(void) +static int __init hdlc_fr_init(void) { register_hdlc_protocol(&proto); return 0; } -static void __exit mod_exit(void) +static void __exit hdlc_fr_exit(void) { unregister_hdlc_protocol(&proto); } -module_init(mod_init); -module_exit(mod_exit); +module_init(hdlc_fr_init); +module_exit(hdlc_fr_exit); MODULE_AUTHOR("Krzysztof Halasa "); MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC"); diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index 834be2ae3e9e..b81ecf432a0c 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -705,20 +705,20 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr) return -EINVAL; } -static int __init mod_init(void) +static int __init hdlc_ppp_init(void) { skb_queue_head_init(&tx_queue); register_hdlc_protocol(&proto); return 0; } -static void __exit mod_exit(void) +static void __exit hdlc_ppp_exit(void) { unregister_hdlc_protocol(&proto); } -module_init(mod_init); -module_exit(mod_exit); +module_init(hdlc_ppp_init); +module_exit(hdlc_ppp_exit); MODULE_AUTHOR("Krzysztof Halasa "); MODULE_DESCRIPTION("PPP protocol support for generic HDLC"); diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c index 388fcc09b4dd..54d28496fefd 100644 --- a/drivers/net/wan/hdlc_raw.c +++ b/drivers/net/wan/hdlc_raw.c @@ -90,7 +90,7 @@ static int raw_ioctl(struct net_device *dev, struct ifreq *ifr) } -static int __init mod_init(void) +static int __init hdlc_raw_init(void) { register_hdlc_protocol(&proto); return 0; @@ -98,14 +98,14 @@ static int __init mod_init(void) -static void __exit mod_exit(void) +static void __exit hdlc_raw_exit(void) { unregister_hdlc_protocol(&proto); } -module_init(mod_init); -module_exit(mod_exit); +module_init(hdlc_raw_init); +module_exit(hdlc_raw_exit); MODULE_AUTHOR("Krzysztof Halasa "); MODULE_DESCRIPTION("Raw HDLC protocol support for generic HDLC"); diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c index c70a518b8b47..927596276a07 100644 --- a/drivers/net/wan/hdlc_raw_eth.c +++ b/drivers/net/wan/hdlc_raw_eth.c @@ -110,7 +110,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq 
*ifr) } -static int __init mod_init(void) +static int __init hdlc_eth_init(void) { register_hdlc_protocol(&proto); return 0; @@ -118,14 +118,14 @@ static int __init mod_init(void) -static void __exit mod_exit(void) +static void __exit hdlc_eth_exit(void) { unregister_hdlc_protocol(&proto); } -module_init(mod_init); -module_exit(mod_exit); +module_init(hdlc_eth_init); +module_exit(hdlc_eth_exit); MODULE_AUTHOR("Krzysztof Halasa "); MODULE_DESCRIPTION("Ethernet encapsulation support for generic HDLC"); diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index d2bf72bf3bd7..9b7ebf8bd85c 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c @@ -365,19 +365,19 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr) return -EINVAL; } -static int __init mod_init(void) +static int __init hdlc_x25_init(void) { register_hdlc_protocol(&proto); return 0; } -static void __exit mod_exit(void) +static void __exit hdlc_x25_exit(void) { unregister_hdlc_protocol(&proto); } -module_init(mod_init); -module_exit(mod_exit); +module_init(hdlc_x25_init); +module_exit(hdlc_x25_exit); MODULE_AUTHOR("Krzysztof Halasa "); MODULE_DESCRIPTION("X.25 protocol support for generic HDLC"); -- cgit v1.2.3 From 71ce9d92fc7089f287c3e95a981bdec7545a8588 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Mon, 12 Jul 2021 15:37:50 +0100 Subject: octeontx2-pf: Fix uninitialized boolean variable pps In the case where act->id is FLOW_ACTION_POLICE and also act->police.rate_bytes_ps > 0 or act->police.rate_pkt_ps is not > 0 the boolean variable pps contains an uninitialized value when function otx2_tc_act_set_police is called. Fix this by initializing pps to false. Addresses-Coverity: ("Uninitialized scalar variable)" Fixes: 68fbff68dbea ("octeontx2-pf: Add police action for TC flower") Signed-off-by: Colin Ian King Signed-off-by: David S. Miller --- drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c index 905fc02a7dfe..972b202b9884 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c @@ -288,7 +288,7 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic, struct otx2_nic *priv; u32 burst, mark = 0; u8 nr_police = 0; - bool pps; + bool pps = false; u64 rate; int i; -- cgit v1.2.3 From e56c6bbd98dc1cefb6f9c5d795fd29016e4f2fe7 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Tue, 13 Jul 2021 12:33:50 +0300 Subject: net: ocelot: fix switchdev objects synced for wrong netdev with LAG offload The point with a *dev and a *brport_dev is that when we have a LAG net device that is a bridge port, *dev is an ocelot net device and *brport_dev is the bonding/team net device. The ocelot net device beneath the LAG does not exist from the bridge's perspective, so we need to sync the switchdev objects belonging to the brport_dev and not to the dev. Fixes: e4bd44e89dcf ("net: ocelot: replay switchdev events when joining bridge") Signed-off-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/ethernet/mscc/ocelot_net.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 3e89e34f86d5..e9d260d84bf3 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -1298,6 +1298,7 @@ static int ocelot_netdevice_lag_leave(struct net_device *dev, } static int ocelot_netdevice_changeupper(struct net_device *dev, + struct net_device *brport_dev, struct netdev_notifier_changeupper_info *info) { struct netlink_ext_ack *extack; @@ -1307,11 +1308,11 @@ static int ocelot_netdevice_changeupper(struct net_device *dev, if (netif_is_bridge_master(info->upper_dev)) { if (info->linking) - err = ocelot_netdevice_bridge_join(dev, dev, + err = ocelot_netdevice_bridge_join(dev, brport_dev, info->upper_dev, extack); else - err = ocelot_netdevice_bridge_leave(dev, dev, + err = ocelot_netdevice_bridge_leave(dev, brport_dev, info->upper_dev); } if (netif_is_lag_master(info->upper_dev)) { @@ -1346,7 +1347,7 @@ ocelot_netdevice_lag_changeupper(struct net_device *dev, if (ocelot_port->bond != dev) return NOTIFY_OK; - err = ocelot_netdevice_changeupper(lower, info); + err = ocelot_netdevice_changeupper(lower, dev, info); if (err) return notifier_from_errno(err); } @@ -1385,7 +1386,7 @@ static int ocelot_netdevice_event(struct notifier_block *unused, struct netdev_notifier_changeupper_info *info = ptr; if (ocelot_netdevice_dev_check(dev)) - return ocelot_netdevice_changeupper(dev, info); + return ocelot_netdevice_changeupper(dev, dev, info); if (netif_is_lag_master(dev)) return ocelot_netdevice_lag_changeupper(dev, info); -- cgit v1.2.3 From b0b33b048dcfbd7da82c3cde4fab02751dfab4d6 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Tue, 13 Jul 2021 12:37:19 +0300 Subject: net: dsa: sja1105: fix address learning getting disabled on the CPU port In May 2019 when commit 640f763f98c2 ("net: dsa: sja1105: Add support for Spanning Tree Protocol") was introduced, the comment that "STP does not get called for the CPU port" was true. This changed after commit 0394a63acfe2 ("net: dsa: enable and disable all ports") in August 2019 and went largely unnoticed, because the sja1105_bridge_stp_state_set() method did nothing different compared to the static setup done by sja1105_init_mac_settings(). With the ability to turn address learning off introduced by the blamed commit, there is a new priv->learn_ena port mask in the driver. When sja1105_bridge_stp_state_set() gets called and we are in BR_STATE_LEARNING or later, address learning is enabled or not depending on priv->learn_ena & BIT(port). So what happens is that priv->learn_ena is not being set from anywhere for the CPU port, and the static configuration done by sja1105_init_mac_settings() is being overwritten. To solve this, acknowledge that the static configuration of STP state is no longer necessary because the STP state is being set by the DSA core now, but what is necessary is to set priv->learn_ena for the CPU port. Fixes: 4d9423549501 ("net: dsa: sja1105: offload bridge port flags to device") Signed-off-by: Vladimir Oltean Signed-off-by: David S. 
Miller --- drivers/net/dsa/sja1105/sja1105_main.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) (limited to 'drivers') diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index 4f0545605f6b..ced8c9cb29c2 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -122,14 +122,12 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv) for (i = 0; i < ds->num_ports; i++) { mac[i] = default_mac; - if (i == dsa_upstream_port(priv->ds, i)) { - /* STP doesn't get called for CPU port, so we need to - * set the I/O parameters statically. - */ - mac[i].dyn_learn = true; - mac[i].ingress = true; - mac[i].egress = true; - } + + /* Let sja1105_bridge_stp_state_set() keep address learning + * enabled for the CPU port. + */ + if (dsa_is_cpu_port(ds, i)) + priv->learn_ena |= BIT(i); } return 0; -- cgit v1.2.3 From deb7178eb940e2c5caca1b1db084a69b2e59b4c9 Mon Sep 17 00:00:00 2001 From: Pavel Skripkin Date: Tue, 13 Jul 2021 13:58:53 +0300 Subject: net: fddi: fix UAF in fza_probe fp is netdev private data and it cannot be used after free_netdev() call. Using fp after free_netdev() can cause UAF bug. Fix it by moving free_netdev() after error message. Fixes: 61414f5ec983 ("FDDI: defza: Add support for DEC FDDIcontroller 700 TURBOchannel adapter") Signed-off-by: Pavel Skripkin Signed-off-by: David S. Miller --- drivers/net/fddi/defza.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers') diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c index 14f07050b6b1..0de2c4552f5e 100644 --- a/drivers/net/fddi/defza.c +++ b/drivers/net/fddi/defza.c @@ -1504,9 +1504,8 @@ err_out_resource: release_mem_region(start, len); err_out_kfree: - free_netdev(dev); - pr_err("%s: initialization failure, aborting!\n", fp->name); + free_netdev(dev); return ret; } -- cgit v1.2.3 From f28100cb9c9645c07cbd22431278ac9492f6a01c Mon Sep 17 00:00:00 2001 From: Íñigo Huguet Date: Tue, 13 Jul 2021 16:21:27 +0200 Subject: sfc: fix lack of XDP TX queues - error XDP TX failed (-22) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes: e26ca4b53582 sfc: reduce the number of requested xdp ev queues The buggy commit intended to allocate less channels for XDP in order to be more unlikely to reach the limit of 32 channels of the driver. The idea was to use each IRQ/eventqeue for more XDP TX queues than before, calculating which is the maximum number of TX queues that one event queue can handle. For example, in EF10 each event queue could handle up to 8 queues, better than the 4 they were handling before the change. This way, it would have to allocate half of channels than before for XDP TX. The problem is that the TX queues are also contained inside the channel structs, and there are only 4 queues per channel. Reducing the number of channels means also reducing the number of queues, resulting in not having the desired number of 1 queue per CPU. This leads to getting errors on XDP_TX and XDP_REDIRECT if they're executed from a high numbered CPU, because there only exist queues for the low half of CPUs, actually. If XDP_TX/REDIRECT is executed in a low numbered CPU, the error doesn't happen. This is the error in the logs (repeated many times, even rate limited): sfc 0000:5e:00.0 ens3f0np0: XDP TX failed (-22) This errors happens in function efx_xdp_tx_buffers, where it expects to have a dedicated XDP TX queue per CPU. 
Reverting the change makes again more likely to reach the limit of 32 channels in machines with many CPUs. If this happen, no XDP_TX/REDIRECT will be possible at all, and we will have this log error messages: At interface probe: sfc 0000:5e:00.0: Insufficient resources for 12 XDP event queues (24 other channels, max 32) At every subsequent XDP_TX/REDIRECT failure, rate limited: sfc 0000:5e:00.0 ens3f0np0: XDP TX failed (-22) However, without reverting the change, it makes the user to think that everything is OK at probe time, but later it fails in an unpredictable way, depending on the CPU that handles the packet. It is better to restore the predictable behaviour. If the user sees the error message at probe time, he/she can try to configure the best way it fits his/her needs. At least, he/she will have 2 options: - Accept that XDP_TX/REDIRECT is not available (he/she may not need it) - Load sfc module with modparam 'rss_cpus' with a lower number, thus creating less normal RX queues/channels, letting more free resources for XDP, with some performance penalty. Anyway, let the calculation of maximum TX queues that can be handled by a single event queue, and use it only if it's less than the number of TX queues per channel. This doesn't happen in practice, but could happen if some constant values are tweaked in the future, such us EFX_MAX_TXQ_PER_CHANNEL, EFX_MAX_EVQ_SIZE or EFX_MAX_DMAQ_SIZE. Related mailing list thread: https://lore.kernel.org/bpf/20201215104327.2be76156@carbon/ Signed-off-by: Íñigo Huguet Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/efx_channels.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index a3ca406a3561..5b71f8a03a6d 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -152,6 +152,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, * maximum size. */ tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx); + tx_per_ev = min(tx_per_ev, EFX_MAX_TXQ_PER_CHANNEL); n_xdp_tx = num_possible_cpus(); n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev); @@ -181,7 +182,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, efx->xdp_tx_queue_count = 0; } else { efx->n_xdp_channels = n_xdp_ev; - efx->xdp_tx_per_channel = EFX_MAX_TXQ_PER_CHANNEL; + efx->xdp_tx_per_channel = tx_per_ev; efx->xdp_tx_queue_count = n_xdp_tx; n_channels += n_xdp_ev; netif_dbg(efx, drv, efx->net_dev, -- cgit v1.2.3 From 788bc000d4c2f25232db19ab3a0add0ba4e27671 Mon Sep 17 00:00:00 2001 From: Íñigo Huguet Date: Tue, 13 Jul 2021 16:21:28 +0200 Subject: sfc: ensure correct number of XDP queues MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Commit 99ba0ea616aa ("sfc: adjust efx->xdp_tx_queue_count with the real number of initialized queues") intended to fix a problem caused by a round up when calculating the number of XDP channels and queues. However, this was not the real problem. The real problem was that the number of XDP TX queues had been reduced to half in commit e26ca4b53582 ("sfc: reduce the number of requested xdp ev queues"), but the variable xdp_tx_queue_count had remained the same. Once the correct number of XDP TX queues is created again in the previous patch of this series, this also can be reverted since the error doesn't actually exist. 
Only in the case that there is a bug in the code we can have different values in xdp_queue_number and efx->xdp_tx_queue_count. Because of this, and per Edward Cree's suggestion, I add instead a WARN_ON to catch if it happens again in the future. Note that the number of allocated queues can be higher than the number of used ones due to the round up, as explained in the existing comment in the code. That's why we also have to stop increasing xdp_queue_number beyond efx->xdp_tx_queue_count. Signed-off-by: Íñigo Huguet Signed-off-by: David S. Miller --- drivers/net/ethernet/sfc/efx_channels.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index 5b71f8a03a6d..bb48a139dd15 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -892,18 +892,20 @@ int efx_set_channels(struct efx_nic *efx) if (efx_channel_is_xdp_tx(channel)) { efx_for_each_channel_tx_queue(tx_queue, channel) { tx_queue->queue = next_queue++; - netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n", - channel->channel, tx_queue->label, - xdp_queue_number, tx_queue->queue); + /* We may have a few left-over XDP TX * queues owing to xdp_tx_queue_count * not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL. * We still allocate and probe those * TXQs, but never use them. */ - if (xdp_queue_number < efx->xdp_tx_queue_count) + if (xdp_queue_number < efx->xdp_tx_queue_count) { + netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n", + channel->channel, tx_queue->label, + xdp_queue_number, tx_queue->queue); efx->xdp_tx_queues[xdp_queue_number] = tx_queue; - xdp_queue_number++; + xdp_queue_number++; + } } } else { efx_for_each_channel_tx_queue(tx_queue, channel) { @@ -915,8 +917,7 @@ int efx_set_channels(struct efx_nic *efx) } } } - if (xdp_queue_number) - efx->xdp_tx_queue_count = xdp_queue_number; + WARN_ON(xdp_queue_number != efx->xdp_tx_queue_count); rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels); if (rc) -- cgit v1.2.3 From d2a16bde77322fca6b6f36ebe19097a1c3c46f74 Mon Sep 17 00:00:00 2001 From: Íñigo Huguet Date: Tue, 13 Jul 2021 16:21:29 +0200 Subject: sfc: add logs explaining XDP_TX/REDIRECT is not available MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If it's not possible to allocate enough channels for XDP, XDP_TX and XDP_REDIRECT don't work. However, only a message saying that not enough channels were available was shown, but not saying what are the consequences in that case. The user didn't know if he/she can use XDP or not, if the performance is reduced, or what. Signed-off-by: Íñigo Huguet Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/efx_channels.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'drivers') diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c index bb48a139dd15..e5b0d795c301 100644 --- a/drivers/net/ethernet/sfc/efx_channels.c +++ b/drivers/net/ethernet/sfc/efx_channels.c @@ -170,6 +170,8 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, netif_err(efx, drv, efx->net_dev, "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n", n_xdp_ev, n_channels, max_channels); + netif_err(efx, drv, efx->net_dev, + "XDP_TX and XDP_REDIRECT will not work on this interface"); efx->n_xdp_channels = 0; efx->xdp_tx_per_channel = 0; efx->xdp_tx_queue_count = 0; @@ -177,6 +179,8 @@ static int efx_allocate_msix_channels(struct efx_nic *efx, netif_err(efx, drv, efx->net_dev, "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n", n_xdp_tx, n_channels, efx->max_vis); + netif_err(efx, drv, efx->net_dev, + "XDP_TX and XDP_REDIRECT will not work on this interface"); efx->n_xdp_channels = 0; efx->xdp_tx_per_channel = 0; efx->xdp_tx_queue_count = 0; -- cgit v1.2.3
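For readers following the sfc XDP channel-sizing discussion in the three sfc commits above, the arithmetic can be illustrated with a small standalone sketch. This is not driver code: the constant values and the 12-CPU count below are assumptions chosen only to reproduce the "8 queues per event queue vs. 4 queues per channel" ratio described in the commit message; the real limits live in the sfc headers and may differ.

#include <stdio.h>

/* Hypothetical constants mirroring the sfc sizing limits discussed above;
 * the real driver values (EFX_MAX_EVQ_SIZE, EFX_TXQ_MAX_ENT,
 * EFX_MAX_TXQ_PER_CHANNEL) may differ.
 */
#define MAX_EVQ_SIZE		32768u
#define TXQ_MAX_ENT		4096u
#define MAX_TXQ_PER_CHANNEL	4u

int main(void)
{
	unsigned int n_cpus = 12;	/* assumed number of possible CPUs */
	unsigned int tx_per_ev = MAX_EVQ_SIZE / TXQ_MAX_ENT;	/* 8 here */
	unsigned int n_xdp_ev;

	/* Without the clamp, 8 TX queues are assumed per event queue, but a
	 * channel only carries MAX_TXQ_PER_CHANNEL of them, so some CPUs end
	 * up with no usable XDP TX queue.
	 */
	n_xdp_ev = (n_cpus + tx_per_ev - 1) / tx_per_ev;
	printf("unclamped: %u event queues, %u usable TX queues for %u CPUs\n",
	       n_xdp_ev, n_xdp_ev * MAX_TXQ_PER_CHANNEL, n_cpus);

	/* With the fix, tx_per_ev is clamped to the per-channel queue count,
	 * so enough channels are created to give every CPU its own queue.
	 */
	if (tx_per_ev > MAX_TXQ_PER_CHANNEL)
		tx_per_ev = MAX_TXQ_PER_CHANNEL;
	n_xdp_ev = (n_cpus + tx_per_ev - 1) / tx_per_ev;
	printf("clamped:   %u event queues, %u usable TX queues for %u CPUs\n",
	       n_xdp_ev, n_xdp_ev * MAX_TXQ_PER_CHANNEL, n_cpus);

	return 0;
}

Built with any C compiler, the unclamped case covers only 8 of the 12 assumed CPUs with a TX queue, matching the "XDP TX failed (-22)" symptom on high-numbered CPUs, while the clamped case covers all of them.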