author     Linus Torvalds <torvalds@linux-foundation.org>  2023-08-10 22:37:24 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-08-10 22:37:24 +0300
commit     25aa0bebba72b318e71fe205bfd1236550cc9534 (patch)
tree       fd6740b6f837c4faf52102d24fc492a36edbb8a8 /drivers
parent     30813656c6b827947be024484d6da8b18e50c186 (diff)
parent     5e3d20617b055e725e785e0058426368269949f3 (diff)
Merge tag 'net-6.5-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from Jakub Kicinski:
 "Including fixes from netfilter, wireless and bpf.

  Still trending up in size but the good news is that the "current"
  regressions are resolved, AFAIK.

  We're getting weirdly many fixes for Wake-on-LAN and suspend/resume
  handling on embedded this week (most not merged yet), not sure why.
  But those are all for older bugs.

  Current release - regressions:

   - tls: set MSG_SPLICE_PAGES consistently when handing encrypted data
     over to TCP

  Current release - new code bugs:

   - eth: mlx5: correct IDs on VFs internal to the device (IPU)

  Previous releases - regressions:

   - phy: at803x: fix WoL support / reporting on AR8032

   - bonding: fix incorrect deletion of ETH_P_8021AD protocol VID from
     slaves, leading to BUG_ON()

   - tun: prevent tun_build_skb() from exceeding the packet size limit

   - wifi: rtw89: fix 8852AE disconnection caused by RX full flags

   - eth/PCI: enetc: fix probing after 6fffbc7ae137 ("PCI: Honor
     firmware's device disabled status"), keep PCI devices around even
     if they are disabled / not going to be probed to be able to apply
     quirks on them

   - eth: prestera: fix handling IPv4 routes with nexthop IDs

  Previous releases - always broken:

   - netfilter: re-work garbage collection to avoid races between
     user-facing API and timeouts

   - tunnels: fix generating ipv4 PMTU error on non-linear skbs

   - nexthop: fix infinite nexthop bucket dump when using maximum
     nexthop ID

   - wifi: nl80211: fix integer overflow in nl80211_parse_mbssid_elems()

  Misc:

   - unix: use consistent error code in SO_PEERPIDFD

   - ipv6: adjust ndisc_is_useropt() to include PREFIX_INFO, in prep
     for upcoming IETF RFC"

* tag 'net-6.5-rc6' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (94 commits)
  net: hns3: fix strscpy causing content truncation issue
  net: tls: set MSG_SPLICE_PAGES consistently
  ibmvnic: Ensure login failure recovery is safe from other resets
  ibmvnic: Do partial reset on login failure
  ibmvnic: Handle DMA unmapping of login buffs in release functions
  ibmvnic: Unmap DMA login rsp buffer on send login fail
  ibmvnic: Enforce stronger sanity checks on login response
  net: mana: Fix MANA VF unload when hardware is unresponsive
  netfilter: nf_tables: remove busy mark and gc batch API
  netfilter: nft_set_hash: mark set element as dead when deleting from packet path
  netfilter: nf_tables: adapt set backend to use GC transaction API
  netfilter: nf_tables: GC transaction API to avoid race with control plane
  selftests/bpf: Add sockmap test for redirecting partial skb data
  selftests/bpf: fix a CI failure caused by vsock sockmap test
  bpf, sockmap: Fix bug that strp_done cannot be called
  bpf, sockmap: Fix map type error in sock_map_del_link
  xsk: fix refcount underflow in error path
  ipv6: adjust ndisc_is_useropt() to also return true for PIO
  selftests: forwarding: bridge_mdb: Make test more robust
  selftests: forwarding: bridge_mdb_max: Fix failing test with old libnet
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/isdn/mISDN/dsp.h | 2
-rw-r--r--  drivers/isdn/mISDN/dsp_cmx.c | 2
-rw-r--r--  drivers/isdn/mISDN/dsp_core.c | 2
-rw-r--r--  drivers/net/bonding/bond_main.c | 4
-rw-r--r--  drivers/net/dsa/ocelot/felix.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/enetc/enetc_pf.c | 111
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 14
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 29
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 1
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 112
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_fdir.c | 11
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h | 4
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 34
-rw-r--r--  drivers/net/ethernet/marvell/prestera/prestera_router.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c | 116
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c | 2
-rw-r--r--  drivers/net/ethernet/microsoft/mana/mana_en.c | 37
-rw-r--r--  drivers/net/ethernet/pensando/ionic/ionic_lif.c | 23
-rw-r--r--  drivers/net/macsec.c | 28
-rw-r--r--  drivers/net/phy/at803x.c | 47
-rw-r--r--  drivers/net/tun.c | 2
-rw-r--r--  drivers/net/vxlan/vxlan_vnifilter.c | 11
-rw-r--r--  drivers/net/wireguard/allowedips.c | 8
-rw-r--r--  drivers/net/wireguard/selftest/allowedips.c | 16
-rw-r--r--  drivers/net/wireless/ath/ath12k/wmi.c | 3
-rw-r--r--  drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c | 5
-rw-r--r--  drivers/net/wireless/realtek/rtw89/mac.c | 2
-rw-r--r--  drivers/pci/bus.c | 4
-rw-r--r--  drivers/pci/of.c | 5
42 files changed, 501 insertions, 219 deletions
diff --git a/drivers/isdn/mISDN/dsp.h b/drivers/isdn/mISDN/dsp.h
index fa09d511a8ed..baf31258f5c9 100644
--- a/drivers/isdn/mISDN/dsp.h
+++ b/drivers/isdn/mISDN/dsp.h
@@ -247,7 +247,7 @@ extern void dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp);
extern int dsp_cmx_conf(struct dsp *dsp, u32 conf_id);
extern void dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb);
extern void dsp_cmx_hdlc(struct dsp *dsp, struct sk_buff *skb);
-extern void dsp_cmx_send(void *arg);
+extern void dsp_cmx_send(struct timer_list *arg);
extern void dsp_cmx_transmit(struct dsp *dsp, struct sk_buff *skb);
extern int dsp_cmx_del_conf_member(struct dsp *dsp);
extern int dsp_cmx_del_conf(struct dsp_conf *conf);
diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
index 357b87592eb4..61cb45c5d0d8 100644
--- a/drivers/isdn/mISDN/dsp_cmx.c
+++ b/drivers/isdn/mISDN/dsp_cmx.c
@@ -1614,7 +1614,7 @@ static u16 dsp_count; /* last sample count */
static int dsp_count_valid; /* if we have last sample count */
void
-dsp_cmx_send(void *arg)
+dsp_cmx_send(struct timer_list *arg)
{
struct dsp_conf *conf;
struct dsp_conf_member *member;
diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c
index 386084530c2f..fae95f166688 100644
--- a/drivers/isdn/mISDN/dsp_core.c
+++ b/drivers/isdn/mISDN/dsp_core.c
@@ -1195,7 +1195,7 @@ static int __init dsp_init(void)
}
/* set sample timer */
- timer_setup(&dsp_spl_tl, (void *)dsp_cmx_send, 0);
+ timer_setup(&dsp_spl_tl, dsp_cmx_send, 0);
dsp_spl_tl.expires = jiffies + dsp_tics;
dsp_spl_jiffies = dsp_spl_tl.expires;
add_timer(&dsp_spl_tl);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 484c9e3e5e82..447b06ea4fc9 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -5901,7 +5901,9 @@ void bond_setup(struct net_device *bond_dev)
bond_dev->hw_features = BOND_VLAN_FEATURES |
NETIF_F_HW_VLAN_CTAG_RX |
- NETIF_F_HW_VLAN_CTAG_FILTER;
+ NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_STAG_RX |
+ NETIF_F_HW_VLAN_STAG_FILTER;
bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
bond_dev->features |= bond_dev->hw_features;
diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c
index 8da46d284e35..bef879c6d500 100644
--- a/drivers/net/dsa/ocelot/felix.c
+++ b/drivers/net/dsa/ocelot/felix.c
@@ -1625,8 +1625,10 @@ static void felix_teardown(struct dsa_switch *ds)
struct felix *felix = ocelot_to_felix(ocelot);
struct dsa_port *dp;
+ rtnl_lock();
if (felix->tag_proto_ops)
felix->tag_proto_ops->teardown(ds);
+ rtnl_unlock();
dsa_switch_for_each_available_port(dp, ds)
ocelot_deinit_port(ocelot, dp->index);
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index 1416262d4296..e0a4cb7e3f50 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -1186,14 +1186,9 @@ static int enetc_init_port_rss_memory(struct enetc_si *si)
static int enetc_pf_register_with_ierb(struct pci_dev *pdev)
{
- struct device_node *node = pdev->dev.of_node;
struct platform_device *ierb_pdev;
struct device_node *ierb_node;
- /* Don't register with the IERB if the PF itself is disabled */
- if (!node || !of_device_is_available(node))
- return 0;
-
ierb_node = of_find_compatible_node(NULL, NULL,
"fsl,ls1028a-enetc-ierb");
if (!ierb_node || !of_device_is_available(ierb_node))
@@ -1208,56 +1203,81 @@ static int enetc_pf_register_with_ierb(struct pci_dev *pdev)
return enetc_ierb_register_pf(ierb_pdev, pdev);
}
-static int enetc_pf_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static struct enetc_si *enetc_psi_create(struct pci_dev *pdev)
{
- struct device_node *node = pdev->dev.of_node;
- struct enetc_ndev_priv *priv;
- struct net_device *ndev;
struct enetc_si *si;
- struct enetc_pf *pf;
int err;
- err = enetc_pf_register_with_ierb(pdev);
- if (err == -EPROBE_DEFER)
- return err;
- if (err)
- dev_warn(&pdev->dev,
- "Could not register with IERB driver: %pe, please update the device tree\n",
- ERR_PTR(err));
-
- err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(*pf));
- if (err)
- return dev_err_probe(&pdev->dev, err, "PCI probing failed\n");
+ err = enetc_pci_probe(pdev, KBUILD_MODNAME, sizeof(struct enetc_pf));
+ if (err) {
+ dev_err_probe(&pdev->dev, err, "PCI probing failed\n");
+ goto out;
+ }
si = pci_get_drvdata(pdev);
if (!si->hw.port || !si->hw.global) {
err = -ENODEV;
dev_err(&pdev->dev, "could not map PF space, probing a VF?\n");
- goto err_map_pf_space;
+ goto out_pci_remove;
}
err = enetc_setup_cbdr(&pdev->dev, &si->hw, ENETC_CBDR_DEFAULT_SIZE,
&si->cbd_ring);
if (err)
- goto err_setup_cbdr;
+ goto out_pci_remove;
err = enetc_init_port_rfs_memory(si);
if (err) {
dev_err(&pdev->dev, "Failed to initialize RFS memory\n");
- goto err_init_port_rfs;
+ goto out_teardown_cbdr;
}
err = enetc_init_port_rss_memory(si);
if (err) {
dev_err(&pdev->dev, "Failed to initialize RSS memory\n");
- goto err_init_port_rss;
+ goto out_teardown_cbdr;
}
- if (node && !of_device_is_available(node)) {
- dev_info(&pdev->dev, "device is disabled, skipping\n");
- err = -ENODEV;
- goto err_device_disabled;
+ return si;
+
+out_teardown_cbdr:
+ enetc_teardown_cbdr(&si->cbd_ring);
+out_pci_remove:
+ enetc_pci_remove(pdev);
+out:
+ return ERR_PTR(err);
+}
+
+static void enetc_psi_destroy(struct pci_dev *pdev)
+{
+ struct enetc_si *si = pci_get_drvdata(pdev);
+
+ enetc_teardown_cbdr(&si->cbd_ring);
+ enetc_pci_remove(pdev);
+}
+
+static int enetc_pf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct enetc_ndev_priv *priv;
+ struct net_device *ndev;
+ struct enetc_si *si;
+ struct enetc_pf *pf;
+ int err;
+
+ err = enetc_pf_register_with_ierb(pdev);
+ if (err == -EPROBE_DEFER)
+ return err;
+ if (err)
+ dev_warn(&pdev->dev,
+ "Could not register with IERB driver: %pe, please update the device tree\n",
+ ERR_PTR(err));
+
+ si = enetc_psi_create(pdev);
+ if (IS_ERR(si)) {
+ err = PTR_ERR(si);
+ goto err_psi_create;
}
pf = enetc_si_priv(si);
@@ -1339,15 +1359,9 @@ err_alloc_si_res:
si->ndev = NULL;
free_netdev(ndev);
err_alloc_netdev:
-err_init_port_rss:
-err_init_port_rfs:
-err_device_disabled:
err_setup_mac_addresses:
- enetc_teardown_cbdr(&si->cbd_ring);
-err_setup_cbdr:
-err_map_pf_space:
- enetc_pci_remove(pdev);
-
+ enetc_psi_destroy(pdev);
+err_psi_create:
return err;
}
@@ -1370,12 +1384,29 @@ static void enetc_pf_remove(struct pci_dev *pdev)
enetc_free_msix(priv);
enetc_free_si_resources(priv);
- enetc_teardown_cbdr(&si->cbd_ring);
free_netdev(si->ndev);
- enetc_pci_remove(pdev);
+ enetc_psi_destroy(pdev);
+}
+
+static void enetc_fixup_clear_rss_rfs(struct pci_dev *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct enetc_si *si;
+
+ /* Only apply quirk for disabled functions. For the ones
+ * that are enabled, enetc_pf_probe() will apply it.
+ */
+ if (node && of_device_is_available(node))
+ return;
+
+ si = enetc_psi_create(pdev);
+ if (si)
+ enetc_psi_destroy(pdev);
}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PF,
+ enetc_fixup_clear_rss_rfs);
static const struct pci_device_id enetc_pf_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PF) },
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index 52546f625c8b..f276b5ecb431 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -464,9 +464,9 @@ static void hns3_dbg_fill_content(char *content, u16 len,
if (result) {
if (item_len < strlen(result[i]))
break;
- strscpy(pos, result[i], strlen(result[i]));
+ memcpy(pos, result[i], strlen(result[i]));
} else {
- strscpy(pos, items[i].name, strlen(items[i].name));
+ memcpy(pos, items[i].name, strlen(items[i].name));
}
pos += item_len;
len -= item_len;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index 9f6890059666..b7b51e56b030 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -5854,6 +5854,9 @@ void hns3_external_lb_prepare(struct net_device *ndev, bool if_running)
if (!if_running)
return;
+ if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return;
+
netif_carrier_off(ndev);
netif_tx_disable(ndev);
@@ -5882,7 +5885,16 @@ void hns3_external_lb_restore(struct net_device *ndev, bool if_running)
if (!if_running)
return;
- hns3_nic_reset_all_ring(priv->ae_handle);
+ if (hns3_nic_resetting(ndev))
+ return;
+
+ if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return;
+
+ if (hns3_nic_reset_all_ring(priv->ae_handle))
+ return;
+
+ clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
for (i = 0; i < priv->vector_num; i++)
hns3_vector_enable(&priv->tqp_vector[i]);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index 409db2e70965..0fb2eaee3e8a 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -111,9 +111,9 @@ static void hclge_dbg_fill_content(char *content, u16 len,
if (result) {
if (item_len < strlen(result[i]))
break;
- strscpy(pos, result[i], strlen(result[i]));
+ memcpy(pos, result[i], strlen(result[i]));
} else {
- strscpy(pos, items[i].name, strlen(items[i].name));
+ memcpy(pos, items[i].name, strlen(items[i].name));
}
pos += item_len;
len -= item_len;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index bf675c15fbb9..a940e35aef29 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -72,6 +72,8 @@ static void hclge_restore_hw_table(struct hclge_dev *hdev);
static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
static void hclge_sync_fd_table(struct hclge_dev *hdev);
static void hclge_update_fec_stats(struct hclge_dev *hdev);
+static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
+ int wait_cnt);
static struct hnae3_ae_algo ae_algo;
@@ -7558,6 +7560,8 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
{
+#define HCLGE_LINK_STATUS_WAIT_CNT 3
+
struct hclge_desc desc;
struct hclge_config_mac_mode_cmd *req =
(struct hclge_config_mac_mode_cmd *)desc.data;
@@ -7582,9 +7586,15 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
- if (ret)
+ if (ret) {
dev_err(&hdev->pdev->dev,
"mac enable fail, ret =%d.\n", ret);
+ return;
+ }
+
+ if (!enable)
+ hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN,
+ HCLGE_LINK_STATUS_WAIT_CNT);
}
static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
@@ -7647,10 +7657,9 @@ static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
} while (++i < HCLGE_PHY_LINK_STATUS_NUM);
}
-static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
+static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
+ int wait_cnt)
{
-#define HCLGE_MAC_LINK_STATUS_NUM 100
-
int link_status;
int i = 0;
int ret;
@@ -7663,13 +7672,15 @@ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret)
return 0;
msleep(HCLGE_LINK_STATUS_MS);
- } while (++i < HCLGE_MAC_LINK_STATUS_NUM);
+ } while (++i < wait_cnt);
return -EBUSY;
}
static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
bool is_phy)
{
+#define HCLGE_MAC_LINK_STATUS_NUM 100
+
int link_ret;
link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
@@ -7677,7 +7688,8 @@ static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
if (is_phy)
hclge_phy_link_status_wait(hdev, link_ret);
- return hclge_mac_link_status_wait(hdev, link_ret);
+ return hclge_mac_link_status_wait(hdev, link_ret,
+ HCLGE_MAC_LINK_STATUS_NUM);
}
static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
@@ -10915,9 +10927,12 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev)
u32 rx_pause, tx_pause;
u8 flowctl;
- if (!phydev->link || !phydev->autoneg)
+ if (!phydev->link)
return 0;
+ if (!phydev->autoneg)
+ return hclge_mac_pause_setup_hw(hdev);
+
local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
if (phydev->pause)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index de509e5751a7..c58c31221762 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -1553,7 +1553,7 @@ static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
return 0;
}
-static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
+int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
{
bool tx_en, rx_en;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
index 45dcfef3f90c..53eec6df5194 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -245,6 +245,7 @@ int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
u8 pfc_bitmap);
int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
+int hclge_mac_pause_setup_hw(struct hclge_dev *hdev);
void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats);
int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 763d613adbcc..df76cdaddcfb 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -97,6 +97,8 @@ static int pending_scrq(struct ibmvnic_adapter *,
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int data);
+static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter);
+static inline void reinit_init_done(struct ibmvnic_adapter *adapter);
static void send_query_map(struct ibmvnic_adapter *adapter);
static int send_request_map(struct ibmvnic_adapter *, dma_addr_t, u32, u8);
static int send_request_unmap(struct ibmvnic_adapter *, u8);
@@ -114,6 +116,7 @@ static void ibmvnic_tx_scrq_clean_buffer(struct ibmvnic_adapter *adapter,
static void free_long_term_buff(struct ibmvnic_adapter *adapter,
struct ibmvnic_long_term_buff *ltb);
static void ibmvnic_disable_irqs(struct ibmvnic_adapter *adapter);
+static void flush_reset_queue(struct ibmvnic_adapter *adapter);
struct ibmvnic_stat {
char name[ETH_GSTRING_LEN];
@@ -1505,8 +1508,8 @@ static const char *adapter_state_to_string(enum vnic_state state)
static int ibmvnic_login(struct net_device *netdev)
{
+ unsigned long flags, timeout = msecs_to_jiffies(20000);
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
- unsigned long timeout = msecs_to_jiffies(20000);
int retry_count = 0;
int retries = 10;
bool retry;
@@ -1527,11 +1530,9 @@ static int ibmvnic_login(struct net_device *netdev)
if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) {
- netdev_warn(netdev, "Login timed out, retrying...\n");
- retry = true;
- adapter->init_done_rc = 0;
- retry_count++;
- continue;
+ netdev_warn(netdev, "Login timed out\n");
+ adapter->login_pending = false;
+ goto partial_reset;
}
if (adapter->init_done_rc == ABORTED) {
@@ -1573,10 +1574,69 @@ static int ibmvnic_login(struct net_device *netdev)
"SCRQ irq initialization failed\n");
return rc;
}
+ /* Default/timeout error handling, reset and start fresh */
} else if (adapter->init_done_rc) {
netdev_warn(netdev, "Adapter login failed, init_done_rc = %d\n",
adapter->init_done_rc);
- return -EIO;
+
+partial_reset:
+ /* adapter login failed, so free any CRQs or sub-CRQs
+ * and register again before attempting to login again.
+ * If we don't do this then the VIOS may think that
+ * we are already logged in and reject any subsequent
+ * attempts
+ */
+ netdev_warn(netdev,
+ "Freeing and re-registering CRQs before attempting to login again\n");
+ retry = true;
+ adapter->init_done_rc = 0;
+ release_sub_crqs(adapter, true);
+ /* Much of this is similar logic as ibmvnic_probe(),
+ * we are essentially re-initializing communication
+ * with the server. We really should not run any
+ * resets/failovers here because this is already a form
+ * of reset and we do not want parallel resets occurring
+ */
+ do {
+ reinit_init_done(adapter);
+ /* Clear any failovers we got in the previous
+ * pass since we are re-initializing the CRQ
+ */
+ adapter->failover_pending = false;
+ release_crq_queue(adapter);
+ /* If we don't sleep here then we risk an
+ * unnecessary failover event from the VIOS.
+ * This is a known VIOS issue caused by a vnic
+ * device freeing and registering a CRQ too
+ * quickly.
+ */
+ msleep(1500);
+ /* Avoid any resets, since we are currently
+ * resetting.
+ */
+ spin_lock_irqsave(&adapter->rwi_lock, flags);
+ flush_reset_queue(adapter);
+ spin_unlock_irqrestore(&adapter->rwi_lock,
+ flags);
+
+ rc = init_crq_queue(adapter);
+ if (rc) {
+ netdev_err(netdev, "login recovery: init CRQ failed %d\n",
+ rc);
+ return -EIO;
+ }
+
+ rc = ibmvnic_reset_init(adapter, false);
+ if (rc)
+ netdev_err(netdev, "login recovery: Reset init failed %d\n",
+ rc);
+ /* IBMVNIC_CRQ_INIT will return EAGAIN if it
+ * fails, since ibmvnic_reset_init will free
+ * irq's in failure, we won't be able to receive
+ * new CRQs so we need to keep trying. probe()
+ * handles this similarly.
+ */
+ } while (rc == -EAGAIN && retry_count++ < retries);
}
} while (retry);
@@ -1588,12 +1648,22 @@ static int ibmvnic_login(struct net_device *netdev)
static void release_login_buffer(struct ibmvnic_adapter *adapter)
{
+ if (!adapter->login_buf)
+ return;
+
+ dma_unmap_single(&adapter->vdev->dev, adapter->login_buf_token,
+ adapter->login_buf_sz, DMA_TO_DEVICE);
kfree(adapter->login_buf);
adapter->login_buf = NULL;
}
static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
{
+ if (!adapter->login_rsp_buf)
+ return;
+
+ dma_unmap_single(&adapter->vdev->dev, adapter->login_rsp_buf_token,
+ adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
kfree(adapter->login_rsp_buf);
adapter->login_rsp_buf = NULL;
}
@@ -4830,11 +4900,14 @@ static int send_login(struct ibmvnic_adapter *adapter)
if (rc) {
adapter->login_pending = false;
netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc);
- goto buf_rsp_map_failed;
+ goto buf_send_failed;
}
return 0;
+buf_send_failed:
+ dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
+ DMA_FROM_DEVICE);
buf_rsp_map_failed:
kfree(login_rsp_buffer);
adapter->login_rsp_buf = NULL;
@@ -5396,6 +5469,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
int num_tx_pools;
int num_rx_pools;
u64 *size_array;
+ u32 rsp_len;
int i;
/* CHECK: Test/set of login_pending does not need to be atomic
@@ -5407,11 +5481,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
}
adapter->login_pending = false;
- dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
- DMA_TO_DEVICE);
- dma_unmap_single(dev, adapter->login_rsp_buf_token,
- adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
-
/* If the number of queues requested can't be allocated by the
* server, the login response will return with code 1. We will need
* to resend the login buffer with fewer queues requested.
@@ -5447,6 +5516,23 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
ibmvnic_reset(adapter, VNIC_RESET_FATAL);
return -EIO;
}
+
+ rsp_len = be32_to_cpu(login_rsp->len);
+ if (be32_to_cpu(login->login_rsp_len) < rsp_len ||
+ rsp_len <= be32_to_cpu(login_rsp->off_txsubm_subcrqs) ||
+ rsp_len <= be32_to_cpu(login_rsp->off_rxadd_subcrqs) ||
+ rsp_len <= be32_to_cpu(login_rsp->off_rxadd_buff_size) ||
+ rsp_len <= be32_to_cpu(login_rsp->off_supp_tx_desc)) {
+ /* This can happen if a login request times out and there are
+ * 2 outstanding login requests sent, the LOGIN_RSP crq
+ * could have been for the older login request. So we are
+ * parsing the newer response buffer which may be incomplete
+ */
+ dev_err(dev, "FATAL: Login rsp offsets/lengths invalid\n");
+ ibmvnic_reset(adapter, VNIC_RESET_FATAL);
+ return -EIO;
+ }
+
size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size));
/* variable buffer sizes are not supported, so just read the
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index 2f47cfa7f06e..460ca561819a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -1401,14 +1401,15 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx
if (fsp->flow_type & FLOW_MAC_EXT)
return -EINVAL;
+ spin_lock_bh(&adapter->fdir_fltr_lock);
if (adapter->fdir_active_fltr >= IAVF_MAX_FDIR_FILTERS) {
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
dev_err(&adapter->pdev->dev,
"Unable to add Flow Director filter because VF reached the limit of max allowed filters (%u)\n",
IAVF_MAX_FDIR_FILTERS);
return -ENOSPC;
}
- spin_lock_bh(&adapter->fdir_fltr_lock);
if (iavf_find_fdir_fltr_by_loc(adapter, fsp->location)) {
dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n");
spin_unlock_bh(&adapter->fdir_fltr_lock);
@@ -1781,7 +1782,9 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
case ETHTOOL_GRXCLSRLCNT:
if (!FDIR_FLTR_SUPPORT(adapter))
break;
+ spin_lock_bh(&adapter->fdir_fltr_lock);
cmd->rule_cnt = adapter->fdir_active_fltr;
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
cmd->data = IAVF_MAX_FDIR_FILTERS;
ret = 0;
break;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
index 6146203efd84..505e82ebafe4 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c
@@ -722,7 +722,9 @@ void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *f
bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr)
{
struct iavf_fdir_fltr *tmp;
+ bool ret = false;
+ spin_lock_bh(&adapter->fdir_fltr_lock);
list_for_each_entry(tmp, &adapter->fdir_list_head, list) {
if (tmp->flow_type != fltr->flow_type)
continue;
@@ -732,11 +734,14 @@ bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *
!memcmp(&tmp->ip_data, &fltr->ip_data,
sizeof(fltr->ip_data)) &&
!memcmp(&tmp->ext_data, &fltr->ext_data,
- sizeof(fltr->ext_data)))
- return true;
+ sizeof(fltr->ext_data))) {
+ ret = true;
+ break;
+ }
}
+ spin_unlock_bh(&adapter->fdir_fltr_lock);
- return false;
+ return ret;
}
/**
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 9db384f66a8e..38901d2a4680 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -195,6 +195,10 @@ struct igc_adapter {
u32 qbv_config_change_errors;
bool qbv_transition;
unsigned int qbv_count;
+ /* Access to oper_gate_closed, admin_gate_closed and qbv_transition
+ * are protected by the qbv_tx_lock.
+ */
+ spinlock_t qbv_tx_lock;
/* OS defined structs */
struct pci_dev *pdev;
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index bdeb36790d77..6f557e843e49 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -4801,6 +4801,7 @@ static int igc_sw_init(struct igc_adapter *adapter)
adapter->nfc_rule_count = 0;
spin_lock_init(&adapter->stats64_lock);
+ spin_lock_init(&adapter->qbv_tx_lock);
/* Assume MSI-X interrupts, will be checked during IRQ allocation */
adapter->flags |= IGC_FLAG_HAS_MSIX;
@@ -6119,15 +6120,15 @@ static int igc_tsn_enable_launchtime(struct igc_adapter *adapter,
return igc_tsn_offload_apply(adapter);
}
-static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
+static int igc_qbv_clear_schedule(struct igc_adapter *adapter)
{
+ unsigned long flags;
int i;
adapter->base_time = 0;
adapter->cycle_time = NSEC_PER_SEC;
adapter->taprio_offload_enable = false;
adapter->qbv_config_change_errors = 0;
- adapter->qbv_transition = false;
adapter->qbv_count = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -6136,10 +6137,28 @@ static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
ring->start_time = 0;
ring->end_time = NSEC_PER_SEC;
ring->max_sdu = 0;
+ }
+
+ spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
+
+ adapter->qbv_transition = false;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct igc_ring *ring = adapter->tx_ring[i];
+
ring->oper_gate_closed = false;
ring->admin_gate_closed = false;
}
+ spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags);
+
+ return 0;
+}
+
+static int igc_tsn_clear_schedule(struct igc_adapter *adapter)
+{
+ igc_qbv_clear_schedule(adapter);
+
return 0;
}
@@ -6150,6 +6169,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
struct igc_hw *hw = &adapter->hw;
u32 start_time = 0, end_time = 0;
struct timespec64 now;
+ unsigned long flags;
size_t n;
int i;
@@ -6217,6 +6237,8 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
start_time += e->interval;
}
+ spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
+
/* Check whether a queue gets configured.
* If not, set the start and end time to be end time.
*/
@@ -6241,6 +6263,8 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter,
}
}
+ spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags);
+
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *ring = adapter->tx_ring[i];
struct net_device *dev = adapter->netdev;
@@ -6619,8 +6643,11 @@ static enum hrtimer_restart igc_qbv_scheduling_timer(struct hrtimer *timer)
{
struct igc_adapter *adapter = container_of(timer, struct igc_adapter,
hrtimer);
+ unsigned long flags;
unsigned int i;
+ spin_lock_irqsave(&adapter->qbv_tx_lock, flags);
+
adapter->qbv_transition = true;
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igc_ring *tx_ring = adapter->tx_ring[i];
@@ -6633,6 +6660,9 @@ static enum hrtimer_restart igc_qbv_scheduling_timer(struct hrtimer *timer)
}
}
adapter->qbv_transition = false;
+
+ spin_unlock_irqrestore(&adapter->qbv_tx_lock, flags);
+
return HRTIMER_NORESTART;
}
diff --git a/drivers/net/ethernet/marvell/prestera/prestera_router.c b/drivers/net/ethernet/marvell/prestera/prestera_router.c
index a9a1028cb17b..de317179a7dc 100644
--- a/drivers/net/ethernet/marvell/prestera/prestera_router.c
+++ b/drivers/net/ethernet/marvell/prestera/prestera_router.c
@@ -166,11 +166,11 @@ prestera_util_neigh2nc_key(struct prestera_switch *sw, struct neighbour *n,
static bool __prestera_fi_is_direct(struct fib_info *fi)
{
- struct fib_nh *fib_nh;
+ struct fib_nh_common *fib_nhc;
if (fib_info_num_path(fi) == 1) {
- fib_nh = fib_info_nh(fi, 0);
- if (fib_nh->fib_nh_gw_family == AF_UNSPEC)
+ fib_nhc = fib_info_nhc(fi, 0);
+ if (fib_nhc->nhc_gw_family == AF_UNSPEC)
return true;
}
@@ -261,7 +261,7 @@ static bool
__prestera_util_kern_n_is_reachable_v4(u32 tb_id, __be32 *addr,
struct net_device *dev)
{
- struct fib_nh *fib_nh;
+ struct fib_nh_common *fib_nhc;
struct fib_result res;
bool reachable;
@@ -269,8 +269,8 @@ __prestera_util_kern_n_is_reachable_v4(u32 tb_id, __be32 *addr,
if (!prestera_util_kern_get_route(&res, tb_id, addr))
if (prestera_fi_is_direct(res.fi)) {
- fib_nh = fib_info_nh(res.fi, 0);
- if (dev == fib_nh->fib_nh_dev)
+ fib_nhc = fib_info_nhc(res.fi, 0);
+ if (dev == fib_nhc->nhc_dev)
reachable = true;
}
@@ -324,7 +324,7 @@ prestera_kern_fib_info_nhc(struct fib_notifier_info *info, int n)
if (info->family == AF_INET) {
fen4_info = container_of(info, struct fib_entry_notifier_info,
info);
- return &fib_info_nh(fen4_info->fi, n)->nh_common;
+ return fib_info_nhc(fen4_info->fi, n);
} else if (info->family == AF_INET6) {
fen6_info = container_of(info, struct fib6_entry_notifier_info,
info);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
index b0128336ff01..e869c65d8e90 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/reporter_vnic.c
@@ -2,6 +2,7 @@
/* Copyright (c) 2023, NVIDIA CORPORATION & AFFILIATES. */
#include "reporter_vnic.h"
+#include "en_stats.h"
#include "devlink.h"
#define VNIC_ENV_GET64(vnic_env_stats, c) \
@@ -36,55 +37,72 @@ int mlx5_reporter_vnic_diagnose_counters(struct mlx5_core_dev *dev,
if (err)
return err;
- err = devlink_fmsg_u64_pair_put(fmsg, "total_error_queues",
- VNIC_ENV_GET64(&vnic, total_error_queues));
- if (err)
- return err;
-
- err = devlink_fmsg_u64_pair_put(fmsg, "send_queue_priority_update_flow",
- VNIC_ENV_GET64(&vnic, send_queue_priority_update_flow));
- if (err)
- return err;
-
- err = devlink_fmsg_u64_pair_put(fmsg, "comp_eq_overrun",
- VNIC_ENV_GET64(&vnic, comp_eq_overrun));
- if (err)
- return err;
-
- err = devlink_fmsg_u64_pair_put(fmsg, "async_eq_overrun",
- VNIC_ENV_GET64(&vnic, async_eq_overrun));
- if (err)
- return err;
-
- err = devlink_fmsg_u64_pair_put(fmsg, "cq_overrun",
- VNIC_ENV_GET64(&vnic, cq_overrun));
- if (err)
- return err;
-
- err = devlink_fmsg_u64_pair_put(fmsg, "invalid_command",
- VNIC_ENV_GET64(&vnic, invalid_command));
- if (err)
- return err;
-
- err = devlink_fmsg_u64_pair_put(fmsg, "quota_exceeded_command",
- VNIC_ENV_GET64(&vnic, quota_exceeded_command));
- if (err)
- return err;
-
- err = devlink_fmsg_u64_pair_put(fmsg, "nic_receive_steering_discard",
- VNIC_ENV_GET64(&vnic, nic_receive_steering_discard));
- if (err)
- return err;
-
- err = devlink_fmsg_u64_pair_put(fmsg, "generated_pkt_steering_fail",
- VNIC_ENV_GET64(&vnic, generated_pkt_steering_fail));
- if (err)
- return err;
-
- err = devlink_fmsg_u64_pair_put(fmsg, "handled_pkt_steering_fail",
- VNIC_ENV_GET64(&vnic, handled_pkt_steering_fail));
- if (err)
- return err;
+ if (MLX5_CAP_GEN(dev, vnic_env_queue_counters)) {
+ err = devlink_fmsg_u32_pair_put(fmsg, "total_error_queues",
+ VNIC_ENV_GET(&vnic, total_error_queues));
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "send_queue_priority_update_flow",
+ VNIC_ENV_GET(&vnic,
+ send_queue_priority_update_flow));
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, eq_overrun_count)) {
+ err = devlink_fmsg_u32_pair_put(fmsg, "comp_eq_overrun",
+ VNIC_ENV_GET(&vnic, comp_eq_overrun));
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u32_pair_put(fmsg, "async_eq_overrun",
+ VNIC_ENV_GET(&vnic, async_eq_overrun));
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, vnic_env_cq_overrun)) {
+ err = devlink_fmsg_u32_pair_put(fmsg, "cq_overrun",
+ VNIC_ENV_GET(&vnic, cq_overrun));
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, invalid_command_count)) {
+ err = devlink_fmsg_u32_pair_put(fmsg, "invalid_command",
+ VNIC_ENV_GET(&vnic, invalid_command));
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, quota_exceeded_count)) {
+ err = devlink_fmsg_u32_pair_put(fmsg, "quota_exceeded_command",
+ VNIC_ENV_GET(&vnic, quota_exceeded_command));
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, nic_receive_steering_discard)) {
+ err = devlink_fmsg_u64_pair_put(fmsg, "nic_receive_steering_discard",
+ VNIC_ENV_GET64(&vnic,
+ nic_receive_steering_discard));
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, vnic_env_cnt_steering_fail)) {
+ err = devlink_fmsg_u64_pair_put(fmsg, "generated_pkt_steering_fail",
+ VNIC_ENV_GET64(&vnic,
+ generated_pkt_steering_fail));
+ if (err)
+ return err;
+
+ err = devlink_fmsg_u64_pair_put(fmsg, "handled_pkt_steering_fail",
+ VNIC_ENV_GET64(&vnic, handled_pkt_steering_fail));
+ if (err)
+ return err;
+ }
err = devlink_fmsg_obj_nest_end(fmsg);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
index 0c88cf47af01..1730f6a716ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c
@@ -1461,10 +1461,12 @@ static void mlx5e_invalidate_encap(struct mlx5e_priv *priv,
attr = mlx5e_tc_get_encap_attr(flow);
esw_attr = attr->esw_attr;
- if (flow_flag_test(flow, SLOW))
+ if (flow_flag_test(flow, SLOW)) {
mlx5e_tc_unoffload_from_slow_path(esw, flow);
- else
+ } else {
mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr);
+ mlx5e_tc_unoffload_flow_post_acts(flow);
+ }
mlx5e_tc_detach_mod_hdr(priv, flow, attr);
attr->modify_hdr = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 1c820119e438..c27df14df145 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -5266,6 +5266,7 @@ void mlx5e_destroy_q_counters(struct mlx5e_priv *priv)
static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
struct net_device *netdev)
{
+ const bool take_rtnl = netdev->reg_state == NETREG_REGISTERED;
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_flow_steering *fs;
int err;
@@ -5294,9 +5295,19 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
mlx5e_health_create_reporters(priv);
+
+ /* If netdev is already registered (e.g. move from uplink to nic profile),
+ * RTNL lock must be held before triggering netdev notifiers.
+ */
+ if (take_rtnl)
+ rtnl_lock();
+
/* update XDP supported features */
mlx5e_set_xdp_feature(netdev);
+ if (take_rtnl)
+ rtnl_unlock();
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 92377632f9e0..31708d5aa608 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1943,9 +1943,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_flow_attr *attr = flow->attr;
- struct mlx5_esw_flow_attr *esw_attr;
- esw_attr = attr->esw_attr;
mlx5e_put_flow_tunnel_id(flow);
remove_unready_flow(flow);
@@ -1966,12 +1964,6 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);
- if (esw_attr->int_port)
- mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->int_port);
-
- if (esw_attr->dest_int_port)
- mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->dest_int_port);
-
if (flow_flag_test(flow, L3_TO_L2_DECAP))
mlx5e_detach_decap(priv, flow);
@@ -4268,6 +4260,7 @@ static void
mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
{
struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow);
+ struct mlx5_esw_flow_attr *esw_attr;
if (!attr)
return;
@@ -4285,6 +4278,18 @@ mlx5_free_flow_attr_actions(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *a
mlx5e_tc_detach_mod_hdr(flow->priv, flow, attr);
}
+ if (mlx5e_is_eswitch_flow(flow)) {
+ esw_attr = attr->esw_attr;
+
+ if (esw_attr->int_port)
+ mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(flow->priv),
+ esw_attr->int_port);
+
+ if (esw_attr->dest_int_port)
+ mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(flow->priv),
+ esw_attr->dest_int_port);
+ }
+
mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr);
free_branch_attr(flow, attr->branch_true);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
index af779c700278..fdf2be548e85 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c
@@ -60,7 +60,7 @@ static struct devlink_port *mlx5_esw_dl_port_alloc(struct mlx5_eswitch *esw, u16
} else if (mlx5_core_is_ec_vf_vport(esw->dev, vport_num)) {
memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
- devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum,
+ devlink_port_attrs_pci_vf_set(dl_port, 0, pfnum,
vport_num - 1, false);
}
return dl_port;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
index d3a3fe4ce670..7d9bbb494d95 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c
@@ -574,7 +574,7 @@ static int __mlx5_lag_modify_definers_destinations(struct mlx5_lag *ldev,
for (i = 0; i < ldev->ports; i++) {
for (j = 0; j < ldev->buckets; j++) {
idx = i * ldev->buckets + j;
- if (ldev->v2p_map[i] == ports[i])
+ if (ldev->v2p_map[idx] == ports[idx])
continue;
dest.vport.vhca_id = MLX5_CAP_GEN(ldev->pf[ports[idx] - 1].dev,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 973babfaff25..377372f0578a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -227,10 +227,15 @@ static void mlx5_timestamp_overflow(struct work_struct *work)
clock = container_of(timer, struct mlx5_clock, timer);
mdev = container_of(clock, struct mlx5_core_dev, clock);
+ if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
+ goto out;
+
write_seqlock_irqsave(&clock->lock, flags);
timecounter_read(&timer->tc);
mlx5_update_clock_info_page(mdev);
write_sequnlock_irqrestore(&clock->lock, flags);
+
+out:
schedule_delayed_work(&timer->overflow_work, timer->overflow_period);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index f42abc2ea73c..72ae560a1c68 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1989,7 +1989,7 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
mlx5_enter_error_state(dev, false);
mlx5_error_sw_reset(dev);
- mlx5_unload_one(dev, true);
+ mlx5_unload_one(dev, false);
mlx5_drain_health_wq(dev);
mlx5_pci_disable_device(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index c4be257c043d..682d3dc00dd1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -361,7 +361,7 @@ static inline bool mlx5_core_is_ec_vf_vport(const struct mlx5_core_dev *dev, u16
static inline int mlx5_vport_to_func_id(const struct mlx5_core_dev *dev, u16 vport, bool ec_vf_func)
{
- return ec_vf_func ? vport - mlx5_core_ec_vf_vport_base(dev)
+ return ec_vf_func ? vport - mlx5_core_ec_vf_vport_base(dev) + 1
: vport;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 4e42a3b9b8ee..a2fc937d5461 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -285,8 +285,7 @@ static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev)
host_total_vfs = MLX5_GET(query_esw_functions_out, out,
host_params_context.host_total_vfs);
kvfree(out);
- if (host_total_vfs)
- return host_total_vfs;
+ return host_total_vfs;
}
done:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c
index d6947fe13d56..8ca534ef5d03 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ptrn.c
@@ -82,7 +82,7 @@ dr_ptrn_alloc_pattern(struct mlx5dr_ptrn_mgr *mgr,
u32 chunk_size;
u32 index;
- chunk_size = ilog2(num_of_actions);
+ chunk_size = ilog2(roundup_pow_of_two(num_of_actions));
/* HW modify action index granularity is at least 64B */
chunk_size = max_t(u32, chunk_size, DR_CHUNK_SIZE_8);
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index a499e460594b..c2ad0921e893 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -8,6 +8,7 @@
#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/mm.h>
+#include <linux/pci.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
@@ -2345,9 +2346,12 @@ int mana_attach(struct net_device *ndev)
static int mana_dealloc_queues(struct net_device *ndev)
{
struct mana_port_context *apc = netdev_priv(ndev);
+ unsigned long timeout = jiffies + 120 * HZ;
struct gdma_dev *gd = apc->ac->gdma_dev;
struct mana_txq *txq;
+ struct sk_buff *skb;
int i, err;
+ u32 tsleep;
if (apc->port_is_up)
return -EINVAL;
@@ -2363,15 +2367,40 @@ static int mana_dealloc_queues(struct net_device *ndev)
* to false, but it doesn't matter since mana_start_xmit() drops any
* new packets due to apc->port_is_up being false.
*
- * Drain all the in-flight TX packets
+ * Drain all the in-flight TX packets.
+ * A timeout of 120 seconds for all the queues is used.
+ * This will break the while loop when h/w is not responding.
+ * This value of 120 has been decided here considering max
+ * number of queues.
*/
+
for (i = 0; i < apc->num_queues; i++) {
txq = &apc->tx_qp[i].txq;
-
- while (atomic_read(&txq->pending_sends) > 0)
- usleep_range(1000, 2000);
+ tsleep = 1000;
+ while (atomic_read(&txq->pending_sends) > 0 &&
+ time_before(jiffies, timeout)) {
+ usleep_range(tsleep, tsleep + 1000);
+ tsleep <<= 1;
+ }
+ if (atomic_read(&txq->pending_sends)) {
+ err = pcie_flr(to_pci_dev(gd->gdma_context->dev));
+ if (err) {
+ netdev_err(ndev, "flr failed %d with %d pkts pending in txq %u\n",
+ err, atomic_read(&txq->pending_sends),
+ txq->gdma_txq_id);
+ }
+ break;
+ }
}
+ for (i = 0; i < apc->num_queues; i++) {
+ txq = &apc->tx_qp[i].txq;
+ while ((skb = skb_dequeue(&txq->pending_skbs))) {
+ mana_unmap_skb(skb, apc);
+ dev_kfree_skb_any(skb);
+ }
+ atomic_set(&txq->pending_sends, 0);
+ }
/* We're 100% sure the queues can no longer be woken up, because
* we're sure now mana_poll_tx_cq() can't be running.
*/
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
index 612b0015dc43..432fb93aa801 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
@@ -1817,6 +1817,7 @@ static int ionic_change_mtu(struct net_device *netdev, int new_mtu)
static void ionic_tx_timeout_work(struct work_struct *ws)
{
struct ionic_lif *lif = container_of(ws, struct ionic_lif, tx_timeout_work);
+ int err;
if (test_bit(IONIC_LIF_F_FW_RESET, lif->state))
return;
@@ -1829,8 +1830,11 @@ static void ionic_tx_timeout_work(struct work_struct *ws)
mutex_lock(&lif->queue_lock);
ionic_stop_queues_reconfig(lif);
- ionic_start_queues_reconfig(lif);
+ err = ionic_start_queues_reconfig(lif);
mutex_unlock(&lif->queue_lock);
+
+ if (err)
+ dev_err(lif->ionic->dev, "%s: Restarting queues failed\n", __func__);
}
static void ionic_tx_timeout(struct net_device *netdev, unsigned int txqueue)
@@ -2800,17 +2804,22 @@ static int ionic_cmb_reconfig(struct ionic_lif *lif,
if (err) {
dev_err(lif->ionic->dev,
"CMB restore failed: %d\n", err);
- goto errout;
+ goto err_out;
}
}
- ionic_start_queues_reconfig(lif);
- } else {
- /* This was detached in ionic_stop_queues_reconfig() */
- netif_device_attach(lif->netdev);
+ err = ionic_start_queues_reconfig(lif);
+ if (err) {
+ dev_err(lif->ionic->dev,
+ "CMB reconfig failed: %d\n", err);
+ goto err_out;
+ }
}
-errout:
+err_out:
+ /* This was detached in ionic_stop_queues_reconfig() */
+ netif_device_attach(lif->netdev);
+
return err;
}
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 984dfa5d6c11..144ec756c796 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -743,7 +743,7 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsLate++;
u64_stats_update_end(&rxsc_stats->syncp);
- secy->netdev->stats.rx_dropped++;
+ DEV_STATS_INC(secy->netdev, rx_dropped);
return false;
}
@@ -767,7 +767,7 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u
rxsc_stats->stats.InPktsNotValid++;
u64_stats_update_end(&rxsc_stats->syncp);
this_cpu_inc(rx_sa->stats->InPktsNotValid);
- secy->netdev->stats.rx_errors++;
+ DEV_STATS_INC(secy->netdev, rx_errors);
return false;
}
@@ -1069,7 +1069,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoTag++;
u64_stats_update_end(&secy_stats->syncp);
- macsec->secy.netdev->stats.rx_dropped++;
+ DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
continue;
}
@@ -1179,7 +1179,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsBadTag++;
u64_stats_update_end(&secy_stats->syncp);
- secy->netdev->stats.rx_errors++;
+ DEV_STATS_INC(secy->netdev, rx_errors);
goto drop_nosa;
}
@@ -1196,7 +1196,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsNotUsingSA++;
u64_stats_update_end(&rxsc_stats->syncp);
- secy->netdev->stats.rx_errors++;
+ DEV_STATS_INC(secy->netdev, rx_errors);
if (active_rx_sa)
this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
goto drop_nosa;
@@ -1230,7 +1230,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
u64_stats_update_begin(&rxsc_stats->syncp);
rxsc_stats->stats.InPktsLate++;
u64_stats_update_end(&rxsc_stats->syncp);
- macsec->secy.netdev->stats.rx_dropped++;
+ DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
goto drop;
}
}
@@ -1271,7 +1271,7 @@ deliver:
if (ret == NET_RX_SUCCESS)
count_rx(dev, len);
else
- macsec->secy.netdev->stats.rx_dropped++;
+ DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
rcu_read_unlock();
@@ -1308,7 +1308,7 @@ nosci:
u64_stats_update_begin(&secy_stats->syncp);
secy_stats->stats.InPktsNoSCI++;
u64_stats_update_end(&secy_stats->syncp);
- macsec->secy.netdev->stats.rx_errors++;
+ DEV_STATS_INC(macsec->secy.netdev, rx_errors);
continue;
}
@@ -1327,7 +1327,7 @@ nosci:
secy_stats->stats.InPktsUnknownSCI++;
u64_stats_update_end(&secy_stats->syncp);
} else {
- macsec->secy.netdev->stats.rx_dropped++;
+ DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
}
}
@@ -3422,7 +3422,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
if (!secy->operational) {
kfree_skb(skb);
- dev->stats.tx_dropped++;
+ DEV_STATS_INC(dev, tx_dropped);
return NETDEV_TX_OK;
}
@@ -3430,7 +3430,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
skb = macsec_encrypt(skb, dev);
if (IS_ERR(skb)) {
if (PTR_ERR(skb) != -EINPROGRESS)
- dev->stats.tx_dropped++;
+ DEV_STATS_INC(dev, tx_dropped);
return NETDEV_TX_OK;
}
@@ -3667,9 +3667,9 @@ static void macsec_get_stats64(struct net_device *dev,
dev_fetch_sw_netstats(s, dev->tstats);
- s->rx_dropped = dev->stats.rx_dropped;
- s->tx_dropped = dev->stats.tx_dropped;
- s->rx_errors = dev->stats.rx_errors;
+ s->rx_dropped = atomic_long_read(&dev->stats.__rx_dropped);
+ s->tx_dropped = atomic_long_read(&dev->stats.__tx_dropped);
+ s->rx_errors = atomic_long_read(&dev->stats.__rx_errors);
}
static int macsec_get_iflink(const struct net_device *dev)
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index c1f307d90518..8a77ec33b417 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -459,21 +459,27 @@ static int at803x_set_wol(struct phy_device *phydev,
phy_write_mmd(phydev, MDIO_MMD_PCS, offsets[i],
mac[(i * 2) + 1] | (mac[(i * 2)] << 8));
- /* Enable WOL function */
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_PHY_MMD3_WOL_CTRL,
- 0, AT803X_WOL_EN);
- if (ret)
- return ret;
+ /* Enable WOL function for 1588 */
+ if (phydev->drv->phy_id == ATH8031_PHY_ID) {
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_PHY_MMD3_WOL_CTRL,
+ 0, AT803X_WOL_EN);
+ if (ret)
+ return ret;
+ }
/* Enable WOL interrupt */
ret = phy_modify(phydev, AT803X_INTR_ENABLE, 0, AT803X_INTR_ENABLE_WOL);
if (ret)
return ret;
} else {
- /* Disable WoL function */
- ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, AT803X_PHY_MMD3_WOL_CTRL,
- AT803X_WOL_EN, 0);
- if (ret)
- return ret;
+ /* Disable WoL function for 1588 */
+ if (phydev->drv->phy_id == ATH8031_PHY_ID) {
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_PHY_MMD3_WOL_CTRL,
+ AT803X_WOL_EN, 0);
+ if (ret)
+ return ret;
+ }
/* Disable WOL interrupt */
ret = phy_modify(phydev, AT803X_INTR_ENABLE, AT803X_INTR_ENABLE_WOL, 0);
if (ret)
@@ -508,11 +514,11 @@ static void at803x_get_wol(struct phy_device *phydev,
wol->supported = WAKE_MAGIC;
wol->wolopts = 0;
- value = phy_read_mmd(phydev, MDIO_MMD_PCS, AT803X_PHY_MMD3_WOL_CTRL);
+ value = phy_read(phydev, AT803X_INTR_ENABLE);
if (value < 0)
return;
- if (value & AT803X_WOL_EN)
+ if (value & AT803X_INTR_ENABLE_WOL)
wol->wolopts |= WAKE_MAGIC;
}
@@ -858,9 +864,6 @@ static int at803x_probe(struct phy_device *phydev)
if (phydev->drv->phy_id == ATH8031_PHY_ID) {
int ccr = phy_read(phydev, AT803X_REG_CHIP_CONFIG);
int mode_cfg;
- struct ethtool_wolinfo wol = {
- .wolopts = 0,
- };
if (ccr < 0)
return ccr;
@@ -877,12 +880,14 @@ static int at803x_probe(struct phy_device *phydev)
break;
}
- /* Disable WOL by default */
- ret = at803x_set_wol(phydev, &wol);
- if (ret < 0) {
- phydev_err(phydev, "failed to disable WOL on probe: %d\n", ret);
+ /* Disable WoL in 1588 register which is enabled
+ * by default
+ */
+ ret = phy_modify_mmd(phydev, MDIO_MMD_PCS,
+ AT803X_PHY_MMD3_WOL_CTRL,
+ AT803X_WOL_EN, 0);
+ if (ret)
return ret;
- }
}
return 0;
@@ -2059,8 +2064,6 @@ static struct phy_driver at803x_driver[] = {
.flags = PHY_POLL_CABLE_TEST,
.config_init = at803x_config_init,
.link_change_notify = at803x_link_change_notify,
- .set_wol = at803x_set_wol,
- .get_wol = at803x_get_wol,
.suspend = at803x_suspend,
.resume = at803x_resume,
/* PHY_BASIC_FEATURES */
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 25f0191df00b..100339bc8b04 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1594,7 +1594,7 @@ static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile,
if (zerocopy)
return false;
- if (SKB_DATA_ALIGN(len + TUN_RX_PAD) +
+ if (SKB_DATA_ALIGN(len + TUN_RX_PAD + XDP_PACKET_HEADROOM) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE)
return false;
diff --git a/drivers/net/vxlan/vxlan_vnifilter.c b/drivers/net/vxlan/vxlan_vnifilter.c
index a3de081cda5e..c3ff30ab782e 100644
--- a/drivers/net/vxlan/vxlan_vnifilter.c
+++ b/drivers/net/vxlan/vxlan_vnifilter.c
@@ -713,6 +713,12 @@ static struct vxlan_vni_node *vxlan_vni_alloc(struct vxlan_dev *vxlan,
return vninode;
}
+static void vxlan_vni_free(struct vxlan_vni_node *vninode)
+{
+ free_percpu(vninode->stats);
+ kfree(vninode);
+}
+
static int vxlan_vni_add(struct vxlan_dev *vxlan,
struct vxlan_vni_group *vg,
u32 vni, union vxlan_addr *group,
@@ -740,7 +746,7 @@ static int vxlan_vni_add(struct vxlan_dev *vxlan,
&vninode->vnode,
vxlan_vni_rht_params);
if (err) {
- kfree(vninode);
+ vxlan_vni_free(vninode);
return err;
}
@@ -763,8 +769,7 @@ static void vxlan_vni_node_rcu_free(struct rcu_head *rcu)
struct vxlan_vni_node *v;
v = container_of(rcu, struct vxlan_vni_node, rcu);
- free_percpu(v->stats);
- kfree(v);
+ vxlan_vni_free(v);
}
static int vxlan_vni_del(struct vxlan_dev *vxlan,
diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c
index 5bf7822c53f1..0ba714ca5185 100644
--- a/drivers/net/wireguard/allowedips.c
+++ b/drivers/net/wireguard/allowedips.c
@@ -6,7 +6,7 @@
#include "allowedips.h"
#include "peer.h"
-enum { MAX_ALLOWEDIPS_BITS = 128 };
+enum { MAX_ALLOWEDIPS_DEPTH = 129 };
static struct kmem_cache *node_cache;
@@ -42,7 +42,7 @@ static void push_rcu(struct allowedips_node **stack,
struct allowedips_node __rcu *p, unsigned int *len)
{
if (rcu_access_pointer(p)) {
- if (WARN_ON(IS_ENABLED(DEBUG) && *len >= MAX_ALLOWEDIPS_BITS))
+ if (WARN_ON(IS_ENABLED(DEBUG) && *len >= MAX_ALLOWEDIPS_DEPTH))
return;
stack[(*len)++] = rcu_dereference_raw(p);
}
@@ -55,7 +55,7 @@ static void node_free_rcu(struct rcu_head *rcu)
static void root_free_rcu(struct rcu_head *rcu)
{
- struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_BITS] = {
+ struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_DEPTH] = {
container_of(rcu, struct allowedips_node, rcu) };
unsigned int len = 1;
@@ -68,7 +68,7 @@ static void root_free_rcu(struct rcu_head *rcu)
static void root_remove_peer_lists(struct allowedips_node *root)
{
- struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_BITS] = { root };
+ struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_DEPTH] = { root };
unsigned int len = 1;
while (len > 0 && (node = stack[--len])) {
diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c
index 78ebe2892a78..3d1f64ff2e12 100644
--- a/drivers/net/wireguard/selftest/allowedips.c
+++ b/drivers/net/wireguard/selftest/allowedips.c
@@ -593,16 +593,20 @@ bool __init wg_allowedips_selftest(void)
wg_allowedips_remove_by_peer(&t, a, &mutex);
test_negative(4, a, 192, 168, 0, 1);
- /* These will hit the WARN_ON(len >= MAX_ALLOWEDIPS_BITS) in free_node
+ /* These will hit the WARN_ON(len >= MAX_ALLOWEDIPS_DEPTH) in free_node
* if something goes wrong.
*/
- for (i = 0; i < MAX_ALLOWEDIPS_BITS; ++i) {
- part = cpu_to_be64(~(1LLU << (i % 64)));
- memset(&ip, 0xff, 16);
- memcpy((u8 *)&ip + (i < 64) * 8, &part, 8);
+ for (i = 0; i < 64; ++i) {
+ part = cpu_to_be64(~0LLU << i);
+ memset(&ip, 0xff, 8);
+ memcpy((u8 *)&ip + 8, &part, 8);
+ wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
+ memcpy(&ip, &part, 8);
+ memset((u8 *)&ip + 8, 0, 8);
wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
}
-
+ memset(&ip, 0, 16);
+ wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex);
wg_allowedips_free(&t, &mutex);
wg_allowedips_init(&t);
diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c
index 6512267ae4ca..4928e4e91660 100644
--- a/drivers/net/wireless/ath/ath12k/wmi.c
+++ b/drivers/net/wireless/ath/ath12k/wmi.c
@@ -2144,8 +2144,7 @@ int ath12k_wmi_send_scan_start_cmd(struct ath12k *ar,
struct wmi_tlv *tlv;
void *ptr;
int i, ret, len;
- u32 *tmp_ptr;
- u8 extraie_len_with_pad = 0;
+ u32 *tmp_ptr, extraie_len_with_pad = 0;
struct ath12k_wmi_hint_short_ssid_arg *s_ssid = NULL;
struct ath12k_wmi_hint_bssid_arg *hint_bssid = NULL;
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
index de8a2e27f49c..2a90bb24ba77 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
@@ -1456,6 +1456,10 @@ brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp,
params_size -= BRCMF_SCAN_PARAMS_V2_FIXED_SIZE;
params_size += BRCMF_SCAN_PARAMS_FIXED_SIZE;
params_v1 = kzalloc(params_size, GFP_KERNEL);
+ if (!params_v1) {
+ err = -ENOMEM;
+ goto exit_params;
+ }
params_v1->version = cpu_to_le32(BRCMF_ESCAN_REQ_VERSION);
brcmf_scan_params_v2_to_v1(&params->params_v2_le, &params_v1->params_le);
kfree(params);
@@ -1473,6 +1477,7 @@ brcmf_run_escan(struct brcmf_cfg80211_info *cfg, struct brcmf_if *ifp,
bphy_err(drvr, "error (%d)\n", err);
}
+exit_params:
kfree(params);
exit:
return err;
diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c
index b114babec698..c93e6250cb8b 100644
--- a/drivers/net/wireless/realtek/rtw89/mac.c
+++ b/drivers/net/wireless/realtek/rtw89/mac.c
@@ -2524,7 +2524,7 @@ static int cmac_dma_init(struct rtw89_dev *rtwdev, u8 mac_idx)
u32 reg;
int ret;
- if (chip_id != RTL8852A && chip_id != RTL8852B)
+ if (chip_id != RTL8852B)
return 0;
ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL);
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 5bc81cc0a2de..46b252bbe500 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -11,6 +11,7 @@
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/ioport.h>
+#include <linux/of.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
@@ -332,6 +333,7 @@ void __weak pcibios_bus_add_device(struct pci_dev *pdev) { }
*/
void pci_bus_add_device(struct pci_dev *dev)
{
+ struct device_node *dn = dev->dev.of_node;
int retval;
/*
@@ -344,7 +346,7 @@ void pci_bus_add_device(struct pci_dev *dev)
pci_proc_attach_device(dev);
pci_bridge_d3_update(dev);
- dev->match_driver = true;
+ dev->match_driver = !dn || of_device_is_available(dn);
retval = device_attach(&dev->dev);
if (retval < 0 && retval != -EPROBE_DEFER)
pci_warn(dev, "device attach failed (%d)\n", retval);
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index e51219f9f523..3c158b17dcb5 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -34,11 +34,6 @@ int pci_set_of_node(struct pci_dev *dev)
if (!node)
return 0;
- if (!of_device_is_available(node)) {
- of_node_put(node);
- return -ENODEV;
- }
-
device_set_node(&dev->dev, of_fwnode_handle(node));
return 0;
}