author      Olof Johansson <olof@lixom.net>    2019-05-16 08:51:48 +0300
committer   Olof Johansson <olof@lixom.net>    2019-05-16 08:51:48 +0300
commit      7a0c4c17089a8aff52f516f0f52002be52950aae (patch)
tree        474dc451d62a1ba788ed52c4cecf93f9e50cc39a /drivers/net
parent      a842b514db723d7bec5e8f9e6f57e5cfbb6b1f37 (diff)
parent      affe2a51001450c32886f70248b57eac0f9b68ef (diff)
download    linux-7a0c4c17089a8aff52f516f0f52002be52950aae.tar.xz
Merge branch 'fixes' into arm/soc
Merge in a few pending fixes from pre-5.1 that didn't get sent in:

  MAINTAINERS: update arch/arm/mach-davinci
  ARM: dts: ls1021: Fix SGMII PCS link remaining down after PHY disconnect
  ARM: dts: imx6q-logicpd: Reduce inrush current on USBH1
  ARM: dts: imx6q-logicpd: Reduce inrush current on start
  ARM: dts: imx: Fix the AR803X phy-mode
  ARM: dts: sun8i: a33: Reintroduce default pinctrl muxing
  arm64: dts: allwinner: a64: Rename hpvcc-supply to cpvdd-supply
  ARM: sunxi: fix a leaked reference by adding missing of_node_put
  ARM: sunxi: fix a leaked reference by adding missing of_node_put

Signed-off-by: Olof Johansson <olof@lixom.net>
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bonding/bond_main.c | 6
-rw-r--r--  drivers/net/bonding/bond_sysfs_slave.c | 4
-rw-r--r--  drivers/net/dsa/mv88e6xxx/port.c | 24
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c | 16
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c | 8
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.h | 4
-rw-r--r--  drivers/net/ethernet/cadence/macb_main.c | 4
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 42
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 30
-rw-r--r--  drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c | 9
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c | 30
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 53
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 6
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 4
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile | 2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns_mdio.c | 18
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 37
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 16
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 28
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 3
-rw-r--r--  drivers/net/ethernet/intel/igb/e1000_defines.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 57
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c | 39
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 55
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 94
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 82
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c | 53
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 20
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 2
-rw-r--r--  drivers/net/ethernet/mscc/ocelot.c | 24
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-config.c | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/action.c | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 3
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c | 27
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_repr.c | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed.h | 7
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_dev.c | 85
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.c | 83
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_int.h | 4
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_main.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_ptp.c | 7
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/descs_com.h | 22
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 22
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 12
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 34
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 1
-rw-r--r--  drivers/net/hyperv/netvsc.c | 6
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 32
-rw-r--r--  drivers/net/team/team.c | 26
-rw-r--r--  drivers/net/usb/qmi_wwan.c | 1
-rw-r--r--  drivers/net/vrf.c | 9
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_rx.c | 2
-rw-r--r--  drivers/net/wireless/ath/ath10k/mac.c | 4
-rw-r--r--  drivers/net/wireless/ath/ath9k/xmit.c | 5
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/cfg/22000.c | 30
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/dbg.c | 34
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/fw/init.c | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-config.h | 3
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-csr.h | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/iwl-trans.h | 12
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c | 71
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/mvm.h | 1
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.c | 43
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/sta.h | 7
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/drv.c | 13
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/internal.h | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/trans.c | 11
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c | 2
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/pcie/tx.c | 2
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 19
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7603/init.c | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7603/mac.c | 53
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7603/main.c | 8
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h | 2
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt76x02_mac.c | 14
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00.h | 1
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00mac.c | 10
-rw-r--r--  drivers/net/wireless/ralink/rt2x00/rt2x00queue.c | 15
113 files changed, 995 insertions(+), 730 deletions(-)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index b59708c35faf..ee610721098e 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3213,8 +3213,12 @@ static int bond_netdev_event(struct notifier_block *this,
return NOTIFY_DONE;
if (event_dev->flags & IFF_MASTER) {
+ int ret;
+
netdev_dbg(event_dev, "IFF_MASTER\n");
- return bond_master_netdev_event(event, event_dev);
+ ret = bond_master_netdev_event(event, event_dev);
+ if (ret != NOTIFY_DONE)
+ return ret;
}
if (event_dev->flags & IFF_SLAVE) {
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 2f120b2ffef0..4985268e2273 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -55,7 +55,9 @@ static SLAVE_ATTR_RO(link_failure_count);
static ssize_t perm_hwaddr_show(struct slave *slave, char *buf)
{
- return sprintf(buf, "%pM\n", slave->perm_hwaddr);
+ return sprintf(buf, "%*phC\n",
+ slave->dev->addr_len,
+ slave->perm_hwaddr);
}
static SLAVE_ATTR_RO(perm_hwaddr);
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index dce84a2a65c7..c44b2822e4dd 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -427,18 +427,22 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
return 0;
lane = mv88e6390x_serdes_get_lane(chip, port);
- if (lane < 0)
+ if (lane < 0 && lane != -ENODEV)
return lane;
- if (chip->ports[port].serdes_irq) {
- err = mv88e6390_serdes_irq_disable(chip, port, lane);
+ if (lane >= 0) {
+ if (chip->ports[port].serdes_irq) {
+ err = mv88e6390_serdes_irq_disable(chip, port, lane);
+ if (err)
+ return err;
+ }
+
+ err = mv88e6390x_serdes_power(chip, port, false);
if (err)
return err;
}
- err = mv88e6390x_serdes_power(chip, port, false);
- if (err)
- return err;
+ chip->ports[port].cmode = 0;
if (cmode) {
err = mv88e6xxx_port_read(chip, port, MV88E6XXX_PORT_STS, &reg);
@@ -452,6 +456,12 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
if (err)
return err;
+ chip->ports[port].cmode = cmode;
+
+ lane = mv88e6390x_serdes_get_lane(chip, port);
+ if (lane < 0)
+ return lane;
+
err = mv88e6390x_serdes_power(chip, port, true);
if (err)
return err;
@@ -463,8 +473,6 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
}
}
- chip->ports[port].cmode = cmode;
-
return 0;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index a9bdc21873d3..10ff37d6dc78 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -957,7 +957,7 @@ int bnx2x_vfpf_update_vlan(struct bnx2x *bp, u16 vid, u8 vf_qid, bool add)
bnx2x_sample_bulletin(bp);
if (bp->shadow_bulletin.content.valid_bitmap & 1 << VLAN_VALID) {
- BNX2X_ERR("Hypervisor will dicline the request, avoiding\n");
+ BNX2X_ERR("Hypervisor will decline the request, avoiding\n");
rc = -EINVAL;
goto out;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 0bb9d7b3a2b6..4c586ba4364b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1133,6 +1133,8 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
tpa_info = &rxr->rx_tpa[agg_id];
if (unlikely(cons != rxr->rx_next_cons)) {
+ netdev_warn(bp->dev, "TPA cons %x != expected cons %x\n",
+ cons, rxr->rx_next_cons);
bnxt_sched_reset(bp, rxr);
return;
}
@@ -1585,15 +1587,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
}
cons = rxcmp->rx_cmp_opaque;
- rx_buf = &rxr->rx_buf_ring[cons];
- data = rx_buf->data;
- data_ptr = rx_buf->data_ptr;
if (unlikely(cons != rxr->rx_next_cons)) {
int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);
+ netdev_warn(bp->dev, "RX cons %x != expected cons %x\n",
+ cons, rxr->rx_next_cons);
bnxt_sched_reset(bp, rxr);
return rc1;
}
+ rx_buf = &rxr->rx_buf_ring[cons];
+ data = rx_buf->data;
+ data_ptr = rx_buf->data_ptr;
prefetch(data_ptr);
misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
@@ -1610,11 +1614,17 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
rx_buf->data = NULL;
if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
+ u32 rx_err = le32_to_cpu(rxcmp1->rx_cmp_cfa_code_errors_v2);
+
bnxt_reuse_rx_data(rxr, cons, data);
if (agg_bufs)
bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
rc = -EIO;
+ if (rx_err & RX_CMPL_ERRORS_BUFFER_ERROR_MASK) {
+ netdev_warn(bp->dev, "RX buffer error %x\n", rx_err);
+ bnxt_sched_reset(bp, rxr);
+ }
goto next_rx;
}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 328373e0578f..060a6f386104 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -4283,7 +4283,7 @@ static void tg3_power_down(struct tg3 *tp)
pci_set_power_state(tp->pdev, PCI_D3hot);
}
-static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
+static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u32 *speed, u8 *duplex)
{
switch (val & MII_TG3_AUX_STAT_SPDMASK) {
case MII_TG3_AUX_STAT_10HALF:
@@ -4787,7 +4787,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
bool current_link_up;
u32 bmsr, val;
u32 lcl_adv, rmt_adv;
- u16 current_speed;
+ u32 current_speed;
u8 current_duplex;
int i, err;
@@ -5719,7 +5719,7 @@ out:
static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
{
u32 orig_pause_cfg;
- u16 orig_active_speed;
+ u32 orig_active_speed;
u8 orig_active_duplex;
u32 mac_status;
bool current_link_up;
@@ -5823,7 +5823,7 @@ static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
{
int err = 0;
u32 bmsr, bmcr;
- u16 current_speed = SPEED_UNKNOWN;
+ u32 current_speed = SPEED_UNKNOWN;
u8 current_duplex = DUPLEX_UNKNOWN;
bool current_link_up = false;
u32 local_adv, remote_adv, sgsr;
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index a772a33b685c..6953d0546acb 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2873,7 +2873,7 @@ struct tg3_tx_ring_info {
struct tg3_link_config {
/* Describes what we're trying to get. */
u32 advertising;
- u16 speed;
+ u32 speed;
u8 duplex;
u8 autoneg;
u8 flowctrl;
@@ -2882,7 +2882,7 @@ struct tg3_link_config {
u8 active_flowctrl;
u8 active_duplex;
- u16 active_speed;
+ u32 active_speed;
u32 rmt_adv;
};
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 1522aee81884..3da2795e2486 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -898,7 +898,9 @@ static void macb_tx_interrupt(struct macb_queue *queue)
/* First, update TX stats if needed */
if (skb) {
- if (gem_ptp_do_txstamp(queue, skb, desc) == 0) {
+ if (unlikely(skb_shinfo(skb)->tx_flags &
+ SKBTX_HW_TSTAMP) &&
+ gem_ptp_do_txstamp(queue, skb, desc) == 0) {
/* skb now belongs to timestamp buffer
* and will be removed later
*/
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index aa2be4807191..c032bef1b776 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -32,6 +32,13 @@
#define DRV_NAME "nicvf"
#define DRV_VERSION "1.0"
+/* NOTE: Packets bigger than 1530 are split across multiple pages and XDP needs
+ * the buffer to be contiguous. Allow XDP to be set up only if we don't exceed
+ * this value, keeping headroom for the 14 byte Ethernet header and two
+ * VLAN tags (for QinQ)
+ */
+#define MAX_XDP_MTU (1530 - ETH_HLEN - VLAN_HLEN * 2)
+
/* Supported devices */
static const struct pci_device_id nicvf_id_table[] = {
{ PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
@@ -1328,10 +1335,11 @@ int nicvf_stop(struct net_device *netdev)
struct nicvf_cq_poll *cq_poll = NULL;
union nic_mbx mbx = {};
- cancel_delayed_work_sync(&nic->link_change_work);
-
/* wait till all queued set_rx_mode tasks completes */
- drain_workqueue(nic->nicvf_rx_mode_wq);
+ if (nic->nicvf_rx_mode_wq) {
+ cancel_delayed_work_sync(&nic->link_change_work);
+ drain_workqueue(nic->nicvf_rx_mode_wq);
+ }
mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
nicvf_send_msg_to_pf(nic, &mbx);
@@ -1452,7 +1460,8 @@ int nicvf_open(struct net_device *netdev)
struct nicvf_cq_poll *cq_poll = NULL;
/* wait till all queued set_rx_mode tasks completes if any */
- drain_workqueue(nic->nicvf_rx_mode_wq);
+ if (nic->nicvf_rx_mode_wq)
+ drain_workqueue(nic->nicvf_rx_mode_wq);
netif_carrier_off(netdev);
@@ -1550,10 +1559,12 @@ int nicvf_open(struct net_device *netdev)
/* Send VF config done msg to PF */
nicvf_send_cfg_done(nic);
- INIT_DELAYED_WORK(&nic->link_change_work,
- nicvf_link_status_check_task);
- queue_delayed_work(nic->nicvf_rx_mode_wq,
- &nic->link_change_work, 0);
+ if (nic->nicvf_rx_mode_wq) {
+ INIT_DELAYED_WORK(&nic->link_change_work,
+ nicvf_link_status_check_task);
+ queue_delayed_work(nic->nicvf_rx_mode_wq,
+ &nic->link_change_work, 0);
+ }
return 0;
cleanup:
@@ -1578,6 +1589,15 @@ static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
struct nicvf *nic = netdev_priv(netdev);
int orig_mtu = netdev->mtu;
+ /* For now just support only the usual MTU sized frames,
+ * plus some headroom for VLAN, QinQ.
+ */
+ if (nic->xdp_prog && new_mtu > MAX_XDP_MTU) {
+ netdev_warn(netdev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
+ netdev->mtu);
+ return -EINVAL;
+ }
+
netdev->mtu = new_mtu;
if (!netif_running(netdev))
@@ -1826,8 +1846,10 @@ static int nicvf_xdp_setup(struct nicvf *nic, struct bpf_prog *prog)
bool bpf_attached = false;
int ret = 0;
- /* For now just support only the usual MTU sized frames */
- if (prog && (dev->mtu > 1500)) {
+ /* For now just support only the usual MTU sized frames,
+ * plus some headroom for VLAN, QinQ.
+ */
+ if (prog && dev->mtu > MAX_XDP_MTU) {
netdev_warn(dev, "Jumbo frames not yet supported with XDP, current MTU %d.\n",
dev->mtu);
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 5b4d3badcb73..e246f9733bb8 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -105,20 +105,19 @@ static inline struct pgcache *nicvf_alloc_page(struct nicvf *nic,
/* Check if page can be recycled */
if (page) {
ref_count = page_ref_count(page);
- /* Check if this page has been used once i.e 'put_page'
- * called after packet transmission i.e internal ref_count
- * and page's ref_count are equal i.e page can be recycled.
+ /* This page can be recycled if internal ref_count and page's
+ * ref_count are equal, indicating that the page has been used
+ * once for packet transmission. For non-XDP mode, internal
+ * ref_count is always '1'.
*/
- if (rbdr->is_xdp && (ref_count == pgcache->ref_count))
- pgcache->ref_count--;
- else
- page = NULL;
-
- /* In non-XDP mode, page's ref_count needs to be '1' for it
- * to be recycled.
- */
- if (!rbdr->is_xdp && (ref_count != 1))
+ if (rbdr->is_xdp) {
+ if (ref_count == pgcache->ref_count)
+ pgcache->ref_count--;
+ else
+ page = NULL;
+ } else if (ref_count != 1) {
page = NULL;
+ }
}
if (!page) {
@@ -365,11 +364,10 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
while (head < rbdr->pgcnt) {
pgcache = &rbdr->pgcache[head];
if (pgcache->page && page_ref_count(pgcache->page) != 0) {
- if (!rbdr->is_xdp) {
- put_page(pgcache->page);
- continue;
+ if (rbdr->is_xdp) {
+ page_ref_sub(pgcache->page,
+ pgcache->ref_count - 1);
}
- page_ref_sub(pgcache->page, pgcache->ref_count - 1);
put_page(pgcache->page);
}
head++;
diff --git a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
index 74849be5f004..e2919005ead3 100644
--- a/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
+++ b/drivers/net/ethernet/chelsio/libcxgb/libcxgb_ppm.c
@@ -354,7 +354,10 @@ static struct cxgbi_ppm_pool *ppm_alloc_cpu_pool(unsigned int *total,
ppmax = max;
/* pool size must be multiple of unsigned long */
- bmap = BITS_TO_LONGS(ppmax);
+ bmap = ppmax / BITS_PER_TYPE(unsigned long);
+ if (!bmap)
+ return NULL;
+
ppmax = (bmap * sizeof(unsigned long)) << 3;
alloc_sz = sizeof(*pools) + sizeof(unsigned long) * bmap;
@@ -402,6 +405,10 @@ int cxgbi_ppm_init(void **ppm_pp, struct net_device *ndev,
if (reserve_factor) {
ppmax_pool = ppmax / reserve_factor;
pool = ppm_alloc_cpu_pool(&ppmax_pool, &pool_index_max);
+ if (!pool) {
+ ppmax_pool = 0;
+ reserve_factor = 0;
+ }
pr_debug("%s: ppmax %u, cpu total %u, per cpu %u.\n",
ndev->name, ppmax, ppmax_pool, pool_index_max);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 697c2427f2b7..a96ad20ee484 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1840,13 +1840,9 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
int ret;
if (enable) {
- ret = clk_prepare_enable(fep->clk_ahb);
- if (ret)
- return ret;
-
ret = clk_prepare_enable(fep->clk_enet_out);
if (ret)
- goto failed_clk_enet_out;
+ return ret;
if (fep->clk_ptp) {
mutex_lock(&fep->ptp_clk_mutex);
@@ -1866,7 +1862,6 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
phy_reset_after_clk_enable(ndev->phydev);
} else {
- clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_enet_out);
if (fep->clk_ptp) {
mutex_lock(&fep->ptp_clk_mutex);
@@ -1885,8 +1880,6 @@ failed_clk_ref:
failed_clk_ptp:
if (fep->clk_enet_out)
clk_disable_unprepare(fep->clk_enet_out);
-failed_clk_enet_out:
- clk_disable_unprepare(fep->clk_ahb);
return ret;
}
@@ -3470,6 +3463,9 @@ fec_probe(struct platform_device *pdev)
ret = clk_prepare_enable(fep->clk_ipg);
if (ret)
goto failed_clk_ipg;
+ ret = clk_prepare_enable(fep->clk_ahb);
+ if (ret)
+ goto failed_clk_ahb;
fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
if (!IS_ERR(fep->reg_phy)) {
@@ -3563,6 +3559,9 @@ failed_reset:
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
failed_regulator:
+ clk_disable_unprepare(fep->clk_ahb);
+failed_clk_ahb:
+ clk_disable_unprepare(fep->clk_ipg);
failed_clk_ipg:
fec_enet_clk_enable(ndev, false);
failed_clk:
@@ -3686,6 +3685,7 @@ static int __maybe_unused fec_runtime_suspend(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ clk_disable_unprepare(fep->clk_ahb);
clk_disable_unprepare(fep->clk_ipg);
return 0;
@@ -3695,8 +3695,20 @@ static int __maybe_unused fec_runtime_resume(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
struct fec_enet_private *fep = netdev_priv(ndev);
+ int ret;
- return clk_prepare_enable(fep->clk_ipg);
+ ret = clk_prepare_enable(fep->clk_ahb);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(fep->clk_ipg);
+ if (ret)
+ goto failed_clk_ipg;
+
+ return 0;
+
+failed_clk_ipg:
+ clk_disable_unprepare(fep->clk_ahb);
+ return ret;
}
static const struct dev_pm_ops fec_pm_ops = {
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index 79d03f8ee7b1..c7fa97a7e1f4 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -150,7 +150,6 @@ out_buffer_fail:
/* free desc along with its attached buffer */
static void hnae_free_desc(struct hnae_ring *ring)
{
- hnae_free_buffers(ring);
dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
ring->desc_num * sizeof(ring->desc[0]),
ring_to_dma_dir(ring));
@@ -183,6 +182,9 @@ static int hnae_alloc_desc(struct hnae_ring *ring)
/* fini ring, also free the buffer for the ring */
static void hnae_fini_ring(struct hnae_ring *ring)
{
+ if (is_rx_ring(ring))
+ hnae_free_buffers(ring);
+
hnae_free_desc(ring);
kfree(ring->desc_cb);
ring->desc_cb = NULL;
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index 08a750fb60c4..d6fb83437230 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -357,7 +357,7 @@ struct hnae_buf_ops {
};
struct hnae_queue {
- void __iomem *io_base;
+ u8 __iomem *io_base;
phys_addr_t phy_base;
struct hnae_ae_dev *dev; /* the device who use this queue */
struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
index a97228c93831..6c0507921623 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -370,7 +370,7 @@ int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn)
static void hns_mac_param_get(struct mac_params *param,
struct hns_mac_cb *mac_cb)
{
- param->vaddr = (void *)mac_cb->vaddr;
+ param->vaddr = mac_cb->vaddr;
param->mac_mode = hns_get_enet_interface(mac_cb);
ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr);
param->mac_id = mac_cb->mac_id;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
index fbc75341bef7..22589799f1a5 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -187,7 +187,7 @@ struct mac_statistics {
/*mac para struct ,mac get param from nic or dsaf when initialize*/
struct mac_params {
char addr[ETH_ALEN];
- void *vaddr; /*virtual address*/
+ u8 __iomem *vaddr; /*virtual address*/
struct device *dev;
u8 mac_id;
/**< Ethernet operation mode (MAC-PHY interface and speed) */
@@ -402,7 +402,7 @@ struct mac_driver {
enum mac_mode mac_mode;
u8 mac_id;
struct hns_mac_cb *mac_cb;
- void __iomem *io_base;
+ u8 __iomem *io_base;
unsigned int mac_en_flg;/*you'd better don't enable mac twice*/
unsigned int virt_dev_num;
struct device *dev;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
index ac55db065f16..61eea6ac846f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -1602,8 +1602,6 @@ static void hns_dsaf_set_mac_key(
DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id);
dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M,
DSAF_TBL_TCAM_KEY_PORT_S, port);
-
- mac_key->low.bits.port_vlan = le16_to_cpu(mac_key->low.bits.port_vlan);
}
/**
@@ -1663,8 +1661,8 @@ int hns_dsaf_set_mac_uc_entry(
/* default config dvc to 0 */
mac_data.tbl_ucast_dvc = 0;
mac_data.tbl_ucast_out_port = mac_entry->port_num;
- tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
- tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+ tcam_data.tbl_tcam_data_high = mac_key.high.val;
+ tcam_data.tbl_tcam_data_low = mac_key.low.val;
hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data);
@@ -1786,9 +1784,6 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
0xff,
mc_mask);
- mask_key.high.val = le32_to_cpu(mask_key.high.val);
- mask_key.low.val = le32_to_cpu(mask_key.low.val);
-
pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
}
@@ -1840,8 +1835,8 @@ int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
dsaf_dev->ae_dev.name, mac_key.high.val,
mac_key.low.val, entry_index);
- tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
- tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+ tcam_data.tbl_tcam_data_high = mac_key.high.val;
+ tcam_data.tbl_tcam_data_low = mac_key.low.val;
/* config mc entry with mask */
hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data,
@@ -1956,9 +1951,6 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
/* config key mask */
hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask);
- mask_key.high.val = le32_to_cpu(mask_key.high.val);
- mask_key.low.val = le32_to_cpu(mask_key.low.val);
-
pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
}
@@ -2012,8 +2004,8 @@ int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
soft_mac_entry += entry_index;
soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
} else { /* not zero, just del port, update */
- tcam_data.tbl_tcam_data_high = cpu_to_le32(mac_key.high.val);
- tcam_data.tbl_tcam_data_low = cpu_to_le32(mac_key.low.val);
+ tcam_data.tbl_tcam_data_high = mac_key.high.val;
+ tcam_data.tbl_tcam_data_low = mac_key.low.val;
hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
&tcam_data,
@@ -2750,6 +2742,17 @@ int hns_dsaf_get_regs_count(void)
return DSAF_DUMP_REGS_NUM;
}
+static int hns_dsaf_get_port_id(u8 port)
+{
+ if (port < DSAF_SERVICE_NW_NUM)
+ return port;
+
+ if (port >= DSAF_BASE_INNER_PORT_NUM)
+ return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+
+ return -EINVAL;
+}
+
static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
{
struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
@@ -2815,23 +2818,33 @@ static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
memset(&temp_key, 0x0, sizeof(temp_key));
mask_entry.addr[0] = 0x01;
hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
- port, mask_entry.addr);
+ 0xf, mask_entry.addr);
tbl_tcam_mcast.tbl_mcast_item_vld = 1;
tbl_tcam_mcast.tbl_mcast_old_en = 0;
- if (port < DSAF_SERVICE_NW_NUM) {
- mskid = port;
- } else if (port >= DSAF_BASE_INNER_PORT_NUM) {
- mskid = port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
- } else {
+ /* set MAC port to handle multicast */
+ mskid = hns_dsaf_get_port_id(port);
+ if (mskid == -EINVAL) {
dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
dsaf_dev->ae_dev.name, port,
mask_key.high.val, mask_key.low.val);
return;
}
+ dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
+ mskid % 32, 1);
+ /* set pool bit map to handle multicast */
+ mskid = hns_dsaf_get_port_id(port_num);
+ if (mskid == -EINVAL) {
+ dev_err(dsaf_dev->dev,
+ "%s, pool bit map pnum(%d)error,key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name, port_num,
+ mask_key.high.val, mask_key.low.val);
+ return;
+ }
dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
mskid % 32, 1);
+
memcpy(&temp_key, &mask_key, sizeof(mask_key));
hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
(struct dsaf_tbl_tcam_data *)(&mask_key),
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
index 0e1cd99831a6..76cc8887e1a8 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -467,4 +467,6 @@ int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
u8 mac_id, u8 port_num);
int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
+int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
+
#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
index 16294cd3c954..19b94879691f 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -670,7 +670,7 @@ static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
dsaf_set_field(origin, 1ull << 10, 10, en);
dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
} else {
- u8 *base_addr = (u8 *)mac_cb->serdes_vaddr +
+ u8 __iomem *base_addr = mac_cb->serdes_vaddr +
(mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en);
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index 3d07c8a7639d..17c019106e6e 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -61,7 +61,7 @@ void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb,
}
}
-static void __iomem *
+static u8 __iomem *
hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
{
return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET;
@@ -111,8 +111,8 @@ hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
dsaf_dev->ppe_common[comm_index] = NULL;
}
-static void __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
- int ppe_idx)
+static u8 __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
+ int ppe_idx)
{
return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
index f670e63a5a01..110c6e8222c7 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -80,7 +80,7 @@ struct hns_ppe_cb {
struct hns_ppe_hw_stats hw_stats;
u8 index; /* index in a ppe common device */
- void __iomem *io_base;
+ u8 __iomem *io_base;
int virq;
u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /*shadow indir tab */
u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */
@@ -89,7 +89,7 @@ struct hns_ppe_cb {
struct ppe_common_cb {
struct device *dev;
struct dsaf_device *dsaf_dev;
- void __iomem *io_base;
+ u8 __iomem *io_base;
enum ppe_common_mode ppe_mode;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
index 6bf346c11b25..ac3518ca4d7b 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -458,7 +458,7 @@ static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
} else {
ring = &q->tx_ring;
- ring->io_base = (u8 __iomem *)ring_pair_cb->q.io_base +
+ ring->io_base = ring_pair_cb->q.io_base +
HNS_RCB_TX_REG_OFFSET;
irq_idx = HNS_RCB_IRQ_IDX_TX;
mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
@@ -764,7 +764,7 @@ static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
}
}
-static void __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
+static u8 __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
{
struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
index b9733b0b8482..b9e7f11f0896 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -1018,7 +1018,7 @@
#define XGMAC_PAUSE_CTL_RSP_MODE_B 2
#define XGMAC_PAUSE_CTL_TX_XOFF_B 3
-static inline void dsaf_write_reg(void __iomem *base, u32 reg, u32 value)
+static inline void dsaf_write_reg(u8 __iomem *base, u32 reg, u32 value)
{
writel(value, base + reg);
}
@@ -1053,7 +1053,7 @@ static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val)
#define dsaf_set_bit(origin, shift, val) \
dsaf_set_field((origin), (1ull << (shift)), (shift), (val))
-static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
+static inline void dsaf_set_reg_field(u8 __iomem *base, u32 reg, u32 mask,
u32 shift, u32 val)
{
u32 origin = dsaf_read_reg(base, reg);
@@ -1073,7 +1073,7 @@ static inline void dsaf_set_reg_field(void __iomem *base, u32 reg, u32 mask,
#define dsaf_get_bit(origin, shift) \
dsaf_get_field((origin), (1ull << (shift)), (shift))
-static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
+static inline u32 dsaf_get_reg_field(u8 __iomem *base, u32 reg, u32 mask,
u32 shift)
{
u32 origin;
@@ -1089,11 +1089,11 @@ static inline u32 dsaf_get_reg_field(void __iomem *base, u32 reg, u32 mask,
dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit))
#define dsaf_write_b(addr, data)\
- writeb((data), (__iomem unsigned char *)(addr))
+ writeb((data), (__iomem u8 *)(addr))
#define dsaf_read_b(addr)\
- readb((__iomem unsigned char *)(addr))
+ readb((__iomem u8 *)(addr))
#define hns_mac_reg_read64(drv, offset) \
- readq((__iomem void *)(((u8 *)(drv)->io_base + 0xc00 + (offset))))
+ readq((__iomem void *)(((drv)->io_base + 0xc00 + (offset))))
#endif /* _DSAF_REG_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
index ba4316910dea..a60f207768fc 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -129,7 +129,7 @@ static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv)
dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0);
dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1);
dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0);
- dsaf_write_reg(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
+ dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
}
/**
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 60e7d7ae3787..4cd86ba1f050 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -29,9 +29,6 @@
#define SERVICE_TIMER_HZ (1 * HZ)
-#define NIC_TX_CLEAN_MAX_NUM 256
-#define NIC_RX_CLEAN_MAX_NUM 64
-
#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1
#define HNS_BUFFER_SIZE_2048 2048
@@ -376,8 +373,6 @@ netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
wmb(); /* commit all data before submit */
assert(skb->queue_mapping < priv->ae_handle->q_num);
hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
- ring->stats.tx_pkts++;
- ring->stats.tx_bytes += skb->len;
return NETDEV_TX_OK;
@@ -999,6 +994,9 @@ static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
/* issue prefetch for next Tx descriptor */
prefetch(&ring->desc_cb[ring->next_to_clean]);
}
+ /* update tx ring statistics. */
+ ring->stats.tx_pkts += pkts;
+ ring->stats.tx_bytes += bytes;
NETIF_TX_UNLOCK(ring);
@@ -2152,7 +2150,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
hns_nic_tx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi,
- hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
+ hns_nic_common_poll, NAPI_POLL_WEIGHT);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
for (i = h->q_num; i < h->q_num * 2; i++) {
@@ -2165,7 +2163,7 @@ static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
hns_nic_rx_fini_pro_v2;
netif_napi_add(priv->netdev, &rd->napi,
- hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
+ hns_nic_common_poll, NAPI_POLL_WEIGHT);
rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
index fffe8c1c45d3..0fb61d440d3b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -3,7 +3,7 @@
# Makefile for the HISILICON network device drivers.
#
-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGE) += hclge.o
hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
index fb93bbd35845..6193f8fa7cf3 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
@@ -3,7 +3,7 @@
# Makefile for the HISILICON network device drivers.
#
-ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
+ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
\ No newline at end of file
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index baf5cc251f32..8b8a7d00e8e0 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -39,7 +39,7 @@ struct hns_mdio_sc_reg {
};
struct hns_mdio_device {
- void *vbase; /* mdio reg base address */
+ u8 __iomem *vbase; /* mdio reg base address */
struct regmap *subctrl_vbase;
struct hns_mdio_sc_reg sc_reg;
};
@@ -96,21 +96,17 @@ enum mdio_c45_op_seq {
#define MDIO_SC_CLK_ST 0x531C
#define MDIO_SC_RESET_ST 0x5A1C
-static void mdio_write_reg(void *base, u32 reg, u32 value)
+static void mdio_write_reg(u8 __iomem *base, u32 reg, u32 value)
{
- u8 __iomem *reg_addr = (u8 __iomem *)base;
-
- writel_relaxed(value, reg_addr + reg);
+ writel_relaxed(value, base + reg);
}
#define MDIO_WRITE_REG(a, reg, value) \
mdio_write_reg((a)->vbase, (reg), (value))
-static u32 mdio_read_reg(void *base, u32 reg)
+static u32 mdio_read_reg(u8 __iomem *base, u32 reg)
{
- u8 __iomem *reg_addr = (u8 __iomem *)base;
-
- return readl_relaxed(reg_addr + reg);
+ return readl_relaxed(base + reg);
}
#define mdio_set_field(origin, mask, shift, val) \
@@ -121,7 +117,7 @@ static u32 mdio_read_reg(void *base, u32 reg)
#define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask))
-static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
+static void mdio_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift,
u32 val)
{
u32 origin = mdio_read_reg(base, reg);
@@ -133,7 +129,7 @@ static void mdio_set_reg_field(void *base, u32 reg, u32 mask, u32 shift,
#define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \
mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val))
-static u32 mdio_get_reg_field(void *base, u32 reg, u32 mask, u32 shift)
+static u32 mdio_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift)
{
u32 origin;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 5ecbb1adcf3b..3dfb2d131eb7 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -1885,6 +1885,7 @@ static int do_hard_reset(struct ibmvnic_adapter *adapter,
*/
adapter->state = VNIC_PROBED;
+ reinit_completion(&adapter->init_done);
rc = init_crq_queue(adapter);
if (rc) {
netdev_err(adapter->netdev,
@@ -3761,6 +3762,7 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
+ netdev_features_t old_hw_features = 0;
union ibmvnic_crq crq;
int i;
@@ -3836,24 +3838,41 @@ static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
- adapter->netdev->features = NETIF_F_SG | NETIF_F_GSO;
+ if (adapter->state != VNIC_PROBING) {
+ old_hw_features = adapter->netdev->hw_features;
+ adapter->netdev->hw_features = 0;
+ }
+
+ adapter->netdev->hw_features = NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
- adapter->netdev->features |= NETIF_F_IP_CSUM;
+ adapter->netdev->hw_features |= NETIF_F_IP_CSUM;
if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
- adapter->netdev->features |= NETIF_F_IPV6_CSUM;
+ adapter->netdev->hw_features |= NETIF_F_IPV6_CSUM;
if ((adapter->netdev->features &
(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
- adapter->netdev->features |= NETIF_F_RXCSUM;
+ adapter->netdev->hw_features |= NETIF_F_RXCSUM;
if (buf->large_tx_ipv4)
- adapter->netdev->features |= NETIF_F_TSO;
+ adapter->netdev->hw_features |= NETIF_F_TSO;
if (buf->large_tx_ipv6)
- adapter->netdev->features |= NETIF_F_TSO6;
+ adapter->netdev->hw_features |= NETIF_F_TSO6;
+
+ if (adapter->state == VNIC_PROBING) {
+ adapter->netdev->features |= adapter->netdev->hw_features;
+ } else if (old_hw_features != adapter->netdev->hw_features) {
+ netdev_features_t tmp = 0;
- adapter->netdev->hw_features |= adapter->netdev->features;
+ /* disable features no longer supported */
+ adapter->netdev->features &= adapter->netdev->hw_features;
+ /* turn on features now supported if previously enabled */
+ tmp = (old_hw_features ^ adapter->netdev->hw_features) &
+ adapter->netdev->hw_features;
+ adapter->netdev->features |=
+ tmp & adapter->netdev->wanted_features;
+ }
memset(&crq, 0, sizeof(crq));
crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
@@ -4625,7 +4644,7 @@ static int ibmvnic_reset_init(struct ibmvnic_adapter *adapter)
old_num_rx_queues = adapter->req_rx_queues;
old_num_tx_queues = adapter->req_tx_queues;
- init_completion(&adapter->init_done);
+ reinit_completion(&adapter->init_done);
adapter->init_done_rc = 0;
ibmvnic_send_crq_init(adapter);
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -4680,7 +4699,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
adapter->from_passive_init = false;
- init_completion(&adapter->init_done);
adapter->init_done_rc = 0;
ibmvnic_send_crq_init(adapter);
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
@@ -4759,6 +4777,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
INIT_WORK(&adapter->ibmvnic_reset, __ibmvnic_reset);
INIT_LIST_HEAD(&adapter->rwi_list);
spin_lock_init(&adapter->rwi_lock);
+ init_completion(&adapter->init_done);
adapter->resetting = false;
adapter->mac_change_pending = false;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 5a0419421511..ecef949f3baa 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -41,6 +41,8 @@ static int __init fm10k_init_module(void)
/* create driver workqueue */
fm10k_workqueue = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
fm10k_driver_name);
+ if (!fm10k_workqueue)
+ return -ENOMEM;
fm10k_dbg_init();
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index d684998ba2b0..d3cc3427caad 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -790,6 +790,8 @@ struct i40e_vsi {
/* VSI specific handlers */
irqreturn_t (*irq_handler)(int irq, void *data);
+
+ unsigned long *af_xdp_zc_qps; /* tracks AF_XDP ZC enabled qps */
} ____cacheline_internodealigned_in_smp;
struct i40e_netdev_priv {
@@ -1096,20 +1098,6 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
return !!vsi->xdp_prog;
}
-static inline struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
-{
- bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
- int qid = ring->queue_index;
-
- if (ring_is_xdp(ring))
- qid -= ring->vsi->alloc_queue_pairs;
-
- if (!xdp_on)
- return NULL;
-
- return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
-}
-
int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate);
int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 4c885801fa26..7874d0ec7fb0 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -2573,8 +2573,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return -EOPNOTSUPP;
/* only magic packet is supported */
- if (wol->wolopts && (wol->wolopts != WAKE_MAGIC)
- | (wol->wolopts != WAKE_FILTER))
+ if (wol->wolopts & ~WAKE_MAGIC)
return -EOPNOTSUPP;
/* is this a new value? */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index da62218eb70a..b1c265012c8a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -3064,6 +3064,26 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
}
/**
+ * i40e_xsk_umem - Retrieve the AF_XDP ZC if XDP and ZC is enabled
+ * @ring: The Tx or Rx ring
+ *
+ * Returns the UMEM or NULL.
+ **/
+static struct xdp_umem *i40e_xsk_umem(struct i40e_ring *ring)
+{
+ bool xdp_on = i40e_enabled_xdp_vsi(ring->vsi);
+ int qid = ring->queue_index;
+
+ if (ring_is_xdp(ring))
+ qid -= ring->vsi->alloc_queue_pairs;
+
+ if (!xdp_on || !test_bit(qid, ring->vsi->af_xdp_zc_qps))
+ return NULL;
+
+ return xdp_get_umem_from_qid(ring->vsi->netdev, qid);
+}
+
+/**
* i40e_configure_tx_ring - Configure a transmit ring context and rest
* @ring: The Tx ring to configure
*
@@ -10064,6 +10084,12 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
hash_init(vsi->mac_filter_hash);
vsi->irqs_ready = false;
+ if (type == I40E_VSI_MAIN) {
+ vsi->af_xdp_zc_qps = bitmap_zalloc(pf->num_lan_qps, GFP_KERNEL);
+ if (!vsi->af_xdp_zc_qps)
+ goto err_rings;
+ }
+
ret = i40e_set_num_rings_in_vsi(vsi);
if (ret)
goto err_rings;
@@ -10082,6 +10108,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
goto unlock_pf;
err_rings:
+ bitmap_free(vsi->af_xdp_zc_qps);
pf->next_vsi = i - 1;
kfree(vsi);
unlock_pf:
@@ -10162,6 +10189,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
+ bitmap_free(vsi->af_xdp_zc_qps);
i40e_vsi_free_arrays(vsi, true);
i40e_clear_rss_config_user(vsi);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 5fb4353c742b..31575c0bb884 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -146,12 +146,13 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps);
- struct timespec64 now;
+ struct timespec64 now, then;
+ then = ns_to_timespec64(delta);
mutex_lock(&pf->tmreg_lock);
i40e_ptp_read(pf, &now, NULL);
- timespec64_add_ns(&now, delta);
+ now = timespec64_add(now, then);
i40e_ptp_write(pf, (const struct timespec64 *)&now);
mutex_unlock(&pf->tmreg_lock);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index b5c182e688e3..1b17486543ac 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -102,6 +102,8 @@ static int i40e_xsk_umem_enable(struct i40e_vsi *vsi, struct xdp_umem *umem,
if (err)
return err;
+ set_bit(qid, vsi->af_xdp_zc_qps);
+
if_running = netif_running(vsi->netdev) && i40e_enabled_xdp_vsi(vsi);
if (if_running) {
@@ -148,6 +150,7 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
return err;
}
+ clear_bit(qid, vsi->af_xdp_zc_qps);
i40e_xsk_umem_dma_unmap(vsi, umem);
if (if_running) {
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 01fcfc6f3415..d2e2c50ce257 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -194,6 +194,8 @@
/* enable link status from external LINK_0 and LINK_1 pins */
#define E1000_CTRL_SWDPIN0 0x00040000 /* SWDPIN 0 value */
#define E1000_CTRL_SWDPIN1 0x00080000 /* SWDPIN 1 value */
+#define E1000_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */
+#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000 /* PHY PM enable */
#define E1000_CTRL_SDP0_DIR 0x00400000 /* SDP0 Data direction */
#define E1000_CTRL_SDP1_DIR 0x00800000 /* SDP1 Data direction */
#define E1000_CTRL_RST 0x04000000 /* Global reset */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 69b230c53fed..3269d8e94744 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -8740,9 +8740,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
struct e1000_hw *hw = &adapter->hw;
u32 ctrl, rctl, status;
u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
-#ifdef CONFIG_PM
- int retval = 0;
-#endif
+ bool wake;
rtnl_lock();
netif_device_detach(netdev);
@@ -8755,14 +8753,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
igb_clear_interrupt_scheme(adapter);
rtnl_unlock();
-#ifdef CONFIG_PM
- if (!runtime) {
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
- }
-#endif
-
status = rd32(E1000_STATUS);
if (status & E1000_STATUS_LU)
wufc &= ~E1000_WUFC_LNKC;
@@ -8779,10 +8769,6 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
}
ctrl = rd32(E1000_CTRL);
- /* advertise wake from D3Cold */
- #define E1000_CTRL_ADVD3WUC 0x00100000
- /* phy power management enable */
- #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
ctrl |= E1000_CTRL_ADVD3WUC;
wr32(E1000_CTRL, ctrl);
@@ -8796,12 +8782,15 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
wr32(E1000_WUFC, 0);
}
- *enable_wake = wufc || adapter->en_mng_pt;
- if (!*enable_wake)
+ wake = wufc || adapter->en_mng_pt;
+ if (!wake)
igb_power_down_link(adapter);
else
igb_power_up_link(adapter);
+ if (enable_wake)
+ *enable_wake = wake;
+
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant.
*/
@@ -8844,22 +8833,7 @@ static void igb_deliver_wake_packet(struct net_device *netdev)
static int __maybe_unused igb_suspend(struct device *dev)
{
- int retval;
- bool wake;
- struct pci_dev *pdev = to_pci_dev(dev);
-
- retval = __igb_shutdown(pdev, &wake, 0);
- if (retval)
- return retval;
-
- if (wake) {
- pci_prepare_to_sleep(pdev);
- } else {
- pci_wake_from_d3(pdev, false);
- pci_set_power_state(pdev, PCI_D3hot);
- }
-
- return 0;
+ return __igb_shutdown(to_pci_dev(dev), NULL, 0);
}
static int __maybe_unused igb_resume(struct device *dev)
@@ -8930,22 +8904,7 @@ static int __maybe_unused igb_runtime_idle(struct device *dev)
static int __maybe_unused igb_runtime_suspend(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- int retval;
- bool wake;
-
- retval = __igb_shutdown(pdev, &wake, 1);
- if (retval)
- return retval;
-
- if (wake) {
- pci_prepare_to_sleep(pdev);
- } else {
- pci_wake_from_d3(pdev, false);
- pci_set_power_state(pdev, PCI_D3hot);
- }
-
- return 0;
+ return __igb_shutdown(to_pci_dev(dev), NULL, 1);
}
static int __maybe_unused igb_runtime_resume(struct device *dev)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index cc4907f9ff02..2fb97967961c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -905,13 +905,12 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
struct pci_dev *pdev = adapter->pdev;
struct device *dev = &adapter->netdev->dev;
struct mii_bus *bus;
+ int err = -ENODEV;
- adapter->mii_bus = devm_mdiobus_alloc(dev);
- if (!adapter->mii_bus)
+ bus = devm_mdiobus_alloc(dev);
+ if (!bus)
return -ENOMEM;
- bus = adapter->mii_bus;
-
switch (hw->device_id) {
/* C3000 SoCs */
case IXGBE_DEV_ID_X550EM_A_KR:
@@ -949,12 +948,15 @@ s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw)
*/
hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22;
- return mdiobus_register(bus);
+ err = mdiobus_register(bus);
+ if (!err) {
+ adapter->mii_bus = bus;
+ return 0;
+ }
ixgbe_no_mii_bus:
devm_mdiobus_free(dev, bus);
- adapter->mii_bus = NULL;
- return -ENODEV;
+ return err;
}
/**
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 71c65cc17904..d3eaf2ceaa39 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -858,6 +858,7 @@ void mlx5e_close_channels(struct mlx5e_channels *chs);
* switching channels
*/
typedef int (*mlx5e_fp_hw_modify)(struct mlx5e_priv *priv);
+int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv);
int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
struct mlx5e_channels *new_chs,
mlx5e_fp_hw_modify hw_modify);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
index 122927f3a600..d5e5afbdca6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c
@@ -96,9 +96,6 @@ int mlx5_port_query_eth_proto(struct mlx5_core_dev *dev, u8 port, bool ext,
if (!eproto)
return -EINVAL;
- if (ext != MLX5_CAP_PCAM_FEATURE(dev, ptys_extended_ethernet))
- return -EOPNOTSUPP;
-
err = mlx5_query_port_ptys(dev, out, sizeof(out), MLX5_PTYS_EN, port);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
index eac245a93f91..4ab0d030b544 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port_buffer.c
@@ -122,7 +122,9 @@ out:
return err;
}
-/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B]) */
+/* xoff = ((301+2.16 * len [m]) * speed [Gbps] + 2.72 MTU [B])
+ * minimum speed value is 40Gbps
+ */
static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
{
u32 speed;
@@ -130,10 +132,9 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
int err;
err = mlx5e_port_linkspeed(priv->mdev, &speed);
- if (err) {
- mlx5_core_warn(priv->mdev, "cannot get port speed\n");
- return 0;
- }
+ if (err)
+ speed = SPEED_40000;
+ speed = max_t(u32, speed, SPEED_40000);
xoff = (301 + 216 * priv->dcbx.cable_len / 100) * speed / 1000 + 272 * mtu / 100;
@@ -142,7 +143,7 @@ static u32 calculate_xoff(struct mlx5e_priv *priv, unsigned int mtu)
}
static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
- u32 xoff, unsigned int mtu)
+ u32 xoff, unsigned int max_mtu)
{
int i;
@@ -154,11 +155,12 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
}
if (port_buffer->buffer[i].size <
- (xoff + mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
+ (xoff + max_mtu + (1 << MLX5E_BUFFER_CELL_SHIFT)))
return -ENOMEM;
port_buffer->buffer[i].xoff = port_buffer->buffer[i].size - xoff;
- port_buffer->buffer[i].xon = port_buffer->buffer[i].xoff - mtu;
+ port_buffer->buffer[i].xon =
+ port_buffer->buffer[i].xoff - max_mtu;
}
return 0;
@@ -166,7 +168,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
/**
* update_buffer_lossy()
- * mtu: device's MTU
+ * max_mtu: netdev's max_mtu
* pfc_en: <input> current pfc configuration
* buffer: <input> current prio to buffer mapping
* xoff: <input> xoff value
@@ -183,7 +185,7 @@ static int update_xoff_threshold(struct mlx5e_port_buffer *port_buffer,
* Return 0 if no error.
* Set change to true if buffer configuration is modified.
*/
-static int update_buffer_lossy(unsigned int mtu,
+static int update_buffer_lossy(unsigned int max_mtu,
u8 pfc_en, u8 *buffer, u32 xoff,
struct mlx5e_port_buffer *port_buffer,
bool *change)
@@ -220,7 +222,7 @@ static int update_buffer_lossy(unsigned int mtu,
}
if (changed) {
- err = update_xoff_threshold(port_buffer, xoff, mtu);
+ err = update_xoff_threshold(port_buffer, xoff, max_mtu);
if (err)
return err;
@@ -230,6 +232,7 @@ static int update_buffer_lossy(unsigned int mtu,
return 0;
}
+#define MINIMUM_MAX_MTU 9216
int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
u32 change, unsigned int mtu,
struct ieee_pfc *pfc,
@@ -241,12 +244,14 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
bool update_prio2buffer = false;
u8 buffer[MLX5E_MAX_PRIORITY];
bool update_buffer = false;
+ unsigned int max_mtu;
u32 total_used = 0;
u8 curr_pfc_en;
int err;
int i;
mlx5e_dbg(HW, priv, "%s: change=%x\n", __func__, change);
+ max_mtu = max_t(unsigned int, priv->netdev->max_mtu, MINIMUM_MAX_MTU);
err = mlx5e_port_query_buffer(priv, &port_buffer);
if (err)
@@ -254,7 +259,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (change & MLX5E_PORT_BUFFER_CABLE_LEN) {
update_buffer = true;
- err = update_xoff_threshold(&port_buffer, xoff, mtu);
+ err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
if (err)
return err;
}
@@ -264,7 +269,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (err)
return err;
- err = update_buffer_lossy(mtu, pfc->pfc_en, buffer, xoff,
+ err = update_buffer_lossy(max_mtu, pfc->pfc_en, buffer, xoff,
&port_buffer, &update_buffer);
if (err)
return err;
@@ -276,8 +281,8 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
if (err)
return err;
- err = update_buffer_lossy(mtu, curr_pfc_en, prio2buffer, xoff,
- &port_buffer, &update_buffer);
+ err = update_buffer_lossy(max_mtu, curr_pfc_en, prio2buffer,
+ xoff, &port_buffer, &update_buffer);
if (err)
return err;
}
@@ -301,7 +306,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
return -EINVAL;
update_buffer = true;
- err = update_xoff_threshold(&port_buffer, xoff, mtu);
+ err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
if (err)
return err;
}
@@ -309,7 +314,7 @@ int mlx5e_port_manual_buffer_config(struct mlx5e_priv *priv,
/* Need to update buffer configuration if xoff value is changed */
if (!update_buffer && xoff != priv->dcbx.xoff) {
update_buffer = true;
- err = update_xoff_threshold(&port_buffer, xoff, mtu);
+ err = update_xoff_threshold(&port_buffer, xoff, max_mtu);
if (err)
return err;
}
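
The xoff threshold above follows the formula in the comment, xoff = (301 + 2.16 * len[m]) * speed[Gbps] + 2.72 * MTU[B], with the fractional coefficients carried as 216/100 and 272/100 and the link speed kept in Mbps (divided by 1000 at the end) so the whole computation stays in integer arithmetic; the speed is also clamped to at least 40 Gbps. A minimal user-space sketch of that arithmetic, with assumed inputs (100 m cable, 100 Gbps link, 9000-byte MTU), not taken from the patch:

/* Standalone sketch of calculate_xoff()'s integer arithmetic. */
#include <stdio.h>

#define SPEED_40000	40000	/* Mbps, mirroring the kernel constant */

static unsigned int calc_xoff(unsigned int cable_len_m,
			      unsigned int speed_mbps, unsigned int mtu)
{
	if (speed_mbps < SPEED_40000)	/* minimum speed value is 40Gbps */
		speed_mbps = SPEED_40000;

	/* 2.16 -> 216/100, 2.72 -> 272/100; speed/1000 converts Mbps to Gbps */
	return (301 + 216 * cable_len_m / 100) * speed_mbps / 1000 +
	       272 * mtu / 100;
}

int main(void)
{
	/* assumed example values */
	printf("xoff = %u bytes\n", calc_xoff(100, 100000, 9000));
	return 0;
}
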
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
index 9d38e62cdf24..476dd97f7f2f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c
@@ -186,12 +186,17 @@ static int mlx5e_tx_reporter_recover_from_ctx(struct mlx5e_tx_err_ctx *err_ctx)
static int mlx5e_tx_reporter_recover_all(struct mlx5e_priv *priv)
{
- int err;
+ int err = 0;
rtnl_lock();
mutex_lock(&priv->state_lock);
- mlx5e_close_locked(priv->netdev);
- err = mlx5e_open_locked(priv->netdev);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto out;
+
+ err = mlx5e_safe_reopen_channels(priv);
+
+out:
mutex_unlock(&priv->state_lock);
rtnl_unlock();
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
index fa2a3c444cdc..eec07b34b4ad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c
@@ -39,6 +39,10 @@ static int get_route_and_out_devs(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
+ if (!(mlx5e_eswitch_rep(*out_dev) &&
+ mlx5e_is_uplink_rep(netdev_priv(*out_dev))))
+ return -EOPNOTSUPP;
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 3078491cc0d0..1539cf3de5dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -45,7 +45,9 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
if (err)
return err;
+ mutex_lock(&mdev->mlx5e_res.td.list_lock);
list_add(&tir->list, &mdev->mlx5e_res.td.tirs_list);
+ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
return 0;
}
@@ -53,8 +55,10 @@ int mlx5e_create_tir(struct mlx5_core_dev *mdev,
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
struct mlx5e_tir *tir)
{
+ mutex_lock(&mdev->mlx5e_res.td.list_lock);
mlx5_core_destroy_tir(mdev, tir->tirn);
list_del(&tir->list);
+ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
}
static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
@@ -114,6 +118,7 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
}
INIT_LIST_HEAD(&mdev->mlx5e_res.td.tirs_list);
+ mutex_init(&mdev->mlx5e_res.td.list_lock);
return 0;
@@ -141,15 +146,17 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_tir *tir;
- int err = -ENOMEM;
+ int err = 0;
u32 tirn = 0;
int inlen;
void *in;
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
+ if (!in) {
+ err = -ENOMEM;
goto out;
+ }
if (enable_uc_lb)
MLX5_SET(modify_tir_in, in, ctx.self_lb_block,
@@ -157,6 +164,7 @@ int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
+ mutex_lock(&mdev->mlx5e_res.td.list_lock);
list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
tirn = tir->tirn;
err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
@@ -168,6 +176,7 @@ out:
kvfree(in);
if (err)
netdev_err(priv->netdev, "refresh tir(0x%x) failed, %d\n", tirn, err);
+ mutex_unlock(&mdev->mlx5e_res.td.list_lock);
return err;
}
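
The new list_lock above makes TIR creation, destruction and the refresh walk in mlx5e_refresh_tirs() serialize on a single mutex, so the list cannot change under the iteration. A minimal pthread sketch of that pattern (assumed, not the driver code; names are illustrative):

#include <pthread.h>
#include <stdio.h>

struct tir {
	int tirn;
	struct tir *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct tir *tirs_list;

static void add_tir(struct tir *t)
{
	/* mutation happens only with the lock held */
	pthread_mutex_lock(&list_lock);
	t->next = tirs_list;
	tirs_list = t;
	pthread_mutex_unlock(&list_lock);
}

static void refresh_tirs(void)
{
	/* the walk holds the same lock, so entries cannot disappear mid-loop */
	pthread_mutex_lock(&list_lock);
	for (struct tir *t = tirs_list; t; t = t->next)
		printf("refreshing tir 0x%x\n", t->tirn);
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct tir a = { .tirn = 0x10 }, b = { .tirn = 0x11 };

	add_tir(&a);
	add_tir(&b);
	refresh_tirs();
	return 0;
}
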
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index a0987cc5fe4a..76a3d01a489e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -603,16 +603,18 @@ static void ptys2ethtool_supported_link(struct mlx5_core_dev *mdev,
__ETHTOOL_LINK_MODE_MASK_NBITS);
}
-static void ptys2ethtool_adver_link(struct mlx5_core_dev *mdev,
- unsigned long *advertising_modes,
- u32 eth_proto_cap)
+static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
+ u32 eth_proto_cap, bool ext)
{
unsigned long proto_cap = eth_proto_cap;
struct ptys2ethtool_config *table;
u32 max_size;
int proto;
- mlx5e_ethtool_get_speed_arr(mdev, &table, &max_size);
+ table = ext ? ptys2ext_ethtool_table : ptys2legacy_ethtool_table;
+ max_size = ext ? ARRAY_SIZE(ptys2ext_ethtool_table) :
+ ARRAY_SIZE(ptys2legacy_ethtool_table);
+
for_each_set_bit(proto, &proto_cap, max_size)
bitmap_or(advertising_modes, advertising_modes,
table[proto].advertised,
@@ -794,12 +796,12 @@ static void get_supported(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
ethtool_link_ksettings_add_link_mode(link_ksettings, supported, Pause);
}
-static void get_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_cap,
- u8 tx_pause, u8 rx_pause,
- struct ethtool_link_ksettings *link_ksettings)
+static void get_advertising(u32 eth_proto_cap, u8 tx_pause, u8 rx_pause,
+ struct ethtool_link_ksettings *link_ksettings,
+ bool ext)
{
unsigned long *advertising = link_ksettings->link_modes.advertising;
- ptys2ethtool_adver_link(mdev, advertising, eth_proto_cap);
+ ptys2ethtool_adver_link(advertising, eth_proto_cap, ext);
if (rx_pause)
ethtool_link_ksettings_add_link_mode(link_ksettings, advertising, Pause);
@@ -854,8 +856,9 @@ static void get_lp_advertising(struct mlx5_core_dev *mdev, u32 eth_proto_lp,
struct ethtool_link_ksettings *link_ksettings)
{
unsigned long *lp_advertising = link_ksettings->link_modes.lp_advertising;
+ bool ext = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
- ptys2ethtool_adver_link(mdev, lp_advertising, eth_proto_lp);
+ ptys2ethtool_adver_link(lp_advertising, eth_proto_lp, ext);
}
int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
@@ -872,6 +875,7 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
u8 an_disable_admin;
u8 an_status;
u8 connector_type;
+ bool admin_ext;
bool ext;
int err;
@@ -886,6 +890,19 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
eth_proto_capability);
eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
eth_proto_admin);
+ /* Fields: eth_proto_admin and ext_eth_proto_admin are
+ * mutually exclusive. Hence try reading legacy advertising
+ * when extended advertising is zero.
+ * admin_ext indicates how eth_proto_admin should be
+ * interpreted
+ */
+ admin_ext = ext;
+ if (ext && !eth_proto_admin) {
+ eth_proto_admin = MLX5_GET_ETH_PROTO(ptys_reg, out, false,
+ eth_proto_admin);
+ admin_ext = false;
+ }
+
eth_proto_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext,
eth_proto_oper);
eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
@@ -899,7 +916,8 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
get_supported(mdev, eth_proto_cap, link_ksettings);
- get_advertising(mdev, eth_proto_admin, tx_pause, rx_pause, link_ksettings);
+ get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings,
+ admin_ext);
get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings);
eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
@@ -997,19 +1015,17 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
#define MLX5E_PTYS_EXT ((1ULL << ETHTOOL_LINK_MODE_50000baseKR_Full_BIT) - 1)
- ext_requested = (link_ksettings->link_modes.advertising[0] >
- MLX5E_PTYS_EXT);
+ ext_requested = !!(link_ksettings->link_modes.advertising[0] >
+ MLX5E_PTYS_EXT ||
+ link_ksettings->link_modes.advertising[1]);
ext_supported = MLX5_CAP_PCAM_FEATURE(mdev, ptys_extended_ethernet);
-
- /*when ptys_extended_ethernet is set legacy link modes are deprecated */
- if (ext_requested != ext_supported)
- return -EPROTONOSUPPORT;
+ ext_requested &= ext_supported;
speed = link_ksettings->base.speed;
ethtool2ptys_adver_func = ext_requested ?
mlx5e_ethtool2ptys_ext_adver_link :
mlx5e_ethtool2ptys_adver_link;
- err = mlx5_port_query_eth_proto(mdev, 1, ext_supported, &eproto);
+ err = mlx5_port_query_eth_proto(mdev, 1, ext_requested, &eproto);
if (err) {
netdev_err(priv->netdev, "%s: query port eth proto failed: %d\n",
__func__, err);
@@ -1037,7 +1053,7 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
if (!an_changes && link_modes == eproto.admin)
goto out;
- mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_supported);
+ mlx5_port_set_eth_ptys(mdev, an_disable, link_modes, ext_requested);
mlx5_toggle_port_link(mdev);
out:
@@ -1752,7 +1768,8 @@ static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
struct mlx5e_channel *c;
int i;
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state) ||
+ priv->channels.params.xdp_prog)
return 0;
for (i = 0; i < channels->num; i++) {
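
The en_ethtool.c change above treats a set_link_ksettings request as "extended" when any advertised mode sits beyond ETHTOOL_LINK_MODE_50000baseKR_Full_BIT, either a high bit in word 0 (hence the comparison against the (1ULL << bit) - 1 mask) or anything at all in word 1, and then masks the result with device support instead of rejecting the request. A small sketch of that bit test; the bit position used here is an assumed placeholder, not the real ethtool value:

#include <stdbool.h>
#include <stdio.h>

#define EXT_FIRST_BIT	36	/* assumed stand-in for 50000baseKR_Full */
#define PTYS_EXT_MASK	((1ULL << EXT_FIRST_BIT) - 1)

static bool ext_requested(unsigned long long adv0, unsigned long long adv1,
			  bool ext_supported)
{
	bool ext = (adv0 > PTYS_EXT_MASK) || adv1;

	return ext && ext_supported;	/* ext_requested &= ext_supported */
}

int main(void)
{
	/* legacy-only advertising vs. one extended mode set */
	printf("%d\n", ext_requested(1ULL << 10, 0, true));            /* 0 */
	printf("%d\n", ext_requested(1ULL << EXT_FIRST_BIT, 0, true)); /* 1 */
	return 0;
}
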
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index b5fdbd3190d9..f7eb521db580 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -951,7 +951,11 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
if (params->rx_dim_enabled)
__set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
- if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE))
+ /* We disable csum_complete when XDP is enabled since
+ * XDP programs might manipulate packets which will render
+ * skb->checksum incorrect.
+ */
+ if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE) || c->xdp)
__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
return 0;
@@ -2937,6 +2941,14 @@ int mlx5e_safe_switch_channels(struct mlx5e_priv *priv,
return 0;
}
+int mlx5e_safe_reopen_channels(struct mlx5e_priv *priv)
+{
+ struct mlx5e_channels new_channels = {};
+
+ new_channels.params = priv->channels.params;
+ return mlx5e_safe_switch_channels(priv, &new_channels, NULL);
+}
+
void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
@@ -4161,11 +4173,10 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
if (!report_failed)
goto unlock;
- mlx5e_close_locked(priv->netdev);
- err = mlx5e_open_locked(priv->netdev);
+ err = mlx5e_safe_reopen_channels(priv);
if (err)
netdev_err(priv->netdev,
- "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
+ "mlx5e_safe_reopen_channels failed recovering from a tx_timeout, err(%d).\n",
err);
unlock:
@@ -4553,7 +4564,7 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
{
enum mlx5e_traffic_types tt;
- rss_params->hfunc = ETH_RSS_HASH_XOR;
+ rss_params->hfunc = ETH_RSS_HASH_TOP;
netdev_rss_key_fill(rss_params->toeplitz_hash_key,
sizeof(rss_params->toeplitz_hash_key));
mlx5e_build_default_indir_rqt(rss_params->indirection_rqt,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 3dde5c7e0739..c3b3002ff62f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -692,7 +692,14 @@ static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
{
*proto = ((struct ethhdr *)skb->data)->h_proto;
*proto = __vlan_get_protocol(skb, *proto, network_depth);
- return (*proto == htons(ETH_P_IP) || *proto == htons(ETH_P_IPV6));
+
+ if (*proto == htons(ETH_P_IP))
+ return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));
+
+ if (*proto == htons(ETH_P_IPV6))
+ return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));
+
+ return false;
}
static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
@@ -712,17 +719,6 @@ static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
rq->stats->ecn_mark += !!rc;
}
-static u32 mlx5e_get_fcs(const struct sk_buff *skb)
-{
- const void *fcs_bytes;
- u32 _fcs_bytes;
-
- fcs_bytes = skb_header_pointer(skb, skb->len - ETH_FCS_LEN,
- ETH_FCS_LEN, &_fcs_bytes);
-
- return __get_unaligned_cpu32(fcs_bytes);
-}
-
static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
{
void *ip_p = skb->data + network_depth;
@@ -733,6 +729,68 @@ static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
+#define MAX_PADDING 8
+
+static void
+tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
+ struct mlx5e_rq_stats *stats)
+{
+ stats->csum_complete_tail_slow++;
+ skb->csum = csum_block_add(skb->csum,
+ skb_checksum(skb, offset, len, 0),
+ offset);
+}
+
+static void
+tail_padding_csum(struct sk_buff *skb, int offset,
+ struct mlx5e_rq_stats *stats)
+{
+ u8 tail_padding[MAX_PADDING];
+ int len = skb->len - offset;
+ void *tail;
+
+ if (unlikely(len > MAX_PADDING)) {
+ tail_padding_csum_slow(skb, offset, len, stats);
+ return;
+ }
+
+ tail = skb_header_pointer(skb, offset, len, tail_padding);
+ if (unlikely(!tail)) {
+ tail_padding_csum_slow(skb, offset, len, stats);
+ return;
+ }
+
+ stats->csum_complete_tail++;
+ skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
+}
+
+static void
+mlx5e_skb_padding_csum(struct sk_buff *skb, int network_depth, __be16 proto,
+ struct mlx5e_rq_stats *stats)
+{
+ struct ipv6hdr *ip6;
+ struct iphdr *ip4;
+ int pkt_len;
+
+ switch (proto) {
+ case htons(ETH_P_IP):
+ ip4 = (struct iphdr *)(skb->data + network_depth);
+ pkt_len = network_depth + ntohs(ip4->tot_len);
+ break;
+ case htons(ETH_P_IPV6):
+ ip6 = (struct ipv6hdr *)(skb->data + network_depth);
+ pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
+ break;
+ default:
+ return;
+ }
+
+ if (likely(pkt_len >= skb->len))
+ return;
+
+ tail_padding_csum(skb, pkt_len, stats);
+}
+
static inline void mlx5e_handle_csum(struct net_device *netdev,
struct mlx5_cqe64 *cqe,
struct mlx5e_rq *rq,
@@ -752,7 +810,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
return;
}
- if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)))
+ /* True when explicitly set via priv flag, or XDP prog is loaded */
+ if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state))
goto csum_unnecessary;
/* CQE csum doesn't cover padding octets in short ethernet
@@ -780,18 +839,15 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
skb->csum = csum_partial(skb->data + ETH_HLEN,
network_depth - ETH_HLEN,
skb->csum);
- if (unlikely(netdev->features & NETIF_F_RXFCS))
- skb->csum = csum_block_add(skb->csum,
- (__force __wsum)mlx5e_get_fcs(skb),
- skb->len - ETH_FCS_LEN);
+
+ mlx5e_skb_padding_csum(skb, network_depth, proto, stats);
stats->csum_complete++;
return;
}
csum_unnecessary:
if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
- ((cqe->hds_ip_ext & CQE_L4_OK) ||
- (get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_NONE)))) {
+ (cqe->hds_ip_ext & CQE_L4_OK))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (cqe_is_tunneled(cqe)) {
skb->csum_level = 1;
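
With the FCS special case removed, the CHECKSUM_COMPLETE path above instead derives the real packet length from the IP header (tot_len for IPv4, header plus payload_len for IPv6) and, when the frame is longer, folds the trailing padding bytes into skb->csum so the hardware sum still verifies. A user-space sketch of folding a short padding tail into a ones'-complement sum; this is an illustration of the idea, not the kernel csum API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_PADDING 8	/* fast path handles at most this many tail bytes */

static uint32_t csum_add(uint32_t sum, const uint8_t *buf, int len)
{
	while (len > 1) {
		sum += (buf[0] << 8) | buf[1];
		buf += 2;
		len -= 2;
	}
	if (len)
		sum += buf[0] << 8;
	while (sum >> 16)			/* fold carries */
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

int main(void)
{
	uint8_t frame[64];
	int pkt_len = 60, frame_len = 64;	/* assumed: 4 padding bytes */

	memset(frame, 0, sizeof(frame));
	uint32_t sum = csum_add(0, frame, pkt_len);		   /* payload */
	sum = csum_add(sum, frame + pkt_len, frame_len - pkt_len); /* tail */
	printf("sum with padding folded in: 0x%04x\n", sum);
	return 0;
}
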
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
index 1a78e05cbba8..b75aa8b8bf04 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -59,6 +59,8 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete_tail_slow) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_redirect) },
@@ -151,6 +153,8 @@ static void mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv)
s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
s->rx_csum_none += rq_stats->csum_none;
s->rx_csum_complete += rq_stats->csum_complete;
+ s->rx_csum_complete_tail += rq_stats->csum_complete_tail;
+ s->rx_csum_complete_tail_slow += rq_stats->csum_complete_tail_slow;
s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
s->rx_xdp_drop += rq_stats->xdp_drop;
@@ -1190,6 +1194,8 @@ static const struct counter_desc rq_stats_desc[] = {
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete_tail_slow) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 4640d4f986f8..16c3b785f282 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -71,6 +71,8 @@ struct mlx5e_sw_stats {
u64 rx_csum_unnecessary;
u64 rx_csum_none;
u64 rx_csum_complete;
+ u64 rx_csum_complete_tail;
+ u64 rx_csum_complete_tail_slow;
u64 rx_csum_unnecessary_inner;
u64 rx_xdp_drop;
u64 rx_xdp_redirect;
@@ -181,6 +183,8 @@ struct mlx5e_rq_stats {
u64 packets;
u64 bytes;
u64 csum_complete;
+ u64 csum_complete_tail;
+ u64 csum_complete_tail_slow;
u64 csum_unnecessary;
u64 csum_unnecessary_inner;
u64 csum_none;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index b4967a0ff8c7..d75dc44eb2ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -2158,6 +2158,52 @@ static bool csum_offload_supported(struct mlx5e_priv *priv,
return true;
}
+struct ip_ttl_word {
+ __u8 ttl;
+ __u8 protocol;
+ __sum16 check;
+};
+
+struct ipv6_hoplimit_word {
+ __be16 payload_len;
+ __u8 nexthdr;
+ __u8 hop_limit;
+};
+
+static bool is_action_keys_supported(const struct flow_action_entry *act)
+{
+ u32 mask, offset;
+ u8 htype;
+
+ htype = act->mangle.htype;
+ offset = act->mangle.offset;
+ mask = ~act->mangle.mask;
+ /* For IPv4 & IPv6 header check 4 byte word,
+ * to determine that modified fields
+ * are NOT ttl & hop_limit only.
+ */
+ if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
+ struct ip_ttl_word *ttl_word =
+ (struct ip_ttl_word *)&mask;
+
+ if (offset != offsetof(struct iphdr, ttl) ||
+ ttl_word->protocol ||
+ ttl_word->check) {
+ return true;
+ }
+ } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
+ struct ipv6_hoplimit_word *hoplimit_word =
+ (struct ipv6_hoplimit_word *)&mask;
+
+ if (offset != offsetof(struct ipv6hdr, payload_len) ||
+ hoplimit_word->payload_len ||
+ hoplimit_word->nexthdr) {
+ return true;
+ }
+ }
+ return false;
+}
+
static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
struct flow_action *flow_action,
u32 actions,
@@ -2165,9 +2211,9 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
{
const struct flow_action_entry *act;
bool modify_ip_header;
- u8 htype, ip_proto;
void *headers_v;
u16 ethertype;
+ u8 ip_proto;
int i;
if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
@@ -2187,9 +2233,7 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
act->id != FLOW_ACTION_ADD)
continue;
- htype = act->mangle.htype;
- if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 ||
- htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
+ if (is_action_keys_supported(act)) {
modify_ip_header = true;
break;
}
@@ -2340,15 +2384,22 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv,
return 0;
}
-static inline int cmp_encap_info(struct ip_tunnel_key *a,
- struct ip_tunnel_key *b)
+struct encap_key {
+ struct ip_tunnel_key *ip_tun_key;
+ int tunnel_type;
+};
+
+static inline int cmp_encap_info(struct encap_key *a,
+ struct encap_key *b)
{
- return memcmp(a, b, sizeof(*a));
+ return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
+ a->tunnel_type != b->tunnel_type;
}
-static inline int hash_encap_info(struct ip_tunnel_key *key)
+static inline int hash_encap_info(struct encap_key *key)
{
- return jhash(key, sizeof(*key), 0);
+ return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
+ key->tunnel_type);
}
@@ -2379,7 +2430,7 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5_esw_flow_attr *attr = flow->esw_attr;
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct ip_tunnel_info *tun_info;
- struct ip_tunnel_key *key;
+ struct encap_key key, e_key;
struct mlx5e_encap_entry *e;
unsigned short family;
uintptr_t hash_key;
@@ -2389,13 +2440,16 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
parse_attr = attr->parse_attr;
tun_info = &parse_attr->tun_info[out_index];
family = ip_tunnel_info_af(tun_info);
- key = &tun_info->key;
+ key.ip_tun_key = &tun_info->key;
+ key.tunnel_type = mlx5e_tc_tun_get_type(mirred_dev);
- hash_key = hash_encap_info(key);
+ hash_key = hash_encap_info(&key);
hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
encap_hlist, hash_key) {
- if (!cmp_encap_info(&e->tun_info.key, key)) {
+ e_key.ip_tun_key = &e->tun_info.key;
+ e_key.tunnel_type = e->tunnel_type;
+ if (!cmp_encap_info(&e_key, &key)) {
found = true;
break;
}
@@ -2657,7 +2711,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
- err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
+ err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
parse_attr, hdrs, extack);
if (err)
return err;
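
is_action_keys_supported() above inverts the pedit mask and overlays a small struct on the resulting 32-bit word, so a header rewrite is accepted as "TTL/hop_limit only" when the offset points at the right word and the neighbouring fields in that word are left untouched. A user-space sketch of the same overlay trick; the layout mirrors struct iphdr and the literal mask values assume a little-endian host:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ip_ttl_word {
	uint8_t  ttl;
	uint8_t  protocol;
	uint16_t check;
};

static bool touches_more_than_ttl(uint32_t pedit_mask)
{
	uint32_t mask = ~pedit_mask;	/* bits actually being modified */
	struct ip_ttl_word w;

	memcpy(&w, &mask, sizeof(w));
	return w.protocol || w.check;
}

int main(void)
{
	/* pedit masks keep unmodified bits set; ~mask exposes modified bits */
	printf("ttl only:  %d\n", touches_more_than_ttl(~0x000000ffu)); /* 0 */
	printf("ttl+proto: %d\n", touches_more_than_ttl(~0x0000ffffu)); /* 1 */
	return 0;
}
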
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index ecd2c747f726..8a67fd197b79 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -105,8 +105,7 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
- if (vport)
- MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
in, nic_vport_context);
@@ -134,8 +133,7 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
MLX5_SET(modify_esw_vport_context_in, in, opcode,
MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
- if (vport)
- MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+ MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}
@@ -431,6 +429,8 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
{
int err;
+ memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));
+
err = esw_create_legacy_vepa_table(esw);
if (err)
return err;
@@ -2157,6 +2157,7 @@ static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
/* Star rule to forward all traffic to uplink vport */
memset(spec, 0, sizeof(*spec));
+ memset(&dest, 0, sizeof(dest));
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport.num = MLX5_VPORT_UPLINK;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index f2260391be5b..9b2d78ee22b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1611,6 +1611,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw, int nvports)
{
int err;
+ memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
err = esw_create_offloads_fdb_tables(esw, nvports);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
index 5cf5f2a9d51f..22a2ef111514 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/tls.c
@@ -148,14 +148,16 @@ static int mlx5_fpga_tls_alloc_swid(struct idr *idr, spinlock_t *idr_spinlock,
return ret;
}
-static void mlx5_fpga_tls_release_swid(struct idr *idr,
- spinlock_t *idr_spinlock, u32 swid)
+static void *mlx5_fpga_tls_release_swid(struct idr *idr,
+ spinlock_t *idr_spinlock, u32 swid)
{
unsigned long flags;
+ void *ptr;
spin_lock_irqsave(idr_spinlock, flags);
- idr_remove(idr, swid);
+ ptr = idr_remove(idr, swid);
spin_unlock_irqrestore(idr_spinlock, flags);
+ return ptr;
}
static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
@@ -165,20 +167,12 @@ static void mlx_tls_kfree_complete(struct mlx5_fpga_conn *conn,
kfree(buf);
}
-struct mlx5_teardown_stream_context {
- struct mlx5_fpga_tls_command_context cmd;
- u32 swid;
-};
-
static void
mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
struct mlx5_fpga_device *fdev,
struct mlx5_fpga_tls_command_context *cmd,
struct mlx5_fpga_dma_buf *resp)
{
- struct mlx5_teardown_stream_context *ctx =
- container_of(cmd, struct mlx5_teardown_stream_context, cmd);
-
if (resp) {
u32 syndrome = MLX5_GET(tls_resp, resp->sg[0].data, syndrome);
@@ -186,14 +180,6 @@ mlx5_fpga_tls_teardown_completion(struct mlx5_fpga_conn *conn,
mlx5_fpga_err(fdev,
"Teardown stream failed with syndrome = %d",
syndrome);
- else if (MLX5_GET(tls_cmd, cmd->buf.sg[0].data, direction_sx))
- mlx5_fpga_tls_release_swid(&fdev->tls->tx_idr,
- &fdev->tls->tx_idr_spinlock,
- ctx->swid);
- else
- mlx5_fpga_tls_release_swid(&fdev->tls->rx_idr,
- &fdev->tls->rx_idr_spinlock,
- ctx->swid);
}
mlx5_fpga_tls_put_command_ctx(cmd);
}
@@ -225,8 +211,14 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
rcu_read_lock();
flow = idr_find(&mdev->fpga->tls->rx_idr, ntohl(handle));
- rcu_read_unlock();
+ if (unlikely(!flow)) {
+ rcu_read_unlock();
+ WARN_ONCE(1, "Received NULL pointer for handle\n");
+ kfree(buf);
+ return -EINVAL;
+ }
mlx5_fpga_tls_flow_to_cmd(flow, cmd);
+ rcu_read_unlock();
MLX5_SET(tls_cmd, cmd, swid, ntohl(handle));
MLX5_SET64(tls_cmd, cmd, tls_rcd_sn, be64_to_cpu(rcd_sn));
@@ -238,6 +230,8 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
buf->complete = mlx_tls_kfree_complete;
ret = mlx5_fpga_sbu_conn_sendmsg(mdev->fpga->tls->conn, buf);
+ if (ret < 0)
+ kfree(buf);
return ret;
}
@@ -245,7 +239,7 @@ int mlx5_fpga_tls_resync_rx(struct mlx5_core_dev *mdev, u32 handle, u32 seq,
static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
void *flow, u32 swid, gfp_t flags)
{
- struct mlx5_teardown_stream_context *ctx;
+ struct mlx5_fpga_tls_command_context *ctx;
struct mlx5_fpga_dma_buf *buf;
void *cmd;
@@ -253,7 +247,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
if (!ctx)
return;
- buf = &ctx->cmd.buf;
+ buf = &ctx->buf;
cmd = (ctx + 1);
MLX5_SET(tls_cmd, cmd, command_type, CMD_TEARDOWN_STREAM);
MLX5_SET(tls_cmd, cmd, swid, swid);
@@ -264,8 +258,7 @@ static void mlx5_fpga_tls_send_teardown_cmd(struct mlx5_core_dev *mdev,
buf->sg[0].data = cmd;
buf->sg[0].size = MLX5_TLS_COMMAND_SIZE;
- ctx->swid = swid;
- mlx5_fpga_tls_cmd_send(mdev->fpga, &ctx->cmd,
+ mlx5_fpga_tls_cmd_send(mdev->fpga, ctx,
mlx5_fpga_tls_teardown_completion);
}
@@ -275,13 +268,14 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
struct mlx5_fpga_tls *tls = mdev->fpga->tls;
void *flow;
- rcu_read_lock();
if (direction_sx)
- flow = idr_find(&tls->tx_idr, swid);
+ flow = mlx5_fpga_tls_release_swid(&tls->tx_idr,
+ &tls->tx_idr_spinlock,
+ swid);
else
- flow = idr_find(&tls->rx_idr, swid);
-
- rcu_read_unlock();
+ flow = mlx5_fpga_tls_release_swid(&tls->rx_idr,
+ &tls->rx_idr_spinlock,
+ swid);
if (!flow) {
mlx5_fpga_err(mdev->fpga, "No flow information for swid %u\n",
@@ -289,6 +283,7 @@ void mlx5_fpga_tls_del_flow(struct mlx5_core_dev *mdev, u32 swid,
return;
}
+ synchronize_rcu(); /* before kfree(flow) */
mlx5_fpga_tls_send_teardown_cmd(mdev, flow, swid, flags);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 70cc906a102b..76716419370d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -164,26 +164,6 @@ static struct mlx5_profile profile[] = {
.size = 8,
.limit = 4
},
- .mr_cache[16] = {
- .size = 8,
- .limit = 4
- },
- .mr_cache[17] = {
- .size = 8,
- .limit = 4
- },
- .mr_cache[18] = {
- .size = 8,
- .limit = 4
- },
- .mr_cache[19] = {
- .size = 4,
- .limit = 2
- },
- .mr_cache[20] = {
- .size = 4,
- .limit = 2
- },
},
};
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index d23d53c0e284..f26a4ca29363 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -568,7 +568,7 @@ static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
return 0;
- emad_wq = alloc_workqueue("mlxsw_core_emad", WQ_MEM_RECLAIM, 0);
+ emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
if (!emad_wq)
return -ENOMEM;
mlxsw_core->emad_wq = emad_wq;
@@ -1958,10 +1958,10 @@ static int __init mlxsw_core_module_init(void)
{
int err;
- mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
+ mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
if (!mlxsw_wq)
return -ENOMEM;
- mlxsw_owq = alloc_ordered_workqueue("%s_ordered", WQ_MEM_RECLAIM,
+ mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
mlxsw_core_driver_name);
if (!mlxsw_owq) {
err = -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 9a79b5e11597..d633bef5f105 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -70,6 +70,7 @@ static const struct mlxsw_sp_sb_pool_des mlxsw_sp2_sb_pool_dess[] = {
{MLXSW_REG_SBXX_DIR_EGRESS, 1},
{MLXSW_REG_SBXX_DIR_EGRESS, 2},
{MLXSW_REG_SBXX_DIR_EGRESS, 3},
+ {MLXSW_REG_SBXX_DIR_EGRESS, 15},
};
#define MLXSW_SP_SB_ING_TC_COUNT 8
@@ -428,6 +429,7 @@ static const struct mlxsw_sp_sb_pr mlxsw_sp2_sb_prs[] = {
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, 0),
+ MLXSW_SP_SB_PR(MLXSW_REG_SBPR_MODE_STATIC, MLXSW_SP_SB_INFI),
};
static int mlxsw_sp_sb_prs_init(struct mlxsw_sp *mlxsw_sp,
@@ -517,14 +519,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp2_sb_cms_egress[] = {
MLXSW_SP_SB_CM(0, 7, 4),
MLXSW_SP_SB_CM(0, 7, 4),
MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
- MLXSW_SP_SB_CM(0, 7, 4),
+ MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+ MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+ MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+ MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+ MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+ MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+ MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
+ MLXSW_SP_SB_CM(0, MLXSW_SP_SB_INFI, 8),
MLXSW_SP_SB_CM(1, 0xff, 4),
};
@@ -671,6 +673,7 @@ static const struct mlxsw_sp_sb_pm mlxsw_sp2_sb_pms[] = {
MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(0, 0),
MLXSW_SP_SB_PM(0, 0),
+ MLXSW_SP_SB_PM(10000, 90000),
};
static int mlxsw_sp_port_sb_pms_init(struct mlxsw_sp_port *mlxsw_sp_port)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 52fed8c7bf1e..902e766a8ed3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -6781,7 +6781,7 @@ static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp,
/* A RIF is not created for macvlan netdevs. Their MAC is used to
* populate the FDB
*/
- if (netif_is_macvlan(dev))
+ if (netif_is_macvlan(dev) || netif_is_l3_master(dev))
return 0;
for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index f6ce386c3036..50111f228d77 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1630,7 +1630,7 @@ static int mlxsw_sp_port_mdb_add(struct mlxsw_sp_port *mlxsw_sp_port,
u16 fid_index;
int err = 0;
- if (switchdev_trans_ph_prepare(trans))
+ if (switchdev_trans_ph_commit(trans))
return 0;
bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev);
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index a1d0d6e42533..d715ef4fc92f 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -613,7 +613,7 @@ static int ocelot_mact_mc_add(struct ocelot_port *port,
struct netdev_hw_addr *hw_addr)
{
struct ocelot *ocelot = port->ocelot;
- struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_KERNEL);
+ struct netdev_hw_addr *ha = kzalloc(sizeof(*ha), GFP_ATOMIC);
if (!ha)
return -ENOMEM;
@@ -959,10 +959,8 @@ static void ocelot_get_strings(struct net_device *netdev, u32 sset, u8 *data)
ETH_GSTRING_LEN);
}
-static void ocelot_check_stats(struct work_struct *work)
+static void ocelot_update_stats(struct ocelot *ocelot)
{
- struct delayed_work *del_work = to_delayed_work(work);
- struct ocelot *ocelot = container_of(del_work, struct ocelot, stats_work);
int i, j;
mutex_lock(&ocelot->stats_lock);
@@ -986,11 +984,19 @@ static void ocelot_check_stats(struct work_struct *work)
}
}
- cancel_delayed_work(&ocelot->stats_work);
+ mutex_unlock(&ocelot->stats_lock);
+}
+
+static void ocelot_check_stats_work(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct ocelot *ocelot = container_of(del_work, struct ocelot,
+ stats_work);
+
+ ocelot_update_stats(ocelot);
+
queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
OCELOT_STATS_CHECK_DELAY);
-
- mutex_unlock(&ocelot->stats_lock);
}
static void ocelot_get_ethtool_stats(struct net_device *dev,
@@ -1001,7 +1007,7 @@ static void ocelot_get_ethtool_stats(struct net_device *dev,
int i;
/* check and update now */
- ocelot_check_stats(&ocelot->stats_work.work);
+ ocelot_update_stats(ocelot);
/* Copy all counters */
for (i = 0; i < ocelot->num_stats; i++)
@@ -1809,7 +1815,7 @@ int ocelot_init(struct ocelot *ocelot)
ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(6),
ANA_CPUQ_8021_CFG, i);
- INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats);
+ INIT_DELAYED_WORK(&ocelot->stats_work, ocelot_check_stats_work);
queue_delayed_work(ocelot->stats_queue, &ocelot->stats_work,
OCELOT_STATS_CHECK_DELAY);
return 0;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 7cde387e5ec6..51cd57ab3d95 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -2366,6 +2366,7 @@ static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
dma_object->addr))) {
vxge_os_dma_free(devh->pdev, memblock,
&dma_object->acc_handle);
+ memblock = NULL;
goto exit;
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index eeda4ed98333..e336f6ee94f5 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -48,8 +48,7 @@ nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
tmp_push_vlan_tci =
FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, act->vlan.prio) |
- FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid) |
- NFP_FL_PUSH_VLAN_CFI;
+ FIELD_PREP(NFP_FL_PUSH_VLAN_VID, act->vlan.vid);
push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
}
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index 4fcaf11ed56e..0ed51e79db00 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -26,7 +26,7 @@
#define NFP_FLOWER_LAYER2_GENEVE_OP BIT(6)
#define NFP_FLOWER_MASK_VLAN_PRIO GENMASK(15, 13)
-#define NFP_FLOWER_MASK_VLAN_CFI BIT(12)
+#define NFP_FLOWER_MASK_VLAN_PRESENT BIT(12)
#define NFP_FLOWER_MASK_VLAN_VID GENMASK(11, 0)
#define NFP_FLOWER_MASK_MPLS_LB GENMASK(31, 12)
@@ -82,7 +82,6 @@
#define NFP_FL_OUT_FLAGS_TYPE_IDX GENMASK(2, 0)
#define NFP_FL_PUSH_VLAN_PRIO GENMASK(15, 13)
-#define NFP_FL_PUSH_VLAN_CFI BIT(12)
#define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0)
#define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index e03c8ef2c28c..9b8b843d0340 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -30,20 +30,19 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext,
flow_rule_match_vlan(rule, &match);
/* Populate the tci field. */
- if (match.key->vlan_id || match.key->vlan_priority) {
- tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
- match.key->vlan_priority) |
- FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
- match.key->vlan_id) |
- NFP_FLOWER_MASK_VLAN_CFI;
- ext->tci = cpu_to_be16(tmp_tci);
- tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
- match.mask->vlan_priority) |
- FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
- match.mask->vlan_id) |
- NFP_FLOWER_MASK_VLAN_CFI;
- msk->tci = cpu_to_be16(tmp_tci);
- }
+ tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+ tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ match.key->vlan_priority) |
+ FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+ match.key->vlan_id);
+ ext->tci = cpu_to_be16(tmp_tci);
+
+ tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT;
+ tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
+ match.mask->vlan_priority) |
+ FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
+ match.mask->vlan_id);
+ msk->tci = cpu_to_be16(tmp_tci);
}
}
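
With NFP_FLOWER_MASK_VLAN_CFI repurposed as NFP_FLOWER_MASK_VLAN_PRESENT, the match code above always sets the "present" bit in both key and mask, so a VLAN tag with VID 0 and priority 0 is still distinguishable from an untagged frame. A small standalone sketch of composing that 16-bit TCI, with the bit positions taken from the GENMASK/BIT definitions above:

#include <stdint.h>
#include <stdio.h>

#define VLAN_PRESENT	(1u << 12)	/* bit 12 */

static uint16_t build_tci(unsigned int prio, unsigned int vid)
{
	/* bits 15..13 priority, bit 12 present, bits 11..0 VID */
	return VLAN_PRESENT | ((prio & 0x7) << 13) | (vid & 0xfff);
}

int main(void)
{
	/* VID 0 / prio 0 still yields a non-zero key, unlike the old code */
	printf("tci(prio=0, vid=0)   = 0x%04x\n", build_tci(0, 0));
	printf("tci(prio=3, vid=100) = 0x%04x\n", build_tci(3, 100));
	return 0;
}
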
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
index d2c803bb4e56..94d228c04496 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c
@@ -195,7 +195,7 @@ static netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = dev_queue_xmit(skb);
nfp_repr_inc_tx_stats(netdev, len, ret);
- return ret;
+ return NETDEV_TX_OK;
}
static int nfp_repr_stop(struct net_device *netdev)
@@ -383,7 +383,7 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;
- netdev->priv_flags |= IFF_NO_QUEUE;
+ netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
netdev->features |= NETIF_F_LLTX;
if (nfp_app_has_tc(app)) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 43a57ec296fd..127c89b22ef0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -431,12 +431,16 @@ struct qed_qm_info {
u8 num_pf_rls;
};
+#define QED_OVERFLOW_BIT 1
+
struct qed_db_recovery_info {
struct list_head list;
/* Lock to protect the doorbell recovery mechanism list */
spinlock_t lock;
+ bool dorq_attn;
u32 db_recovery_counter;
+ unsigned long overflow;
};
struct storm_stats {
@@ -920,8 +924,7 @@ u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
/* doorbell recovery mechanism */
void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
-void qed_db_recovery_execute(struct qed_hwfn *p_hwfn,
- enum qed_db_rec_exec db_exec);
+void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
/* Other Linux specific common definitions */
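
The new overflow word and QED_OVERFLOW_BIT above carry an overflow event from the DORQ attention callback (which records it with set_bit() further down, in qed_int.c) to the periodic recovery handler (which consumes it with test_and_clear_bit()). A rough user-space analogue of that hand-off using C11 atomics rather than the kernel bitops; this is an assumed illustration, not driver code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define QED_OVERFLOW_BIT 1u

static atomic_uint overflow_flags;

static void attn_path(void)
{
	/* like set_bit(QED_OVERFLOW_BIT, &db_recovery_info.overflow) */
	atomic_fetch_or(&overflow_flags, 1u << QED_OVERFLOW_BIT);
}

static bool periodic_handler(void)
{
	/* like test_and_clear_bit(): returns the previous bit value */
	unsigned int prev = atomic_fetch_and(&overflow_flags,
					     ~(1u << QED_OVERFLOW_BIT));
	return prev & (1u << QED_OVERFLOW_BIT);
}

int main(void)
{
	attn_path();
	printf("first run sees overflow:  %d\n", periodic_handler());
	printf("second run sees overflow: %d\n", periodic_handler());
	return 0;
}
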
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 9df8c4b3b54e..866cdc86a3f2 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -102,11 +102,15 @@ static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn,
/* Doorbell address sanity (address within doorbell bar range) */
static bool qed_db_rec_sanity(struct qed_dev *cdev,
- void __iomem *db_addr, void *db_data)
+ void __iomem *db_addr,
+ enum qed_db_rec_width db_width,
+ void *db_data)
{
+ u32 width = (db_width == DB_REC_WIDTH_32B) ? 32 : 64;
+
/* Make sure doorbell address is within the doorbell bar */
if (db_addr < cdev->doorbells ||
- (u8 __iomem *)db_addr >
+ (u8 __iomem *)db_addr + width >
(u8 __iomem *)cdev->doorbells + cdev->db_size) {
WARN(true,
"Illegal doorbell address: %p. Legal range for doorbell addresses is [%p..%p]\n",
@@ -159,7 +163,7 @@ int qed_db_recovery_add(struct qed_dev *cdev,
}
/* Sanitize doorbell address */
- if (!qed_db_rec_sanity(cdev, db_addr, db_data))
+ if (!qed_db_rec_sanity(cdev, db_addr, db_width, db_data))
return -EINVAL;
/* Obtain hwfn from doorbell address */
@@ -205,10 +209,6 @@ int qed_db_recovery_del(struct qed_dev *cdev,
return 0;
}
- /* Sanitize doorbell address */
- if (!qed_db_rec_sanity(cdev, db_addr, db_data))
- return -EINVAL;
-
/* Obtain hwfn from doorbell address */
p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr);
@@ -300,31 +300,24 @@ void qed_db_recovery_dp(struct qed_hwfn *p_hwfn)
/* Ring the doorbell of a single doorbell recovery entry */
static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
- struct qed_db_recovery_entry *db_entry,
- enum qed_db_rec_exec db_exec)
-{
- if (db_exec != DB_REC_ONCE) {
- /* Print according to width */
- if (db_entry->db_width == DB_REC_WIDTH_32B) {
- DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
- "%s doorbell address %p data %x\n",
- db_exec == DB_REC_DRY_RUN ?
- "would have rung" : "ringing",
- db_entry->db_addr,
- *(u32 *)db_entry->db_data);
- } else {
- DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
- "%s doorbell address %p data %llx\n",
- db_exec == DB_REC_DRY_RUN ?
- "would have rung" : "ringing",
- db_entry->db_addr,
- *(u64 *)(db_entry->db_data));
- }
+ struct qed_db_recovery_entry *db_entry)
+{
+ /* Print according to width */
+ if (db_entry->db_width == DB_REC_WIDTH_32B) {
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "ringing doorbell address %p data %x\n",
+ db_entry->db_addr,
+ *(u32 *)db_entry->db_data);
+ } else {
+ DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
+ "ringing doorbell address %p data %llx\n",
+ db_entry->db_addr,
+ *(u64 *)(db_entry->db_data));
}
/* Sanity */
if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr,
- db_entry->db_data))
+ db_entry->db_width, db_entry->db_data))
return;
/* Flush the write combined buffer. Since there are multiple doorbelling
@@ -334,14 +327,12 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
wmb();
/* Ring the doorbell */
- if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) {
- if (db_entry->db_width == DB_REC_WIDTH_32B)
- DIRECT_REG_WR(db_entry->db_addr,
- *(u32 *)(db_entry->db_data));
- else
- DIRECT_REG_WR64(db_entry->db_addr,
- *(u64 *)(db_entry->db_data));
- }
+ if (db_entry->db_width == DB_REC_WIDTH_32B)
+ DIRECT_REG_WR(db_entry->db_addr,
+ *(u32 *)(db_entry->db_data));
+ else
+ DIRECT_REG_WR64(db_entry->db_addr,
+ *(u64 *)(db_entry->db_data));
/* Flush the write combined buffer. Next doorbell may come from a
* different entity to the same address...
@@ -350,29 +341,21 @@ static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn,
}
/* Traverse the doorbell recovery entry list and ring all the doorbells */
-void qed_db_recovery_execute(struct qed_hwfn *p_hwfn,
- enum qed_db_rec_exec db_exec)
+void qed_db_recovery_execute(struct qed_hwfn *p_hwfn)
{
struct qed_db_recovery_entry *db_entry = NULL;
- if (db_exec != DB_REC_ONCE) {
- DP_NOTICE(p_hwfn,
- "Executing doorbell recovery. Counter was %d\n",
- p_hwfn->db_recovery_info.db_recovery_counter);
+ DP_NOTICE(p_hwfn, "Executing doorbell recovery. Counter was %d\n",
+ p_hwfn->db_recovery_info.db_recovery_counter);
- /* Track amount of times recovery was executed */
- p_hwfn->db_recovery_info.db_recovery_counter++;
- }
+ /* Track amount of times recovery was executed */
+ p_hwfn->db_recovery_info.db_recovery_counter++;
/* Protect the list */
spin_lock_bh(&p_hwfn->db_recovery_info.lock);
list_for_each_entry(db_entry,
- &p_hwfn->db_recovery_info.list, list_entry) {
- qed_db_recovery_ring(p_hwfn, db_entry, db_exec);
- if (db_exec == DB_REC_ONCE)
- break;
- }
-
+ &p_hwfn->db_recovery_info.list, list_entry)
+ qed_db_recovery_ring(p_hwfn, db_entry);
spin_unlock_bh(&p_hwfn->db_recovery_info.lock);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c
index e23980e301b6..8848d5bed6e5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.c
@@ -378,6 +378,9 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
u32 count = QED_DB_REC_COUNT;
u32 usage = 1;
+ /* Flush any pending (e)dpms as they may never arrive */
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
+
/* wait for usage to zero or count to run out. This is necessary since
* EDPM doorbell transactions can take multiple 64b cycles, and as such
* can "split" over the pci. Possibly, the doorbell drop can happen with
@@ -406,51 +409,74 @@ static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn,
int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- u32 overflow;
+ u32 attn_ovfl, cur_ovfl;
int rc;
- overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
- DP_NOTICE(p_hwfn, "PF Overflow sticky 0x%x\n", overflow);
- if (!overflow) {
- qed_db_recovery_execute(p_hwfn, DB_REC_ONCE);
+ attn_ovfl = test_and_clear_bit(QED_OVERFLOW_BIT,
+ &p_hwfn->db_recovery_info.overflow);
+ cur_ovfl = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+ if (!cur_ovfl && !attn_ovfl)
return 0;
- }
- if (qed_edpm_enabled(p_hwfn)) {
+ DP_NOTICE(p_hwfn, "PF Overflow sticky: attn %u current %u\n",
+ attn_ovfl, cur_ovfl);
+
+ if (cur_ovfl && !p_hwfn->db_bar_no_edpm) {
rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
if (rc)
return rc;
}
- /* Flush any pending (e)dpm as they may never arrive */
- qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);
-
/* Release overflow sticky indication (stop silently dropping everything) */
qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
/* Repeat all last doorbells (doorbell drop recovery) */
- qed_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);
+ qed_db_recovery_execute(p_hwfn);
return 0;
}
-static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
+static void qed_dorq_attn_overflow(struct qed_hwfn *p_hwfn)
{
- u32 int_sts, first_drop_reason, details, address, all_drops_reason;
struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
+ u32 overflow;
int rc;
- int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
- DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);
+ overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY);
+ if (!overflow)
+ goto out;
+
+ /* Run PF doorbell recovery in next periodic handler */
+ set_bit(QED_OVERFLOW_BIT, &p_hwfn->db_recovery_info.overflow);
+
+ if (!p_hwfn->db_bar_no_edpm) {
+ rc = qed_db_rec_flush_queue(p_hwfn, p_ptt);
+ if (rc)
+ goto out;
+ }
+
+ qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);
+out:
+ /* Schedule the handler even if overflow was not detected */
+ qed_periodic_db_rec_start(p_hwfn);
+}
+
+static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn)
+{
+ u32 int_sts, first_drop_reason, details, address, all_drops_reason;
+ struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt;
/* int_sts may be zero since all PFs were interrupted for doorbell
* overflow but another one already handled it. Can abort here. If
* This PF also requires overflow recovery we will be interrupted again.
* The masked almost full indication may also be set. Ignoring.
*/
+ int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL))
return 0;
+ DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts);
+
/* check if db_drop or overflow happened */
if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
@@ -477,11 +503,6 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4,
first_drop_reason, all_drops_reason);
- rc = qed_db_rec_handler(p_hwfn, p_ptt);
- qed_periodic_db_rec_start(p_hwfn);
- if (rc)
- return rc;
-
/* Clear the doorbell drop details and prepare for next drop */
qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);
@@ -507,6 +528,25 @@ static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
return -EINVAL;
}
+static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn)
+{
+ p_hwfn->db_recovery_info.dorq_attn = true;
+ qed_dorq_attn_overflow(p_hwfn);
+
+ return qed_dorq_attn_int_sts(p_hwfn);
+}
+
+static void qed_dorq_attn_handler(struct qed_hwfn *p_hwfn)
+{
+ if (p_hwfn->db_recovery_info.dorq_attn)
+ goto out;
+
+ /* Call DORQ callback if the attention was missed */
+ qed_dorq_attn_cb(p_hwfn);
+out:
+ p_hwfn->db_recovery_info.dorq_attn = false;
+}
+
/* Instead of major changes to the data-structure, we have some 'special'
* identifiers for sources that changed meaning between adapters.
*/
@@ -1080,6 +1120,9 @@ static int qed_int_deassertion(struct qed_hwfn *p_hwfn,
}
}
+ /* Handle missed DORQ attention */
+ qed_dorq_attn_handler(p_hwfn);
+
/* Clear IGU indication for the deasserted bits */
DIRECT_REG_WR((u8 __iomem *)p_hwfn->regview +
GTT_BAR0_MAP_REG_IGU_CMD +
diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h
index 1f356ed4f761..d473b522afc5 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_int.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_int.h
@@ -192,8 +192,8 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev);
/**
* @brief - Doorbell Recovery handler.
- * Run DB_REAL_DEAL doorbell recovery in case of PF overflow
- * (and flush DORQ if needed), otherwise run DB_REC_ONCE.
+ * Run doorbell recovery in case of PF overflow (and flush DORQ if
+ * needed).
*
* @param p_hwfn
* @param p_ptt
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index f164d4acebcb..6de23b56b294 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -970,7 +970,7 @@ static void qed_update_pf_params(struct qed_dev *cdev,
}
}
-#define QED_PERIODIC_DB_REC_COUNT 100
+#define QED_PERIODIC_DB_REC_COUNT 10
#define QED_PERIODIC_DB_REC_INTERVAL_MS 100
#define QED_PERIODIC_DB_REC_INTERVAL \
msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index 9faaa6df78ed..2f318aaf2b05 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -1591,7 +1591,7 @@ static void qed_iov_vf_mbx_acquire(struct qed_hwfn *p_hwfn,
p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
} else {
DP_INFO(p_hwfn,
- "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's faspath HSI %02x.%02x\n",
+ "VF[%d] needs fastpath HSI %02x.%02x, which is incompatible with loaded FW's fastpath HSI %02x.%02x\n",
vf->abs_vf_id,
req->vfdev_info.eth_fp_hsi_major,
req->vfdev_info.eth_fp_hsi_minor,
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 5f3f42a25361..bddb2b5982dc 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -490,18 +490,17 @@ int qede_ptp_enable(struct qede_dev *edev, bool init_tc)
ptp->clock = ptp_clock_register(&ptp->clock_info, &edev->pdev->dev);
if (IS_ERR(ptp->clock)) {
- rc = -EINVAL;
DP_ERR(edev, "PTP clock registration failed\n");
+ qede_ptp_disable(edev);
+ rc = -EINVAL;
goto err2;
}
return 0;
-err2:
- qede_ptp_disable(edev);
- ptp->clock = NULL;
err1:
kfree(ptp);
+err2:
edev->ptp = NULL;
return rc;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 0c443ea98479..374a4d4371f9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -497,7 +497,7 @@ struct qlcnic_hardware_context {
u16 board_type;
u16 supported_type;
- u16 link_speed;
+ u32 link_speed;
u16 link_duplex;
u16 link_autoneg;
u16 module_type;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 7562ccbbb39a..ed651dde6ef9 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -28,6 +28,7 @@
#include <linux/pm_runtime.h>
#include <linux/firmware.h>
#include <linux/prefetch.h>
+#include <linux/pci-aspm.h>
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
@@ -5460,7 +5461,7 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp)
tp->cp_cmd |= PktCntrDisable | INTT_1;
RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- RTL_W16(tp, IntrMitigate, 0x5151);
+ RTL_W16(tp, IntrMitigate, 0x5100);
/* Work around for RxFIFO overflow. */
if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
@@ -7352,6 +7353,11 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
+	/* Disable ASPM completely as it causes random devices to stop working
+	 * and can cause full system hangs for some PCIe device users.
+ */
+ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
+
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pcim_enable_device(pdev);
if (rc < 0) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
index 40d6356a7e73..3dfb07a78952 100644
--- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -29,11 +29,13 @@
/* Specific functions used for Ring mode */
/* Enhanced descriptors */
-static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end,
+ int bfsize)
{
- p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
- << ERDES1_BUFFER2_SIZE_SHIFT)
- & ERDES1_BUFFER2_SIZE_MASK);
+ if (bfsize == BUF_SIZE_16KiB)
+ p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
+ << ERDES1_BUFFER2_SIZE_SHIFT)
+ & ERDES1_BUFFER2_SIZE_MASK);
if (end)
p->des1 |= cpu_to_le32(ERDES1_END_RING);
@@ -59,11 +61,15 @@ static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
}
/* Normal descriptors */
-static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
+static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize)
{
- p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1)
- << RDES1_BUFFER2_SIZE_SHIFT)
- & RDES1_BUFFER2_SIZE_MASK);
+ if (bfsize >= BUF_SIZE_2KiB) {
+ int bfsize2;
+
+ bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);
+ p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT)
+ & RDES1_BUFFER2_SIZE_MASK);
+ }
if (end)
p->des1 |= cpu_to_le32(RDES1_END_RING);
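
For reference, the arithmetic behind the new helpers: the receive buffer is split across the descriptor's two buffer-size fields, each clamped to what its bit-field can encode. A small standalone sketch of the normal-descriptor split (the 2 KiB constant mirrors the patch; everything else is illustrative):

#include <stdio.h>

#define BUF_SIZE_2KiB 2048

/* buffer1 takes up to 2 KiB - 1 bytes; buffer2 takes the remainder,
 * also capped at 2 KiB - 1, and only if the buffer is at least 2 KiB. */
static void split(int bfsize, int *b1, int *b2)
{
	*b1 = bfsize < BUF_SIZE_2KiB - 1 ? bfsize : BUF_SIZE_2KiB - 1;
	*b2 = 0;
	if (bfsize >= BUF_SIZE_2KiB) {
		int rem = bfsize - BUF_SIZE_2KiB + 1;
		*b2 = rem < BUF_SIZE_2KiB - 1 ? rem : BUF_SIZE_2KiB - 1;
	}
}

int main(void)
{
	int sizes[] = { 1536, 2048, 4096 };
	for (unsigned i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		int b1, b2;
		split(sizes[i], &b1, &b2);
		printf("bfsize=%d -> buf1=%d buf2=%d\n", sizes[i], b1, b2);
	}
	return 0;
}
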
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index 7fbb6a4dbf51..e061e9f5fad7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -296,7 +296,7 @@ exit:
}
static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
- int mode, int end)
+ int mode, int end, int bfsize)
{
dwmac4_set_rx_owner(p, disable_rx_ic);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
index 1d858fdec997..98fa471da7c0 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -123,7 +123,7 @@ static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
}
static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
- int mode, int end)
+ int mode, int end, int bfsize)
{
dwxgmac2_set_rx_owner(p, disable_rx_ic);
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 5ef91a790f9d..5202d6ad7919 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -201,6 +201,11 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(rdes0 & RDES0_OWN))
return dma_own;
+ if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
+ stats->rx_length_errors++;
+ return discard_frame;
+ }
+
if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
x->rx_desc++;
@@ -231,9 +236,10 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
* It doesn't match with the information reported into the databook.
* At any rate, we need to understand if the CSUM hw computation is ok
* and report this info to the upper layers. */
- ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
- !!(rdes0 & RDES0_FRAME_TYPE),
- !!(rdes0 & ERDES0_RX_MAC_ADDR));
+ if (likely(ret == good_frame))
+ ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
+ !!(rdes0 & RDES0_FRAME_TYPE),
+ !!(rdes0 & ERDES0_RX_MAC_ADDR));
if (unlikely(rdes0 & RDES0_DRIBBLING))
x->dribbling_bit++;
@@ -259,15 +265,19 @@ static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
}
static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
- int mode, int end)
+ int mode, int end, int bfsize)
{
+ int bfsize1;
+
p->des0 |= cpu_to_le32(RDES0_OWN);
- p->des1 |= cpu_to_le32(BUF_SIZE_8KiB & ERDES1_BUFFER1_SIZE_MASK);
+
+ bfsize1 = min(bfsize, BUF_SIZE_8KiB);
+ p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);
if (mode == STMMAC_CHAIN_MODE)
ehn_desc_rx_set_on_chain(p);
else
- ehn_desc_rx_set_on_ring(p, end);
+ ehn_desc_rx_set_on_ring(p, end, bfsize);
if (disable_rx_ic)
p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index 92b8944f26e3..5bb00234d961 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -33,7 +33,7 @@ struct dma_extended_desc;
struct stmmac_desc_ops {
/* DMA RX descriptor ring initialization */
void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode,
- int end);
+ int end, int bfsize);
/* DMA TX descriptor ring initialization */
void (*init_tx_desc)(struct dma_desc *p, int mode, int end);
/* Invoked by the xmit function to prepare the tx descriptor */
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
index de65bb29feba..b7dd4e3c760d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -91,8 +91,6 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
return dma_own;
if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
- pr_warn("%s: Oversized frame spanned multiple buffers\n",
- __func__);
stats->rx_length_errors++;
return discard_frame;
}
@@ -135,15 +133,19 @@ static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
}
static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
- int end)
+ int end, int bfsize)
{
+ int bfsize1;
+
p->des0 |= cpu_to_le32(RDES0_OWN);
- p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK);
+
+ bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
+	p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK);
if (mode == STMMAC_CHAIN_MODE)
ndesc_rx_set_on_chain(p, end);
else
- ndesc_rx_set_on_ring(p, end);
+ ndesc_rx_set_on_ring(p, end, bfsize);
if (disable_rx_ic)
p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6a2e1031a62a..a26e36dbb5df 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1136,11 +1136,13 @@ static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
if (priv->extend_desc)
stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
priv->use_riwt, priv->mode,
- (i == DMA_RX_SIZE - 1));
+ (i == DMA_RX_SIZE - 1),
+ priv->dma_buf_sz);
else
stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
priv->use_riwt, priv->mode,
- (i == DMA_RX_SIZE - 1));
+ (i == DMA_RX_SIZE - 1),
+ priv->dma_buf_sz);
}
/**
@@ -3352,9 +3354,8 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
- unsigned int entry = rx_q->cur_rx;
+ unsigned int next_entry = rx_q->cur_rx;
int coe = priv->hw->rx_csum;
- unsigned int next_entry;
unsigned int count = 0;
bool xmac;
@@ -3372,10 +3373,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
stmmac_display_ring(priv, rx_head, DMA_RX_SIZE, true);
}
while (count < limit) {
- int status;
+ int entry, status;
struct dma_desc *p;
struct dma_desc *np;
+ entry = next_entry;
+
if (priv->extend_desc)
p = (struct dma_desc *)(rx_q->dma_erx + entry);
else
@@ -3431,11 +3434,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
* ignored
*/
if (frame_len > priv->dma_buf_sz) {
- netdev_err(priv->dev,
- "len %d larger than size (%d)\n",
- frame_len, priv->dma_buf_sz);
+ if (net_ratelimit())
+ netdev_err(priv->dev,
+ "len %d larger than size (%d)\n",
+ frame_len, priv->dma_buf_sz);
priv->dev->stats.rx_length_errors++;
- break;
+ continue;
}
/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
@@ -3470,7 +3474,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
dev_warn(priv->device,
"packet dropped\n");
priv->dev->stats.rx_dropped++;
- break;
+ continue;
}
dma_sync_single_for_cpu(priv->device,
@@ -3490,11 +3494,12 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
} else {
skb = rx_q->rx_skbuff[entry];
if (unlikely(!skb)) {
- netdev_err(priv->dev,
- "%s: Inconsistent Rx chain\n",
- priv->dev->name);
+ if (net_ratelimit())
+ netdev_err(priv->dev,
+ "%s: Inconsistent Rx chain\n",
+ priv->dev->name);
priv->dev->stats.rx_dropped++;
- break;
+ continue;
}
prefetch(skb->data - NET_IP_ALIGN);
rx_q->rx_skbuff[entry] = NULL;
@@ -3529,7 +3534,6 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
priv->dev->stats.rx_packets++;
priv->dev->stats.rx_bytes += frame_len;
}
- entry = next_entry;
}
stmmac_rx_refill(priv, queue);
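
The loop rework above latches the ring index from next_entry at the top of every iteration, so an oversized or inconsistent frame can be dropped with continue while the walk still advances instead of stalling on the bad descriptor. A simplified standalone model of that structure (descriptor fields are invented for illustration; this is not the stmmac code):

#include <stdio.h>
#include <stdbool.h>

#define RING_SIZE 8

struct desc {
	bool owned_by_dma;	/* set: hardware still owns the entry */
	bool bad;		/* set: frame should be dropped       */
	int len;
};

/* The current index is taken from next_entry at the top of each pass, so
 * a dropped frame can use `continue` and the walk keeps moving. */
static int rx_poll(struct desc *ring, int *cur, int budget)
{
	int count = 0;
	int next_entry = *cur;

	while (count < budget) {
		int entry = next_entry;
		struct desc *p = &ring[entry];

		if (p->owned_by_dma)
			break;			/* nothing more to reap */

		count++;
		next_entry = (entry + 1) % RING_SIZE;

		if (p->bad) {
			printf("drop entry %d\n", entry);
			continue;		/* skip, do not stall the ring */
		}
		printf("deliver entry %d, len %d\n", entry, p->len);
	}
	*cur = next_entry;
	return count;
}

int main(void)
{
	struct desc ring[RING_SIZE] = {
		{ false, false, 64 }, { false, true, 9000 },
		{ false, false, 128 }, { true, false, 0 },
	};
	int cur = 0;

	printf("reaped %d\n", rx_poll(ring, &cur, RING_SIZE));
	return 0;
}
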
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index e859ae2e42d5..49f41b64077b 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -987,6 +987,7 @@ struct netvsc_device {
wait_queue_head_t wait_drain;
bool destroy;
+ bool tx_disable; /* if true, do not wake up queue again */
	/* Receive buffer allocated by us but managed by NetVSP */
void *recv_buf;
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 813d195bbd57..e0dce373cdd9 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -110,6 +110,7 @@ static struct netvsc_device *alloc_net_device(void)
init_waitqueue_head(&net_device->wait_drain);
net_device->destroy = false;
+ net_device->tx_disable = false;
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
@@ -719,7 +720,7 @@ static void netvsc_send_tx_complete(struct net_device *ndev,
} else {
struct netdev_queue *txq = netdev_get_tx_queue(ndev, q_idx);
- if (netif_tx_queue_stopped(txq) &&
+ if (netif_tx_queue_stopped(txq) && !net_device->tx_disable &&
(hv_get_avail_to_write_percent(&channel->outbound) >
RING_AVAIL_PERCENT_HIWATER || queue_sends < 1)) {
netif_tx_wake_queue(txq);
@@ -874,7 +875,8 @@ static inline int netvsc_send_pkt(
} else if (ret == -EAGAIN) {
netif_tx_stop_queue(txq);
ndev_ctx->eth_stats.stop_queue++;
- if (atomic_read(&nvchan->queue_sends) < 1) {
+ if (atomic_read(&nvchan->queue_sends) < 1 &&
+ !net_device->tx_disable) {
netif_tx_wake_queue(txq);
ndev_ctx->eth_stats.wake_queue++;
ret = -ENOSPC;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index cf4897043e83..b20fb0fb595b 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -109,6 +109,15 @@ static void netvsc_set_rx_mode(struct net_device *net)
rcu_read_unlock();
}
+static void netvsc_tx_enable(struct netvsc_device *nvscdev,
+ struct net_device *ndev)
+{
+ nvscdev->tx_disable = false;
+ virt_wmb(); /* ensure queue wake up mechanism is on */
+
+ netif_tx_wake_all_queues(ndev);
+}
+
static int netvsc_open(struct net_device *net)
{
struct net_device_context *ndev_ctx = netdev_priv(net);
@@ -129,7 +138,7 @@ static int netvsc_open(struct net_device *net)
rdev = nvdev->extension;
if (!rdev->link_state) {
netif_carrier_on(net);
- netif_tx_wake_all_queues(net);
+ netvsc_tx_enable(nvdev, net);
}
if (vf_netdev) {
@@ -184,6 +193,17 @@ static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
}
}
+static void netvsc_tx_disable(struct netvsc_device *nvscdev,
+ struct net_device *ndev)
+{
+ if (nvscdev) {
+ nvscdev->tx_disable = true;
+ virt_wmb(); /* ensure txq will not wake up after stop */
+ }
+
+ netif_tx_disable(ndev);
+}
+
static int netvsc_close(struct net_device *net)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
@@ -192,7 +212,7 @@ static int netvsc_close(struct net_device *net)
struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
int ret;
- netif_tx_disable(net);
+ netvsc_tx_disable(nvdev, net);
/* No need to close rndis filter if it is removed already */
if (!nvdev)
@@ -920,7 +940,7 @@ static int netvsc_detach(struct net_device *ndev,
/* If device was up (receiving) then shutdown */
if (netif_running(ndev)) {
- netif_tx_disable(ndev);
+ netvsc_tx_disable(nvdev, ndev);
ret = rndis_filter_close(nvdev);
if (ret) {
@@ -1908,7 +1928,7 @@ static void netvsc_link_change(struct work_struct *w)
if (rdev->link_state) {
rdev->link_state = false;
netif_carrier_on(net);
- netif_tx_wake_all_queues(net);
+ netvsc_tx_enable(net_device, net);
} else {
notify = true;
}
@@ -1918,7 +1938,7 @@ static void netvsc_link_change(struct work_struct *w)
if (!rdev->link_state) {
rdev->link_state = true;
netif_carrier_off(net);
- netif_tx_stop_all_queues(net);
+ netvsc_tx_disable(net_device, net);
}
kfree(event);
break;
@@ -1927,7 +1947,7 @@ static void netvsc_link_change(struct work_struct *w)
if (!rdev->link_state) {
rdev->link_state = true;
netif_carrier_off(net);
- netif_tx_stop_all_queues(net);
+ netvsc_tx_disable(net_device, net);
event->event = RNDIS_STATUS_MEDIA_CONNECT;
spin_lock_irqsave(&ndev_ctx->lock, flags);
list_add(&event->list, &ndev_ctx->reconfig_events);
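
The new tx_disable flag gates every wake-up of the transmit queues, so a send completion that races with netvsc_close() or a link-down event can no longer restart a queue that was just stopped (the driver orders the flag update with virt_wmb()). A simplified, single-threaded userspace model of the gating logic (illustrative names only, no real synchronization):

#include <stdbool.h>
#include <stdio.h>

struct nv_dev {
	bool tx_disable;
	bool queue_stopped;
};

static void tx_enable(struct nv_dev *d)
{
	d->tx_disable = false;
	/* the driver issues a write barrier here before waking the queues */
	d->queue_stopped = false;
}

static void tx_disable(struct nv_dev *d)
{
	d->tx_disable = true;
	d->queue_stopped = true;	/* netif_tx_disable() */
}

/* completion path: wake only if transmit was not administratively disabled */
static void on_tx_complete(struct nv_dev *d, bool ring_has_room)
{
	if (d->queue_stopped && ring_has_room && !d->tx_disable)
		d->queue_stopped = false;
}

int main(void)
{
	struct nv_dev d = { 0 };

	tx_disable(&d);
	on_tx_complete(&d, true);	/* must NOT wake the queue */
	printf("stopped after disable: %s\n", d.queue_stopped ? "yes" : "no");

	tx_enable(&d);
	printf("stopped after enable: %s\n", d.queue_stopped ? "yes" : "no");
	return 0;
}
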
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 6ed96fdfd96d..9ce61b019aad 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1246,6 +1246,23 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
goto err_option_port_add;
}
+ /* set promiscuity level to new slave */
+ if (dev->flags & IFF_PROMISC) {
+ err = dev_set_promiscuity(port_dev, 1);
+ if (err)
+ goto err_set_slave_promisc;
+ }
+
+ /* set allmulti level to new slave */
+ if (dev->flags & IFF_ALLMULTI) {
+ err = dev_set_allmulti(port_dev, 1);
+ if (err) {
+ if (dev->flags & IFF_PROMISC)
+ dev_set_promiscuity(port_dev, -1);
+ goto err_set_slave_promisc;
+ }
+ }
+
netif_addr_lock_bh(dev);
dev_uc_sync_multiple(port_dev, dev);
dev_mc_sync_multiple(port_dev, dev);
@@ -1262,6 +1279,9 @@ static int team_port_add(struct team *team, struct net_device *port_dev,
return 0;
+err_set_slave_promisc:
+ __team_option_inst_del_port(team, port);
+
err_option_port_add:
team_upper_dev_unlink(team, port);
@@ -1307,6 +1327,12 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
team_port_disable(team, port);
list_del_rcu(&port->list);
+
+ if (dev->flags & IFF_PROMISC)
+ dev_set_promiscuity(port_dev, -1);
+ if (dev->flags & IFF_ALLMULTI)
+ dev_set_allmulti(port_dev, -1);
+
team_upper_dev_unlink(team, port);
netdev_rx_handler_unregister(port_dev);
team_port_disable_netpoll(port);
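
The team change propagates the master's promiscuity and allmulti settings to a newly added port and rolls the promisc reference back if the allmulti step fails; port removal drops both references again. A standalone sketch of that propagate-with-rollback pattern (flag values and counters are illustrative only):

#include <stdio.h>

#define FLAG_PROMISC	0x1
#define FLAG_ALLMULTI	0x2

struct port { int promisc_cnt; int allmulti_cnt; };

/* Push the master's flags down to a new port; undo the promisc reference
 * if the allmulti step fails. */
static int port_add(struct port *p, unsigned master_flags, int fail_allmulti)
{
	if (master_flags & FLAG_PROMISC)
		p->promisc_cnt++;

	if (master_flags & FLAG_ALLMULTI) {
		if (fail_allmulti)
			goto err_allmulti;
		p->allmulti_cnt++;
	}
	return 0;

err_allmulti:
	if (master_flags & FLAG_PROMISC)
		p->promisc_cnt--;		/* roll back the promisc reference */
	return -1;
}

static void port_del(struct port *p, unsigned master_flags)
{
	if (master_flags & FLAG_PROMISC)
		p->promisc_cnt--;
	if (master_flags & FLAG_ALLMULTI)
		p->allmulti_cnt--;
}

int main(void)
{
	struct port p = { 0 };

	if (!port_add(&p, FLAG_PROMISC | FLAG_ALLMULTI, 0))
		port_del(&p, FLAG_PROMISC | FLAG_ALLMULTI);
	port_add(&p, FLAG_PROMISC | FLAG_ALLMULTI, 1);	/* rollback path */
	printf("promisc=%d allmulti=%d\n", p.promisc_cnt, p.allmulti_cnt);
	return 0;
}
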
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 74bebbdb4b15..9195f3476b1d 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1203,6 +1203,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
{QMI_FIXED_INTF(0x2001, 0x7e19, 4)}, /* D-Link DWM-221 B1 */
{QMI_FIXED_INTF(0x2001, 0x7e35, 4)}, /* D-Link DWM-222 */
+ {QMI_FIXED_INTF(0x2020, 0x2031, 4)}, /* Olicard 600 */
{QMI_FIXED_INTF(0x2020, 0x2033, 4)}, /* BroadMobi BM806U */
{QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
{QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 7c1430ed0244..cd15c32b2e43 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -1273,9 +1273,14 @@ static void vrf_setup(struct net_device *dev)
/* default to no qdisc; user can add if desired */
dev->priv_flags |= IFF_NO_QUEUE;
+ dev->priv_flags |= IFF_NO_RX_HANDLER;
- dev->min_mtu = 0;
- dev->max_mtu = 0;
+ /* VRF devices do not care about MTU, but if the MTU is set
+ * too low then the ipv4 and ipv6 protocols are disabled
+ * which breaks networking.
+ */
+ dev->min_mtu = IPV6_MIN_MTU;
+ dev->max_mtu = ETH_MAX_MTU;
}
static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index a20ea270d519..1acc622d2183 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -2728,7 +2728,7 @@ static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
num_msdus++;
num_bytes += ret;
}
- ieee80211_return_txq(hw, txq);
+ ieee80211_return_txq(hw, txq, false);
ieee80211_txq_schedule_end(hw, txq->ac);
record->num_msdus = cpu_to_le16(num_msdus);
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index b73c23d4ce86..41e89db244d2 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -4089,7 +4089,7 @@ static int ath10k_mac_schedule_txq(struct ieee80211_hw *hw, u32 ac)
if (ret < 0)
break;
}
- ieee80211_return_txq(hw, txq);
+ ieee80211_return_txq(hw, txq, false);
ath10k_htt_tx_txq_update(hw, txq);
if (ret == -EBUSY)
break;
@@ -4374,7 +4374,7 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
if (ret < 0)
break;
}
- ieee80211_return_txq(hw, txq);
+ ieee80211_return_txq(hw, txq, false);
ath10k_htt_tx_txq_update(hw, txq);
out:
ieee80211_txq_schedule_end(hw, ac);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 773d428ff1b0..b17e1ca40995 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1938,12 +1938,15 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
goto out;
while ((queue = ieee80211_next_txq(hw, txq->mac80211_qnum))) {
+ bool force;
+
tid = (struct ath_atx_tid *)queue->drv_priv;
ret = ath_tx_sched_aggr(sc, txq, tid);
ath_dbg(common, QUEUE, "ath_tx_sched_aggr returned %d\n", ret);
- ieee80211_return_txq(hw, queue);
+ force = !skb_queue_empty(&tid->retry_q);
+ ieee80211_return_txq(hw, queue, force);
}
out:
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
index fdc56f821b5a..eb6defb6d0cd 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -82,6 +82,7 @@
#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
#define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-"
#define IWL_QU_B_JF_B_FW_PRE "iwlwifi-Qu-b0-jf-b0-"
+#define IWL_QUZ_A_HR_B_FW_PRE "iwlwifi-QuZ-a0-hr-b0-"
#define IWL_QNJ_B_JF_B_FW_PRE "iwlwifi-QuQnj-b0-jf-b0-"
#define IWL_CC_A_FW_PRE "iwlwifi-cc-a0-"
#define IWL_22000_SO_A_JF_B_FW_PRE "iwlwifi-so-a0-jf-b0-"
@@ -105,8 +106,8 @@
IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
-#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
- IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
+#define IWL_QUZ_A_HR_B_MODULE_FIRMWARE(api) \
+ IWL_QUZ_A_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \
IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode"
#define IWL_QNJ_B_JF_B_MODULE_FIRMWARE(api) \
@@ -235,8 +236,20 @@ const struct iwl_cfg iwl_ax101_cfg_qu_hr = {
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
-const struct iwl_cfg iwl22260_2ax_cfg = {
- .name = "Intel(R) Wireless-AX 22260",
+const struct iwl_cfg iwl_ax101_cfg_quz_hr = {
+ .name = "Intel(R) Wi-Fi 6 AX101",
+ .fw_name_pre = IWL_QUZ_A_HR_B_FW_PRE,
+ IWL_DEVICE_22500,
+ /*
+ * This device doesn't support receiving BlockAck with a large bitmap
+ * so we need to restrict the size of transmitted aggregation to the
+ * HT size; mac80211 would otherwise pick the HE max (256) by default.
+ */
+ .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
+};
+
+const struct iwl_cfg iwl_ax200_cfg_cc = {
+ .name = "Intel(R) Wi-Fi 6 AX200 160MHz",
.fw_name_pre = IWL_CC_A_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -249,7 +262,7 @@ const struct iwl_cfg iwl22260_2ax_cfg = {
};
const struct iwl_cfg killer1650x_2ax_cfg = {
- .name = "Killer(R) Wireless-AX 1650x Wireless Network Adapter (200NGW)",
+ .name = "Killer(R) Wi-Fi 6 AX1650x 160MHz Wireless Network Adapter (200NGW)",
.fw_name_pre = IWL_CC_A_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -262,7 +275,7 @@ const struct iwl_cfg killer1650x_2ax_cfg = {
};
const struct iwl_cfg killer1650w_2ax_cfg = {
- .name = "Killer(R) Wireless-AX 1650w Wireless Network Adapter (200D2W)",
+ .name = "Killer(R) Wi-Fi 6 AX1650w 160MHz Wireless Network Adapter (200D2W)",
.fw_name_pre = IWL_CC_A_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -328,7 +341,7 @@ const struct iwl_cfg killer1550s_2ac_cfg_qu_b0_jf_b0 = {
};
const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
- .name = "Killer(R) Wireless-AX 1650i Wireless Network Adapter (22560NGW)",
+ .name = "Killer(R) Wi-Fi 6 AX1650i 160MHz Wireless Network Adapter (201NGW)",
.fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -340,7 +353,7 @@ const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0 = {
};
const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0 = {
- .name = "Killer(R) Wireless-AX 1650s Wireless Network Adapter (22560D2W)",
+ .name = "Killer(R) Wi-Fi 6 AX1650s 160MHz Wireless Network Adapter (201D2W)",
.fw_name_pre = IWL_22000_QU_B_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
@@ -444,6 +457,7 @@ MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QU_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL_QUZ_A_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_QNJ_B_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_CC_A_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_SO_A_JF_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
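
The added macros assemble the firmware file name from the per-device prefix and the ucode API number. A standalone model of that stringify-and-concatenate step (the kernel uses its own __stringify helper; the API number 46 below is only an example value):

#include <stdio.h>

#define STR_1(x)		#x
#define STR(x)			STR_1(x)

#define QUZ_A_HR_B_FW_PRE	"iwlwifi-QuZ-a0-hr-b0-"
#define QUZ_A_HR_B_FW(api)	QUZ_A_HR_B_FW_PRE STR(api) ".ucode"

int main(void)
{
	puts(QUZ_A_HR_B_FW(46));	/* prints iwlwifi-QuZ-a0-hr-b0-46.ucode */
	return 0;
}
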
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
index f119c49cd39c..d7380016f1c0 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c
@@ -1614,6 +1614,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
if (!range) {
IWL_ERR(fwrt, "Failed to fill region header: id=%d, type=%d\n",
le32_to_cpu(reg->region_id), type);
+ memset(*data, 0, le32_to_cpu((*data)->len));
return;
}
@@ -1623,6 +1624,7 @@ iwl_dump_ini_mem(struct iwl_fw_runtime *fwrt,
if (range_size < 0) {
IWL_ERR(fwrt, "Failed to dump region: id=%d, type=%d\n",
le32_to_cpu(reg->region_id), type);
+ memset(*data, 0, le32_to_cpu((*data)->len));
return;
}
range = range + range_size;
@@ -1807,12 +1809,12 @@ _iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
trigger = fwrt->dump.active_trigs[id].trig;
- size = sizeof(*dump_file);
- size += iwl_fw_ini_get_trigger_len(fwrt, trigger);
-
+ size = iwl_fw_ini_get_trigger_len(fwrt, trigger);
if (!size)
return NULL;
+ size += sizeof(*dump_file);
+
dump_file = vzalloc(size);
if (!dump_file)
return NULL;
@@ -1942,14 +1944,10 @@ int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
iwl_dump_error_desc->len = 0;
ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc, false, 0);
- if (ret) {
+ if (ret)
kfree(iwl_dump_error_desc);
- } else {
- set_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status);
-
- /* trigger nmi to halt the fw */
- iwl_force_nmi(fwrt->trans);
- }
+ else
+ iwl_trans_sync_nmi(fwrt->trans);
return ret;
}
@@ -2489,22 +2487,6 @@ IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point);
void iwl_fwrt_stop_device(struct iwl_fw_runtime *fwrt)
{
- /* if the wait event timeout elapses instead of wake up then
- * the driver did not receive NMI interrupt and can not assume the FW
- * is halted
- */
- int ret = wait_event_timeout(fwrt->trans->fw_halt_waitq,
- !test_bit(STATUS_FW_WAIT_DUMP,
- &fwrt->trans->status),
- msecs_to_jiffies(2000));
- if (!ret) {
- /* failed to receive NMI interrupt, assuming the FW is stuck */
- set_bit(STATUS_FW_ERROR, &fwrt->trans->status);
-
- clear_bit(STATUS_FW_WAIT_DUMP, &fwrt->trans->status);
- }
-
- /* Assuming the op mode mutex is held at this point */
iwl_fw_dbg_collect_sync(fwrt);
iwl_trans_stop_device(fwrt->trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c
index 7adf4e4e841a..12310e3d2fc5 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/init.c
+++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c
@@ -76,7 +76,6 @@ void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans,
fwrt->ops_ctx = ops_ctx;
INIT_DELAYED_WORK(&fwrt->dump.wk, iwl_fw_error_dump_wk);
iwl_fwrt_dbgfs_register(fwrt, dbgfs_dir);
- init_waitqueue_head(&fwrt->trans->fw_halt_waitq);
}
IWL_EXPORT_SYMBOL(iwl_fw_runtime_init);
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
index f5f87773667b..93070848280a 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
@@ -549,8 +549,9 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
extern const struct iwl_cfg iwl_ax101_cfg_qu_hr;
+extern const struct iwl_cfg iwl_ax101_cfg_quz_hr;
extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
-extern const struct iwl_cfg iwl22260_2ax_cfg;
+extern const struct iwl_cfg iwl_ax200_cfg_cc;
extern const struct iwl_cfg killer1650s_2ax_cfg_qu_b0_hr_b0;
extern const struct iwl_cfg killer1650i_2ax_cfg_qu_b0_hr_b0;
extern const struct iwl_cfg killer1650x_2ax_cfg;
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
index aea6d03e545a..e539bc94eff7 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h
@@ -327,6 +327,7 @@ enum {
#define CSR_HW_REV_TYPE_NONE (0x00001F0)
#define CSR_HW_REV_TYPE_QNJ (0x0000360)
#define CSR_HW_REV_TYPE_QNJ_B0 (0x0000364)
+#define CSR_HW_REV_TYPE_QUZ (0x0000354)
#define CSR_HW_REV_TYPE_HR_CDB (0x0000340)
#define CSR_HW_REV_TYPE_SO (0x0000370)
#define CSR_HW_REV_TYPE_TY (0x0000420)
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
index bbebbf3efd57..d8690acee40c 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h
@@ -338,7 +338,6 @@ enum iwl_d3_status {
* are sent
* @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
* @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
- * @STATUS_FW_WAIT_DUMP: if set, wait until cleared before collecting dump
*/
enum iwl_trans_status {
STATUS_SYNC_HCMD_ACTIVE,
@@ -351,7 +350,6 @@ enum iwl_trans_status {
STATUS_TRANS_GOING_IDLE,
STATUS_TRANS_IDLE,
STATUS_TRANS_DEAD,
- STATUS_FW_WAIT_DUMP,
};
static inline int
@@ -618,6 +616,7 @@ struct iwl_trans_ops {
struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
u32 dump_mask);
void (*debugfs_cleanup)(struct iwl_trans *trans);
+ void (*sync_nmi)(struct iwl_trans *trans);
};
/**
@@ -831,7 +830,6 @@ struct iwl_trans {
u32 lmac_error_event_table[2];
u32 umac_error_event_table;
unsigned int error_event_table_tlv_status;
- wait_queue_head_t fw_halt_waitq;
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
@@ -1239,10 +1237,12 @@ static inline void iwl_trans_fw_error(struct iwl_trans *trans)
/* prevent double restarts due to the same erroneous FW */
if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
iwl_op_mode_nic_error(trans->op_mode);
+}
- if (test_and_clear_bit(STATUS_FW_WAIT_DUMP, &trans->status))
- wake_up(&trans->fw_halt_waitq);
-
+static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
+{
+ if (trans->ops->sync_nmi)
+ trans->ops->sync_nmi(trans);
}
/*****************************************************
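
The new ->sync_nmi entry is an optional transport op: the inline wrapper only invokes it when the backend provides one, so transports other than PCIe need no change. A small standalone sketch of that optional-callback pattern (types and names are illustrative, not the iwlwifi ones):

#include <stdio.h>

struct trans_ops {
	void (*sync_nmi)(void *trans);
};

struct trans {
	const struct trans_ops *ops;
};

/* Safe to call even when the backend leaves the op unimplemented. */
static void trans_sync_nmi(struct trans *t)
{
	if (t->ops->sync_nmi)
		t->ops->sync_nmi(t);
}

static void pcie_sync_nmi(void *trans)
{
	(void)trans;
	printf("pcie: waiting for the NMI to be processed\n");
}

static const struct trans_ops pcie_ops = { .sync_nmi = pcie_sync_nmi };
static const struct trans_ops other_ops = { .sync_nmi = NULL };

int main(void)
{
	struct trans pcie = { .ops = &pcie_ops };
	struct trans other = { .ops = &other_ops };

	trans_sync_nmi(&pcie);	/* calls into the backend hook */
	trans_sync_nmi(&other);	/* silently does nothing */
	return 0;
}
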
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
index 3a92c09d4692..6a3b11dd2edf 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c
@@ -2714,9 +2714,6 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_mac_ctxt_remove(mvm, vif);
- kfree(mvmvif->ap_wep_key);
- mvmvif->ap_wep_key = NULL;
-
mutex_unlock(&mvm->mutex);
}
@@ -3183,24 +3180,7 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
ret = iwl_mvm_update_sta(mvm, vif, sta);
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
- /* if wep is used, need to set the key for the station now */
- if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) {
- mvm_sta->wep_key =
- kmemdup(mvmvif->ap_wep_key,
- sizeof(*mvmvif->ap_wep_key) +
- mvmvif->ap_wep_key->keylen,
- GFP_KERNEL);
- if (!mvm_sta->wep_key) {
- ret = -ENOMEM;
- goto out_unlock;
- }
-
- ret = iwl_mvm_set_sta_key(mvm, vif, sta,
- mvm_sta->wep_key,
- STA_KEY_IDX_INVALID);
- } else {
- ret = 0;
- }
+ ret = 0;
/* we don't support TDLS during DCM */
if (iwl_mvm_phy_ctx_count(mvm) > 1)
@@ -3242,17 +3222,6 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
NL80211_TDLS_DISABLE_LINK);
}
- /* Remove STA key if this is an AP using WEP */
- if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) {
- int rm_ret = iwl_mvm_remove_sta_key(mvm, vif, sta,
- mvm_sta->wep_key);
-
- if (!ret)
- ret = rm_ret;
- kfree(mvm_sta->wep_key);
- mvm_sta->wep_key = NULL;
- }
-
if (unlikely(ret &&
test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
&mvm->status)))
@@ -3289,6 +3258,13 @@ static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_sta *sta, u32 changed)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (changed & (IEEE80211_RC_BW_CHANGED |
+ IEEE80211_RC_SUPP_RATES_CHANGED |
+ IEEE80211_RC_NSS_CHANGED))
+ iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band,
+ true);
if (vif->type == NL80211_IFTYPE_STATION &&
changed & IEEE80211_RC_NSS_CHANGED)
@@ -3439,20 +3415,12 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
break;
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
- if (vif->type == NL80211_IFTYPE_AP) {
- struct iwl_mvm_vif *mvmvif =
- iwl_mvm_vif_from_mac80211(vif);
-
- mvmvif->ap_wep_key = kmemdup(key,
- sizeof(*key) + key->keylen,
- GFP_KERNEL);
- if (!mvmvif->ap_wep_key)
- return -ENOMEM;
- }
-
- if (vif->type != NL80211_IFTYPE_STATION)
- return 0;
- break;
+ if (vif->type == NL80211_IFTYPE_STATION)
+ break;
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return -EOPNOTSUPP;
+ /* support HW crypto on TX */
+ return 0;
default:
/* currently FW supports only one optional cipher scheme */
if (hw->n_cipher_schemes &&
@@ -3540,12 +3508,17 @@ static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset);
if (ret) {
IWL_WARN(mvm, "set key failed\n");
+ key->hw_key_idx = STA_KEY_IDX_INVALID;
/*
* can't add key for RX, but we don't need it
- * in the device for TX so still return 0
+ * in the device for TX so still return 0,
+ * unless we have new TX API where we cannot
+ * put key material into the TX_CMD
*/
- key->hw_key_idx = STA_KEY_IDX_INVALID;
- ret = 0;
+ if (iwl_mvm_has_new_tx_api(mvm))
+ ret = -EOPNOTSUPP;
+ else
+ ret = 0;
}
break;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
index bca6f6b536d9..a50dc53df086 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h
@@ -498,7 +498,6 @@ struct iwl_mvm_vif {
netdev_features_t features;
struct iwl_probe_resp_data __rcu *probe_resp_data;
- struct ieee80211_key_conf *ap_wep_key;
};
static inline struct iwl_mvm_vif *
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
index 498c315291cf..98d123dd7177 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -1399,7 +1399,9 @@ void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
list_del_init(&mvmtxq->list);
+ local_bh_disable();
iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
+ local_bh_enable();
}
mutex_unlock(&mvm->mutex);
@@ -2333,21 +2335,6 @@ int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
timeout);
- if (mvmvif->ap_wep_key) {
- u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);
-
- __set_bit(key_offset, mvm->fw_key_table);
-
- if (key_offset == STA_KEY_IDX_INVALID)
- return -ENOSPC;
-
- ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id,
- mvmvif->ap_wep_key, true, 0, NULL, 0,
- key_offset, 0);
- if (ret)
- return ret;
- }
-
return 0;
}
@@ -2419,28 +2406,6 @@ int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);
- if (mvmvif->ap_wep_key) {
- int i;
-
- if (!__test_and_clear_bit(mvmvif->ap_wep_key->hw_key_idx,
- mvm->fw_key_table)) {
- IWL_ERR(mvm, "offset %d not used in fw key table.\n",
- mvmvif->ap_wep_key->hw_key_idx);
- return -ENOENT;
- }
-
- /* track which key was deleted last */
- for (i = 0; i < STA_KEY_MAX_NUM; i++) {
- if (mvm->fw_key_deleted[i] < U8_MAX)
- mvm->fw_key_deleted[i]++;
- }
- mvm->fw_key_deleted[mvmvif->ap_wep_key->hw_key_idx] = 0;
- ret = __iwl_mvm_remove_sta_key(mvm, mvmvif->mcast_sta.sta_id,
- mvmvif->ap_wep_key, true);
- if (ret)
- return ret;
- }
-
ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
if (ret)
IWL_WARN(mvm, "Failed sending remove station\n");
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
index 79700c7310a1..b4d4071b865d 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h
@@ -8,7 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -31,7 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2016 Intel Deutschland GmbH
- * Copyright(c) 2018 Intel Corporation
+ * Copyright(c) 2018 - 2019 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -394,7 +394,6 @@ struct iwl_mvm_rxq_dup_data {
* the BA window. To be used for UAPSD only.
* @ptk_pn: per-queue PTK PN data structures
* @dup_data: per queue duplicate packet detection data
- * @wep_key: used in AP mode. Is a duplicate of the WEP key.
* @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID
* @tx_ant: the index of the antenna to use for data tx to this station. Only
* used during connection establishment (e.g. for the 4 way handshake
@@ -426,8 +425,6 @@ struct iwl_mvm_sta {
struct iwl_mvm_key_pn __rcu *ptk_pn[4];
struct iwl_mvm_rxq_dup_data *dup_data;
- struct ieee80211_key_conf *wep_key;
-
u8 reserved_queue;
/* Temporary, until the new TLC will control the Tx protection */
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
index 2b94e4cef56c..9f1af8da9dc1 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
@@ -953,14 +953,15 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0xA0F0, 0x1652, killer1650i_2ax_cfg_qu_b0_hr_b0)},
{IWL_PCI_DEVICE(0xA0F0, 0x4070, iwl_ax101_cfg_qu_hr)},
- {IWL_PCI_DEVICE(0x2723, 0x0080, iwl22260_2ax_cfg)},
- {IWL_PCI_DEVICE(0x2723, 0x0084, iwl22260_2ax_cfg)},
- {IWL_PCI_DEVICE(0x2723, 0x0088, iwl22260_2ax_cfg)},
- {IWL_PCI_DEVICE(0x2723, 0x008C, iwl22260_2ax_cfg)},
+ {IWL_PCI_DEVICE(0x2723, 0x0080, iwl_ax200_cfg_cc)},
+ {IWL_PCI_DEVICE(0x2723, 0x0084, iwl_ax200_cfg_cc)},
+ {IWL_PCI_DEVICE(0x2723, 0x0088, iwl_ax200_cfg_cc)},
+ {IWL_PCI_DEVICE(0x2723, 0x008C, iwl_ax200_cfg_cc)},
{IWL_PCI_DEVICE(0x2723, 0x1653, killer1650w_2ax_cfg)},
{IWL_PCI_DEVICE(0x2723, 0x1654, killer1650x_2ax_cfg)},
- {IWL_PCI_DEVICE(0x2723, 0x4080, iwl22260_2ax_cfg)},
- {IWL_PCI_DEVICE(0x2723, 0x4088, iwl22260_2ax_cfg)},
+ {IWL_PCI_DEVICE(0x2723, 0x2080, iwl_ax200_cfg_cc)},
+ {IWL_PCI_DEVICE(0x2723, 0x4080, iwl_ax200_cfg_cc)},
+ {IWL_PCI_DEVICE(0x2723, 0x4088, iwl_ax200_cfg_cc)},
{IWL_PCI_DEVICE(0x1a56, 0x1653, killer1650w_2ax_cfg)},
{IWL_PCI_DEVICE(0x1a56, 0x1654, killer1650x_2ax_cfg)},
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index bf8b61a476c5..59213164f35e 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -1043,7 +1043,7 @@ static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans)
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans);
-void iwl_trans_sync_nmi(struct iwl_trans *trans);
+void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans);
#ifdef CONFIG_IWLWIFI_DEBUGFS
int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index fe8269d023de..79c1dc05f948 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -3318,7 +3318,8 @@ static void iwl_trans_pcie_resume(struct iwl_trans *trans)
.unref = iwl_trans_pcie_unref, \
.dump_data = iwl_trans_pcie_dump_data, \
.d3_suspend = iwl_trans_pcie_d3_suspend, \
- .d3_resume = iwl_trans_pcie_d3_resume
+ .d3_resume = iwl_trans_pcie_d3_resume, \
+ .sync_nmi = iwl_trans_pcie_sync_nmi
#ifdef CONFIG_PM_SLEEP
#define IWL_TRANS_PM_OPS \
@@ -3542,6 +3543,10 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
} else if (cfg == &iwl_ax101_cfg_qu_hr) {
if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
+ CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
+ trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0) {
+ trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
+ } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
trans->cfg = &iwl_ax101_cfg_qu_hr;
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
@@ -3560,7 +3565,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
}
} else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR) &&
- (trans->cfg != &iwl22260_2ax_cfg ||
+ (trans->cfg != &iwl_ax200_cfg_cc ||
trans->hw_rev == CSR_HW_REV_TYPE_QNJ_B0)) {
u32 hw_status;
@@ -3637,7 +3642,7 @@ out_no_pci:
return ERR_PTR(ret);
}
-void iwl_trans_sync_nmi(struct iwl_trans *trans)
+void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
{
unsigned long timeout = jiffies + IWL_TRANS_NMI_TIMEOUT;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
index 88530d9f4a54..38d110338987 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c
@@ -965,7 +965,7 @@ static int iwl_pcie_gen2_send_hcmd_sync(struct iwl_trans *trans,
cmd_str);
ret = -ETIMEDOUT;
- iwl_trans_sync_nmi(trans);
+ iwl_trans_pcie_sync_nmi(trans);
goto cancel;
}
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
index 9fbd37d23e85..7be73e2c4681 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c
@@ -1960,7 +1960,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
iwl_get_cmd_string(trans, cmd->id));
ret = -ETIMEDOUT;
- iwl_trans_sync_nmi(trans);
+ iwl_trans_pcie_sync_nmi(trans);
goto cancel;
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 0838af04d681..524eb5805995 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2644,7 +2644,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
enum nl80211_band band;
const struct ieee80211_ops *ops = &mac80211_hwsim_ops;
struct net *net;
- int idx;
+ int idx, i;
int n_limits = 0;
if (WARN_ON(param->channels > 1 && !param->use_chanctx))
@@ -2768,12 +2768,23 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
goto failed_hw;
}
+ data->if_combination.max_interfaces = 0;
+ for (i = 0; i < n_limits; i++)
+ data->if_combination.max_interfaces +=
+ data->if_limits[i].max;
+
data->if_combination.n_limits = n_limits;
- data->if_combination.max_interfaces = 2048;
data->if_combination.limits = data->if_limits;
- hw->wiphy->iface_combinations = &data->if_combination;
- hw->wiphy->n_iface_combinations = 1;
+ /*
+	 * If we were actually asked to support interface combinations,
+	 * advertise them - if only a single interface type (e.g. just
+	 * IBSS) is possible, don't advertise it as a combination.
+ */
+ if (data->if_combination.max_interfaces > 1) {
+ hw->wiphy->iface_combinations = &data->if_combination;
+ hw->wiphy->n_iface_combinations = 1;
+ }
if (param->ciphers) {
memcpy(data->ciphers, param->ciphers,
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/init.c b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
index d54dda67d036..3af45949e868 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/init.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/init.c
@@ -510,6 +510,8 @@ int mt7603_register_device(struct mt7603_dev *dev)
bus_ops->rmw = mt7603_rmw;
dev->mt76.bus = bus_ops;
+ spin_lock_init(&dev->ps_lock);
+
INIT_DELAYED_WORK(&dev->mac_work, mt7603_mac_work);
tasklet_init(&dev->pre_tbtt_tasklet, mt7603_pre_tbtt_tasklet,
(unsigned long)dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
index 5e31d7da96fc..5abc02b57818 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mac.c
@@ -343,7 +343,7 @@ void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid)
MT_BA_CONTROL_1_RESET));
}
-void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
+void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
int ba_size)
{
u32 addr = mt7603_wtbl2_addr(wcid);
@@ -358,43 +358,6 @@ void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
mt76_clear(dev, addr + (15 * 4), tid_mask);
return;
}
- mt76_poll(dev, MT_WTBL_UPDATE, MT_WTBL_UPDATE_BUSY, 0, 5000);
-
- mt7603_mac_stop(dev);
- switch (tid) {
- case 0:
- mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID0_SN, ssn);
- break;
- case 1:
- mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID1_SN, ssn);
- break;
- case 2:
- mt76_rmw_field(dev, addr + (2 * 4), MT_WTBL2_W2_TID2_SN_LO,
- ssn);
- mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID2_SN_HI,
- ssn >> 8);
- break;
- case 3:
- mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID3_SN, ssn);
- break;
- case 4:
- mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID4_SN, ssn);
- break;
- case 5:
- mt76_rmw_field(dev, addr + (3 * 4), MT_WTBL2_W3_TID5_SN_LO,
- ssn);
- mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID5_SN_HI,
- ssn >> 4);
- break;
- case 6:
- mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID6_SN, ssn);
- break;
- case 7:
- mt76_rmw_field(dev, addr + (4 * 4), MT_WTBL2_W4_TID7_SN, ssn);
- break;
- }
- mt7603_wtbl_update(dev, wcid, MT_WTBL_UPDATE_WTBL2);
- mt7603_mac_start(dev);
for (i = 7; i > 0; i--) {
if (ba_size >= MT_AGG_SIZE_LIMIT(i))
@@ -827,6 +790,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_tx_rate *rate = &info->control.rates[0];
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
struct ieee80211_vif *vif = info->control.vif;
struct mt7603_vif *mvif;
int wlan_idx;
@@ -834,6 +798,7 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
int tx_count = 8;
u8 frame_type, frame_subtype;
u16 fc = le16_to_cpu(hdr->frame_control);
+ u16 seqno = 0;
u8 vif_idx = 0;
u32 val;
u8 bw;
@@ -919,7 +884,17 @@ mt7603_mac_write_txwi(struct mt7603_dev *dev, __le32 *txwi,
tx_count = 0x1f;
val = FIELD_PREP(MT_TXD3_REM_TX_COUNT, tx_count) |
- FIELD_PREP(MT_TXD3_SEQ, le16_to_cpu(hdr->seq_ctrl));
+ MT_TXD3_SN_VALID;
+
+ if (ieee80211_is_data_qos(hdr->frame_control))
+ seqno = le16_to_cpu(hdr->seq_ctrl);
+ else if (ieee80211_is_back_req(hdr->frame_control))
+ seqno = le16_to_cpu(bar->start_seq_num);
+ else
+ val &= ~MT_TXD3_SN_VALID;
+
+ val |= FIELD_PREP(MT_TXD3_SEQ, seqno >> 4);
+
txwi[3] = cpu_to_le32(val);
if (key) {
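
The TXWI now carries a sequence number taken either from the QoS header or from a BAR's starting sequence; the >> 4 strips the 4-bit fragment number that shares the 802.11 Sequence Control word with the 12-bit sequence number. A one-function sketch of that extraction (assuming the field has already been converted to CPU byte order):

#include <stdint.h>
#include <stdio.h>

/* The Sequence Control field packs a 4-bit fragment number in the low
 * bits and the 12-bit sequence number above it; hardware descriptors
 * typically want only the sequence number. */
static uint16_t seq_ctrl_to_sn(uint16_t seq_ctrl)
{
	return (seq_ctrl >> 4) & 0x0fff;
}

int main(void)
{
	uint16_t seq_ctrl = (2047 << 4) | 3;	/* SN 2047, fragment 3 */

	printf("sn = %u\n", seq_ctrl_to_sn(seq_ctrl));	/* prints 2047 */
	return 0;
}
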
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
index cc0fe0933b2d..a3c4ef198bfe 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c
@@ -372,7 +372,7 @@ mt7603_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps)
struct mt7603_sta *msta = (struct mt7603_sta *)sta->drv_priv;
struct sk_buff_head list;
- mt76_stop_tx_queues(&dev->mt76, sta, false);
+ mt76_stop_tx_queues(&dev->mt76, sta, true);
mt7603_wtbl_set_ps(dev, msta, ps);
if (ps)
return;
@@ -584,13 +584,13 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
case IEEE80211_AMPDU_TX_OPERATIONAL:
mtxq->aggr = true;
mtxq->send_bar = false;
- mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, ba_size);
+ mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, ba_size);
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
mtxq->aggr = false;
ieee80211_send_bar(vif, sta->addr, tid, mtxq->agg_ssn);
- mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1);
+ mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
break;
case IEEE80211_AMPDU_TX_START:
mtxq->agg_ssn = *ssn << 4;
@@ -598,7 +598,7 @@ mt7603_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
break;
case IEEE80211_AMPDU_TX_STOP_CONT:
mtxq->aggr = false;
- mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, *ssn, -1);
+ mt7603_mac_tx_ba_reset(dev, msta->wcid.idx, tid, -1);
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
index 79f332429432..6049f3b7c8fe 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
+++ b/drivers/net/wireless/mediatek/mt76/mt7603/mt7603.h
@@ -200,7 +200,7 @@ void mt7603_beacon_set_timer(struct mt7603_dev *dev, int idx, int intval);
int mt7603_mac_fill_rx(struct mt7603_dev *dev, struct sk_buff *skb);
void mt7603_mac_add_txs(struct mt7603_dev *dev, void *data);
void mt7603_mac_rx_ba_reset(struct mt7603_dev *dev, void *addr, u8 tid);
-void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid, int ssn,
+void mt7603_mac_tx_ba_reset(struct mt7603_dev *dev, int wcid, int tid,
int ba_size);
void mt7603_pse_client_reset(struct mt7603_dev *dev);
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
index 9ed231abe916..4fe5a83ca5a4 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c
@@ -466,7 +466,6 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
return;
rcu_read_lock();
- mt76_tx_status_lock(mdev, &list);
if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid))
wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]);
@@ -479,6 +478,8 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
drv_priv);
}
+ mt76_tx_status_lock(mdev, &list);
+
if (wcid) {
if (stat->pktid >= MT_PACKET_ID_FIRST)
status.skb = mt76_tx_status_skb_get(mdev, wcid,
@@ -498,7 +499,9 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
if (*update == 0 && stat_val == stat_cache &&
stat->wcid == msta->status.wcid && msta->n_frames < 32) {
msta->n_frames++;
- goto out;
+ mt76_tx_status_unlock(mdev, &list);
+ rcu_read_unlock();
+ return;
}
mt76x02_mac_fill_tx_status(dev, status.info, &msta->status,
@@ -514,11 +517,10 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev,
if (status.skb)
mt76_tx_status_skb_done(mdev, status.skb, &list);
- else
- ieee80211_tx_status_ext(mt76_hw(dev), &status);
-
-out:
mt76_tx_status_unlock(mdev, &list);
+
+ if (!status.skb)
+ ieee80211_tx_status_ext(mt76_hw(dev), &status);
rcu_read_unlock();
}
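
The reordering above narrows the status-list lock so that ieee80211_tx_status_ext() is only called after the lock has been dropped, keeping mac80211's own locking out of the driver's critical section. A tiny sketch of the gather-under-lock, report-outside-lock shape (illustrative; not the mt76 structures):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t status_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the mac80211 report call: it may take its own locks, so it
 * must not run with status_lock held. */
static void report_to_stack(int acked)
{
	printf("tx status: %s\n", acked ? "acked" : "failed");
}

/* Gather everything needed under the lock, drop it, then report. */
static void send_tx_status(int acked)
{
	int to_report;

	pthread_mutex_lock(&status_lock);
	to_report = acked;		/* look up wcid/skb, fill rates, ... */
	pthread_mutex_unlock(&status_lock);

	report_to_stack(to_report);
}

int main(void)
{
	send_tx_status(1);
	return 0;
}
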
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
index 4b1744e9fb78..50b92ca92bd7 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h
@@ -673,7 +673,6 @@ enum rt2x00_state_flags {
CONFIG_CHANNEL_HT40,
CONFIG_POWERSAVING,
CONFIG_HT_DISABLED,
- CONFIG_QOS_DISABLED,
CONFIG_MONITORING,
/*
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
index 2825560e2424..e8462f25d252 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
@@ -642,19 +642,9 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
rt2x00dev->intf_associated--;
rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated);
-
- clear_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
}
/*
- * Check for access point which do not support 802.11e . We have to
- * generate data frames sequence number in S/W for such AP, because
- * of H/W bug.
- */
- if (changes & BSS_CHANGED_QOS && !bss_conf->qos)
- set_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags);
-
- /*
* When the erp information has changed, we should perform
* additional configuration steps. For all other changes we are done.
*/
diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
index 92ddc19e7bf7..4834b4eb0206 100644
--- a/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
@@ -201,15 +201,18 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) {
/*
* rt2800 has a H/W (or F/W) bug, device incorrectly increase
- * seqno on retransmited data (non-QOS) frames. To workaround
- * the problem let's generate seqno in software if QOS is
- * disabled.
+ * seqno on retransmitted data (non-QOS) and management frames.
+	 * To work around the problem, generate the seqno in software,
+	 * except for beacons, which are transmitted periodically by H/W
+	 * and hence must have their seqno assigned by hardware.
*/
- if (test_bit(CONFIG_QOS_DISABLED, &rt2x00dev->flags))
- __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
- else
+ if (ieee80211_is_beacon(hdr->frame_control)) {
+ __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
/* H/W will generate sequence number */
return;
+ }
+
+ __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags);
}
/*