Diffstat (limited to 'drivers/net')
-rw-r--r-- drivers/net/dsa/microchip/ksz_common.c | 16
-rw-r--r-- drivers/net/dsa/mv88e6xxx/chip.c | 6
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global1.c | 31
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global1.h | 1
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global2.c | 2
-rw-r--r-- drivers/net/dsa/mv88e6xxx/global2.h | 1
-rw-r--r-- drivers/net/dsa/sja1105/sja1105.h | 4
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_dynamic_config.c | 93
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_main.c | 120
-rw-r--r-- drivers/net/dsa/sja1105/sja1105_spi.c | 4
-rw-r--r-- drivers/net/ethernet/adi/adin1110.c | 2
-rw-r--r-- drivers/net/ethernet/amazon/ena/ena_netdev.c | 3
-rw-r--r-- drivers/net/ethernet/broadcom/asp2/bcmasp.c | 10
-rw-r--r-- drivers/net/ethernet/broadcom/asp2/bcmasp.h | 4
-rw-r--r-- drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt.c | 5
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 54
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h | 1
-rw-r--r-- drivers/net/ethernet/cadence/macb_main.c | 5
-rw-r--r-- drivers/net/ethernet/engleder/tsnep_ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/engleder/tsnep_main.c | 18
-rw-r--r-- drivers/net/ethernet/freescale/enetc/enetc_pf.c | 2
-rw-r--r-- drivers/net/ethernet/google/gve/gve_rx_dqo.c | 5
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hnae3.h | 1
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 11
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 28
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c | 20
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 14
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 18
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 3
-rw-r--r-- drivers/net/ethernet/huawei/hinic/hinic_port.c | 3
-rw-r--r-- drivers/net/ethernet/ibm/ibmveth.c | 25
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 8
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf.h | 2
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/intel/iavf/iavf_main.c | 17
-rw-r--r-- drivers/net/ethernet/intel/ice/ice_virtchnl.c | 12
-rw-r--r-- drivers/net/ethernet/intel/igb/igb.h | 4
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 10
-rw-r--r-- drivers/net/ethernet/intel/igbvf/igbvf.h | 4
-rw-r--r-- drivers/net/ethernet/intel/igc/igc.h | 4
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_ethtool.c | 31
-rw-r--r-- drivers/net/ethernet/intel/igc/igc_main.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 28
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c | 5
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_main.c | 8
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_tx.c | 8
-rw-r--r-- drivers/net/ethernet/marvell/octeon_ep/octep_tx.h | 16
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 21
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c | 6
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h | 2
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 43
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 3
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 7
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c | 49
-rw-r--r-- drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h | 4
-rw-r--r-- drivers/net/ethernet/marvell/sky2.h | 2
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_eth_soc.c | 12
-rw-r--r-- drivers/net/ethernet/mediatek/mtk_ppe_offload.c | 6
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 21
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 49
-rw-r--r-- drivers/net/ethernet/microchip/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_api.c | 18
-rw-r--r-- drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c | 59
-rw-r--r-- drivers/net/ethernet/microsoft/mana/mana_en.c | 211
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_dev.h | 1
-rw-r--r-- drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 10
-rw-r--r-- drivers/net/ethernet/qlogic/qed/qed_ll2.h | 2
-rw-r--r-- drivers/net/ethernet/renesas/rswitch.c | 33
-rw-r--r-- drivers/net/ethernet/renesas/rswitch.h | 4
-rw-r--r-- drivers/net/ethernet/sfc/rx.c | 20
-rw-r--r-- drivers/net/ethernet/sfc/tc.c | 21
-rw-r--r-- drivers/net/ethernet/sfc/tc_conntrack.c | 7
-rw-r--r-- drivers/net/ethernet/sfc/tc_counters.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/tc_encap_actions.c | 4
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/common.h | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 16
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c | 16
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 16
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c | 16
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac.h | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 32
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 165
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 7
-rw-r--r-- drivers/net/ethernet/ti/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/ti/am65-cpsw-nuss.c | 3
-rw-r--r-- drivers/net/ethernet/ti/icssg/icssg_prueth.c | 6
-rw-r--r-- drivers/net/macsec.c | 3
-rw-r--r-- drivers/net/phy/micrel.c | 9
-rw-r--r-- drivers/net/team/team.c | 10
-rw-r--r-- drivers/net/thunderbolt/main.c | 3
-rw-r--r-- drivers/net/usb/r8152.c | 3
-rw-r--r-- drivers/net/usb/smsc75xx.c | 4
-rw-r--r-- drivers/net/veth.c | 6
-rw-r--r-- drivers/net/virtio_net.c | 228
-rw-r--r-- drivers/net/vxlan/vxlan_core.c | 4
-rw-r--r-- drivers/net/wan/fsl_ucc_hdlc.c | 12
-rw-r--r-- drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h | 9
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/fw/error-dump.h | 6
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/fw.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c | 121
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/scan.c | 2
-rw-r--r-- drivers/net/wireless/intel/iwlwifi/mvm/tx.c | 3
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c | 22
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/fw.h | 2
-rw-r--r-- drivers/net/wireless/marvell/mwifiex/sta_rx.c | 16
-rw-r--r-- drivers/net/wireless/mediatek/mt76/dma.c | 8
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c | 7
-rw-r--r-- drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c | 13
-rw-r--r-- drivers/net/wireless/realtek/rtw88/rtw8723d.h | 1
121 files changed, 1404 insertions, 705 deletions
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 6673122266b7..42db7679c360 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -2335,13 +2335,27 @@ static u32 ksz_get_phy_flags(struct dsa_switch *ds, int port)
{
struct ksz_device *dev = ds->priv;
- if (dev->chip_id == KSZ8830_CHIP_ID) {
+ switch (dev->chip_id) {
+ case KSZ8830_CHIP_ID:
/* Silicon Errata Sheet (DS80000830A):
* Port 1 does not work with LinkMD Cable-Testing.
* Port 1 does not respond to received PAUSE control frames.
*/
if (!port)
return MICREL_KSZ8_P1_ERRATA;
+ break;
+ case KSZ9477_CHIP_ID:
+ /* KSZ9477 Errata DS80000754C
+ *
+ * Module 4: Energy Efficient Ethernet (EEE) feature select must
+ * be manually disabled
+ * The EEE feature is enabled by default, but it is not fully
+ * operational. It must be manually disabled through register
+ * controls. If not disabled, the PHY ports can auto-negotiate
+ * to enable EEE, and this feature can cause link drops when
+ * linked to another device supporting EEE.
+ */
+ return MICREL_NO_EEE;
}
return 0;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 52a99d8bada0..ab434a77b059 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -2958,14 +2958,16 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip)
* from the wrong location, resulting in the switch booting
* into the wrong mode and becoming inoperable.
*/
- mv88e6xxx_g1_wait_eeprom_done(chip);
+ if (chip->info->ops->get_eeprom)
+ mv88e6xxx_g2_eeprom_wait(chip);
gpiod_set_value_cansleep(gpiod, 1);
usleep_range(10000, 20000);
gpiod_set_value_cansleep(gpiod, 0);
usleep_range(10000, 20000);
- mv88e6xxx_g1_wait_eeprom_done(chip);
+ if (chip->info->ops->get_eeprom)
+ mv88e6xxx_g2_eeprom_wait(chip);
}
}
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index 2fa55a643591..174c773b38c2 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -75,37 +75,6 @@ static int mv88e6xxx_g1_wait_init_ready(struct mv88e6xxx_chip *chip)
return mv88e6xxx_g1_wait_bit(chip, MV88E6XXX_G1_STS, bit, 1);
}
-void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip)
-{
- const unsigned long timeout = jiffies + 1 * HZ;
- u16 val;
- int err;
-
- /* Wait up to 1 second for the switch to finish reading the
- * EEPROM.
- */
- while (time_before(jiffies, timeout)) {
- err = mv88e6xxx_g1_read(chip, MV88E6XXX_G1_STS, &val);
- if (err) {
- dev_err(chip->dev, "Error reading status");
- return;
- }
-
- /* If the switch is still resetting, it may not
- * respond on the bus, and so MDIO read returns
- * 0xffff. Differentiate between that, and waiting for
- * the EEPROM to be done by bit 0 being set.
- */
- if (val != 0xffff &&
- val & BIT(MV88E6XXX_G1_STS_IRQ_EEPROM_DONE))
- return;
-
- usleep_range(1000, 2000);
- }
-
- dev_err(chip->dev, "Timeout waiting for EEPROM done");
-}
-
/* Offset 0x01: Switch MAC Address Register Bytes 0 & 1
* Offset 0x02: Switch MAC Address Register Bytes 2 & 3
* Offset 0x03: Switch MAC Address Register Bytes 4 & 5
diff --git a/drivers/net/dsa/mv88e6xxx/global1.h b/drivers/net/dsa/mv88e6xxx/global1.h
index c99ddd117fe6..1095261f5b49 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.h
+++ b/drivers/net/dsa/mv88e6xxx/global1.h
@@ -282,7 +282,6 @@ int mv88e6xxx_g1_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
int mv88e6185_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6352_g1_reset(struct mv88e6xxx_chip *chip);
int mv88e6250_g1_reset(struct mv88e6xxx_chip *chip);
-void mv88e6xxx_g1_wait_eeprom_done(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_enable(struct mv88e6xxx_chip *chip);
int mv88e6185_g1_ppu_disable(struct mv88e6xxx_chip *chip);
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index 937a01f2ba75..b2b5f6ba438f 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -340,7 +340,7 @@ int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip)
* Offset 0x15: EEPROM Addr (for 8-bit data access)
*/
-static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
+int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
{
int bit = __bf_shf(MV88E6XXX_G2_EEPROM_CMD_BUSY);
int err;
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 7e091965582b..d9434f7cae53 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -365,6 +365,7 @@ int mv88e6xxx_g2_trunk_clear(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g2_device_mapping_write(struct mv88e6xxx_chip *chip, int target,
int port);
+int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip);
extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
extern const struct mv88e6xxx_irq_ops mv88e6250_watchdog_ops;
diff --git a/drivers/net/dsa/sja1105/sja1105.h b/drivers/net/dsa/sja1105/sja1105.h
index dee35ba924ad..8c66d3bf61f0 100644
--- a/drivers/net/dsa/sja1105/sja1105.h
+++ b/drivers/net/dsa/sja1105/sja1105.h
@@ -132,6 +132,8 @@ struct sja1105_info {
int max_frame_mem;
int num_ports;
bool multiple_cascade_ports;
+ /* Every {port, TXQ} has its own CBS shaper */
+ bool fixed_cbs_mapping;
enum dsa_tag_protocol tag_proto;
const struct sja1105_dynamic_table_ops *dyn_ops;
const struct sja1105_table_ops *static_ops;
@@ -264,6 +266,8 @@ struct sja1105_private {
* the switch doesn't confuse them with one another.
*/
struct mutex mgmt_lock;
+ /* Serializes accesses to the FDB */
+ struct mutex fdb_lock;
/* PTP two-step TX timestamp ID, and its serialization lock */
spinlock_t ts_id_lock;
u8 ts_id;
diff --git a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
index 7729d3f8b7f5..984c0e604e8d 100644
--- a/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
+++ b/drivers/net/dsa/sja1105/sja1105_dynamic_config.c
@@ -1175,18 +1175,15 @@ const struct sja1105_dynamic_table_ops sja1110_dyn_ops[BLK_IDX_MAX_DYN] = {
static int
sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
- struct sja1105_dyn_cmd *cmd,
- const struct sja1105_dynamic_table_ops *ops)
+ const struct sja1105_dynamic_table_ops *ops,
+ void *entry, bool check_valident,
+ bool check_errors)
{
u8 packed_buf[SJA1105_MAX_DYN_CMD_SIZE] = {};
+ struct sja1105_dyn_cmd cmd = {};
int rc;
- /* We don't _need_ to read the full entry, just the command area which
- * is a fixed SJA1105_SIZE_DYN_CMD. But our cmd_packing() API expects a
- * buffer that contains the full entry too. Additionally, our API
- * doesn't really know how many bytes into the buffer does the command
- * area really begin. So just read back the whole entry.
- */
+ /* Read back the whole entry + command structure. */
rc = sja1105_xfer_buf(priv, SPI_READ, ops->addr, packed_buf,
ops->packed_size);
if (rc)
@@ -1195,11 +1192,25 @@ sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
/* Unpack the command structure, and return it to the caller in case it
* needs to perform further checks on it (VALIDENT).
*/
- memset(cmd, 0, sizeof(*cmd));
- ops->cmd_packing(packed_buf, cmd, UNPACK);
+ ops->cmd_packing(packed_buf, &cmd, UNPACK);
/* Hardware hasn't cleared VALID => still working on it */
- return cmd->valid ? -EAGAIN : 0;
+ if (cmd.valid)
+ return -EAGAIN;
+
+ if (check_valident && !cmd.valident && !(ops->access & OP_VALID_ANYWAY))
+ return -ENOENT;
+
+ if (check_errors && cmd.errors)
+ return -EINVAL;
+
+ /* Don't dereference possibly NULL pointer - maybe caller
+ * only wanted to see whether the entry existed or not.
+ */
+ if (entry)
+ ops->entry_packing(packed_buf, entry, UNPACK);
+
+ return 0;
}
/* Poll the dynamic config entry's control area until the hardware has
@@ -1208,16 +1219,19 @@ sja1105_dynamic_config_poll_valid(struct sja1105_private *priv,
*/
static int
sja1105_dynamic_config_wait_complete(struct sja1105_private *priv,
- struct sja1105_dyn_cmd *cmd,
- const struct sja1105_dynamic_table_ops *ops)
+ const struct sja1105_dynamic_table_ops *ops,
+ void *entry, bool check_valident,
+ bool check_errors)
{
- int rc;
-
- return read_poll_timeout(sja1105_dynamic_config_poll_valid,
- rc, rc != -EAGAIN,
- SJA1105_DYNAMIC_CONFIG_SLEEP_US,
- SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
- false, priv, cmd, ops);
+ int err, rc;
+
+ err = read_poll_timeout(sja1105_dynamic_config_poll_valid,
+ rc, rc != -EAGAIN,
+ SJA1105_DYNAMIC_CONFIG_SLEEP_US,
+ SJA1105_DYNAMIC_CONFIG_TIMEOUT_US,
+ false, priv, ops, entry, check_valident,
+ check_errors);
+ return err < 0 ? err : rc;
}
/* Provides read access to the settings through the dynamic interface
@@ -1286,25 +1300,14 @@ int sja1105_dynamic_config_read(struct sja1105_private *priv,
mutex_lock(&priv->dynamic_config_lock);
rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
ops->packed_size);
- if (rc < 0) {
- mutex_unlock(&priv->dynamic_config_lock);
- return rc;
- }
-
- rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
- mutex_unlock(&priv->dynamic_config_lock);
if (rc < 0)
- return rc;
+ goto out;
- if (!cmd.valident && !(ops->access & OP_VALID_ANYWAY))
- return -ENOENT;
+ rc = sja1105_dynamic_config_wait_complete(priv, ops, entry, true, false);
+out:
+ mutex_unlock(&priv->dynamic_config_lock);
- /* Don't dereference possibly NULL pointer - maybe caller
- * only wanted to see whether the entry existed or not.
- */
- if (entry)
- ops->entry_packing(packed_buf, entry, UNPACK);
- return 0;
+ return rc;
}
int sja1105_dynamic_config_write(struct sja1105_private *priv,
@@ -1356,22 +1359,14 @@ int sja1105_dynamic_config_write(struct sja1105_private *priv,
mutex_lock(&priv->dynamic_config_lock);
rc = sja1105_xfer_buf(priv, SPI_WRITE, ops->addr, packed_buf,
ops->packed_size);
- if (rc < 0) {
- mutex_unlock(&priv->dynamic_config_lock);
- return rc;
- }
-
- rc = sja1105_dynamic_config_wait_complete(priv, &cmd, ops);
- mutex_unlock(&priv->dynamic_config_lock);
if (rc < 0)
- return rc;
+ goto out;
- cmd = (struct sja1105_dyn_cmd) {0};
- ops->cmd_packing(packed_buf, &cmd, UNPACK);
- if (cmd.errors)
- return -EINVAL;
+ rc = sja1105_dynamic_config_wait_complete(priv, ops, NULL, false, true);
+out:
+ mutex_unlock(&priv->dynamic_config_lock);
- return 0;
+ return rc;
}
static u8 sja1105_crc8_add(u8 crc, u8 byte, u8 poly)
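
[Note on the hunk above] The reworked wait helper is an instance of the read_poll_timeout() pattern: the poll callback returns -EAGAIN while the hardware is busy and a terminal code (0, -ENOENT, -EINVAL) once it settles, and the wrapper forwards either the timeout error or the callback's verdict. A minimal sketch of the same shape, under hypothetical names (my_dev, my_poll_once, MY_BUSY, MY_ERROR), not taken from this diff:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define MY_BUSY		BIT(0)
#define MY_ERROR	BIT(1)
#define MY_SLEEP_US	10
#define MY_TIMEOUT_US	100000

struct my_dev {
	void __iomem *regs;
};

/* One poll iteration: -EAGAIN while busy, a terminal code otherwise. */
static int my_poll_once(struct my_dev *dev)
{
	u32 status = readl(dev->regs);

	if (status & MY_BUSY)
		return -EAGAIN;

	return (status & MY_ERROR) ? -EINVAL : 0;
}

static int my_wait_complete(struct my_dev *dev)
{
	int err, rc;

	/* err is -ETIMEDOUT if rc never left -EAGAIN; otherwise rc
	 * carries the poll callback's final verdict.
	 */
	err = read_poll_timeout(my_poll_once, rc, rc != -EAGAIN,
				MY_SLEEP_US, MY_TIMEOUT_US, false, dev);
	return err < 0 ? err : rc;
}
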
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 331bb1c6676a..1a367e64bc3b 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -1798,6 +1798,7 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;
+ int rc;
if (!vid) {
switch (db.type) {
@@ -1812,12 +1813,16 @@ static int sja1105_fdb_add(struct dsa_switch *ds, int port,
}
}
- return priv->info->fdb_add_cmd(ds, port, addr, vid);
+ mutex_lock(&priv->fdb_lock);
+ rc = priv->info->fdb_add_cmd(ds, port, addr, vid);
+ mutex_unlock(&priv->fdb_lock);
+
+ return rc;
}
-static int sja1105_fdb_del(struct dsa_switch *ds, int port,
- const unsigned char *addr, u16 vid,
- struct dsa_db db)
+static int __sja1105_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
{
struct sja1105_private *priv = ds->priv;
@@ -1837,6 +1842,20 @@ static int sja1105_fdb_del(struct dsa_switch *ds, int port,
return priv->info->fdb_del_cmd(ds, port, addr, vid);
}
+static int sja1105_fdb_del(struct dsa_switch *ds, int port,
+ const unsigned char *addr, u16 vid,
+ struct dsa_db db)
+{
+ struct sja1105_private *priv = ds->priv;
+ int rc;
+
+ mutex_lock(&priv->fdb_lock);
+ rc = __sja1105_fdb_del(ds, port, addr, vid, db);
+ mutex_unlock(&priv->fdb_lock);
+
+ return rc;
+}
+
static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
@@ -1868,13 +1887,14 @@ static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
if (!(l2_lookup.destports & BIT(port)))
continue;
- /* We need to hide the FDB entry for unknown multicast */
- if (l2_lookup.macaddr == SJA1105_UNKNOWN_MULTICAST &&
- l2_lookup.mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
- continue;
-
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
+ /* Hardware FDB is shared for fdb and mdb, "bridge fdb show"
+ * only wants to see unicast
+ */
+ if (is_multicast_ether_addr(macaddr))
+ continue;
+
/* We need to hide the dsa_8021q VLANs from the user. */
if (vid_is_dsa_8021q(l2_lookup.vlanid))
l2_lookup.vlanid = 0;
@@ -1898,6 +1918,8 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
};
int i;
+ mutex_lock(&priv->fdb_lock);
+
for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
struct sja1105_l2_lookup_entry l2_lookup = {0};
u8 macaddr[ETH_ALEN];
@@ -1911,7 +1933,7 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
if (rc) {
dev_err(ds->dev, "Failed to read FDB: %pe\n",
ERR_PTR(rc));
- return;
+ break;
}
if (!(l2_lookup.destports & BIT(port)))
@@ -1923,14 +1945,16 @@ static void sja1105_fast_age(struct dsa_switch *ds, int port)
u64_to_ether_addr(l2_lookup.macaddr, macaddr);
- rc = sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
+ rc = __sja1105_fdb_del(ds, port, macaddr, l2_lookup.vlanid, db);
if (rc) {
dev_err(ds->dev,
"Failed to delete FDB entry %pM vid %lld: %pe\n",
macaddr, l2_lookup.vlanid, ERR_PTR(rc));
- return;
+ break;
}
}
+
+ mutex_unlock(&priv->fdb_lock);
}
static int sja1105_mdb_add(struct dsa_switch *ds, int port,
@@ -2115,11 +2139,36 @@ static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
}
#define BYTES_PER_KBIT (1000LL / 8)
+/* Port 0 (the uC port) does not have CBS shapers */
+#define SJA1110_FIXED_CBS(port, prio) ((((port) - 1) * SJA1105_NUM_TC) + (prio))
+
+static int sja1105_find_cbs_shaper(struct sja1105_private *priv,
+ int port, int prio)
+{
+ int i;
+
+ if (priv->info->fixed_cbs_mapping) {
+ i = SJA1110_FIXED_CBS(port, prio);
+ if (i >= 0 && i < priv->info->num_cbs_shapers)
+ return i;
+
+ return -1;
+ }
+
+ for (i = 0; i < priv->info->num_cbs_shapers; i++)
+ if (priv->cbs[i].port == port && priv->cbs[i].prio == prio)
+ return i;
+
+ return -1;
+}
static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv)
{
int i;
+ if (priv->info->fixed_cbs_mapping)
+ return -1;
+
for (i = 0; i < priv->info->num_cbs_shapers; i++)
if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope)
return i;
@@ -2150,14 +2199,20 @@ static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
{
struct sja1105_private *priv = ds->priv;
struct sja1105_cbs_entry *cbs;
+ s64 port_transmit_rate_kbps;
int index;
if (!offload->enable)
return sja1105_delete_cbs_shaper(priv, port, offload->queue);
- index = sja1105_find_unused_cbs_shaper(priv);
- if (index < 0)
- return -ENOSPC;
+ /* The user may be replacing an existing shaper */
+ index = sja1105_find_cbs_shaper(priv, port, offload->queue);
+ if (index < 0) {
+ /* That isn't the case - see if we can allocate a new one */
+ index = sja1105_find_unused_cbs_shaper(priv);
+ if (index < 0)
+ return -ENOSPC;
+ }
cbs = &priv->cbs[index];
cbs->port = port;
@@ -2167,9 +2222,17 @@ static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
*/
cbs->credit_hi = offload->hicredit;
cbs->credit_lo = abs(offload->locredit);
- /* User space is in kbits/sec, hardware in bytes/sec */
- cbs->idle_slope = offload->idleslope * BYTES_PER_KBIT;
- cbs->send_slope = abs(offload->sendslope * BYTES_PER_KBIT);
+ /* User space is in kbits/sec, while the hardware is in bytes/sec times
+ * link speed. Since the given offload->sendslope is good only for the
+ * current link speed anyway, and user space is likely to reprogram it
+ * when that changes, don't even bother to track the port's link speed,
+ * but deduce the port transmit rate from idleslope - sendslope.
+ */
+ port_transmit_rate_kbps = offload->idleslope - offload->sendslope;
+ cbs->idle_slope = div_s64(offload->idleslope * BYTES_PER_KBIT,
+ port_transmit_rate_kbps);
+ cbs->send_slope = div_s64(abs(offload->sendslope * BYTES_PER_KBIT),
+ port_transmit_rate_kbps);
/* Convert the negative values from 64-bit 2's complement
* to 32-bit 2's complement (for the case of 0x80000000 whose
* negative is still negative).
@@ -2234,6 +2297,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
int rc, i;
s64 now;
+ mutex_lock(&priv->fdb_lock);
mutex_lock(&priv->mgmt_lock);
mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
@@ -2346,6 +2410,7 @@ int sja1105_static_config_reload(struct sja1105_private *priv,
goto out;
out:
mutex_unlock(&priv->mgmt_lock);
+ mutex_unlock(&priv->fdb_lock);
return rc;
}
@@ -2915,7 +2980,9 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
{
struct sja1105_l2_lookup_entry *l2_lookup;
struct sja1105_table *table;
- int match;
+ int match, rc;
+
+ mutex_lock(&priv->fdb_lock);
table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
l2_lookup = table->entries;
@@ -2928,7 +2995,8 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
if (match == table->entry_count) {
NL_SET_ERR_MSG_MOD(extack,
"Could not find FDB entry for unknown multicast");
- return -ENOSPC;
+ rc = -ENOSPC;
+ goto out;
}
if (flags.val & BR_MCAST_FLOOD)
@@ -2936,10 +3004,13 @@ static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
else
l2_lookup[match].destports &= ~BIT(to);
- return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
- l2_lookup[match].index,
- &l2_lookup[match],
- true);
+ rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
+ l2_lookup[match].index,
+ &l2_lookup[match], true);
+out:
+ mutex_unlock(&priv->fdb_lock);
+
+ return rc;
}
static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port,
@@ -3309,6 +3380,7 @@ static int sja1105_probe(struct spi_device *spi)
mutex_init(&priv->ptp_data.lock);
mutex_init(&priv->dynamic_config_lock);
mutex_init(&priv->mgmt_lock);
+ mutex_init(&priv->fdb_lock);
spin_lock_init(&priv->ts_id_lock);
rc = sja1105_parse_dt(priv);
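
[Note on the CBS hunks above] To make the slope conversion concrete, here is the arithmetic with illustrative numbers (not taken from the diff): tc-cbs programs idleslope = 20000 kbit/s and sendslope = -80000 kbit/s on a 100 Mbit/s port. Since sendslope = idleslope - port_rate, the link rate falls out of the difference, and the programmed values become fractions of link speed:

#include <linux/math64.h>
#include <linux/printk.h>

#define BYTES_PER_KBIT	(1000LL / 8)

static void cbs_slope_example(void)
{
	s64 idleslope = 20000;	/* kbit/s, from the tc-cbs offload */
	s64 sendslope = -80000;	/* kbit/s, always negative */
	s64 rate_kbps = idleslope - sendslope;	/* 100000 = 100 Mbit/s */
	s64 idle, send;

	idle = div_s64(idleslope * BYTES_PER_KBIT, rate_kbps);	    /* 25 */
	send = div_s64(abs(sendslope * BYTES_PER_KBIT), rate_kbps); /* 100 */

	/* idle:send = 1:4 mirrors the 20%:80% bandwidth split, and the
	 * ratio stays valid if the link later renegotiates its speed.
	 */
	pr_info("idle_slope=%lld send_slope=%lld\n", idle, send);
}
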
diff --git a/drivers/net/dsa/sja1105/sja1105_spi.c b/drivers/net/dsa/sja1105/sja1105_spi.c
index 5ce29c8057a4..834b5c1b4db0 100644
--- a/drivers/net/dsa/sja1105/sja1105_spi.c
+++ b/drivers/net/dsa/sja1105/sja1105_spi.c
@@ -781,6 +781,7 @@ const struct sja1105_info sja1110a_info = {
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
@@ -831,6 +832,7 @@ const struct sja1105_info sja1110b_info = {
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
@@ -881,6 +883,7 @@ const struct sja1105_info sja1110c_info = {
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
@@ -931,6 +934,7 @@ const struct sja1105_info sja1110d_info = {
.tag_proto = DSA_TAG_PROTO_SJA1110,
.can_limit_mcast_flood = true,
.multiple_cascade_ports = true,
+ .fixed_cbs_mapping = true,
.ptp_ts_bits = 32,
.ptpegr_ts_bytes = 8,
.max_frame_mem = SJA1110_MAX_FRAME_MEMORY,
diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c
index 1c009b485188..ca66b747b7c5 100644
--- a/drivers/net/ethernet/adi/adin1110.c
+++ b/drivers/net/ethernet/adi/adin1110.c
@@ -1385,7 +1385,7 @@ static int adin1110_fdb_add(struct adin1110_port_priv *port_priv,
return -ENOMEM;
other_port = priv->ports[!port_priv->nr];
- port_rules = adin1110_port_rules(port_priv, false, true);
+ port_rules = adin1110_port_rules(other_port, false, true);
eth_broadcast_addr(mask);
return adin1110_write_mac_address(other_port, mac_nr, (u8 *)fdb->addr,
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index ad32ca81f7ef..f955bde10cf9 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -1833,6 +1833,9 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
return work_done;
error:
+ if (xdp_flags & ENA_XDP_REDIRECT)
+ xdp_do_flush();
+
adapter = netdev_priv(rx_ring->netdev);
if (rc == -ENOSPC) {
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.c b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
index d63d321f3e7b..41a6098eb0c2 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.c
@@ -528,13 +528,16 @@ void bcmasp_netfilt_suspend(struct bcmasp_intf *intf)
ASP_RX_FILTER_BLK_CTRL);
}
-void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
- u32 *rule_cnt)
+int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
+ u32 *rule_cnt)
{
struct bcmasp_priv *priv = intf->parent;
int j = 0, i;
for (i = 0; i < NUM_NET_FILTERS; i++) {
+ if (j == *rule_cnt)
+ return -EMSGSIZE;
+
if (!priv->net_filters[i].claimed ||
priv->net_filters[i].port != intf->port)
continue;
@@ -548,6 +551,8 @@ void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
}
*rule_cnt = j;
+
+ return 0;
}
int bcmasp_netfilt_get_active(struct bcmasp_intf *intf)
@@ -1300,6 +1305,7 @@ static int bcmasp_probe(struct platform_device *pdev)
if (!intf) {
dev_err(dev, "Cannot create eth interface %d\n", i);
bcmasp_remove_intfs(priv);
+ of_node_put(intf_node);
goto of_put_exit;
}
list_add_tail(&intf->list, &priv->intfs);
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp.h b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
index 5b512f7f5e94..ec90add6b03e 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp.h
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp.h
@@ -577,8 +577,8 @@ void bcmasp_netfilt_release(struct bcmasp_intf *intf,
int bcmasp_netfilt_get_active(struct bcmasp_intf *intf);
-void bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
- u32 *rule_cnt);
+int bcmasp_netfilt_get_all_active(struct bcmasp_intf *intf, u32 *rule_locs,
+ u32 *rule_cnt);
void bcmasp_netfilt_suspend(struct bcmasp_intf *intf);
diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
index c4f1604d5ab3..ce6a3d56fb23 100644
--- a/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
+++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_ethtool.c
@@ -335,7 +335,7 @@ static int bcmasp_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
err = bcmasp_flow_get(intf, cmd);
break;
case ETHTOOL_GRXCLSRLALL:
- bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt);
+ err = bcmasp_netfilt_get_all_active(intf, rule_locs, &cmd->rule_cnt);
cmd->data = NUM_NET_FILTERS;
break;
default:
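
[Note on the bcmasp hunks above] The -EMSGSIZE change follows the usual ETHTOOL_GRXCLSRLALL contract: *rule_cnt arrives holding the capacity of rule_locs and leaves holding the number of locations actually written, and overflowing the caller's array must fail rather than truncate silently. A generic sketch of that contract (my_priv and MY_NUM_FILTERS are hypothetical):

#include <linux/errno.h>
#include <linux/types.h>

#define MY_NUM_FILTERS	32

struct my_filter {
	bool claimed;
};

struct my_priv {
	struct my_filter filters[MY_NUM_FILTERS];
};

/* Returns 0 and updates *rule_cnt, or -EMSGSIZE if the caller's
 * rule_locs array cannot hold every active rule.
 */
static int my_get_all_rules(struct my_priv *priv, u32 *rule_locs,
			    u32 *rule_cnt)
{
	u32 j = 0;
	int i;

	for (i = 0; i < MY_NUM_FILTERS; i++) {
		if (!priv->filters[i].claimed)
			continue;

		if (j == *rule_cnt)
			return -EMSGSIZE;	/* capacity exhausted */

		rule_locs[j++] = i;
	}

	*rule_cnt = j;
	return 0;
}
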
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 5cc0dbe12132..7551aa8068f8 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2614,6 +2614,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
struct rx_cmp_ext *rxcmp1;
u32 cp_cons, tmp_raw_cons;
u32 raw_cons = cpr->cp_raw_cons;
+ bool flush_xdp = false;
u32 rx_pkts = 0;
u8 event = 0;
@@ -2648,6 +2649,8 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
rx_pkts++;
else if (rc == -EBUSY) /* partial completion */
break;
+ if (event & BNXT_REDIRECT_EVENT)
+ flush_xdp = true;
} else if (unlikely(TX_CMP_TYPE(txcmp) ==
CMPL_BASE_TYPE_HWRM_DONE)) {
bnxt_hwrm_handler(bp, txcmp);
@@ -2667,6 +2670,8 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
if (event & BNXT_AGG_EVENT)
bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
+ if (flush_xdp)
+ xdp_do_flush();
if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
napi_complete_done(napi, rx_pkts);
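
[Note on the ena and bnxt hunks above] Both close the same hole: a NAPI poll routine that may have called xdp_do_redirect() has to call xdp_do_flush() on every exit path, error paths included, or redirected frames sit in per-CPU bulk-queue state until the next poll. A condensed sketch of the shape (struct my_ring and the my_* helpers are hypothetical):

#include <linux/bits.h>
#include <linux/filter.h>
#include <linux/netdevice.h>

#define MY_XDP_REDIRECT	BIT(0)

struct my_ring {
	struct napi_struct napi;
};

unsigned int my_process_one(struct my_ring *ring);	/* hypothetical */
void my_enable_irq(struct my_ring *ring);		/* hypothetical */

static int my_napi_poll(struct napi_struct *napi, int budget)
{
	struct my_ring *ring = container_of(napi, struct my_ring, napi);
	bool flush_xdp = false;
	int work = 0;

	while (work < budget) {
		unsigned int event = my_process_one(ring);

		if (!event)
			break;
		if (event & MY_XDP_REDIRECT)
			flush_xdp = true;
		work++;
	}

	/* Flush before any return so redirected frames are not stranded. */
	if (flush_xdp)
		xdp_do_flush();

	if (work < budget && napi_complete_done(napi, work))
		my_enable_irq(ring);

	return work;
}
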
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index f178ed9899a9..3ae8e8af8ab3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -3721,6 +3721,60 @@ struct hwrm_func_backing_store_qcaps_v2_output {
u8 valid;
};
+/* hwrm_func_dbr_pacing_qcfg_input (size:128b/16B) */
+struct hwrm_func_dbr_pacing_qcfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+};
+
+/* hwrm_func_dbr_pacing_qcfg_output (size:512b/64B) */
+struct hwrm_func_dbr_pacing_qcfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 flags;
+#define FUNC_DBR_PACING_QCFG_RESP_FLAGS_DBR_NQ_EVENT_ENABLED 0x1UL
+ u8 unused_0[7];
+ __le32 dbr_stat_db_fifo_reg;
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_MASK 0x3UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_SFT 0
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_GRC 0x1UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR0 0x2UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1 0x3UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_LAST \
+ FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SPACE_BAR1
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_MASK 0xfffffffcUL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_STAT_DB_FIFO_REG_ADDR_SFT 2
+ __le32 dbr_stat_db_fifo_reg_watermark_mask;
+ u8 dbr_stat_db_fifo_reg_watermark_shift;
+ u8 unused_1[3];
+ __le32 dbr_stat_db_fifo_reg_fifo_room_mask;
+ u8 dbr_stat_db_fifo_reg_fifo_room_shift;
+ u8 unused_2[3];
+ __le32 dbr_throttling_aeq_arm_reg;
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_MASK 0x3UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_SFT 0
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_PCIE_CFG 0x0UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_GRC 0x1UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR0 0x2UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1 0x3UL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_LAST \
+ FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SPACE_BAR1
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_MASK 0xfffffffcUL
+#define FUNC_DBR_PACING_QCFG_RESP_DBR_THROTTLING_AEQ_ARM_REG_ADDR_SFT 2
+ u8 dbr_throttling_aeq_arm_reg_val;
+ u8 unused_3[7];
+ __le32 primary_nq_id;
+ __le32 pacing_threshold;
+ u8 unused_4[7];
+ u8 valid;
+};
+
/* hwrm_func_drv_if_change_input (size:192b/24B) */
struct hwrm_func_drv_if_change_input {
__le16 req_type;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 852eb449ccae..6ba2b9398633 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -345,7 +345,7 @@ static void bnxt_set_edev_info(struct bnxt_en_dev *edev, struct bnxt *bp)
edev->hw_ring_stats_size = bp->hw_ring_stats_size;
edev->pf_port_id = bp->pf.port_id;
edev->en_state = bp->state;
-
+ edev->bar0 = bp->bar0;
edev->ulp_tbl->msix_requested = bnxt_get_ulp_msix_num(bp);
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
index 80cbc4b6130a..6ff77f082e6c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h
@@ -81,6 +81,7 @@ struct bnxt_en_dev {
* mode only. Will be
* updated in resume.
*/
+ void __iomem *bar0;
};
static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev)
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 31f664ee4d77..b940dcd3ace6 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -756,8 +756,6 @@ static void macb_mac_link_up(struct phylink_config *config,
if (rx_pause)
ctrl |= MACB_BIT(PAE);
- macb_set_tx_clk(bp, speed);
-
/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
* cleared the pipeline and control registers.
*/
@@ -777,6 +775,9 @@ static void macb_mac_link_up(struct phylink_config *config,
spin_unlock_irqrestore(&bp->lock, flags);
+ if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
+ macb_set_tx_clk(bp, speed);
+
/* Enable Rx and Tx; Enable PTP unicast */
ctrl = macb_readl(bp, NCR);
if (gem_has_ptp(bp))
diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c
index 716815dad7d2..65ec1abc9442 100644
--- a/drivers/net/ethernet/engleder/tsnep_ethtool.c
+++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c
@@ -300,10 +300,8 @@ static void tsnep_ethtool_get_channels(struct net_device *netdev,
{
struct tsnep_adapter *adapter = netdev_priv(netdev);
- ch->max_rx = adapter->num_rx_queues;
- ch->max_tx = adapter->num_tx_queues;
- ch->rx_count = adapter->num_rx_queues;
- ch->tx_count = adapter->num_tx_queues;
+ ch->max_combined = adapter->num_queues;
+ ch->combined_count = adapter->num_queues;
}
static int tsnep_ethtool_get_ts_info(struct net_device *netdev,
diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c
index f61bd89734c5..8b992dc9bb52 100644
--- a/drivers/net/ethernet/engleder/tsnep_main.c
+++ b/drivers/net/ethernet/engleder/tsnep_main.c
@@ -87,8 +87,11 @@ static irqreturn_t tsnep_irq(int irq, void *arg)
/* handle TX/RX queue 0 interrupt */
if ((active & adapter->queue[0].irq_mask) != 0) {
- tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
- napi_schedule(&adapter->queue[0].napi);
+ if (napi_schedule_prep(&adapter->queue[0].napi)) {
+ tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
+ /* schedule after masking to avoid races */
+ __napi_schedule(&adapter->queue[0].napi);
+ }
}
return IRQ_HANDLED;
@@ -99,8 +102,11 @@ static irqreturn_t tsnep_irq_txrx(int irq, void *arg)
struct tsnep_queue *queue = arg;
/* handle TX/RX queue interrupt */
- tsnep_disable_irq(queue->adapter, queue->irq_mask);
- napi_schedule(&queue->napi);
+ if (napi_schedule_prep(&queue->napi)) {
+ tsnep_disable_irq(queue->adapter, queue->irq_mask);
+ /* schedule after masking to avoid races */
+ __napi_schedule(&queue->napi);
+ }
return IRQ_HANDLED;
}
@@ -1728,6 +1734,10 @@ static int tsnep_poll(struct napi_struct *napi, int budget)
if (queue->tx)
complete = tsnep_tx_poll(queue->tx, budget);
+ /* handle case where we are called by netpoll with a budget of 0 */
+ if (unlikely(budget <= 0))
+ return budget;
+
if (queue->rx) {
done = queue->rx->xsk_pool ?
tsnep_rx_poll_zc(queue->rx, napi, budget) :
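
[Note on the tsnep hunks above] The reordering relies on napi_schedule_prep() atomically claiming the NAPI instance before the interrupt is masked: only the context that wins the claim masks the line and schedules the poll, so there is no window where the irq is masked but no poll is pending to unmask it. Reduced to its shape (my_queue and my_disable_irq() are hypothetical):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct my_queue {
	struct napi_struct napi;
};

void my_disable_irq(struct my_queue *queue);	/* hypothetical */

static irqreturn_t my_irq(int irq, void *arg)
{
	struct my_queue *queue = arg;

	if (napi_schedule_prep(&queue->napi)) {
		/* We now own the NAPI; mask only when we are the
		 * context that will schedule (and later unmask).
		 */
		my_disable_irq(queue);
		__napi_schedule(&queue->napi);
	}

	return IRQ_HANDLED;
}
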
diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
index e0a4cb7e3f50..c153dc083aff 100644
--- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c
+++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c
@@ -1402,7 +1402,7 @@ static void enetc_fixup_clear_rss_rfs(struct pci_dev *pdev)
return;
si = enetc_psi_create(pdev);
- if (si)
+ if (!IS_ERR(si))
enetc_psi_destroy(pdev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_FREESCALE, ENETC_DEV_ID_PF,
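
[Note on the enetc one-liner above] This is the classic ERR_PTR mix-up: enetc_psi_create() reports failure as an ERR_PTR-encoded errno, never as NULL, so "if (si)" was true even on failure. The convention in isolation (my_obj and my_create() are hypothetical):

#include <linux/err.h>
#include <linux/printk.h>
#include <linux/slab.h>

struct my_obj {
	int id;
};

static struct my_obj *my_create(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return ERR_PTR(-ENOMEM);	/* errno rides in the pointer */
	return obj;
}

static void my_use(void)
{
	struct my_obj *obj = my_create();

	if (IS_ERR(obj)) {
		pr_err("create failed: %pe\n", obj);
		return;
	}
	/* "if (obj)" would wrongly treat ERR_PTR(-ENOMEM) as success */
	kfree(obj);
}
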
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index ea0e38b4d9e9..f281e42a7ef9 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -570,7 +570,10 @@ static int gve_rx_append_frags(struct napi_struct *napi,
if (!skb)
return -1;
- skb_shinfo(rx->ctx.skb_tail)->frag_list = skb;
+ if (rx->ctx.skb_tail == rx->ctx.skb_head)
+ skb_shinfo(rx->ctx.skb_head)->frag_list = skb;
+ else
+ rx->ctx.skb_tail->next = skb;
rx->ctx.skb_tail = skb;
num_frags = 0;
}
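
[Note on the gve hunk above] The fix corrects multi-buffer skb chaining: only the first extra buffer hangs off skb_shinfo(head)->frag_list; every later one is linked through the previous tail's ->next pointer. Writing each new buffer into head's frag_list, as before, dropped all but the newest fragment. The pattern (my_rx_ctx is hypothetical):

#include <linux/skbuff.h>

struct my_rx_ctx {
	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;
};

static void my_append_frag(struct my_rx_ctx *ctx, struct sk_buff *skb)
{
	if (ctx->skb_tail == ctx->skb_head)
		skb_shinfo(ctx->skb_head)->frag_list = skb; /* first frag */
	else
		ctx->skb_tail->next = skb;	/* extend the chain */
	ctx->skb_tail = skb;
}
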
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
index a4b43bcd2f0c..aaf1f42624a7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -814,6 +814,7 @@ struct hnae3_tc_info {
u8 max_tc; /* Total number of TCs */
u8 num_tc; /* Total number of enabled TCs */
bool mqprio_active;
+ bool dcb_ets_active;
};
#define HNAE3_MAX_DSCP 64
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
index f276b5ecb431..b8508533878b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -1045,6 +1045,7 @@ hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos)
struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
struct hnae3_dev_specs *dev_specs = &ae_dev->dev_specs;
struct hnae3_knic_private_info *kinfo = &h->kinfo;
+ struct net_device *dev = kinfo->netdev;
*pos += scnprintf(buf + *pos, len - *pos, "dev_spec:\n");
*pos += scnprintf(buf + *pos, len - *pos, "MAC entry num: %u\n",
@@ -1087,6 +1088,9 @@ hns3_dbg_dev_specs(struct hnae3_handle *h, char *buf, int len, int *pos)
dev_specs->mc_mac_size);
*pos += scnprintf(buf + *pos, len - *pos, "MAC statistics number: %u\n",
dev_specs->mac_stats_num);
+ *pos += scnprintf(buf + *pos, len - *pos,
+ "TX timeout threshold: %d seconds\n",
+ dev->watchdog_timeo / HZ);
}
static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len)
@@ -1411,9 +1415,9 @@ int hns3_dbg_init(struct hnae3_handle *handle)
return 0;
out:
- mutex_destroy(&handle->dbgfs_lock);
debugfs_remove_recursive(handle->hnae3_dbgfs);
handle->hnae3_dbgfs = NULL;
+ mutex_destroy(&handle->dbgfs_lock);
return ret;
}
@@ -1421,6 +1425,9 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
{
u32 i;
+ debugfs_remove_recursive(handle->hnae3_dbgfs);
+ handle->hnae3_dbgfs = NULL;
+
for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++)
if (handle->dbgfs_buf[i]) {
kvfree(handle->dbgfs_buf[i]);
@@ -1428,8 +1435,6 @@ void hns3_dbg_uninit(struct hnae3_handle *handle)
}
mutex_destroy(&handle->dbgfs_lock);
- debugfs_remove_recursive(handle->hnae3_dbgfs);
- handle->hnae3_dbgfs = NULL;
}
void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index eac2d0573241..cf50368441b7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -2103,8 +2103,12 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
*/
if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num &&
!ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) {
+ /* This smp_store_release() pairs with smp_load_acquire() in
+ * hns3_nic_reclaim_desc(). Ensure that the BD valid bit
+ * is updated.
+ */
+ smp_store_release(&ring->last_to_use, ring->next_to_use);
hns3_tx_push_bd(ring, num);
- WRITE_ONCE(ring->last_to_use, ring->next_to_use);
return;
}
@@ -2115,6 +2119,11 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
return;
}
+ /* This smp_store_release() pairs with smp_load_acquire() in
+ * hns3_nic_reclaim_desc(). Ensure that the BD valid bit is updated.
+ */
+ smp_store_release(&ring->last_to_use, ring->next_to_use);
+
if (ring->tqp->mem_base)
hns3_tx_mem_doorbell(ring);
else
@@ -2122,7 +2131,6 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
ring->pending_buf = 0;
- WRITE_ONCE(ring->last_to_use, ring->next_to_use);
}
static void hns3_tsyn(struct net_device *netdev, struct sk_buff *skb,
@@ -3308,8 +3316,6 @@ static void hns3_set_default_feature(struct net_device *netdev)
netdev->priv_flags |= IFF_UNICAST_FLT;
- netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
-
netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
@@ -3347,6 +3353,15 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_HW_TC);
netdev->hw_enc_features |= netdev->vlan_features | NETIF_F_TSO_MANGLEID;
+
+ /* The device_version V3 hardware can't offload the checksum for IP in
+ * GRE packets, but can do it for NvGRE. So disable the checksum
+ * and GSO offload for GRE by default.
+ */
+ if (ae_dev->dev_version > HNAE3_DEVICE_VERSION_V2) {
+ netdev->features &= ~NETIF_F_GSO_GRE;
+ netdev->features &= ~NETIF_F_GSO_GRE_CSUM;
+ }
}
static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
@@ -3563,9 +3578,8 @@ static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
int *bytes, int *pkts, int budget)
{
- /* pair with ring->last_to_use update in hns3_tx_doorbell(),
- * smp_store_release() is not used in hns3_tx_doorbell() because
- * the doorbell operation already have the needed barrier operation.
+ /* This smp_load_acquire() pairs with smp_store_release() in
+ * hns3_tx_doorbell().
*/
int ltu = smp_load_acquire(&ring->last_to_use);
int ntc = ring->next_to_clean;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
index 36858a72d771..682239f33082 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -773,7 +773,9 @@ static int hns3_get_link_ksettings(struct net_device *netdev,
hns3_get_ksettings(h, cmd);
break;
case HNAE3_MEDIA_TYPE_FIBER:
- if (module_type == HNAE3_MODULE_TYPE_CR)
+ if (module_type == HNAE3_MODULE_TYPE_UNKNOWN)
+ cmd->base.port = PORT_OTHER;
+ else if (module_type == HNAE3_MODULE_TYPE_CR)
cmd->base.port = PORT_DA;
else
cmd->base.port = PORT_FIBRE;
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
index fad5a5ff3cda..b98301e205f7 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -259,7 +259,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
int ret;
if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
- hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
+ h->kinfo.tc_info.mqprio_active)
return -EINVAL;
ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
@@ -275,10 +275,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
}
hclge_tm_schd_info_update(hdev, num_tc);
- if (num_tc > 1)
- hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
- else
- hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
+ h->kinfo.tc_info.dcb_ets_active = num_tc > 1;
ret = hclge_ieee_ets_to_tm_info(hdev, ets);
if (ret)
@@ -487,7 +484,7 @@ static u8 hclge_getdcbx(struct hnae3_handle *h)
struct hclge_vport *vport = hclge_get_vport(h);
struct hclge_dev *hdev = vport->back;
- if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
+ if (h->kinfo.tc_info.mqprio_active)
return 0;
return hdev->dcbx_cap;
@@ -611,7 +608,8 @@ static int hclge_setup_tc(struct hnae3_handle *h,
if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
return -EBUSY;
- if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
+ kinfo = &vport->nic.kinfo;
+ if (kinfo->tc_info.dcb_ets_active)
return -EINVAL;
ret = hclge_mqprio_qopt_check(hdev, mqprio_qopt);
@@ -625,7 +623,6 @@ static int hclge_setup_tc(struct hnae3_handle *h,
if (ret)
return ret;
- kinfo = &vport->nic.kinfo;
memcpy(&old_tc_info, &kinfo->tc_info, sizeof(old_tc_info));
hclge_sync_mqprio_qopt(&kinfo->tc_info, mqprio_qopt);
kinfo->tc_info.mqprio_active = tc > 0;
@@ -634,13 +631,6 @@ static int hclge_setup_tc(struct hnae3_handle *h,
if (ret)
goto err_out;
- hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
-
- if (tc > 1)
- hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
- else
- hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;
-
return hclge_notify_init_up(hdev);
err_out:
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
index f01a7a9ee02c..ff3f8f424ad9 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -1519,7 +1519,7 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
struct hclge_desc desc[3];
int pos = 0;
int ret, i;
- u32 *req;
+ __le32 *req;
hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
desc[0].flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_NEXT);
@@ -1544,22 +1544,22 @@ static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, bool sel_x,
tcam_msg.loc);
/* tcam_data0 ~ tcam_data1 */
- req = (u32 *)req1->tcam_data;
+ req = (__le32 *)req1->tcam_data;
for (i = 0; i < 2; i++)
pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", *req++);
+ "%08x\n", le32_to_cpu(*req++));
/* tcam_data2 ~ tcam_data7 */
- req = (u32 *)req2->tcam_data;
+ req = (__le32 *)req2->tcam_data;
for (i = 0; i < 6; i++)
pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", *req++);
+ "%08x\n", le32_to_cpu(*req++));
/* tcam_data8 ~ tcam_data12 */
- req = (u32 *)req3->tcam_data;
+ req = (__le32 *)req3->tcam_data;
for (i = 0; i < 5; i++)
pos += scnprintf(tcam_buf + pos, HCLGE_DBG_TCAM_BUF_SIZE - pos,
- "%08x\n", *req++);
+ "%08x\n", le32_to_cpu(*req++));
return ret;
}
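
[Note on the hclge debugfs hunk above] This is an endianness fix: the firmware fills tcam_data in little-endian, so the buffer must be typed __le32 and each word passed through le32_to_cpu() before printing; a raw u32 dereference printed byte-swapped values on big-endian hosts. The idiom, in isolation (my_dump_le32_words is hypothetical):

#include <asm/byteorder.h>
#include <linux/kernel.h>

static int my_dump_le32_words(const void *fw_buf, int n,
			      char *out, size_t len)
{
	const __le32 *req = fw_buf;	/* wire format: little-endian */
	int pos = 0, i;

	for (i = 0; i < n; i++)
		pos += scnprintf(out + pos, len - pos, "%08x\n",
				 le32_to_cpu(req[i]));
	return pos;
}
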
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index 0f50dba6cc47..c42574e29747 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -3564,9 +3564,14 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
u32 regclr)
{
+#define HCLGE_IMP_RESET_DELAY 5
+
switch (event_type) {
case HCLGE_VECTOR0_EVENT_PTP:
case HCLGE_VECTOR0_EVENT_RST:
+ if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B))
+ mdelay(HCLGE_IMP_RESET_DELAY);
+
hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
break;
case HCLGE_VECTOR0_EVENT_MBX:
@@ -7348,6 +7353,12 @@ static int hclge_del_cls_flower(struct hnae3_handle *handle,
ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, rule->location,
NULL, false);
if (ret) {
+ /* If the TCAM config fails, set the rule state to TO_DEL,
+ * so the rule will be deleted when the periodic
+ * task is scheduled.
+ */
+ hclge_update_fd_list(hdev, HCLGE_FD_TO_DEL, rule->location, NULL);
+ set_bit(HCLGE_STATE_FD_TBL_CHANGED, &hdev->state);
spin_unlock_bh(&hdev->fd_rule_lock);
return ret;
}
@@ -8824,7 +8835,7 @@ static void hclge_update_overflow_flags(struct hclge_vport *vport,
if (mac_type == HCLGE_MAC_ADDR_UC) {
if (is_all_added)
vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
- else
+ else if (hclge_is_umv_space_full(vport, true))
vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
} else {
if (is_all_added)
@@ -11026,6 +11037,7 @@ static void hclge_get_mdix_mode(struct hnae3_handle *handle,
static void hclge_info_show(struct hclge_dev *hdev)
{
+ struct hnae3_handle *handle = &hdev->vport->nic;
struct device *dev = &hdev->pdev->dev;
dev_info(dev, "PF info begin:\n");
@@ -11042,9 +11054,9 @@ static void hclge_info_show(struct hclge_dev *hdev)
dev_info(dev, "This is %s PF\n",
hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
dev_info(dev, "DCB %s\n",
- hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
+ handle->kinfo.tc_info.dcb_ets_active ? "enable" : "disable");
dev_info(dev, "MQPRIO %s\n",
- hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
+ handle->kinfo.tc_info.mqprio_active ? "enable" : "disable");
dev_info(dev, "Default tx spare buffer size: %u\n",
hdev->tx_spare_buf_size);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
index ec233ec57222..7bc2049b723d 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -919,8 +919,6 @@ struct hclge_dev {
#define HCLGE_FLAG_MAIN BIT(0)
#define HCLGE_FLAG_DCB_CAPABLE BIT(1)
-#define HCLGE_FLAG_DCB_ENABLE BIT(2)
-#define HCLGE_FLAG_MQPRIO_ENABLE BIT(3)
u32 flag;
u32 pkt_buf_size; /* Total pf buf size for tx/rx */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
index 7a2f9233d695..a4d68fb216fb 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -1855,7 +1855,8 @@ static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
unsigned long delta = round_jiffies_relative(HZ);
struct hnae3_handle *handle = &hdev->nic;
- if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
+ if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state) ||
+ test_bit(HCLGE_COMM_STATE_CMD_DISABLE, &hdev->hw.hw.comm_state))
return;
if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
index 9406237c461e..f81a43d2cdfc 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -456,9 +456,6 @@ int hinic_set_vlan_fliter(struct hinic_dev *nic_dev, u32 en)
u16 out_size = sizeof(vlan_filter);
int err;
- if (!hwdev)
- return -EINVAL;
-
vlan_filter.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
vlan_filter.enable = en;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 832a2ae01950..a8d79ee350f8 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1303,24 +1303,23 @@ static void ibmveth_rx_csum_helper(struct sk_buff *skb,
* the user space for finding a flow. During this process, OVS computes
* checksum on the first packet when CHECKSUM_PARTIAL flag is set.
*
- * So, re-compute TCP pseudo header checksum when configured for
- * trunk mode.
+ * So, re-compute TCP pseudo header checksum.
*/
+
if (iph_proto == IPPROTO_TCP) {
struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen);
+
if (tcph->check == 0x0000) {
/* Recompute TCP pseudo header checksum */
- if (adapter->is_active_trunk) {
- tcphdrlen = skb->len - iphlen;
- if (skb_proto == ETH_P_IP)
- tcph->check =
- ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, tcphdrlen, iph_proto, 0);
- else if (skb_proto == ETH_P_IPV6)
- tcph->check =
- ~csum_ipv6_magic(&iph6->saddr,
- &iph6->daddr, tcphdrlen, iph_proto, 0);
- }
+ tcphdrlen = skb->len - iphlen;
+ if (skb_proto == ETH_P_IP)
+ tcph->check =
+ ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, tcphdrlen, iph_proto, 0);
+ else if (skb_proto == ETH_P_IPV6)
+ tcph->check =
+ ~csum_ipv6_magic(&iph6->saddr,
+ &iph6->daddr, tcphdrlen, iph_proto, 0);
/* Setup SKB fields for checksum offload */
skb_partial_csum_set(skb, iphlen,
offsetof(struct tcphdr, check));
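
[Note on the ibmveth hunks above] The change drops the trunk-mode condition but keeps the recompute: a TCP checksum of 0x0000 marks a packet whose pseudo-header checksum must be rebuilt before skb_partial_csum_set() hands the rest to hardware. The IPv4 half of that computation, in isolation (my_fix_tcp_pseudo_csum is a hypothetical helper):

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/checksum.h>

static void my_fix_tcp_pseudo_csum(struct sk_buff *skb,
				   const struct iphdr *iph,
				   unsigned int iphlen)
{
	struct tcphdr *tcph = (struct tcphdr *)(skb->data + iphlen);
	unsigned int tcplen = skb->len - iphlen;

	/* Seed tcph->check with the pseudo-header sum (addresses,
	 * length, protocol); the device folds in the payload later.
	 */
	tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
					 tcplen, IPPROTO_TCP, 0);
	skb_partial_csum_set(skb, iphlen, offsetof(struct tcphdr, check));
}
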
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 8ea1a238dcef..d3d6415553ed 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -4475,9 +4475,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
goto error_pvid;
i40e_vlan_stripping_enable(vsi);
- i40e_vc_reset_vf(vf, true);
- /* During reset the VF got a new VSI, so refresh a pointer. */
- vsi = pf->vsi[vf->lan_vsi_idx];
+
/* Locked once because multiple functions below iterate list */
spin_lock_bh(&vsi->mac_filter_hash_lock);
@@ -4563,6 +4561,10 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id,
*/
vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
+ i40e_vc_reset_vf(vf, true);
+ /* During reset the VF got a new VSI, so refresh a pointer. */
+ vsi = pf->vsi[vf->lan_vsi_idx];
+
ret = i40e_config_vf_promiscuous_mode(vf, vsi->id, allmulti, alluni);
if (ret) {
dev_err(&pf->pdev->dev, "Unable to config vf promiscuous mode\n");
diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
index 85fba85fbb23..e110ba346185 100644
--- a/drivers/net/ethernet/intel/iavf/iavf.h
+++ b/drivers/net/ethernet/intel/iavf/iavf.h
@@ -521,7 +521,7 @@ void iavf_down(struct iavf_adapter *adapter);
int iavf_process_config(struct iavf_adapter *adapter);
int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter);
void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags);
-void iavf_schedule_request_stats(struct iavf_adapter *adapter);
+void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags);
void iavf_schedule_finish_config(struct iavf_adapter *adapter);
void iavf_reset(struct iavf_adapter *adapter);
void iavf_set_ethtool_ops(struct net_device *netdev);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
index a34303ad057d..90397293525f 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c
@@ -362,7 +362,7 @@ static void iavf_get_ethtool_stats(struct net_device *netdev,
unsigned int i;
/* Explicitly request stats refresh */
- iavf_schedule_request_stats(adapter);
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_REQUEST_STATS);
iavf_add_ethtool_stats(&data, adapter, iavf_gstrings_stats);
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 7b300c86ceda..6a2e6d64bc3a 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -314,15 +314,13 @@ void iavf_schedule_reset(struct iavf_adapter *adapter, u64 flags)
}
/**
- * iavf_schedule_request_stats - Set the flags and schedule statistics request
+ * iavf_schedule_aq_request - Set the flags and schedule an AQ request
* @adapter: board private structure
- *
- * Sets IAVF_FLAG_AQ_REQUEST_STATS flag so iavf_watchdog_task() will explicitly
- * request and refresh ethtool stats
+ * @flags: requested aq flags
**/
-void iavf_schedule_request_stats(struct iavf_adapter *adapter)
+void iavf_schedule_aq_request(struct iavf_adapter *adapter, u64 flags)
{
- adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_STATS;
+ adapter->aq_required |= flags;
mod_delayed_work(adapter->wq, &adapter->watchdog_task, 0);
}
@@ -823,7 +821,7 @@ iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter,
list_add_tail(&f->list, &adapter->vlan_filter_list);
f->state = IAVF_VLAN_ADD;
adapter->num_vlan_filters++;
- adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER);
}
clearout:
@@ -845,7 +843,7 @@ static void iavf_del_vlan(struct iavf_adapter *adapter, struct iavf_vlan vlan)
f = iavf_find_vlan(adapter, vlan);
if (f) {
f->state = IAVF_VLAN_REMOVE;
- adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
+ iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_DEL_VLAN_FILTER);
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
@@ -1421,7 +1419,8 @@ void iavf_down(struct iavf_adapter *adapter)
iavf_clear_fdir_filters(adapter);
iavf_clear_adv_rss_conf(adapter);
- if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)) {
+ if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
+ !(test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))) {
/* cancel any current operation */
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
/* Schedule operations to close down the HW. Don't wait
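The iavf hunks above fold the old stats-only scheduler into a general iavf_schedule_aq_request() that sets any aq_required flag and immediately kicks the watchdog via mod_delayed_work(..., 0). Callers that used to open-code the flag set now go through the helper, e.g. (flag names as in the hunks):

	/* request a stats refresh, or queue a VLAN filter add */
	iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_REQUEST_STATS);
	iavf_schedule_aq_request(adapter, IAVF_FLAG_AQ_ADD_VLAN_FILTER);

The zero delay matters: a bare flag set would wait for the next scheduled watchdog run, while the helper gets the request serviced promptly.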
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
index b03426ac932b..db97353efd06 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
@@ -2617,12 +2617,14 @@ static int ice_vc_query_rxdid(struct ice_vf *vf)
goto err;
}
- /* Read flexiflag registers to determine whether the
- * corresponding RXDID is configured and supported or not.
- * Since Legacy 16byte descriptor format is not supported,
- * start from Legacy 32byte descriptor.
+ /* RXDIDs supported by the DDP package can be read from the register
+ * to build the supported RXDID bitmap. The legacy 32-byte RXDID is
+ * not listed in the DDP package, so add it to the bitmap manually.
+ * The legacy 16-byte descriptor is not supported.
*/
- for (i = ICE_RXDID_LEGACY_1; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
+ rxdid->supported_rxdids |= BIT(ICE_RXDID_LEGACY_1);
+
+ for (i = ICE_RXDID_FLEX_NIC; i < ICE_FLEX_DESC_RXDID_MAX_NUM; i++) {
regval = rd32(hw, GLFLXP_RXDID_FLAGS(i, 0));
if ((regval >> GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S)
& GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M)
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 015b78144114..a2b759531cb7 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -34,11 +34,11 @@ struct igb_adapter;
/* TX/RX descriptor defines */
#define IGB_DEFAULT_TXD 256
#define IGB_DEFAULT_TX_WORK 128
-#define IGB_MIN_TXD 80
+#define IGB_MIN_TXD 64
#define IGB_MAX_TXD 4096
#define IGB_DEFAULT_RXD 256
-#define IGB_MIN_RXD 80
+#define IGB_MIN_RXD 64
#define IGB_MAX_RXD 4096
#define IGB_DEFAULT_ITR 3 /* dynamic */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 1ab787ed254d..76b34cee1da3 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3827,8 +3827,11 @@ static int igb_enable_sriov(struct pci_dev *pdev, int num_vfs, bool reinit)
}
/* only call pci_enable_sriov() if no VFs are allocated already */
- if (!old_vfs)
+ if (!old_vfs) {
err = pci_enable_sriov(pdev, adapter->vfs_allocated_count);
+ if (err)
+ goto err_out;
+ }
goto out;
@@ -3933,8 +3936,9 @@ static void igb_probe_vfs(struct igb_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
struct e1000_hw *hw = &adapter->hw;
- /* Virtualization features not supported on i210 family. */
- if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211))
+ /* Virtualization features are not supported on the i210 and 82580 families. */
+ if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211) ||
+ (hw->mac.type == e1000_82580))
return;
/* Of the below we really only want the effect of getting
diff --git a/drivers/net/ethernet/intel/igbvf/igbvf.h b/drivers/net/ethernet/intel/igbvf/igbvf.h
index 57d39ee00b58..7b83678ba83a 100644
--- a/drivers/net/ethernet/intel/igbvf/igbvf.h
+++ b/drivers/net/ethernet/intel/igbvf/igbvf.h
@@ -39,11 +39,11 @@ enum latency_range {
/* Tx/Rx descriptor defines */
#define IGBVF_DEFAULT_TXD 256
#define IGBVF_MAX_TXD 4096
-#define IGBVF_MIN_TXD 80
+#define IGBVF_MIN_TXD 64
#define IGBVF_DEFAULT_RXD 256
#define IGBVF_MAX_RXD 4096
-#define IGBVF_MIN_RXD 80
+#define IGBVF_MIN_RXD 64
#define IGBVF_MIN_ITR_USECS 10 /* 100000 irq/sec */
#define IGBVF_MAX_ITR_USECS 10000 /* 100 irq/sec */
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 8ebe6999a528..f48f82d5e274 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -379,11 +379,11 @@ static inline u32 igc_rss_type(const union igc_adv_rx_desc *rx_desc)
/* TX/RX descriptor defines */
#define IGC_DEFAULT_TXD 256
#define IGC_DEFAULT_TX_WORK 128
-#define IGC_MIN_TXD 80
+#define IGC_MIN_TXD 64
#define IGC_MAX_TXD 4096
#define IGC_DEFAULT_RXD 256
-#define IGC_MIN_RXD 80
+#define IGC_MIN_RXD 64
#define IGC_MAX_RXD 4096
/* Supported Rx Buffer Sizes */
diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c
index 93bce729be76..7ab6dd58e400 100644
--- a/drivers/net/ethernet/intel/igc/igc_ethtool.c
+++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c
@@ -868,6 +868,18 @@ static void igc_ethtool_get_stats(struct net_device *netdev,
spin_unlock(&adapter->stats64_lock);
}
+static int igc_ethtool_get_previous_rx_coalesce(struct igc_adapter *adapter)
+{
+ return (adapter->rx_itr_setting <= 3) ?
+ adapter->rx_itr_setting : adapter->rx_itr_setting >> 2;
+}
+
+static int igc_ethtool_get_previous_tx_coalesce(struct igc_adapter *adapter)
+{
+ return (adapter->tx_itr_setting <= 3) ?
+ adapter->tx_itr_setting : adapter->tx_itr_setting >> 2;
+}
+
static int igc_ethtool_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ec,
struct kernel_ethtool_coalesce *kernel_coal,
@@ -875,17 +887,8 @@ static int igc_ethtool_get_coalesce(struct net_device *netdev,
{
struct igc_adapter *adapter = netdev_priv(netdev);
- if (adapter->rx_itr_setting <= 3)
- ec->rx_coalesce_usecs = adapter->rx_itr_setting;
- else
- ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
-
- if (!(adapter->flags & IGC_FLAG_QUEUE_PAIRS)) {
- if (adapter->tx_itr_setting <= 3)
- ec->tx_coalesce_usecs = adapter->tx_itr_setting;
- else
- ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
- }
+ ec->rx_coalesce_usecs = igc_ethtool_get_previous_rx_coalesce(adapter);
+ ec->tx_coalesce_usecs = igc_ethtool_get_previous_tx_coalesce(adapter);
return 0;
}
@@ -910,8 +913,12 @@ static int igc_ethtool_set_coalesce(struct net_device *netdev,
ec->tx_coalesce_usecs == 2)
return -EINVAL;
- if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
+ if ((adapter->flags & IGC_FLAG_QUEUE_PAIRS) &&
+ ec->tx_coalesce_usecs != igc_ethtool_get_previous_tx_coalesce(adapter)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Queue Pair mode enabled, both Rx and Tx coalescing controlled by rx-usecs");
return -EINVAL;
+ }
/* If ITR is disabled, disable DMAC */
if (ec->rx_coalesce_usecs == 0) {
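The new igc helpers make the ITR encoding explicit: settings 0-3 are special modes reported as-is, and larger settings hold the interval shifted left by two (4 us units), hence the >> 2 on read. A sketch of both directions; the encode side is inferred from the decode path, not taken from the driver:

static u32 itr_setting_to_usecs(u32 itr_setting)
{
	/* 0-3 are special modes; otherwise the value is usecs << 2 */
	return itr_setting <= 3 ? itr_setting : itr_setting >> 2;
}

static u32 usecs_to_itr_setting(u32 usecs)	/* assumed inverse */
{
	return usecs <= 3 ? usecs : usecs << 2;
}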
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 293b45717683..98de34d0ce07 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -6491,7 +6491,7 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
struct igc_ring *ring;
int i, drops;
- if (unlikely(test_bit(__IGC_DOWN, &adapter->state)))
+ if (unlikely(!netif_carrier_ok(dev)))
return -ENETDOWN;
if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 0310af851086..9339edbd9082 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -979,6 +979,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
+ u32 aflags = adapter->flags;
bool is_l2 = false;
u32 regval;
@@ -996,20 +997,20 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
case HWTSTAMP_FILTER_NONE:
tsync_rx_ctl = 0;
tsync_rx_mtrl = 0;
- adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ aflags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG;
- adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1;
tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG;
- adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V2_EVENT:
case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
@@ -1023,8 +1024,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
is_l2 = true;
config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
- adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
+ aflags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
+ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_NTP_ALL:
@@ -1035,7 +1036,7 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
if (hw->mac.type >= ixgbe_mac_X550) {
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_ALL;
config->rx_filter = HWTSTAMP_FILTER_ALL;
- adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
+ aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
break;
}
fallthrough;
@@ -1046,8 +1047,6 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
* Delay_Req messages and hardware does not support
* timestamping all packets => return error
*/
- adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED |
- IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER);
config->rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
}
@@ -1079,8 +1078,8 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
IXGBE_TSYNCRXCTL_TYPE_ALL |
IXGBE_TSYNCRXCTL_TSIP_UT_EN;
config->rx_filter = HWTSTAMP_FILTER_ALL;
- adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
- adapter->flags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER;
+ aflags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED;
+ aflags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER;
is_l2 = true;
break;
default:
@@ -1113,6 +1112,9 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
IXGBE_WRITE_FLUSH(hw);
+ /* configure adapter flags only when HW is actually configured */
+ adapter->flags = aflags;
+
/* clear TX/RX time stamp registers, just to be sure */
ixgbe_ptp_clear_tx_timestamp(adapter);
IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
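The ixgbe change applies a stage-then-commit pattern: the flag word is mutated on a local copy (aflags) while the request is validated, and written back to adapter->flags only after the hardware registers have actually been programmed, so the -ERANGE exit no longer leaves stale flags behind. A generic sketch with hypothetical names:

static int configure_feature(struct my_adapter *ad, int request)
{
	u32 aflags = ad->flags;		/* scratch copy */

	switch (request) {
	case REQUEST_ENABLE:
		aflags |= FLAG_FEATURE;
		break;
	case REQUEST_DISABLE:
		aflags &= ~FLAG_FEATURE;
		break;
	default:
		return -ERANGE;		/* ad->flags untouched on error */
	}

	program_hw(ad, aflags);		/* hypothetical HW commit */
	ad->flags = aflags;		/* publish only after HW config */
	return 0;
}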
diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
index eb74ccddb440..21c3f9b015c8 100644
--- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
+++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
@@ -5586,6 +5586,11 @@ static int mvpp2_ethtool_get_rxnfc(struct net_device *dev,
break;
case ETHTOOL_GRXCLSRLALL:
for (i = 0; i < MVPP2_N_RFS_ENTRIES_PER_FLOW; i++) {
+ if (loc == info->rule_cnt) {
+ ret = -EMSGSIZE;
+ break;
+ }
+
if (port->rfs_rules[i])
rules[loc++] = i;
}
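This guard bounds an ethtool rule dump by the caller-supplied rule_cnt and reports -EMSGSIZE instead of overrunning the array (the mtk_hwlro_get_fdir_all hunk further down adds the identical check). A generalized sketch with hypothetical names, erroring only when a match would actually overflow:

static int fill_rule_locs(const bool *active, int n_entries,
			  u32 *locs, u32 cap)
{
	u32 out = 0;
	int i;

	for (i = 0; i < n_entries; i++) {
		if (!active[i])
			continue;
		if (out == cap)
			return -EMSGSIZE;	/* caller buffer too small */
		locs[out++] = i;
	}
	return out;
}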
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
index 4424de2ffd70..dbc518ff8276 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
@@ -734,13 +734,13 @@ static netdev_tx_t octep_start_xmit(struct sk_buff *skb,
dma_map_sg_err:
if (si > 0) {
dma_unmap_single(iq->dev, sglist[0].dma_ptr[0],
- sglist[0].len[0], DMA_TO_DEVICE);
- sglist[0].len[0] = 0;
+ sglist[0].len[3], DMA_TO_DEVICE);
+ sglist[0].len[3] = 0;
}
while (si > 1) {
dma_unmap_page(iq->dev, sglist[si >> 2].dma_ptr[si & 3],
- sglist[si >> 2].len[si & 3], DMA_TO_DEVICE);
- sglist[si >> 2].len[si & 3] = 0;
+ sglist[si >> 2].len[3 - (si & 3)], DMA_TO_DEVICE);
+ sglist[si >> 2].len[3 - (si & 3)] = 0;
si--;
}
tx_buffer->gather = 0;
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
index 5a520d37bea0..d0adb82d65c3 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.c
@@ -69,12 +69,12 @@ int octep_iq_process_completions(struct octep_iq *iq, u16 budget)
compl_sg++;
dma_unmap_single(iq->dev, tx_buffer->sglist[0].dma_ptr[0],
- tx_buffer->sglist[0].len[0], DMA_TO_DEVICE);
+ tx_buffer->sglist[0].len[3], DMA_TO_DEVICE);
i = 1; /* entry 0 is main skb, unmapped above */
while (frags--) {
dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
- tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE);
+ tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
i++;
}
@@ -131,13 +131,13 @@ static void octep_iq_free_pending(struct octep_iq *iq)
dma_unmap_single(iq->dev,
tx_buffer->sglist[0].dma_ptr[0],
- tx_buffer->sglist[0].len[0],
+ tx_buffer->sglist[0].len[3],
DMA_TO_DEVICE);
i = 1; /* entry 0 is main skb, unmapped above */
while (frags--) {
dma_unmap_page(iq->dev, tx_buffer->sglist[i >> 2].dma_ptr[i & 3],
- tx_buffer->sglist[i >> 2].len[i & 3], DMA_TO_DEVICE);
+ tx_buffer->sglist[i >> 2].len[3 - (i & 3)], DMA_TO_DEVICE);
i++;
}
diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
index 2ef57980eb47..21e75ff9f5e7 100644
--- a/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
+++ b/drivers/net/ethernet/marvell/octeon_ep/octep_tx.h
@@ -17,7 +17,21 @@
#define TX_BUFTYPE_NET_SG 2
#define NUM_TX_BUFTYPES 3
-/* Hardware format for Scatter/Gather list */
+/* Hardware format for Scatter/Gather list
+ *
+ * 63 48|47 32|31 16|15 0
+ * -----------------------------------------
+ * | Len 0 | Len 1 | Len 2 | Len 3 |
+ * -----------------------------------------
+ * | Ptr 0 |
+ * -----------------------------------------
+ * | Ptr 1 |
+ * -----------------------------------------
+ * | Ptr 2 |
+ * -----------------------------------------
+ * | Ptr 3 |
+ * -----------------------------------------
+ */
struct octep_tx_sglist_desc {
u16 len[4];
dma_addr_t dma_ptr[4];
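The layout diagram explains the index arithmetic in the octep unmap fixes above: all four lengths share one 64-bit word with Len 0 in bits 63:48, so on a little-endian host the u16 len[4] array holds them in reverse order, while each dma_ptr occupies a full word and needs no swap. A sketch of accessors capturing the mapping (hypothetical helpers, little-endian assumed):

static inline u16 *octep_sg_len(struct octep_tx_sglist_desc *sg, int i)
{
	/* logical entry i: word i >> 2, reversed slot within the word */
	return &sg[i >> 2].len[3 - (i & 3)];
}

static inline dma_addr_t octep_sg_ptr(struct octep_tx_sglist_desc *sg, int i)
{
	return sg[i >> 2].dma_ptr[i & 3];	/* pointers are not swapped */
}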
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index c2f68678e947..23c2f2ed2fb8 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -846,6 +846,21 @@ static int nix_aq_enqueue_wait(struct rvu *rvu, struct rvu_block *block,
return 0;
}
+static void nix_get_aq_req_smq(struct rvu *rvu, struct nix_aq_enq_req *req,
+ u16 *smq, u16 *smq_mask)
+{
+ struct nix_cn10k_aq_enq_req *aq_req;
+
+ if (!is_rvu_otx2(rvu)) {
+ aq_req = (struct nix_cn10k_aq_enq_req *)req;
+ *smq = aq_req->sq.smq;
+ *smq_mask = aq_req->sq_mask.smq;
+ } else {
+ *smq = req->sq.smq;
+ *smq_mask = req->sq_mask.smq;
+ }
+}
+
static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
struct nix_aq_enq_req *req,
struct nix_aq_enq_rsp *rsp)
@@ -857,6 +872,7 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
struct rvu_block *block;
struct admin_queue *aq;
struct rvu_pfvf *pfvf;
+ u16 smq, smq_mask;
void *ctx, *mask;
bool ena;
u64 cfg;
@@ -928,13 +944,14 @@ static int rvu_nix_blk_aq_enq_inst(struct rvu *rvu, struct nix_hw *nix_hw,
if (rc)
return rc;
+ nix_get_aq_req_smq(rvu, req, &smq, &smq_mask);
/* Check if SQ pointed SMQ belongs to this PF/VF or not */
if (req->ctype == NIX_AQ_CTYPE_SQ &&
((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) ||
(req->op == NIX_AQ_INSTOP_WRITE &&
- req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) {
+ req->sq_mask.ena && req->sq.ena && smq_mask))) {
if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ,
- pcifunc, req->sq.smq))
+ pcifunc, smq))
return NIX_AF_ERR_AQ_ENQUEUE;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index 826f691de259..a4a258da8dd5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -107,12 +107,13 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
}
#define NPA_MAX_BURST 16
-void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
+int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
{
struct otx2_nic *pfvf = dev;
+ int cnt = cq->pool_ptrs;
u64 ptrs[NPA_MAX_BURST];
- int num_ptrs = 1;
dma_addr_t bufptr;
+ int num_ptrs = 1;
/* Refill pool with new buffers */
while (cq->pool_ptrs) {
@@ -131,6 +132,7 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
num_ptrs = 1;
}
}
+ return cnt - cq->pool_ptrs;
}
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
index 8ae96815865e..c1861f7de254 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
@@ -24,7 +24,7 @@ static inline int mtu_to_dwrr_weight(struct otx2_nic *pfvf, int mtu)
return weight;
}
-void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
int cn10k_lmtst_init(struct otx2_nic *pfvf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index 8511906cb4e2..997fedac3a98 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -574,20 +574,8 @@ int otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool,
int otx2_alloc_buffer(struct otx2_nic *pfvf, struct otx2_cq_queue *cq,
dma_addr_t *dma)
{
- if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma))) {
- struct refill_work *work;
- struct delayed_work *dwork;
-
- work = &pfvf->refill_wrk[cq->cq_idx];
- dwork = &work->pool_refill_work;
- /* Schedule a task if no other task is running */
- if (!cq->refill_task_sched) {
- cq->refill_task_sched = true;
- schedule_delayed_work(dwork,
- msecs_to_jiffies(100));
- }
+ if (unlikely(__otx2_alloc_rbuf(pfvf, cq->rbpool, dma)))
return -ENOMEM;
- }
return 0;
}
@@ -1082,39 +1070,20 @@ static int otx2_cq_init(struct otx2_nic *pfvf, u16 qidx)
static void otx2_pool_refill_task(struct work_struct *work)
{
struct otx2_cq_queue *cq;
- struct otx2_pool *rbpool;
struct refill_work *wrk;
- int qidx, free_ptrs = 0;
struct otx2_nic *pfvf;
- dma_addr_t bufptr;
+ int qidx;
wrk = container_of(work, struct refill_work, pool_refill_work.work);
pfvf = wrk->pf;
qidx = wrk - pfvf->refill_wrk;
cq = &pfvf->qset.cq[qidx];
- rbpool = cq->rbpool;
- free_ptrs = cq->pool_ptrs;
- while (cq->pool_ptrs) {
- if (otx2_alloc_rbuf(pfvf, rbpool, &bufptr)) {
- /* Schedule a WQ if we fails to free atleast half of the
- * pointers else enable napi for this RQ.
- */
- if (!((free_ptrs - cq->pool_ptrs) > free_ptrs / 2)) {
- struct delayed_work *dwork;
-
- dwork = &wrk->pool_refill_work;
- schedule_delayed_work(dwork,
- msecs_to_jiffies(100));
- } else {
- cq->refill_task_sched = false;
- }
- return;
- }
- pfvf->hw_ops->aura_freeptr(pfvf, qidx, bufptr + OTX2_HEAD_ROOM);
- cq->pool_ptrs--;
- }
cq->refill_task_sched = false;
+
+ local_bh_disable();
+ napi_schedule(wrk->napi);
+ local_bh_enable();
}
int otx2_config_nix_queues(struct otx2_nic *pfvf)
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 4c6032ee7800..c04a8ee53a82 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -302,6 +302,7 @@ struct flr_work {
struct refill_work {
struct delayed_work pool_refill_work;
struct otx2_nic *pf;
+ struct napi_struct *napi;
};
/* PTPv2 originTimestamp structure */
@@ -370,7 +371,7 @@ struct dev_hw_ops {
int (*sq_aq_init)(void *dev, u16 qidx, u16 sqb_aura);
void (*sqe_flush)(void *dev, struct otx2_snd_queue *sq,
int size, int qidx);
- void (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
+ int (*refill_pool_ptrs)(void *dev, struct otx2_cq_queue *cq);
void (*aura_freeptr)(void *dev, int aura, u64 buf);
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 70b9065f7d10..6daf4d58c25d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1943,6 +1943,10 @@ int otx2_stop(struct net_device *netdev)
netif_tx_disable(netdev);
+ for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
+ cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
+ devm_kfree(pf->dev, pf->refill_wrk);
+
otx2_free_hw_resources(pf);
otx2_free_cints(pf, pf->hw.cint_cnt);
otx2_disable_napi(pf);
@@ -1950,9 +1954,6 @@ int otx2_stop(struct net_device *netdev)
for (qidx = 0; qidx < netdev->num_tx_queues; qidx++)
netdev_tx_reset_queue(netdev_get_tx_queue(netdev, qidx));
- for (wrk = 0; wrk < pf->qset.cq_cnt; wrk++)
- cancel_delayed_work_sync(&pf->refill_wrk[wrk].pool_refill_work);
- devm_kfree(pf->dev, pf->refill_wrk);
kfree(qset->sq);
kfree(qset->cq);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
index e369baf11530..53b2a4ef5298 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c
@@ -29,7 +29,8 @@
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
- struct otx2_cq_queue *cq);
+ struct otx2_cq_queue *cq,
+ bool *need_xdp_flush);
static int otx2_nix_cq_op_status(struct otx2_nic *pfvf,
struct otx2_cq_queue *cq)
@@ -337,7 +338,7 @@ static bool otx2_check_rcv_errors(struct otx2_nic *pfvf,
static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
struct napi_struct *napi,
struct otx2_cq_queue *cq,
- struct nix_cqe_rx_s *cqe)
+ struct nix_cqe_rx_s *cqe, bool *need_xdp_flush)
{
struct nix_rx_parse_s *parse = &cqe->parse;
struct nix_rx_sg_s *sg = &cqe->sg;
@@ -353,7 +354,7 @@ static void otx2_rcv_pkt_handler(struct otx2_nic *pfvf,
}
if (pfvf->xdp_prog)
- if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq))
+ if (otx2_xdp_rcv_pkt_handler(pfvf, pfvf->xdp_prog, cqe, cq, need_xdp_flush))
return;
skb = napi_get_frags(napi);
@@ -388,6 +389,7 @@ static int otx2_rx_napi_handler(struct otx2_nic *pfvf,
struct napi_struct *napi,
struct otx2_cq_queue *cq, int budget)
{
+ bool need_xdp_flush = false;
struct nix_cqe_rx_s *cqe;
int processed_cqe = 0;
@@ -409,13 +411,15 @@ process_cqe:
cq->cq_head++;
cq->cq_head &= (cq->cqe_cnt - 1);
- otx2_rcv_pkt_handler(pfvf, napi, cq, cqe);
+ otx2_rcv_pkt_handler(pfvf, napi, cq, cqe, &need_xdp_flush);
cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID;
cqe->sg.seg_addr = 0x00;
processed_cqe++;
cq->pend_cqe--;
}
+ if (need_xdp_flush)
+ xdp_do_flush();
/* Free CQEs to HW */
otx2_write64(pfvf, NIX_LF_CQ_OP_DOOR,
@@ -424,9 +428,10 @@ process_cqe:
return processed_cqe;
}
-void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
+int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
{
struct otx2_nic *pfvf = dev;
+ int cnt = cq->pool_ptrs;
dma_addr_t bufptr;
while (cq->pool_ptrs) {
@@ -435,6 +440,8 @@ void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
otx2_aura_freeptr(pfvf, cq->cq_idx, bufptr + OTX2_HEAD_ROOM);
cq->pool_ptrs--;
}
+
+ return cnt - cq->pool_ptrs;
}
static int otx2_tx_napi_handler(struct otx2_nic *pfvf,
@@ -521,6 +528,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
struct otx2_cq_queue *cq;
struct otx2_qset *qset;
struct otx2_nic *pfvf;
+ int filled_cnt = -1;
cq_poll = container_of(napi, struct otx2_cq_poll, napi);
pfvf = (struct otx2_nic *)cq_poll->dev;
@@ -541,7 +549,7 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
}
if (rx_cq && rx_cq->pool_ptrs)
- pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
+ filled_cnt = pfvf->hw_ops->refill_pool_ptrs(pfvf, rx_cq);
/* Clear the IRQ */
otx2_write64(pfvf, NIX_LF_CINTX_INT(cq_poll->cint_idx), BIT_ULL(0));
@@ -561,9 +569,25 @@ int otx2_napi_handler(struct napi_struct *napi, int budget)
otx2_config_irq_coalescing(pfvf, i);
}
- /* Re-enable interrupts */
- otx2_write64(pfvf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
- BIT_ULL(0));
+ if (unlikely(!filled_cnt)) {
+ struct refill_work *work;
+ struct delayed_work *dwork;
+
+ work = &pfvf->refill_wrk[cq->cq_idx];
+ dwork = &work->pool_refill_work;
+ /* Schedule a task if no other task is running */
+ if (!cq->refill_task_sched) {
+ work->napi = napi;
+ cq->refill_task_sched = true;
+ schedule_delayed_work(dwork,
+ msecs_to_jiffies(100));
+ }
+ } else {
+ /* Re-enable interrupts */
+ otx2_write64(pfvf,
+ NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx),
+ BIT_ULL(0));
+ }
}
return workdone;
}
@@ -1334,7 +1358,8 @@ bool otx2_xdp_sq_append_pkt(struct otx2_nic *pfvf, u64 iova, int len, u16 qidx)
static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
struct bpf_prog *prog,
struct nix_cqe_rx_s *cqe,
- struct otx2_cq_queue *cq)
+ struct otx2_cq_queue *cq,
+ bool *need_xdp_flush)
{
unsigned char *hard_start, *data;
int qidx = cq->cq_idx;
@@ -1371,8 +1396,10 @@ static bool otx2_xdp_rcv_pkt_handler(struct otx2_nic *pfvf,
otx2_dma_unmap_page(pfvf, iova, pfvf->rbsize,
DMA_FROM_DEVICE);
- if (!err)
+ if (!err) {
+ *need_xdp_flush = true;
return true;
+ }
put_page(page);
break;
default:
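The need_xdp_flush plumbing above defers xdp_do_flush() so frames redirected by the XDP program are flushed once per NAPI poll rather than per packet. A minimal sketch of the pattern with hypothetical helpers:

static int rx_poll_sketch(struct napi_struct *napi, int budget)
{
	bool need_xdp_flush = false;
	int done = 0;

	while (done < budget && rx_have_work()) {	/* hypothetical */
		/* rx_run_xdp() returns true when the frame was redirected */
		if (rx_run_xdp(napi))			/* hypothetical */
			need_xdp_flush = true;
		done++;
	}

	if (need_xdp_flush)
		xdp_do_flush();	/* drain all queued redirects at once */

	return done;
}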
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 9e3bfbe5c480..a82ffca8ce1b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -170,6 +170,6 @@ void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq,
int size, int qidx);
void otx2_sqe_flush(void *dev, struct otx2_snd_queue *sq,
int size, int qidx);
-void otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
-void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+int otx2_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
+int cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
#endif /* OTX2_TXRX_H */
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index ddec1627f1a7..8d0bacf4e49c 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -2195,7 +2195,7 @@ struct rx_ring_info {
struct sk_buff *skb;
dma_addr_t data_addr;
DEFINE_DMA_UNMAP_LEN(data_size);
- dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT];
+ dma_addr_t frag_addr[ETH_JUMBO_MTU >> PAGE_SHIFT ?: 1];
};
enum flow_control {
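The sky2 one-liner uses the GNU a ?: b extension, which evaluates to a when a is nonzero and b otherwise: with 64 KiB pages, ETH_JUMBO_MTU >> PAGE_SHIFT is 0 and would have declared a zero-length array, so the ?: 1 keeps a one-element minimum. The same guard without the extension, for clarity:

#define AT_LEAST_ONE(n)	((n) ? (n) : 1)

	dma_addr_t frag_addr[AT_LEAST_ONE(ETH_JUMBO_MTU >> PAGE_SHIFT)];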
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 6ad42e3b488f..20afe79f380a 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2005,11 +2005,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
u8 *data, *new_data;
struct mtk_rx_dma_v2 *rxd, trxd;
int done = 0, bytes = 0;
+ dma_addr_t dma_addr = DMA_MAPPING_ERROR;
while (done < budget) {
unsigned int pktlen, *rxdcsum;
struct net_device *netdev;
- dma_addr_t dma_addr;
u32 hash, reason;
int mac = 0;
@@ -2186,7 +2186,8 @@ release_desc:
else
rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
- if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) &&
+ likely(dma_addr != DMA_MAPPING_ERROR))
rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
ring->calc_idx = idx;
@@ -2994,6 +2995,9 @@ static int mtk_hwlro_get_fdir_all(struct net_device *dev,
int i;
for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+
if (mac->hwlro_ip[i]) {
rule_locs[cnt] = i;
cnt++;
@@ -3167,8 +3171,8 @@ static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
eth->rx_events++;
if (likely(napi_schedule_prep(&eth->rx_napi))) {
- __napi_schedule(&eth->rx_napi);
mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ __napi_schedule(&eth->rx_napi);
}
return IRQ_HANDLED;
@@ -3180,8 +3184,8 @@ static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
eth->tx_events++;
if (likely(napi_schedule_prep(&eth->tx_napi))) {
- __napi_schedule(&eth->tx_napi);
mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+ __napi_schedule(&eth->tx_napi);
}
return IRQ_HANDLED;
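Both mtk IRQ handlers now mask the interrupt source before calling __napi_schedule(): if the order were reversed, the poll could run to completion on another CPU and re-enable the line just before this CPU masks it, leaving the queue stalled with no further interrupts. A sketch of the corrected ordering (context struct and mask helper are hypothetical):

static irqreturn_t handle_rx_irq(int irq, void *data)
{
	struct eth_ctx *eth = data;		/* hypothetical */

	if (likely(napi_schedule_prep(&eth->rx_napi))) {
		mask_rx_irq(eth);		/* 1: mask the source */
		__napi_schedule(&eth->rx_napi);	/* 2: then kick NAPI */
	}
	return IRQ_HANDLED;
}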
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
index a70a5417c173..a4efbeb16208 100644
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -214,9 +214,11 @@ mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
dsa_port = mtk_flow_get_dsa_port(&dev);
if (dev == eth->netdev[0])
- pse_port = 1;
+ pse_port = PSE_GDM1_PORT;
else if (dev == eth->netdev[1])
- pse_port = 2;
+ pse_port = PSE_GDM2_PORT;
+ else if (dev == eth->netdev[2])
+ pse_port = PSE_GDM3_PORT;
else
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
index 92d3952dfa8b..feeb41693c17 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/ct.c
@@ -17,8 +17,10 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
if (err)
return err;
- if (mlx5e_is_eswitch_flow(parse_state->flow))
+ if (mlx5e_is_eswitch_flow(parse_state->flow)) {
attr->esw_attr->split_count = attr->esw_attr->out_count;
+ parse_state->if_count = 0;
+ }
attr->flags |= MLX5_ATTR_FLAG_CT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
index 291193f7120d..f63402c48028 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/mirred.c
@@ -294,6 +294,7 @@ parse_mirred_ovs_master(struct mlx5e_tc_act_parse_state *parse_state,
if (err)
return err;
+ parse_state->if_count = 0;
esw_attr->out_count++;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
index 3b272bbf4c53..368a95fa77d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/pedit.c
@@ -98,8 +98,10 @@ tc_act_parse_pedit(struct mlx5e_tc_act_parse_state *parse_state,
attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
- if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
+ if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
esw_attr->split_count = esw_attr->out_count;
+ parse_state->if_count = 0;
+ }
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
index ad09a8a5f36e..2d1d4a04501b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/redirect_ingress.c
@@ -66,6 +66,7 @@ tc_act_parse_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state,
if (err)
return err;
+ parse_state->if_count = 0;
esw_attr->out_count++;
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
index c8a3eaf189f6..a13c5e707b83 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan.c
@@ -166,6 +166,7 @@ tc_act_parse_vlan(struct mlx5e_tc_act_parse_state *parse_state,
return err;
esw_attr->split_count = esw_attr->out_count;
+ parse_state->if_count = 0;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
index 310b99230760..f17575b09788 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/vlan_mangle.c
@@ -65,8 +65,10 @@ tc_act_parse_vlan_mangle(struct mlx5e_tc_act_parse_state *parse_state,
if (err)
return err;
- if (ns_type == MLX5_FLOW_NAMESPACE_FDB)
+ if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
attr->esw_attr->split_count = attr->esw_attr->out_count;
+ parse_state->if_count = 0;
+ }
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 318083690fcd..c24828b688ac 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -3936,6 +3936,7 @@ parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
}
i_split = i + 1;
+ parse_state->if_count = 0;
list_add(&attr->list, &flow->attrs);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 6cd7d6497e10..d4cde6555063 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1276,12 +1276,19 @@ int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
enum mlx5_eswitch_vport_event enabled_events)
{
+ bool pf_needed;
int ret;
+ pf_needed = mlx5_core_is_ecpf_esw_manager(esw->dev) ||
+ esw->mode == MLX5_ESWITCH_LEGACY;
+
/* Enable PF vport */
- ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF, enabled_events);
- if (ret)
- return ret;
+ if (pf_needed) {
+ ret = mlx5_eswitch_load_pf_vf_vport(esw, MLX5_VPORT_PF,
+ enabled_events);
+ if (ret)
+ return ret;
+ }
/* Enable external host PF HCA */
ret = host_pf_enable_hca(esw->dev);
@@ -1317,7 +1324,8 @@ ec_vf_err:
ecpf_err:
host_pf_disable_hca(esw->dev);
pf_hca_err:
- mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
+ if (pf_needed)
+ mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
return ret;
}
@@ -1335,7 +1343,10 @@ void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
}
host_pf_disable_hca(esw->dev);
- mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
+
+ if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
+ esw->mode == MLX5_ESWITCH_LEGACY)
+ mlx5_eswitch_unload_pf_vf_vport(esw, MLX5_VPORT_PF);
}
static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 752fb0dfb111..b296ac52a439 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -3216,26 +3216,47 @@ esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
esw_acl_ingress_ofld_cleanup(esw, vport);
}
-static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
+static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw)
{
- struct mlx5_vport *vport;
+ struct mlx5_vport *uplink, *manager;
+ int ret;
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
- if (IS_ERR(vport))
- return PTR_ERR(vport);
+ uplink = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+ if (IS_ERR(uplink))
+ return PTR_ERR(uplink);
+
+ ret = esw_vport_create_offloads_acl_tables(esw, uplink);
+ if (ret)
+ return ret;
+
+ manager = mlx5_eswitch_get_vport(esw, esw->manager_vport);
+ if (IS_ERR(manager)) {
+ ret = PTR_ERR(manager);
+ goto err_manager;
+ }
- return esw_vport_create_offloads_acl_tables(esw, vport);
+ ret = esw_vport_create_offloads_acl_tables(esw, manager);
+ if (ret)
+ goto err_manager;
+
+ return 0;
+
+err_manager:
+ esw_vport_destroy_offloads_acl_tables(esw, uplink);
+ return ret;
}
-static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
+static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
- vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
- if (IS_ERR(vport))
- return;
+ vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
+ if (!IS_ERR(vport))
+ esw_vport_destroy_offloads_acl_tables(esw, vport);
- esw_vport_destroy_offloads_acl_tables(esw, vport);
+ vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+ if (!IS_ERR(vport))
+ esw_vport_destroy_offloads_acl_tables(esw, vport);
}
int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
@@ -3280,7 +3301,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
}
esw->fdb_table.offloads.indir = indir;
- err = esw_create_uplink_offloads_acl_tables(esw);
+ err = esw_create_offloads_acl_tables(esw);
if (err)
goto create_acl_err;
@@ -3321,7 +3342,7 @@ create_fdb_err:
create_restore_err:
esw_destroy_offloads_table(esw);
create_offloads_err:
- esw_destroy_uplink_offloads_acl_tables(esw);
+ esw_destroy_offloads_acl_tables(esw);
create_acl_err:
mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
create_indir_err:
@@ -3337,7 +3358,7 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
esw_destroy_offloads_fdb_tables(esw);
esw_destroy_restore_table(esw);
esw_destroy_offloads_table(esw);
- esw_destroy_uplink_offloads_acl_tables(esw);
+ esw_destroy_offloads_acl_tables(esw);
mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
mutex_destroy(&esw->fdb_table.offloads.vports.lock);
}
diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig
index 329e374b9539..43ba71e82260 100644
--- a/drivers/net/ethernet/microchip/Kconfig
+++ b/drivers/net/ethernet/microchip/Kconfig
@@ -46,6 +46,7 @@ config LAN743X
tristate "LAN743x support"
depends on PCI
depends on PTP_1588_CLOCK_OPTIONAL
+ select PHYLIB
select FIXED_PHY
select CRC16
select CRC32
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.c b/drivers/net/ethernet/microchip/vcap/vcap_api.c
index 300fe1a93dce..ef980e4e5bc2 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api.c
@@ -1021,18 +1021,32 @@ static struct vcap_rule_internal *vcap_dup_rule(struct vcap_rule_internal *ri,
list_for_each_entry(ckf, &ri->data.keyfields, ctrl.list) {
newckf = kmemdup(ckf, sizeof(*newckf), GFP_KERNEL);
if (!newckf)
- return ERR_PTR(-ENOMEM);
+ goto err;
list_add_tail(&newckf->ctrl.list, &duprule->data.keyfields);
}
list_for_each_entry(caf, &ri->data.actionfields, ctrl.list) {
newcaf = kmemdup(caf, sizeof(*newcaf), GFP_KERNEL);
if (!newcaf)
- return ERR_PTR(-ENOMEM);
+ goto err;
list_add_tail(&newcaf->ctrl.list, &duprule->data.actionfields);
}
return duprule;
+
+err:
+ list_for_each_entry_safe(ckf, newckf, &duprule->data.keyfields, ctrl.list) {
+ list_del(&ckf->ctrl.list);
+ kfree(ckf);
+ }
+
+ list_for_each_entry_safe(caf, newcaf, &duprule->data.actionfields, ctrl.list) {
+ list_del(&caf->ctrl.list);
+ kfree(caf);
+ }
+
+ kfree(duprule);
+ return ERR_PTR(-ENOMEM);
}
static void vcap_apply_width(u8 *dst, int width, int bytes)
diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
index c07f25e791c7..fe4e166de8a0 100644
--- a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
+++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c
@@ -243,10 +243,9 @@ static void vcap_test_api_init(struct vcap_admin *admin)
}
/* Helper function to create a rule of a specific size */
-static struct vcap_rule *
-test_vcap_xn_rule_creator(struct kunit *test, int cid, enum vcap_user user,
- u16 priority,
- int id, int size, int expected_addr)
+static void test_vcap_xn_rule_creator(struct kunit *test, int cid,
+ enum vcap_user user, u16 priority,
+ int id, int size, int expected_addr)
{
struct vcap_rule *rule;
struct vcap_rule_internal *ri;
@@ -311,7 +310,7 @@ test_vcap_xn_rule_creator(struct kunit *test, int cid, enum vcap_user user,
ret = vcap_add_rule(rule);
KUNIT_EXPECT_EQ(test, 0, ret);
KUNIT_EXPECT_EQ(test, expected_addr, ri->addr);
- return rule;
+ vcap_free_rule(rule);
}
/* Prepare testing rule deletion */
@@ -995,6 +994,16 @@ static void vcap_api_encode_rule_actionset_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, (u32)0x00000000, actwords[11]);
}
+static void vcap_free_ckf(struct vcap_rule *rule)
+{
+ struct vcap_client_keyfield *ckf, *next_ckf;
+
+ list_for_each_entry_safe(ckf, next_ckf, &rule->keyfields, ctrl.list) {
+ list_del(&ckf->ctrl.list);
+ kfree(ckf);
+ }
+}
+
static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
{
struct vcap_admin admin = {
@@ -1027,6 +1036,7 @@ static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.value);
KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.mask);
+ vcap_free_ckf(rule);
INIT_LIST_HEAD(&rule->keyfields);
ret = vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS, VCAP_BIT_1);
@@ -1039,6 +1049,7 @@ static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.value);
KUNIT_EXPECT_EQ(test, 0x1, kf->data.u1.mask);
+ vcap_free_ckf(rule);
INIT_LIST_HEAD(&rule->keyfields);
ret = vcap_rule_add_key_bit(rule, VCAP_KF_LOOKUP_FIRST_IS,
@@ -1052,6 +1063,7 @@ static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, kf->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.value);
KUNIT_EXPECT_EQ(test, 0x0, kf->data.u1.mask);
+ vcap_free_ckf(rule);
INIT_LIST_HEAD(&rule->keyfields);
ret = vcap_rule_add_key_u32(rule, VCAP_KF_TYPE, 0x98765432, 0xff00ffab);
@@ -1064,6 +1076,7 @@ static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, kf->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x98765432, kf->data.u32.value);
KUNIT_EXPECT_EQ(test, 0xff00ffab, kf->data.u32.mask);
+ vcap_free_ckf(rule);
INIT_LIST_HEAD(&rule->keyfields);
ret = vcap_rule_add_key_u128(rule, VCAP_KF_L3_IP6_SIP, &dip);
@@ -1078,6 +1091,18 @@ static void vcap_api_rule_add_keyvalue_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, dip.value[idx], kf->data.u128.value[idx]);
for (idx = 0; idx < ARRAY_SIZE(dip.mask); ++idx)
KUNIT_EXPECT_EQ(test, dip.mask[idx], kf->data.u128.mask[idx]);
+ vcap_free_ckf(rule);
+}
+
+static void vcap_free_caf(struct vcap_rule *rule)
+{
+ struct vcap_client_actionfield *caf, *next_caf;
+
+ list_for_each_entry_safe(caf, next_caf,
+ &rule->actionfields, ctrl.list) {
+ list_del(&caf->ctrl.list);
+ kfree(caf);
+ }
}
static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
@@ -1105,6 +1130,7 @@ static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x0, af->data.u1.value);
+ vcap_free_caf(rule);
INIT_LIST_HEAD(&rule->actionfields);
ret = vcap_rule_add_action_bit(rule, VCAP_AF_POLICE_ENA, VCAP_BIT_1);
@@ -1116,6 +1142,7 @@ static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x1, af->data.u1.value);
+ vcap_free_caf(rule);
INIT_LIST_HEAD(&rule->actionfields);
ret = vcap_rule_add_action_bit(rule, VCAP_AF_POLICE_ENA, VCAP_BIT_ANY);
@@ -1127,6 +1154,7 @@ static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, VCAP_AF_POLICE_ENA, af->ctrl.action);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_BIT, af->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x0, af->data.u1.value);
+ vcap_free_caf(rule);
INIT_LIST_HEAD(&rule->actionfields);
ret = vcap_rule_add_action_u32(rule, VCAP_AF_TYPE, 0x98765432);
@@ -1138,6 +1166,7 @@ static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, VCAP_AF_TYPE, af->ctrl.action);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, af->ctrl.type);
KUNIT_EXPECT_EQ(test, 0x98765432, af->data.u32.value);
+ vcap_free_caf(rule);
INIT_LIST_HEAD(&rule->actionfields);
ret = vcap_rule_add_action_u32(rule, VCAP_AF_MASK_MODE, 0xaabbccdd);
@@ -1149,6 +1178,7 @@ static void vcap_api_rule_add_actionvalue_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, VCAP_AF_MASK_MODE, af->ctrl.action);
KUNIT_EXPECT_EQ(test, VCAP_FIELD_U32, af->ctrl.type);
KUNIT_EXPECT_EQ(test, 0xaabbccdd, af->data.u32.value);
+ vcap_free_caf(rule);
}
static void vcap_api_rule_find_keyset_basic_test(struct kunit *test)
@@ -1408,6 +1438,10 @@ static void vcap_api_encode_rule_test(struct kunit *test)
ret = list_empty(&is2_admin.rules);
KUNIT_EXPECT_EQ(test, false, ret);
KUNIT_EXPECT_EQ(test, 0, ret);
+
+ vcap_enable_lookups(&test_vctrl, &test_netdev, 0, 0,
+ rule->cookie, false);
+
vcap_free_rule(rule);
/* Check that the rule has been freed: tricky to access since this
@@ -1418,6 +1452,8 @@ static void vcap_api_encode_rule_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, true, ret);
ret = list_empty(&rule->actionfields);
KUNIT_EXPECT_EQ(test, true, ret);
+
+ vcap_del_rule(&test_vctrl, &test_netdev, id);
}
static void vcap_api_set_rule_counter_test(struct kunit *test)
@@ -1561,6 +1597,11 @@ static void vcap_api_rule_insert_in_order_test(struct kunit *test)
test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 20, 400, 6, 774);
test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 30, 300, 3, 771);
test_vcap_xn_rule_creator(test, 10000, VCAP_USER_QOS, 40, 200, 2, 768);
+
+ vcap_del_rule(&test_vctrl, &test_netdev, 200);
+ vcap_del_rule(&test_vctrl, &test_netdev, 300);
+ vcap_del_rule(&test_vctrl, &test_netdev, 400);
+ vcap_del_rule(&test_vctrl, &test_netdev, 500);
}
static void vcap_api_rule_insert_reverse_order_test(struct kunit *test)
@@ -1619,6 +1660,11 @@ static void vcap_api_rule_insert_reverse_order_test(struct kunit *test)
++idx;
}
KUNIT_EXPECT_EQ(test, 768, admin.last_used_addr);
+
+ vcap_del_rule(&test_vctrl, &test_netdev, 500);
+ vcap_del_rule(&test_vctrl, &test_netdev, 400);
+ vcap_del_rule(&test_vctrl, &test_netdev, 300);
+ vcap_del_rule(&test_vctrl, &test_netdev, 200);
}
static void vcap_api_rule_remove_at_end_test(struct kunit *test)
@@ -1819,6 +1865,9 @@ static void vcap_api_rule_remove_in_front_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, 786, test_init_start);
KUNIT_EXPECT_EQ(test, 8, test_init_count);
KUNIT_EXPECT_EQ(test, 794, admin.last_used_addr);
+
+ vcap_del_rule(&test_vctrl, &test_netdev, 200);
+ vcap_del_rule(&test_vctrl, &test_netdev, 300);
}
static struct kunit_case vcap_api_rule_remove_test_cases[] = {
diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c
index 4a16ebff3d1d..48ea4aeeea5d 100644
--- a/drivers/net/ethernet/microsoft/mana/mana_en.c
+++ b/drivers/net/ethernet/microsoft/mana/mana_en.c
@@ -91,63 +91,137 @@ static unsigned int mana_checksum_info(struct sk_buff *skb)
return 0;
}
+static void mana_add_sge(struct mana_tx_package *tp, struct mana_skb_head *ash,
+ int sg_i, dma_addr_t da, int sge_len, u32 gpa_mkey)
+{
+ ash->dma_handle[sg_i] = da;
+ ash->size[sg_i] = sge_len;
+
+ tp->wqe_req.sgl[sg_i].address = da;
+ tp->wqe_req.sgl[sg_i].mem_key = gpa_mkey;
+ tp->wqe_req.sgl[sg_i].size = sge_len;
+}
+
static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
- struct mana_tx_package *tp)
+ struct mana_tx_package *tp, int gso_hs)
{
struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
+ int hsg = 1; /* num of SGEs of linear part */
struct gdma_dev *gd = apc->ac->gdma_dev;
+ int skb_hlen = skb_headlen(skb);
+ int sge0_len, sge1_len = 0;
struct gdma_context *gc;
struct device *dev;
skb_frag_t *frag;
dma_addr_t da;
+ int sg_i;
int i;
gc = gd->gdma_context;
dev = gc->dev;
- da = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+ if (gso_hs && gso_hs < skb_hlen) {
+ sge0_len = gso_hs;
+ sge1_len = skb_hlen - gso_hs;
+ } else {
+ sge0_len = skb_hlen;
+ }
+
+ da = dma_map_single(dev, skb->data, sge0_len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, da))
return -ENOMEM;
- ash->dma_handle[0] = da;
- ash->size[0] = skb_headlen(skb);
+ mana_add_sge(tp, ash, 0, da, sge0_len, gd->gpa_mkey);
- tp->wqe_req.sgl[0].address = ash->dma_handle[0];
- tp->wqe_req.sgl[0].mem_key = gd->gpa_mkey;
- tp->wqe_req.sgl[0].size = ash->size[0];
+ if (sge1_len) {
+ sg_i = 1;
+ da = dma_map_single(dev, skb->data + sge0_len, sge1_len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, da))
+ goto frag_err;
+
+ mana_add_sge(tp, ash, sg_i, da, sge1_len, gd->gpa_mkey);
+ hsg = 2;
+ }
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ sg_i = hsg + i;
+
frag = &skb_shinfo(skb)->frags[i];
da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
-
if (dma_mapping_error(dev, da))
goto frag_err;
- ash->dma_handle[i + 1] = da;
- ash->size[i + 1] = skb_frag_size(frag);
-
- tp->wqe_req.sgl[i + 1].address = ash->dma_handle[i + 1];
- tp->wqe_req.sgl[i + 1].mem_key = gd->gpa_mkey;
- tp->wqe_req.sgl[i + 1].size = ash->size[i + 1];
+ mana_add_sge(tp, ash, sg_i, da, skb_frag_size(frag),
+ gd->gpa_mkey);
}
return 0;
frag_err:
- for (i = i - 1; i >= 0; i--)
- dma_unmap_page(dev, ash->dma_handle[i + 1], ash->size[i + 1],
+ for (i = sg_i - 1; i >= hsg; i--)
+ dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
DMA_TO_DEVICE);
- dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
+ for (i = hsg - 1; i >= 0; i--)
+ dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
+ DMA_TO_DEVICE);
return -ENOMEM;
}
+/* Handle the case where the GSO SKB linear length is too large.
+ * The MANA NIC requires GSO packets to carry only the packet header in
+ * SGE0, so two SGEs are needed when the skb linear part holds more
+ * than the header.
+ * Returns the number of SGEs on success, or a negative value on error.
+ */
+static int mana_fix_skb_head(struct net_device *ndev, struct sk_buff *skb,
+ int gso_hs)
+{
+ int num_sge = 1 + skb_shinfo(skb)->nr_frags;
+ int skb_hlen = skb_headlen(skb);
+
+ if (gso_hs < skb_hlen) {
+ num_sge++;
+ } else if (gso_hs > skb_hlen) {
+ if (net_ratelimit())
+ netdev_err(ndev,
+ "TX nonlinear head: hs:%d, skb_hlen:%d\n",
+ gso_hs, skb_hlen);
+
+ return -EINVAL;
+ }
+
+ return num_sge;
+}
+
+/* Get the GSO packet's header size */
+static int mana_get_gso_hs(struct sk_buff *skb)
+{
+ int gso_hs;
+
+ if (skb->encapsulation) {
+ gso_hs = skb_inner_tcp_all_headers(skb);
+ } else {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ gso_hs = skb_transport_offset(skb) +
+ sizeof(struct udphdr);
+ } else {
+ gso_hs = skb_tcp_all_headers(skb);
+ }
+ }
+
+ return gso_hs;
+}
+
netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
struct mana_port_context *apc = netdev_priv(ndev);
+ int gso_hs = 0; /* zero for non-GSO pkts */
u16 txq_idx = skb_get_queue_mapping(skb);
struct gdma_dev *gd = apc->ac->gdma_dev;
bool ipv4 = false, ipv6 = false;
@@ -159,7 +233,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct mana_txq *txq;
struct mana_cq *cq;
int err, len;
- u16 ihs;
if (unlikely(!apc->port_is_up))
goto tx_drop;
@@ -209,19 +282,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
pkg.wqe_req.client_data_unit = 0;
pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
- WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
-
- if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
- pkg.wqe_req.sgl = pkg.sgl_array;
- } else {
- pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
- sizeof(struct gdma_sge),
- GFP_ATOMIC);
- if (!pkg.sgl_ptr)
- goto tx_drop_count;
-
- pkg.wqe_req.sgl = pkg.sgl_ptr;
- }
if (skb->protocol == htons(ETH_P_IP))
ipv4 = true;
@@ -229,6 +289,26 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
ipv6 = true;
if (skb_is_gso(skb)) {
+ int num_sge;
+
+ gso_hs = mana_get_gso_hs(skb);
+
+ num_sge = mana_fix_skb_head(ndev, skb, gso_hs);
+ if (num_sge > 0)
+ pkg.wqe_req.num_sge = num_sge;
+ else
+ goto tx_drop_count;
+
+ u64_stats_update_begin(&tx_stats->syncp);
+ if (skb->encapsulation) {
+ tx_stats->tso_inner_packets++;
+ tx_stats->tso_inner_bytes += skb->len - gso_hs;
+ } else {
+ tx_stats->tso_packets++;
+ tx_stats->tso_bytes += skb->len - gso_hs;
+ }
+ u64_stats_update_end(&tx_stats->syncp);
+
pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
@@ -252,28 +332,6 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
&ipv6_hdr(skb)->daddr, 0,
IPPROTO_TCP, 0);
}
-
- if (skb->encapsulation) {
- ihs = skb_inner_tcp_all_headers(skb);
- u64_stats_update_begin(&tx_stats->syncp);
- tx_stats->tso_inner_packets++;
- tx_stats->tso_inner_bytes += skb->len - ihs;
- u64_stats_update_end(&tx_stats->syncp);
- } else {
- if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
- ihs = skb_transport_offset(skb) + sizeof(struct udphdr);
- } else {
- ihs = skb_tcp_all_headers(skb);
- if (ipv6_has_hopopt_jumbo(skb))
- ihs -= sizeof(struct hop_jumbo_hdr);
- }
-
- u64_stats_update_begin(&tx_stats->syncp);
- tx_stats->tso_packets++;
- tx_stats->tso_bytes += skb->len - ihs;
- u64_stats_update_end(&tx_stats->syncp);
- }
-
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
csum_type = mana_checksum_info(skb);
@@ -296,11 +354,25 @@ netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
} else {
/* Can't do offload of this type of checksum */
if (skb_checksum_help(skb))
- goto free_sgl_ptr;
+ goto tx_drop_count;
}
}
- if (mana_map_skb(skb, apc, &pkg)) {
+ WARN_ON_ONCE(pkg.wqe_req.num_sge > MAX_TX_WQE_SGL_ENTRIES);
+
+ if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
+ pkg.wqe_req.sgl = pkg.sgl_array;
+ } else {
+ pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge,
+ sizeof(struct gdma_sge),
+ GFP_ATOMIC);
+ if (!pkg.sgl_ptr)
+ goto tx_drop_count;
+
+ pkg.wqe_req.sgl = pkg.sgl_ptr;
+ }
+
+ if (mana_map_skb(skb, apc, &pkg, gso_hs)) {
u64_stats_update_begin(&tx_stats->syncp);
tx_stats->mana_map_err++;
u64_stats_update_end(&tx_stats->syncp);
@@ -1258,11 +1330,16 @@ static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
struct device *dev = gc->dev;
- int i;
+ int hsg, i;
- dma_unmap_single(dev, ash->dma_handle[0], ash->size[0], DMA_TO_DEVICE);
+ /* Number of SGEs for the skb linear part */
+ hsg = (skb_is_gso(skb) && skb_headlen(skb) > ash->size[0]) ? 2 : 1;
- for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
+ for (i = 0; i < hsg; i++)
+ dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
+ DMA_TO_DEVICE);
+
+ for (i = hsg; i < skb_shinfo(skb)->nr_frags + hsg; i++)
dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
DMA_TO_DEVICE);
}
@@ -1317,19 +1394,23 @@ static void mana_poll_tx_cq(struct mana_cq *cq)
case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
case CQE_TX_VPORT_DISABLED:
case CQE_TX_VLAN_TAGGING_VIOLATION:
- WARN_ONCE(1, "TX: CQE error %d: ignored.\n",
- cqe_oob->cqe_hdr.cqe_type);
+ if (net_ratelimit())
+ netdev_err(ndev, "TX: CQE error %d\n",
+ cqe_oob->cqe_hdr.cqe_type);
+
apc->eth_stats.tx_cqe_err++;
break;
default:
- /* If the CQE type is unexpected, log an error, assert,
- * and go through the error path.
+ /* If the CQE type is unknown, log an error,
+ * and still free the SKB, update tail, etc.
*/
- WARN_ONCE(1, "TX: Unexpected CQE type %d: HW BUG?\n",
- cqe_oob->cqe_hdr.cqe_type);
+ if (net_ratelimit())
+ netdev_err(ndev, "TX: unknown CQE type %d\n",
+ cqe_oob->cqe_hdr.cqe_type);
+
apc->eth_stats.tx_cqe_unknown_type++;
- return;
+ break;
}
if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
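The mana hunks split a GSO skb's linear area across two SGEs whenever the headers (gso_hs) end before skb_headlen(), since the NIC expects SGE0 to contain only headers. A compact sketch of the accounting, using the names from the hunks:

	/* map side: decide how many SGEs the linear area needs */
	int hsg = (skb_is_gso(skb) && gso_hs < skb_headlen(skb)) ? 2 : 1;
	int num_sge = hsg + skb_shinfo(skb)->nr_frags;

	/* unmap side: gso_hs is gone, so recover hsg from what was
	 * stored at map time - a split happened iff the first SGE is
	 * shorter than the linear area */
	hsg = (skb_is_gso(skb) && skb_headlen(skb) > ash->size[0]) ? 2 : 1;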
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
index 6aac98bcb9f4..aae4131f146a 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h
@@ -187,6 +187,7 @@ typedef void (*ionic_desc_cb)(struct ionic_queue *q,
struct ionic_desc_info *desc_info,
struct ionic_cq_info *cq_info, void *cb_arg);
+#define IONIC_MAX_BUF_LEN ((u16)-1)
#define IONIC_PAGE_SIZE PAGE_SIZE
#define IONIC_PAGE_SPLIT_SZ (PAGE_SIZE / 2)
#define IONIC_PAGE_GFP_MASK (GFP_ATOMIC | __GFP_NOWARN |\
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
index 26798fc635db..44466e8c5d77 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
@@ -207,7 +207,8 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
return NULL;
}
- frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
+ frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
+ IONIC_PAGE_SIZE - buf_info->page_offset));
len -= frag_len;
dma_sync_single_for_cpu(dev,
@@ -452,7 +453,8 @@ void ionic_rx_fill(struct ionic_queue *q)
/* fill main descriptor - buf[0] */
desc->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
- frag_len = min_t(u16, len, IONIC_PAGE_SIZE - buf_info->page_offset);
+ frag_len = min_t(u16, len, min_t(u32, IONIC_MAX_BUF_LEN,
+ IONIC_PAGE_SIZE - buf_info->page_offset));
desc->len = cpu_to_le16(frag_len);
remain_len -= frag_len;
buf_info++;
@@ -471,7 +473,9 @@ void ionic_rx_fill(struct ionic_queue *q)
}
sg_elem->addr = cpu_to_le64(buf_info->dma_addr + buf_info->page_offset);
- frag_len = min_t(u16, remain_len, IONIC_PAGE_SIZE - buf_info->page_offset);
+ frag_len = min_t(u16, remain_len, min_t(u32, IONIC_MAX_BUF_LEN,
+ IONIC_PAGE_SIZE -
+ buf_info->page_offset));
sg_elem->len = cpu_to_le16(frag_len);
remain_len -= frag_len;
buf_info++;
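The ionic hunks clamp each fragment to both the space left in the page and the largest value a 16-bit descriptor length can carry; the inner bound is computed as a u32 first so it cannot wrap before the u16 truncation. A hedged sketch of the clamp (names are placeholders for the IONIC_* macros):

	#define MY_MAX_BUF_LEN	((u16)-1)	/* largest length a __le16 field can hold */

	static u16 my_frag_len(u32 len, u32 page_size, u32 page_offset)
	{
		/* evaluate the page-space bound in 32 bits before narrowing */
		return min_t(u16, len, min_t(u32, MY_MAX_BUF_LEN,
					     page_size - page_offset));
	}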
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index 0bfc375161ed..a174c6fc626a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -110,9 +110,9 @@ struct qed_ll2_info {
enum core_tx_dest tx_dest;
u8 tx_stats_en;
bool main_func_queue;
+ struct qed_ll2_cbs cbs;
struct qed_ll2_rx_queue rx_queue;
struct qed_ll2_tx_queue tx_queue;
- struct qed_ll2_cbs cbs;
};
extern const struct qed_ll2_ops qed_ll2_ops_pass;
diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c
index 6083b1c8e4fb..fc01ad3f340d 100644
--- a/drivers/net/ethernet/renesas/rswitch.c
+++ b/drivers/net/ethernet/renesas/rswitch.c
@@ -4,6 +4,7 @@
* Copyright (C) 2022 Renesas Electronics Corporation
*/
+#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/etherdevice.h>
@@ -799,6 +800,7 @@ static int rswitch_poll(struct napi_struct *napi, int budget)
struct net_device *ndev = napi->dev;
struct rswitch_private *priv;
struct rswitch_device *rdev;
+ unsigned long flags;
int quota = budget;
rdev = netdev_priv(ndev);
@@ -816,10 +818,12 @@ retry:
netif_wake_subqueue(ndev, 0);
- napi_complete(napi);
-
- rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
- rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
+ if (napi_complete_done(napi, budget - quota)) {
+ spin_lock_irqsave(&priv->lock, flags);
+ rswitch_enadis_data_irq(priv, rdev->tx_queue->index, true);
+ rswitch_enadis_data_irq(priv, rdev->rx_queue->index, true);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ }
out:
return budget - quota;
@@ -835,8 +839,10 @@ static void rswitch_queue_interrupt(struct net_device *ndev)
struct rswitch_device *rdev = netdev_priv(ndev);
if (napi_schedule_prep(&rdev->napi)) {
+ spin_lock(&rdev->priv->lock);
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
+ spin_unlock(&rdev->priv->lock);
__napi_schedule(&rdev->napi);
}
}
@@ -1044,7 +1050,7 @@ static void rswitch_rmac_setting(struct rswitch_etha *etha, const u8 *mac)
static void rswitch_etha_enable_mii(struct rswitch_etha *etha)
{
rswitch_modify(etha->addr, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK,
- MPIC_PSMCS(0x05) | MPIC_PSMHT(0x06));
+ MPIC_PSMCS(etha->psmcs) | MPIC_PSMHT(0x06));
rswitch_modify(etha->addr, MPSM, 0, MPSM_MFF_C45);
}
@@ -1440,14 +1446,17 @@ static void rswitch_ether_port_deinit_all(struct rswitch_private *priv)
static int rswitch_open(struct net_device *ndev)
{
struct rswitch_device *rdev = netdev_priv(ndev);
+ unsigned long flags;
phy_start(ndev->phydev);
napi_enable(&rdev->napi);
netif_start_queue(ndev);
+ spin_lock_irqsave(&rdev->priv->lock, flags);
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true);
+ spin_unlock_irqrestore(&rdev->priv->lock, flags);
if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS))
iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE);
@@ -1461,6 +1470,7 @@ static int rswitch_stop(struct net_device *ndev)
{
struct rswitch_device *rdev = netdev_priv(ndev);
struct rswitch_gwca_ts_info *ts_info, *ts_info2;
+ unsigned long flags;
netif_tx_stop_all_queues(ndev);
bitmap_clear(rdev->priv->opened_ports, rdev->port, 1);
@@ -1476,8 +1486,10 @@ static int rswitch_stop(struct net_device *ndev)
kfree(ts_info);
}
+ spin_lock_irqsave(&rdev->priv->lock, flags);
rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, false);
rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, false);
+ spin_unlock_irqrestore(&rdev->priv->lock, flags);
phy_stop(ndev->phydev);
napi_disable(&rdev->napi);
@@ -1682,6 +1694,12 @@ static void rswitch_etha_init(struct rswitch_private *priv, int index)
etha->index = index;
etha->addr = priv->addr + RSWITCH_ETHA_OFFSET + index * RSWITCH_ETHA_SIZE;
etha->coma_addr = priv->addr;
+
+	/* MPIC.PSMCS = clk [MHz] / (MDC frequency [MHz] * 2) - 1.
+	 * The PSMCS value is calculated for an MDC frequency of 2.5 MHz,
+	 * so multiply both the numerator and the denominator by 10.
+	 */
+ etha->psmcs = clk_get_rate(priv->clk) / 100000 / (25 * 2) - 1;
}
static int rswitch_device_alloc(struct rswitch_private *priv, int index)
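Working the PSMCS formula above through with an assumed 320 MHz peripheral clock: clk_get_rate() returns Hz, so dividing by 100000 yields ten times the MHz value, which matches the 25 in the denominator being ten times the 2.5 MHz MDC frequency.

	/* assumed example: clk_get_rate(priv->clk) == 320000000 (320 MHz) */
	320000000 / 100000 = 3200	/* clk in MHz, scaled by 10 */
	3200 / (25 * 2)    = 64		/* 25 == 2.5 MHz MDC, scaled by 10 */
	64 - 1             = 63		/* value programmed into MPIC.PSMCS */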
@@ -1887,6 +1905,11 @@ static int renesas_eth_sw_probe(struct platform_device *pdev)
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
+ spin_lock_init(&priv->lock);
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk))
+ return PTR_ERR(priv->clk);
attr = soc_device_match(rswitch_soc_no_speed_change);
if (attr)
diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h
index 54f397effbc6..04f49a7a5843 100644
--- a/drivers/net/ethernet/renesas/rswitch.h
+++ b/drivers/net/ethernet/renesas/rswitch.h
@@ -915,6 +915,7 @@ struct rswitch_etha {
bool external_phy;
struct mii_bus *mii;
phy_interface_t phy_interface;
+ u32 psmcs;
u8 mac_addr[MAX_ADDR_LEN];
int link;
int speed;
@@ -1011,6 +1012,9 @@ struct rswitch_private {
struct rswitch_etha etha[RSWITCH_NUM_PORTS];
struct rswitch_mfwd mfwd;
+	spinlock_t lock;	/* serializes control of the interrupt registers */
+ struct clk *clk;
+
bool etha_no_runtime_change;
bool gwca_halt;
};
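Taken together, the rswitch hunks settle on one pattern: the data IRQs are masked under priv->lock before NAPI is scheduled, and unmasked under the same lock only when napi_complete_done() confirms polling really finished. A self-contained sketch of the two halves (the my_* names are illustrative, not rswitch's API):

	static irqreturn_t my_irq(int irq, void *data)
	{
		struct my_dev *md = data;

		if (napi_schedule_prep(&md->napi)) {
			spin_lock(&md->lock);		/* hardirq context: no irqsave needed */
			my_hw_irq_enable(md, false);	/* mask device interrupts */
			spin_unlock(&md->lock);
			__napi_schedule(&md->napi);
		}
		return IRQ_HANDLED;
	}

	static int my_poll(struct napi_struct *napi, int budget)
	{
		struct my_dev *md = container_of(napi, struct my_dev, napi);
		int done = my_process_ring(md, budget);
		unsigned long flags;

		/* only unmask once NAPI agrees polling is complete */
		if (done < budget && napi_complete_done(napi, done)) {
			spin_lock_irqsave(&md->lock, flags);
			my_hw_irq_enable(md, true);
			spin_unlock_irqrestore(&md->lock, flags);
		}
		return done;
	}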
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 2375cef577e4..f77a2d3ef37e 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -359,26 +359,36 @@ static bool efx_do_xdp(struct efx_nic *efx, struct efx_channel *channel,
/* Handle a received packet. Second half: Touches packet payload. */
void __efx_rx_packet(struct efx_channel *channel)
{
+ struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
struct efx_nic *efx = channel->efx;
struct efx_rx_buffer *rx_buf =
- efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
+ efx_rx_buffer(rx_queue, channel->rx_pkt_index);
u8 *eh = efx_rx_buf_va(rx_buf);
/* Read length from the prefix if necessary. This already
* excludes the length of the prefix itself.
*/
- if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN)
+ if (rx_buf->flags & EFX_RX_PKT_PREFIX_LEN) {
rx_buf->len = le16_to_cpup((__le16 *)
(eh + efx->rx_packet_len_offset));
+		/* A known issue may prevent this from being filled in;
+		 * if that happens, just drop the packet.
+		 * This must be done in the driver, since passing a
+		 * zero-length packet up to the stack may cause a crash.
+		 */
+ if (unlikely(!rx_buf->len)) {
+ efx_free_rx_buffers(rx_queue, rx_buf,
+ channel->rx_pkt_n_frags);
+ channel->n_rx_frm_trunc++;
+ goto out;
+ }
+ }
/* If we're in loopback test, then pass the packet directly to the
* loopback layer, and free the rx_buf here
*/
if (unlikely(efx->loopback_selftest)) {
- struct efx_rx_queue *rx_queue;
-
efx_loopback_rx_packet(efx, eh, rx_buf->len);
- rx_queue = efx_channel_get_rx_queue(channel);
efx_free_rx_buffers(rx_queue, rx_buf,
channel->rx_pkt_n_frags);
goto out;
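The sfc fix above guards against a hardware length prefix that reads back as zero: such a packet must be dropped in the driver, because a zero-length skb can crash the stack. A minimal sketch of the check (types and helpers are placeholders for the efx_* equivalents):

	static bool my_rx_len_ok(struct my_channel *ch, struct my_rx_buf *buf,
				 unsigned int n_frags)
	{
		if (likely(buf->len))
			return true;

		/* recycle the buffers and count the event instead of crashing */
		my_free_rx_buffers(ch->rx_queue, buf, n_frags);
		ch->n_rx_frm_trunc++;
		return false;
	}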
diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c
index 047322b04d4f..834f000ba1c4 100644
--- a/drivers/net/ethernet/sfc/tc.c
+++ b/drivers/net/ethernet/sfc/tc.c
@@ -136,6 +136,8 @@ static struct efx_tc_mac_pedit_action *efx_tc_flower_get_mac(struct efx_nic *efx
if (old) {
/* don't need our new entry */
kfree(ped);
+ if (IS_ERR(old)) /* oh dear, it's actually an error */
+ return ERR_CAST(old);
if (!refcount_inc_not_zero(&old->ref))
return ERR_PTR(-EAGAIN);
/* existing entry found, ref taken */
@@ -602,6 +604,8 @@ static int efx_tc_flower_record_encap_match(struct efx_nic *efx,
kfree(encap);
if (pseudo) /* don't need our new pseudo either */
efx_tc_flower_release_encap_match(efx, pseudo);
+ if (IS_ERR(old)) /* oh dear, it's actually an error */
+ return PTR_ERR(old);
/* check old and new em_types are compatible */
switch (old->type) {
case EFX_TC_EM_DIRECT:
@@ -700,6 +704,8 @@ static struct efx_tc_recirc_id *efx_tc_get_recirc_id(struct efx_nic *efx,
if (old) {
/* don't need our new entry */
kfree(rid);
+ if (IS_ERR(old)) /* oh dear, it's actually an error */
+ return ERR_CAST(old);
if (!refcount_inc_not_zero(&old->ref))
return ERR_PTR(-EAGAIN);
/* existing entry found */
@@ -1482,7 +1488,10 @@ static int efx_tc_flower_replace_foreign(struct efx_nic *efx,
old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht,
&rule->linkage,
efx_tc_match_action_ht_params);
- if (old) {
+ if (IS_ERR(old)) {
+ rc = PTR_ERR(old);
+ goto release;
+ } else if (old) {
netif_dbg(efx, drv, efx->net_dev,
"Ignoring already-offloaded rule (cookie %lx)\n",
tc->cookie);
@@ -1697,7 +1706,10 @@ static int efx_tc_flower_replace_lhs(struct efx_nic *efx,
old = rhashtable_lookup_get_insert_fast(&efx->tc->lhs_rule_ht,
&rule->linkage,
efx_tc_lhs_rule_ht_params);
- if (old) {
+ if (IS_ERR(old)) {
+ rc = PTR_ERR(old);
+ goto release;
+ } else if (old) {
netif_dbg(efx, drv, efx->net_dev,
"Already offloaded rule (cookie %lx)\n", tc->cookie);
rc = -EEXIST;
@@ -1858,7 +1870,10 @@ static int efx_tc_flower_replace(struct efx_nic *efx,
old = rhashtable_lookup_get_insert_fast(&efx->tc->match_action_ht,
&rule->linkage,
efx_tc_match_action_ht_params);
- if (old) {
+ if (IS_ERR(old)) {
+ rc = PTR_ERR(old);
+ goto release;
+ } else if (old) {
netif_dbg(efx, drv, efx->net_dev,
"Already offloaded rule (cookie %lx)\n", tc->cookie);
NL_SET_ERR_MSG_MOD(extack, "Rule already offloaded");
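Every sfc TC call site above gains the same three-way handling: rhashtable_lookup_get_insert_fast() may return an ERR_PTR (for example on allocation failure), an existing entry, or NULL when the new entry was inserted. A hedged sketch of the idiom (my_entry and my_ht_params are illustrative):

	static struct my_entry *my_get_or_insert(struct rhashtable *ht,
						 struct my_entry *new)
	{
		struct my_entry *old;

		old = rhashtable_lookup_get_insert_fast(ht, &new->linkage,
							my_ht_params);
		if (IS_ERR(old)) {		/* insertion itself failed */
			kfree(new);
			return ERR_CAST(old);
		}
		if (old) {			/* entry already present */
			kfree(new);
			if (!refcount_inc_not_zero(&old->ref))
				return ERR_PTR(-EAGAIN);	/* dying entry */
			return old;
		}
		refcount_set(&new->ref, 1);	/* 'new' is now in the table */
		return new;
	}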
diff --git a/drivers/net/ethernet/sfc/tc_conntrack.c b/drivers/net/ethernet/sfc/tc_conntrack.c
index 8e06bfbcbea1..44bb57670340 100644
--- a/drivers/net/ethernet/sfc/tc_conntrack.c
+++ b/drivers/net/ethernet/sfc/tc_conntrack.c
@@ -298,7 +298,10 @@ static int efx_tc_ct_replace(struct efx_tc_ct_zone *ct_zone,
old = rhashtable_lookup_get_insert_fast(&efx->tc->ct_ht,
&conn->linkage,
efx_tc_ct_ht_params);
- if (old) {
+ if (IS_ERR(old)) {
+ rc = PTR_ERR(old);
+ goto release;
+ } else if (old) {
netif_dbg(efx, drv, efx->net_dev,
"Already offloaded conntrack (cookie %lx)\n", tc->cookie);
rc = -EEXIST;
@@ -482,6 +485,8 @@ struct efx_tc_ct_zone *efx_tc_ct_register_zone(struct efx_nic *efx, u16 zone,
if (old) {
/* don't need our new entry */
kfree(ct_zone);
+ if (IS_ERR(old)) /* oh dear, it's actually an error */
+ return ERR_CAST(old);
if (!refcount_inc_not_zero(&old->ref))
return ERR_PTR(-EAGAIN);
/* existing entry found */
diff --git a/drivers/net/ethernet/sfc/tc_counters.c b/drivers/net/ethernet/sfc/tc_counters.c
index 0fafb47ea082..c44088424323 100644
--- a/drivers/net/ethernet/sfc/tc_counters.c
+++ b/drivers/net/ethernet/sfc/tc_counters.c
@@ -236,6 +236,8 @@ struct efx_tc_counter_index *efx_tc_flower_get_counter_index(
if (old) {
/* don't need our new entry */
kfree(ctr);
+ if (IS_ERR(old)) /* oh dear, it's actually an error */
+ return ERR_CAST(old);
if (!refcount_inc_not_zero(&old->ref))
return ERR_PTR(-EAGAIN);
/* existing entry found */
diff --git a/drivers/net/ethernet/sfc/tc_encap_actions.c b/drivers/net/ethernet/sfc/tc_encap_actions.c
index 7e8bcdb222ad..87443f9dfd22 100644
--- a/drivers/net/ethernet/sfc/tc_encap_actions.c
+++ b/drivers/net/ethernet/sfc/tc_encap_actions.c
@@ -132,6 +132,8 @@ static int efx_bind_neigh(struct efx_nic *efx,
/* don't need our new entry */
put_net_track(neigh->net, &neigh->ns_tracker);
kfree(neigh);
+ if (IS_ERR(old)) /* oh dear, it's actually an error */
+ return PTR_ERR(old);
if (!refcount_inc_not_zero(&old->ref))
return -EAGAIN;
/* existing entry found, ref taken */
@@ -640,6 +642,8 @@ struct efx_tc_encap_action *efx_tc_flower_create_encap_md(
if (old) {
/* don't need our new entry */
kfree(encap);
+ if (IS_ERR(old)) /* oh dear, it's actually an error */
+ return ERR_CAST(old);
if (!refcount_inc_not_zero(&old->ref))
return ERR_PTR(-EAGAIN);
/* existing entry found, ref taken */
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 403cb397d4d3..1e996c29043d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -70,7 +70,7 @@ struct stmmac_txq_stats {
u64 tx_tso_frames;
u64 tx_tso_nfrags;
struct u64_stats_sync syncp;
-};
+} ____cacheline_aligned_in_smp;
struct stmmac_rxq_stats {
u64 rx_bytes;
@@ -79,7 +79,7 @@ struct stmmac_rxq_stats {
u64 rx_normal_irq_n;
u64 napi_poll;
struct u64_stats_sync syncp;
-};
+} ____cacheline_aligned_in_smp;
/* Extra statistic and debug information exposed by ethtool */
struct stmmac_extra_stats {
@@ -202,6 +202,9 @@ struct stmmac_extra_stats {
unsigned long mtl_est_hlbf;
unsigned long mtl_est_btre;
unsigned long mtl_est_btrlm;
+	/* per-queue statistics */
+ struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES];
+ struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];
unsigned long rx_dropped;
unsigned long rx_errors;
unsigned long tx_dropped;
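The stmmac rework above moves the per-queue counters into xstats, with each queue's block cacheline-aligned so writers on different CPUs do not false-share, and keeps the usual u64_stats discipline throughout. A minimal sketch of both sides of that discipline (the struct and helpers are illustrative):

	struct my_q_stats {
		u64 pkts;
		u64 bytes;
		struct u64_stats_sync syncp;
	} ____cacheline_aligned_in_smp;

	static void my_stats_add(struct my_q_stats *s, unsigned int bytes)
	{
		u64_stats_update_begin(&s->syncp);
		s->pkts++;
		s->bytes += bytes;
		u64_stats_update_end(&s->syncp);
	}

	/* reader side: retry until a consistent snapshot is observed */
	static void my_stats_read(struct my_q_stats *s, u64 *pkts, u64 *bytes)
	{
		unsigned int start;

		do {
			start = u64_stats_fetch_begin(&s->syncp);
			*pkts = s->pkts;
			*bytes = s->bytes;
		} while (u64_stats_fetch_retry(&s->syncp, start));
	}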
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
index 26ea8c687881..a0e276783e65 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -104,6 +104,7 @@ struct stm32_ops {
int (*parse_data)(struct stm32_dwmac *dwmac,
struct device *dev);
u32 syscfg_eth_mask;
+ bool clk_rx_enable_in_suspend;
};
static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
@@ -121,7 +122,8 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
if (ret)
return ret;
- if (!dwmac->dev->power.is_suspended) {
+ if (!dwmac->ops->clk_rx_enable_in_suspend ||
+ !dwmac->dev->power.is_suspended) {
ret = clk_prepare_enable(dwmac->clk_rx);
if (ret) {
clk_disable_unprepare(dwmac->clk_tx);
@@ -513,7 +515,8 @@ static struct stm32_ops stm32mp1_dwmac_data = {
.suspend = stm32mp1_suspend,
.resume = stm32mp1_resume,
.parse_data = stm32mp1_parse_data,
- .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK
+ .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK,
+ .clk_rx_enable_in_suspend = true
};
static const struct of_device_id stm32_dwmac_match[] = {
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 01e77368eef1..465ff1fd4785 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -441,8 +441,8 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
struct stmmac_extra_stats *x, u32 chan,
u32 dir)
{
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
int ret = 0;
u32 v;
@@ -455,9 +455,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
if (v & EMAC_TX_INT) {
ret |= handle_tx;
- u64_stats_update_begin(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_normal_irq_n++;
- u64_stats_update_end(&tx_q->txq_stats.syncp);
+ u64_stats_update_begin(&txq_stats->syncp);
+ txq_stats->tx_normal_irq_n++;
+ u64_stats_update_end(&txq_stats->syncp);
}
if (v & EMAC_TX_DMA_STOP_INT)
@@ -479,9 +479,9 @@ static int sun8i_dwmac_dma_interrupt(struct stmmac_priv *priv,
if (v & EMAC_RX_INT) {
ret |= handle_rx;
- u64_stats_update_begin(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.rx_normal_irq_n++;
- u64_stats_update_end(&rx_q->rxq_stats.syncp);
+ u64_stats_update_begin(&rxq_stats->syncp);
+ rxq_stats->rx_normal_irq_n++;
+ u64_stats_update_end(&rxq_stats->syncp);
}
if (v & EMAC_RX_BUF_UA_INT)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
index 980e5f8a37ec..9470d3fd2ded 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -171,8 +171,8 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
const struct dwmac4_addrs *dwmac4_addrs = priv->plat->dwmac4_addrs;
u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(dwmac4_addrs, chan));
u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(dwmac4_addrs, chan));
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
int ret = 0;
if (dir == DMA_DIR_RX)
@@ -201,15 +201,15 @@ int dwmac4_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
}
/* TX/RX NORMAL interrupts */
if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
- u64_stats_update_begin(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.rx_normal_irq_n++;
- u64_stats_update_end(&rx_q->rxq_stats.syncp);
+ u64_stats_update_begin(&rxq_stats->syncp);
+ rxq_stats->rx_normal_irq_n++;
+ u64_stats_update_end(&rxq_stats->syncp);
ret |= handle_rx;
}
if (likely(intr_status & DMA_CHAN_STATUS_TI)) {
- u64_stats_update_begin(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_normal_irq_n++;
- u64_stats_update_end(&tx_q->txq_stats.syncp);
+ u64_stats_update_begin(&txq_stats->syncp);
+ txq_stats->tx_normal_irq_n++;
+ u64_stats_update_end(&txq_stats->syncp);
ret |= handle_tx;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
index aaa09b16b016..7907d62d3437 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -162,8 +162,8 @@ static void show_rx_process_state(unsigned int status)
int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
struct stmmac_extra_stats *x, u32 chan, u32 dir)
{
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
int ret = 0;
/* read the status register (CSR5) */
u32 intr_status = readl(ioaddr + DMA_STATUS);
@@ -215,16 +215,16 @@ int dwmac_dma_interrupt(struct stmmac_priv *priv, void __iomem *ioaddr,
u32 value = readl(ioaddr + DMA_INTR_ENA);
/* to schedule NAPI on real RIE event. */
if (likely(value & DMA_INTR_ENA_RIE)) {
- u64_stats_update_begin(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.rx_normal_irq_n++;
- u64_stats_update_end(&rx_q->rxq_stats.syncp);
+ u64_stats_update_begin(&rxq_stats->syncp);
+ rxq_stats->rx_normal_irq_n++;
+ u64_stats_update_end(&rxq_stats->syncp);
ret |= handle_rx;
}
}
if (likely(intr_status & DMA_STATUS_TI)) {
- u64_stats_update_begin(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_normal_irq_n++;
- u64_stats_update_end(&tx_q->txq_stats.syncp);
+ u64_stats_update_begin(&txq_stats->syncp);
+ txq_stats->tx_normal_irq_n++;
+ u64_stats_update_end(&txq_stats->syncp);
ret |= handle_tx;
}
if (unlikely(intr_status & DMA_STATUS_ERI))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
index fa69d64a8694..3cde695fec91 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -337,8 +337,8 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
struct stmmac_extra_stats *x, u32 chan,
u32 dir)
{
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[chan];
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[chan];
u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
int ret = 0;
@@ -367,15 +367,15 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv,
/* TX/RX NORMAL interrupts */
if (likely(intr_status & XGMAC_NIS)) {
if (likely(intr_status & XGMAC_RI)) {
- u64_stats_update_begin(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.rx_normal_irq_n++;
- u64_stats_update_end(&rx_q->rxq_stats.syncp);
+ u64_stats_update_begin(&rxq_stats->syncp);
+ rxq_stats->rx_normal_irq_n++;
+ u64_stats_update_end(&rxq_stats->syncp);
ret |= handle_rx;
}
if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
- u64_stats_update_begin(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_normal_irq_n++;
- u64_stats_update_end(&tx_q->txq_stats.syncp);
+ u64_stats_update_begin(&txq_stats->syncp);
+ txq_stats->tx_normal_irq_n++;
+ u64_stats_update_end(&txq_stats->syncp);
ret |= handle_tx;
}
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 3401e888a9f6..cd7a9768de5f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -78,7 +78,6 @@ struct stmmac_tx_queue {
dma_addr_t dma_tx_phy;
dma_addr_t tx_tail_addr;
u32 mss;
- struct stmmac_txq_stats txq_stats;
};
struct stmmac_rx_buffer {
@@ -123,7 +122,6 @@ struct stmmac_rx_queue {
unsigned int len;
unsigned int error;
} state;
- struct stmmac_rxq_stats rxq_stats;
};
struct stmmac_channel {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index b7ac7abecdd3..6aa5c0556d22 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -548,14 +548,14 @@ static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data)
pos = data;
for (q = 0; q < tx_cnt; q++) {
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[q];
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
struct stmmac_txq_stats snapshot;
data = pos;
do {
- start = u64_stats_fetch_begin(&tx_q->txq_stats.syncp);
- snapshot = tx_q->txq_stats;
- } while (u64_stats_fetch_retry(&tx_q->txq_stats.syncp, start));
+ start = u64_stats_fetch_begin(&txq_stats->syncp);
+ snapshot = *txq_stats;
+ } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
p = (char *)&snapshot + offsetof(struct stmmac_txq_stats, tx_pkt_n);
for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) {
@@ -566,14 +566,14 @@ static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data)
pos = data;
for (q = 0; q < rx_cnt; q++) {
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[q];
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
struct stmmac_rxq_stats snapshot;
data = pos;
do {
- start = u64_stats_fetch_begin(&rx_q->rxq_stats.syncp);
- snapshot = rx_q->rxq_stats;
- } while (u64_stats_fetch_retry(&rx_q->rxq_stats.syncp, start));
+ start = u64_stats_fetch_begin(&rxq_stats->syncp);
+ snapshot = *rxq_stats;
+ } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
p = (char *)&snapshot + offsetof(struct stmmac_rxq_stats, rx_pkt_n);
for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) {
@@ -637,14 +637,14 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
pos = j;
for (i = 0; i < rx_queues_count; i++) {
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[i];
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[i];
struct stmmac_rxq_stats snapshot;
j = pos;
do {
- start = u64_stats_fetch_begin(&rx_q->rxq_stats.syncp);
- snapshot = rx_q->rxq_stats;
- } while (u64_stats_fetch_retry(&rx_q->rxq_stats.syncp, start));
+ start = u64_stats_fetch_begin(&rxq_stats->syncp);
+ snapshot = *rxq_stats;
+ } while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
data[j++] += snapshot.rx_pkt_n;
data[j++] += snapshot.rx_normal_irq_n;
@@ -654,14 +654,14 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
pos = j;
for (i = 0; i < tx_queues_count; i++) {
- struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[i];
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[i];
struct stmmac_txq_stats snapshot;
j = pos;
do {
- start = u64_stats_fetch_begin(&tx_q->txq_stats.syncp);
- snapshot = tx_q->txq_stats;
- } while (u64_stats_fetch_retry(&tx_q->txq_stats.syncp, start));
+ start = u64_stats_fetch_begin(&txq_stats->syncp);
+ snapshot = *txq_stats;
+ } while (u64_stats_fetch_retry(&txq_stats->syncp, start));
data[j++] += snapshot.tx_pkt_n;
data[j++] += snapshot.tx_normal_irq_n;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 9a3182b9e767..ed1a5a31a491 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2426,6 +2426,7 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
struct xsk_buff_pool *pool = tx_q->xsk_pool;
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc = NULL;
@@ -2505,9 +2506,9 @@ static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
entry = tx_q->cur_tx;
}
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_set_ic_bit += tx_set_ic_bit;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+ txq_stats->tx_set_ic_bit += tx_set_ic_bit;
+ u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
if (tx_desc) {
stmmac_flush_tx_descriptors(priv, queue);
@@ -2547,6 +2548,7 @@ static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
{
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
unsigned int bytes_compl = 0, pkts_compl = 0;
unsigned int entry, xmits = 0, count = 0;
u32 tx_packets = 0, tx_errors = 0;
@@ -2704,15 +2706,13 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
/* We still have pending packets, let's call for a new scheduling */
if (tx_q->dirty_tx != tx_q->cur_tx)
- hrtimer_start(&tx_q->txtimer,
- STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
- HRTIMER_MODE_REL);
+ stmmac_tx_timer_arm(priv, queue);
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_packets += tx_packets;
- tx_q->txq_stats.tx_pkt_n += tx_packets;
- tx_q->txq_stats.tx_clean++;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+ txq_stats->tx_packets += tx_packets;
+ txq_stats->tx_pkt_n += tx_packets;
+ txq_stats->tx_clean++;
+ u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
priv->xstats.tx_errors += tx_errors;
@@ -2995,9 +2995,13 @@ static int stmmac_init_dma_engine(struct stmmac_priv *priv)
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ u32 tx_coal_timer = priv->tx_coal_timer[queue];
+
+ if (!tx_coal_timer)
+ return;
hrtimer_start(&tx_q->txtimer,
- STMMAC_COAL_TIMER(priv->tx_coal_timer[queue]),
+ STMMAC_COAL_TIMER(tx_coal_timer),
HRTIMER_MODE_REL);
}
@@ -4112,6 +4116,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
int nfrags = skb_shinfo(skb)->nr_frags;
u32 queue = skb_get_queue_mapping(skb);
unsigned int first_entry, tx_packets;
+ struct stmmac_txq_stats *txq_stats;
int tmp_pay_len = 0, first_tx;
struct stmmac_tx_queue *tx_q;
bool has_vlan, set_ic;
@@ -4122,6 +4127,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
int i;
tx_q = &priv->dma_conf.tx_queue[queue];
+ txq_stats = &priv->xstats.txq_stats[queue];
first_tx = tx_q->cur_tx;
/* Compute header lengths */
@@ -4280,13 +4286,13 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_bytes += skb->len;
- tx_q->txq_stats.tx_tso_frames++;
- tx_q->txq_stats.tx_tso_nfrags += nfrags;
+ flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+ txq_stats->tx_bytes += skb->len;
+ txq_stats->tx_tso_frames++;
+ txq_stats->tx_tso_nfrags += nfrags;
if (set_ic)
- tx_q->txq_stats.tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ txq_stats->tx_set_ic_bit++;
+ u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
@@ -4357,6 +4363,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
u32 queue = skb_get_queue_mapping(skb);
int nfrags = skb_shinfo(skb)->nr_frags;
int gso = skb_shinfo(skb)->gso_type;
+ struct stmmac_txq_stats *txq_stats;
struct dma_edesc *tbs_desc = NULL;
struct dma_desc *desc, *first;
struct stmmac_tx_queue *tx_q;
@@ -4366,6 +4373,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
dma_addr_t des;
tx_q = &priv->dma_conf.tx_queue[queue];
+ txq_stats = &priv->xstats.txq_stats[queue];
first_tx = tx_q->cur_tx;
if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
@@ -4517,11 +4525,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
}
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_bytes += skb->len;
+ flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+ txq_stats->tx_bytes += skb->len;
if (set_ic)
- tx_q->txq_stats.tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ txq_stats->tx_set_ic_bit++;
+ u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
if (priv->sarc_type)
stmmac_set_desc_sarc(priv, first, priv->sarc_type);
@@ -4728,6 +4736,7 @@ static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
struct xdp_frame *xdpf, bool dma_map)
{
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
unsigned int entry = tx_q->cur_tx;
struct dma_desc *tx_desc;
@@ -4787,9 +4796,9 @@ static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
unsigned long flags;
tx_q->tx_count_frames = 0;
stmmac_set_tx_ic(priv, tx_desc);
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.tx_set_ic_bit++;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+ txq_stats->tx_set_ic_bit++;
+ u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
}
stmmac_enable_dma_transmission(priv, priv->ioaddr);
@@ -4934,7 +4943,7 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
struct dma_desc *p, struct dma_desc *np,
struct xdp_buff *xdp)
{
- struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned int len = xdp->data_end - xdp->data;
enum pkt_hash_types hash_type;
@@ -4964,10 +4973,10 @@ static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
skb_record_rx_queue(skb, queue);
napi_gro_receive(&ch->rxtx_napi, skb);
- flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.rx_pkt_n++;
- rx_q->rxq_stats.rx_bytes += len;
- u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
+ flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
+ rxq_stats->rx_pkt_n++;
+ rxq_stats->rx_bytes += len;
+ u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
}
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
@@ -5040,6 +5049,7 @@ static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
{
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
unsigned int count = 0, error = 0, len = 0;
int dirty = stmmac_rx_dirty(priv, queue);
@@ -5203,9 +5213,9 @@ read_again:
stmmac_finalize_xdp_rx(priv, xdp_status);
- flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.rx_pkt_n += count;
- u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
+ flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
+ rxq_stats->rx_pkt_n += count;
+ u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
priv->xstats.rx_dropped += rx_dropped;
priv->xstats.rx_errors += rx_errors;
@@ -5233,6 +5243,7 @@ read_again:
static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
{
u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
struct stmmac_channel *ch = &priv->channel[queue];
unsigned int count = 0, error = 0, len = 0;
@@ -5494,11 +5505,11 @@ drain_data:
stmmac_rx_refill(priv, queue);
- flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.rx_packets += rx_packets;
- rx_q->rxq_stats.rx_bytes += rx_bytes;
- rx_q->rxq_stats.rx_pkt_n += count;
- u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
+ flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
+ rxq_stats->rx_packets += rx_packets;
+ rxq_stats->rx_bytes += rx_bytes;
+ rxq_stats->rx_pkt_n += count;
+ u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
priv->xstats.rx_dropped += rx_dropped;
priv->xstats.rx_errors += rx_errors;
@@ -5511,15 +5522,15 @@ static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
struct stmmac_channel *ch =
container_of(napi, struct stmmac_channel, rx_napi);
struct stmmac_priv *priv = ch->priv_data;
- struct stmmac_rx_queue *rx_q;
+ struct stmmac_rxq_stats *rxq_stats;
u32 chan = ch->index;
unsigned long flags;
int work_done;
- rx_q = &priv->dma_conf.rx_queue[chan];
- flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.napi_poll++;
- u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
+ rxq_stats = &priv->xstats.rxq_stats[chan];
+ flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
+ rxq_stats->napi_poll++;
+ u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
work_done = stmmac_rx(priv, budget, chan);
if (work_done < budget && napi_complete_done(napi, work_done)) {
@@ -5538,15 +5549,15 @@ static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
struct stmmac_channel *ch =
container_of(napi, struct stmmac_channel, tx_napi);
struct stmmac_priv *priv = ch->priv_data;
- struct stmmac_tx_queue *tx_q;
+ struct stmmac_txq_stats *txq_stats;
u32 chan = ch->index;
unsigned long flags;
int work_done;
- tx_q = &priv->dma_conf.tx_queue[chan];
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.napi_poll++;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ txq_stats = &priv->xstats.txq_stats[chan];
+ flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+ txq_stats->napi_poll++;
+ u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
work_done = stmmac_tx_clean(priv, budget, chan);
work_done = min(work_done, budget);
@@ -5568,20 +5579,20 @@ static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
container_of(napi, struct stmmac_channel, rxtx_napi);
struct stmmac_priv *priv = ch->priv_data;
int rx_done, tx_done, rxtx_done;
- struct stmmac_rx_queue *rx_q;
- struct stmmac_tx_queue *tx_q;
+ struct stmmac_rxq_stats *rxq_stats;
+ struct stmmac_txq_stats *txq_stats;
u32 chan = ch->index;
unsigned long flags;
- rx_q = &priv->dma_conf.rx_queue[chan];
- flags = u64_stats_update_begin_irqsave(&rx_q->rxq_stats.syncp);
- rx_q->rxq_stats.napi_poll++;
- u64_stats_update_end_irqrestore(&rx_q->rxq_stats.syncp, flags);
+ rxq_stats = &priv->xstats.rxq_stats[chan];
+ flags = u64_stats_update_begin_irqsave(&rxq_stats->syncp);
+ rxq_stats->napi_poll++;
+ u64_stats_update_end_irqrestore(&rxq_stats->syncp, flags);
- tx_q = &priv->dma_conf.tx_queue[chan];
- flags = u64_stats_update_begin_irqsave(&tx_q->txq_stats.syncp);
- tx_q->txq_stats.napi_poll++;
- u64_stats_update_end_irqrestore(&tx_q->txq_stats.syncp, flags);
+ txq_stats = &priv->xstats.txq_stats[chan];
+ flags = u64_stats_update_begin_irqsave(&txq_stats->syncp);
+ txq_stats->napi_poll++;
+ u64_stats_update_end_irqrestore(&txq_stats->syncp, flags);
tx_done = stmmac_tx_clean(priv, budget, chan);
tx_done = min(tx_done, budget);
@@ -5991,33 +6002,6 @@ static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
return IRQ_HANDLED;
}
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Polling receive - used by NETCONSOLE and other diagnostic tools
- * to allow network I/O with interrupts disabled.
- */
-static void stmmac_poll_controller(struct net_device *dev)
-{
- struct stmmac_priv *priv = netdev_priv(dev);
- int i;
-
- /* If adapter is down, do nothing */
- if (test_bit(STMMAC_DOWN, &priv->state))
- return;
-
- if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) {
- for (i = 0; i < priv->plat->rx_queues_to_use; i++)
- stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
-
- for (i = 0; i < priv->plat->tx_queues_to_use; i++)
- stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
- } else {
- disable_irq(dev->irq);
- stmmac_interrupt(dev->irq, dev);
- enable_irq(dev->irq);
- }
-}
-#endif
-
/**
* stmmac_ioctl - Entry point for the Ioctl
* @dev: Device pointer.
@@ -6924,7 +6908,7 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64
int q;
for (q = 0; q < tx_cnt; q++) {
- struct stmmac_txq_stats *txq_stats = &priv->dma_conf.tx_queue[q].txq_stats;
+ struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
u64 tx_packets;
u64 tx_bytes;
@@ -6939,7 +6923,7 @@ static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64
}
for (q = 0; q < rx_cnt; q++) {
- struct stmmac_rxq_stats *rxq_stats = &priv->dma_conf.rx_queue[q].rxq_stats;
+ struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
u64 rx_packets;
u64 rx_bytes;
@@ -6978,9 +6962,6 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_get_stats64 = stmmac_get_stats64,
.ndo_setup_tc = stmmac_setup_tc,
.ndo_select_queue = stmmac_select_queue,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = stmmac_poll_controller,
-#endif
.ndo_set_mac_address = stmmac_set_mac_address,
.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
@@ -7340,9 +7321,9 @@ int stmmac_dvr_probe(struct device *device,
priv->dev = ndev;
for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
- u64_stats_init(&priv->dma_conf.rx_queue[i].rxq_stats.syncp);
+ u64_stats_init(&priv->xstats.rxq_stats[i].syncp);
for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
- u64_stats_init(&priv->dma_conf.tx_queue[i].txq_stats.syncp);
+ u64_stats_init(&priv->xstats.txq_stats[i].syncp);
stmmac_set_ethtool_ops(ndev);
priv->pause = pause;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 35f4b1484029..2f0678f15fb7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -419,9 +419,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
return ERR_PTR(phy_mode);
plat->phy_interface = phy_mode;
- plat->mac_interface = stmmac_of_get_mac_mode(np);
- if (plat->mac_interface < 0)
- plat->mac_interface = plat->phy_interface;
+ rc = stmmac_of_get_mac_mode(np);
+ plat->mac_interface = rc < 0 ? plat->phy_interface : rc;
/* Some wrapper drivers still rely on phy_node. Let's save it while
* they are not converted to phylink. */
@@ -902,7 +901,7 @@ static int __maybe_unused stmmac_pltfr_resume(struct device *dev)
struct platform_device *pdev = to_platform_device(dev);
int ret;
- ret = stmmac_pltfr_init(pdev, priv->plat->bsp_priv);
+ ret = stmmac_pltfr_init(pdev, priv->plat);
if (ret)
return ret;
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index 88b5b1b47779..0a3346650e03 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -199,6 +199,7 @@ config TI_ICSSG_PRUETH
config TI_ICSS_IEP
tristate "TI PRU ICSS IEP driver"
+ depends on PTP_1588_CLOCK_OPTIONAL
depends on TI_PRUSS
default TI_PRUSS
help
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
index bea6fc0f324c..24120605502f 100644
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -1747,9 +1747,10 @@ static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
}
tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
- if (tx_chn->irq <= 0) {
+ if (tx_chn->irq < 0) {
dev_err(dev, "Failed to get tx dma irq %d\n",
tx_chn->irq);
+ ret = tx_chn->irq;
goto err;
}
diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
index 410612f43cbd..4914d0ef58e9 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
@@ -316,12 +316,12 @@ static int prueth_init_tx_chns(struct prueth_emac *emac)
goto fail;
}
- tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
- if (tx_chn->irq <= 0) {
- ret = -EINVAL;
+ ret = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
+ if (ret < 0) {
netdev_err(ndev, "failed to get tx irq\n");
goto fail;
}
+ tx_chn->irq = ret;
snprintf(tx_chn->name, sizeof(tx_chn->name), "%s-tx%d",
dev_name(dev), tx_chn->id);
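Both TI fixes above follow the platform_get_irq() idiom: check the raw return value for a negative errno first, propagate that errno rather than a synthesized -EINVAL, and only then store it. A short sketch (my_get_irq() stands in for k3_udma_glue_tx_get_irq()):

	static int my_init_tx_irq(struct my_tx_chn *chn, struct net_device *ndev)
	{
		int ret = my_get_irq(chn);	/* negative errno on failure */

		if (ret < 0) {
			netdev_err(ndev, "failed to get tx irq\n");
			return ret;		/* propagate, don't invent an errno */
		}
		chn->irq = ret;
		return 0;
	}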
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index c3f30663070f..b7e151439c48 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -1330,8 +1330,7 @@ static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
struct crypto_aead *tfm;
int ret;
- /* Pick a sync gcm(aes) cipher to ensure order is preserved. */
- tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
+ tfm = crypto_alloc_aead("gcm(aes)", 0, 0);
if (IS_ERR(tfm))
return tfm;
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index b6d7981b2d1e..927d3d54658e 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1800,9 +1800,6 @@ static const struct ksz9477_errata_write ksz9477_errata_writes[] = {
/* Transmit waveform amplitude can be improved (1000BASE-T, 100BASE-TX, 10BASE-Te) */
{0x1c, 0x04, 0x00d0},
- /* Energy Efficient Ethernet (EEE) feature select must be manually disabled */
- {0x07, 0x3c, 0x0000},
-
/* Register settings are required to meet data sheet supply current specifications */
{0x1c, 0x13, 0x6eff},
{0x1c, 0x14, 0xe6ff},
@@ -1847,6 +1844,12 @@ static int ksz9477_config_init(struct phy_device *phydev)
return err;
}
+ /* According to KSZ9477 Errata DS80000754C (Module 4) all EEE modes
+ * in this switch shall be regarded as broken.
+ */
+ if (phydev->dev_flags & MICREL_NO_EEE)
+ phydev->eee_broken_modes = -1;
+
err = genphy_restart_aneg(phydev);
if (err)
return err;
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index e8b94580194e..508d9a392ab1 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -2115,7 +2115,12 @@ static const struct ethtool_ops team_ethtool_ops = {
static void team_setup_by_port(struct net_device *dev,
struct net_device *port_dev)
{
- dev->header_ops = port_dev->header_ops;
+ struct team *team = netdev_priv(dev);
+
+ if (port_dev->type == ARPHRD_ETHER)
+ dev->header_ops = team->header_ops_cache;
+ else
+ dev->header_ops = port_dev->header_ops;
dev->type = port_dev->type;
dev->hard_header_len = port_dev->hard_header_len;
dev->needed_headroom = port_dev->needed_headroom;
@@ -2162,8 +2167,11 @@ static int team_dev_type_check_change(struct net_device *dev,
static void team_setup(struct net_device *dev)
{
+ struct team *team = netdev_priv(dev);
+
ether_setup(dev);
dev->max_mtu = ETH_MAX_MTU;
+ team->header_ops_cache = dev->header_ops;
dev->netdev_ops = &team_netdev_ops;
dev->ethtool_ops = &team_ethtool_ops;
diff --git a/drivers/net/thunderbolt/main.c b/drivers/net/thunderbolt/main.c
index 0c1e8970ee58..0a53ec293d04 100644
--- a/drivers/net/thunderbolt/main.c
+++ b/drivers/net/thunderbolt/main.c
@@ -1049,12 +1049,11 @@ static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
*tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, 0,
ip_hdr(skb)->protocol, 0);
- } else if (skb_is_gso_v6(skb)) {
+ } else if (skb_is_gso(skb) && skb_is_gso_v6(skb)) {
tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, 0,
IPPROTO_TCP, 0);
- return false;
} else if (protocol == htons(ETH_P_IPV6)) {
tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset;
*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 332c853ca99b..0c13d9950cd8 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -2636,6 +2636,9 @@ static int r8152_poll(struct napi_struct *napi, int budget)
struct r8152 *tp = container_of(napi, struct r8152, napi);
int work_done;
+ if (!budget)
+ return 0;
+
work_done = rx_bottom(tp, budget);
if (work_done < budget) {
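The r8152 guard above reflects a NAPI contract that is easy to miss: poll can be invoked with a budget of 0 (netpoll wants TX completions only), in which case no RX packet may be processed and napi_complete_done() must not be called. A minimal sketch (my_* names are illustrative):

	static int my_poll(struct napi_struct *napi, int budget)
	{
		struct my_priv *p = container_of(napi, struct my_priv, napi);
		int work_done;

		if (!budget)		/* netpoll: RX processing is not allowed */
			return 0;

		work_done = my_rx(p, budget);
		if (work_done < budget && napi_complete_done(napi, work_done))
			my_enable_irq(p);

		return work_done;
	}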
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 5d6454fedb3f..78ad2da3ee29 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -90,7 +90,9 @@ static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
ret = fn(dev, USB_VENDOR_REQUEST_READ_REGISTER, USB_DIR_IN
| USB_TYPE_VENDOR | USB_RECIP_DEVICE,
0, index, &buf, 4);
- if (unlikely(ret < 0)) {
+ if (unlikely(ret < 4)) {
+ ret = ret < 0 ? ret : -ENODATA;
+
netdev_warn(dev->net, "Failed to read reg index 0x%08x: %d\n",
index, ret);
return ret;
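The smsc75xx change treats a short USB control read as a failure: the transfer helper returns the number of bytes moved, so anything under the full register width is mapped to -ENODATA instead of being mistaken for success. A hedged sketch (my_usb_read() is a placeholder returning bytes transferred or a negative errno):

	static int my_read_reg32(struct my_dev *dev, u32 index, __le32 *val)
	{
		int ret = my_usb_read(dev, index, val, sizeof(*val));

		if (ret < (int)sizeof(*val))
			return ret < 0 ? ret : -ENODATA;	/* short read */
		return 0;	/* all four bytes arrived */
	}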
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index d43e62ebc2fc..0deefd1573cf 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -344,6 +344,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
struct veth_rq *rq = NULL;
+ int ret = NETDEV_TX_OK;
struct net_device *rcv;
int length = skb->len;
bool use_napi = false;
@@ -378,11 +379,12 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
drop:
atomic64_inc(&priv->dropped);
+ ret = NET_XMIT_DROP;
}
rcu_read_unlock();
- return NETDEV_TX_OK;
+ return ret;
}
static u64 veth_stats_tx(struct net_device *dev, u64 *packets, u64 *bytes)
@@ -1444,6 +1446,8 @@ static int veth_open(struct net_device *dev)
netif_carrier_on(peer);
}
+ veth_set_xdp_features(dev);
+
return 0;
}
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 231ad91a919d..fe7f314d65c9 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -132,6 +132,14 @@ struct virtnet_interrupt_coalesce {
u32 max_usecs;
};
+/* The DMA information for the pages allocated in one batch. */
+struct virtnet_rq_dma {
+ dma_addr_t addr;
+ u32 ref;
+ u16 len;
+ u16 need_sync;
+};
+
/* Internal representation of a send virtqueue */
struct send_queue {
/* Virtqueue associated with this send _queue */
@@ -185,6 +193,12 @@ struct receive_queue {
char name[16];
struct xdp_rxq_info xdp_rxq;
+
+	/* Record the last DMA info so it can be freed once a new page is allocated. */
+ struct virtnet_rq_dma *last_dma;
+
+	/* DMA mapping is handled by the driver itself */
+ bool do_dma;
};
/* This structure can contain rss message with maximum settings for indirection table and keysize
@@ -580,6 +594,156 @@ ok:
return skb;
}
+static void virtnet_rq_unmap(struct receive_queue *rq, void *buf, u32 len)
+{
+ struct page *page = virt_to_head_page(buf);
+ struct virtnet_rq_dma *dma;
+ void *head;
+ int offset;
+
+ head = page_address(page);
+
+ dma = head;
+
+ --dma->ref;
+
+ if (dma->ref) {
+ if (dma->need_sync && len) {
+ offset = buf - (head + sizeof(*dma));
+
+ virtqueue_dma_sync_single_range_for_cpu(rq->vq, dma->addr, offset,
+ len, DMA_FROM_DEVICE);
+ }
+
+ return;
+ }
+
+ virtqueue_dma_unmap_single_attrs(rq->vq, dma->addr, dma->len,
+ DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
+ put_page(page);
+}
+
+static void *virtnet_rq_get_buf(struct receive_queue *rq, u32 *len, void **ctx)
+{
+ void *buf;
+
+ buf = virtqueue_get_buf_ctx(rq->vq, len, ctx);
+ if (buf && rq->do_dma)
+ virtnet_rq_unmap(rq, buf, *len);
+
+ return buf;
+}
+
+static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq)
+{
+ void *buf;
+
+ buf = virtqueue_detach_unused_buf(rq->vq);
+ if (buf && rq->do_dma)
+ virtnet_rq_unmap(rq, buf, 0);
+
+ return buf;
+}
+
+static void virtnet_rq_init_one_sg(struct receive_queue *rq, void *buf, u32 len)
+{
+ struct virtnet_rq_dma *dma;
+ dma_addr_t addr;
+ u32 offset;
+ void *head;
+
+ if (!rq->do_dma) {
+ sg_init_one(rq->sg, buf, len);
+ return;
+ }
+
+ head = page_address(rq->alloc_frag.page);
+
+ offset = buf - head;
+
+ dma = head;
+
+ addr = dma->addr - sizeof(*dma) + offset;
+
+ sg_init_table(rq->sg, 1);
+ rq->sg[0].dma_address = addr;
+ rq->sg[0].length = len;
+}
+
+static void *virtnet_rq_alloc(struct receive_queue *rq, u32 size, gfp_t gfp)
+{
+ struct page_frag *alloc_frag = &rq->alloc_frag;
+ struct virtnet_rq_dma *dma;
+ void *buf, *head;
+ dma_addr_t addr;
+
+ if (unlikely(!skb_page_frag_refill(size, alloc_frag, gfp)))
+ return NULL;
+
+ head = page_address(alloc_frag->page);
+
+ if (rq->do_dma) {
+ dma = head;
+
+ /* new pages */
+ if (!alloc_frag->offset) {
+ if (rq->last_dma) {
+				/* Now that the new page is allocated, the
+				 * previous DMA mapping will not be used again,
+				 * so it can be unmapped once its ref count
+				 * drops to 0.
+				 */
+ virtnet_rq_unmap(rq, rq->last_dma, 0);
+ rq->last_dma = NULL;
+ }
+
+ dma->len = alloc_frag->size - sizeof(*dma);
+
+ addr = virtqueue_dma_map_single_attrs(rq->vq, dma + 1,
+ dma->len, DMA_FROM_DEVICE, 0);
+ if (virtqueue_dma_mapping_error(rq->vq, addr))
+ return NULL;
+
+ dma->addr = addr;
+ dma->need_sync = virtqueue_dma_need_sync(rq->vq, addr);
+
+			/* Hold a reference on the mapping so the whole DMA
+			 * region cannot be released during error handling.
+			 * The reference is dropped once the page is no
+			 * longer in use.
+			 */
+ get_page(alloc_frag->page);
+ dma->ref = 1;
+ alloc_frag->offset = sizeof(*dma);
+
+ rq->last_dma = dma;
+ }
+
+ ++dma->ref;
+ }
+
+ buf = head + alloc_frag->offset;
+
+ get_page(alloc_frag->page);
+ alloc_frag->offset += size;
+
+ return buf;
+}
+
+static void virtnet_rq_set_premapped(struct virtnet_info *vi)
+{
+ int i;
+
+	/* premapped DMA is not used in big-packet mode */
+ if (!vi->mergeable_rx_bufs && vi->big_packets)
+ return;
+
+ for (i = 0; i < vi->max_queue_pairs; i++) {
+ if (virtqueue_set_dma_premapped(vi->rq[i].vq))
+ continue;
+
+ vi->rq[i].do_dma = true;
+ }
+}
+
static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi)
{
unsigned int len;
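The virtio-net helpers above implement one mapping per page frag: a small struct virtnet_rq_dma header sits at the head of each page, every buffer carved from that page takes a reference, and the mapping is torn down only when the last buffer comes back. A sketch of the release half under that layout (the function is illustrative; the unmap call is the one the patch itself uses):

	/* page layout: [ struct virtnet_rq_dma | buf 0 | buf 1 | ... ] */
	static void my_buf_done(struct virtqueue *vq, struct virtnet_rq_dma *dma,
				struct page *page)
	{
		if (--dma->ref)		/* other buffers from this page in flight */
			return;

		virtqueue_dma_unmap_single_attrs(vq, dma->addr, dma->len,
						 DMA_FROM_DEVICE,
						 DMA_ATTR_SKIP_CPU_SYNC);
		put_page(page);		/* drop the reference taken at map time */
	}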
@@ -935,7 +1099,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq,
void *buf;
int off;
- buf = virtqueue_get_buf(rq->vq, &buflen);
+ buf = virtnet_rq_get_buf(rq, &buflen, NULL);
if (unlikely(!buf))
goto err_buf;
@@ -1155,7 +1319,7 @@ static void mergeable_buf_free(struct receive_queue *rq, int num_buf,
int len;
while (num_buf-- > 1) {
- buf = virtqueue_get_buf(rq->vq, &len);
+ buf = virtnet_rq_get_buf(rq, &len, NULL);
if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers missing\n",
dev->name, num_buf);
@@ -1263,7 +1427,7 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev,
return -EINVAL;
while (--*num_buf > 0) {
- buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
+ buf = virtnet_rq_get_buf(rq, &len, &ctx);
if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers out of %d missing\n",
dev->name, *num_buf,
@@ -1492,7 +1656,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
while (--num_buf) {
int num_skb_frags;
- buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx);
+ buf = virtnet_rq_get_buf(rq, &len, &ctx);
if (unlikely(!buf)) {
pr_debug("%s: rx error: %d buffers out of %d missing\n",
dev->name, num_buf,
@@ -1651,7 +1815,6 @@ frame_err:
static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
gfp_t gfp)
{
- struct page_frag *alloc_frag = &rq->alloc_frag;
char *buf;
unsigned int xdp_headroom = virtnet_get_headroom(vi);
void *ctx = (void *)(unsigned long)xdp_headroom;
@@ -1660,17 +1823,21 @@ static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
len = SKB_DATA_ALIGN(len) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
- if (unlikely(!skb_page_frag_refill(len, alloc_frag, gfp)))
+
+ buf = virtnet_rq_alloc(rq, len, gfp);
+ if (unlikely(!buf))
return -ENOMEM;
- buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
- get_page(alloc_frag->page);
- alloc_frag->offset += len;
- sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
- vi->hdr_len + GOOD_PACKET_LEN);
+ virtnet_rq_init_one_sg(rq, buf + VIRTNET_RX_PAD + xdp_headroom,
+ vi->hdr_len + GOOD_PACKET_LEN);
+
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
- if (err < 0)
+ if (err < 0) {
+ if (rq->do_dma)
+ virtnet_rq_unmap(rq, buf, 0);
put_page(virt_to_head_page(buf));
+ }
+
return err;
}
@@ -1747,23 +1914,22 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
unsigned int headroom = virtnet_get_headroom(vi);
unsigned int tailroom = headroom ? sizeof(struct skb_shared_info) : 0;
unsigned int room = SKB_DATA_ALIGN(headroom + tailroom);
- char *buf;
+ unsigned int len, hole;
void *ctx;
+ char *buf;
int err;
- unsigned int len, hole;
/* Extra tailroom is needed to satisfy XDP's assumption. This
* means rx frags coalescing won't work, but consider we've
* disabled GSO for XDP, it won't be a big issue.
*/
len = get_mergeable_buf_len(rq, &rq->mrg_avg_pkt_len, room);
- if (unlikely(!skb_page_frag_refill(len + room, alloc_frag, gfp)))
+
+ buf = virtnet_rq_alloc(rq, len + room, gfp);
+ if (unlikely(!buf))
return -ENOMEM;
- buf = (char *)page_address(alloc_frag->page) + alloc_frag->offset;
buf += headroom; /* advance address leaving hole at front of pkt */
- get_page(alloc_frag->page);
- alloc_frag->offset += len + room;
hole = alloc_frag->size - alloc_frag->offset;
if (hole < len + room) {
/* To avoid internal fragmentation, if there is very likely not
@@ -1777,11 +1943,15 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi,
alloc_frag->offset += hole;
}
- sg_init_one(rq->sg, buf, len);
+ virtnet_rq_init_one_sg(rq, buf, len);
+
ctx = mergeable_len_to_ctx(len + room, headroom);
err = virtqueue_add_inbuf_ctx(rq->vq, rq->sg, 1, buf, ctx, gfp);
- if (err < 0)
+ if (err < 0) {
+ if (rq->do_dma)
+ virtnet_rq_unmap(rq, buf, 0);
put_page(virt_to_head_page(buf));
+ }
return err;
}
@@ -1902,13 +2072,13 @@ static int virtnet_receive(struct receive_queue *rq, int budget,
void *ctx;
while (stats.packets < budget &&
- (buf = virtqueue_get_buf_ctx(rq->vq, &len, &ctx))) {
+ (buf = virtnet_rq_get_buf(rq, &len, &ctx))) {
receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
stats.packets++;
}
} else {
while (stats.packets < budget &&
- (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
+ (buf = virtnet_rq_get_buf(rq, &len, NULL)) != NULL) {
receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
stats.packets++;
}
@@ -3808,8 +3978,11 @@ static void free_receive_page_frags(struct virtnet_info *vi)
{
int i;
for (i = 0; i < vi->max_queue_pairs; i++)
- if (vi->rq[i].alloc_frag.page)
+ if (vi->rq[i].alloc_frag.page) {
+ if (vi->rq[i].do_dma && vi->rq[i].last_dma)
+ virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
put_page(vi->rq[i].alloc_frag.page);
+ }
}
static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf)
@@ -3846,9 +4019,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
}
for (i = 0; i < vi->max_queue_pairs; i++) {
- struct virtqueue *vq = vi->rq[i].vq;
- while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
- virtnet_rq_free_unused_buf(vq, buf);
+ struct receive_queue *rq = &vi->rq[i];
+
+ while ((buf = virtnet_rq_detach_unused_buf(rq)) != NULL)
+ virtnet_rq_free_unused_buf(rq->vq, buf);
cond_resched();
}
}
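[Editor's note] Teardown must release mappings too: buffers detached as unused still hold a DMA reference, hence the receive_queue-aware detach helper here, and the matching rq->last_dma unmap in free_receive_page_frags() above for the still-active frag page no buffer has returned. A sketch, assuming the helper simply wraps the core detach:

    static void *virtnet_rq_detach_unused_buf(struct receive_queue *rq)
    {
            void *buf;

            buf = virtqueue_detach_unused_buf(rq->vq);
            if (buf && rq->do_dma)
                    virtnet_rq_unmap(rq, buf, 0);  /* never completed; length unknown */

            return buf;
    }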
@@ -4022,6 +4196,8 @@ static int init_vqs(struct virtnet_info *vi)
if (ret)
goto err_free;
+ virtnet_rq_set_premapped(vi);
+
cpus_read_lock();
virtnet_set_affinity(vi);
cpus_read_unlock();
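[Editor's note] virtnet_rq_set_premapped() opts each RX virtqueue into driver-managed DMA right after the vqs exist and before any buffer is posted. A hedged sketch using the virtqueue_set_dma_premapped() core API; the big-packets exclusion is an assumption based on that mode posting whole pages rather than frag buffers:

    static void virtnet_rq_set_premapped(struct virtnet_info *vi)
    {
            int i;

            if (vi->big_packets && !vi->mergeable_rx_bufs)
                    return;

            for (i = 0; i < vi->max_queue_pairs; i++) {
                    if (virtqueue_set_dma_premapped(vi->rq[i].vq))
                            continue;  /* core refused; keep core-managed DMA */
                    vi->rq[i].do_dma = true;
            }
    }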
diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
index e463f59e95c2..5b5597073b00 100644
--- a/drivers/net/vxlan/vxlan_core.c
+++ b/drivers/net/vxlan/vxlan_core.c
@@ -4331,6 +4331,10 @@ static size_t vxlan_get_size(const struct net_device *dev)
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_TX */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_REMCSUM_RX */
nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LOCALBYPASS */
+ nla_total_size(0) + /* IFLA_VXLAN_GBP */
+ nla_total_size(0) + /* IFLA_VXLAN_GPE */
+ nla_total_size(0) + /* IFLA_VXLAN_REMCSUM_NOPARTIAL */
+ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_VNIFILTER */
0;
}
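[Editor's note] The vxlan change is pure netlink size accounting: every attribute vxlan_fill_info() can emit must be counted in vxlan_get_size(), and flag attributes carry no payload, hence nla_total_size(0). Illustrative fill-side counterpart that the size calculation has to cover (exact condition is an assumption):

    if ((vxlan->cfg.flags & VXLAN_F_GBP) &&
        nla_put_flag(skb, IFLA_VXLAN_GBP))
            goto nla_put_failure;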
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index 47c2ad7a3e42..fd50bb313b92 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -34,6 +34,8 @@
#define TDM_PPPOHT_SLIC_MAXIN
#define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)
+static int uhdlc_close(struct net_device *dev);
+
static struct ucc_tdm_info utdm_primary_info = {
.uf_info = {
.tsa = 0,
@@ -708,6 +710,7 @@ static int uhdlc_open(struct net_device *dev)
hdlc_device *hdlc = dev_to_hdlc(dev);
struct ucc_hdlc_private *priv = hdlc->priv;
struct ucc_tdm *utdm = priv->utdm;
+ int rc = 0;
if (priv->hdlc_busy != 1) {
if (request_irq(priv->ut_info->uf_info.irq,
@@ -731,10 +734,13 @@ static int uhdlc_open(struct net_device *dev)
napi_enable(&priv->napi);
netdev_reset_queue(dev);
netif_start_queue(dev);
- hdlc_open(dev);
+
+ rc = hdlc_open(dev);
+ if (rc)
+ uhdlc_close(dev);
}
- return 0;
+ return rc;
}
static void uhdlc_memclean(struct ucc_hdlc_private *priv)
@@ -824,6 +830,8 @@ static int uhdlc_close(struct net_device *dev)
netdev_reset_queue(dev);
priv->hdlc_busy = 0;
+ hdlc_close(dev);
+
return 0;
}
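[Editor's note] uhdlc_open() previously discarded hdlc_open()'s return value. The fix propagates it and unwinds through uhdlc_close(), which is why the forward declaration appears at the top of the file (the close routine is defined later), and why uhdlc_close() now calls hdlc_close() so open/close stay balanced. The pattern, with the rationale inline:

    rc = hdlc_open(dev);          /* may fail, e.g. protocol attach */
    if (rc)
            uhdlc_close(dev);     /* undo irq/NAPI/queue state taken earlier in open */

    return rc;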
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
index bece26741d3a..611d1a6aabb9 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h
@@ -442,7 +442,12 @@ struct brcmf_scan_params_v2_le {
* fixed parameter portion is assumed, otherwise
* ssid in the fixed portion is ignored
*/
- __le16 channel_list[1]; /* list of chanspecs */
+ union {
+ __le16 padding; /* Reserve space for at least 1 entry for abort
+ * which uses an on stack brcmf_scan_params_v2_le
+ */
+ DECLARE_FLEX_ARRAY(__le16, channel_list); /* chanspecs */
+ };
};
struct brcmf_scan_results {
@@ -702,7 +707,7 @@ struct brcmf_sta_info_le {
struct brcmf_chanspec_list {
__le32 count; /* # of entries */
- __le32 element[1]; /* variable length uint32 list */
+ __le32 element[]; /* variable length uint32 list */
};
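[Editor's note] Both brcmfmac structs move from one-element arrays to real flexible arrays, so sizeof() no longer counts a phantom element and fortified bounds checking can work. channel_list lands in a union with a padding member because the scan-abort path builds a brcmf_scan_params_v2_le on the stack and needs space for one entry, and because a flex member cannot be the sole member of a struct/union. The generic shape, with illustrative names:

    #include <linux/stddef.h>

    struct example_params {
            __le32 count;
            union {
                    __le16 padding;                        /* keeps room for 1 entry on-stack */
                    DECLARE_FLEX_ARRAY(__le16, list);      /* bounds-checkable [] member */
            };
    };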
/*
diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
index f5e08988dc7b..06d6f7f66430 100644
--- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
+++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h
@@ -310,9 +310,9 @@ struct iwl_fw_ini_fifo_hdr {
struct iwl_fw_ini_error_dump_range {
__le32 range_data_size;
union {
- __le32 internal_base_addr;
- __le64 dram_base_addr;
- __le32 page_num;
+ __le32 internal_base_addr __packed;
+ __le64 dram_base_addr __packed;
+ __le32 page_num __packed;
struct iwl_fw_ini_fifo_hdr fifo_hdr;
struct iwl_cmd_header fw_pkt_hdr;
};
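[Editor's note] Marking the scalar union members __packed drops their natural alignment, so the union, and the on-wire dump record containing it, keeps the byte layout the firmware expects; a bare __le64 member would otherwise give the union 8-byte alignment. Illustrative effect (alignment values assume a typical 64-bit ABI):

    union u1 { __le32 a; __le64 b; };             /* alignof == 8: layout may shift */
    union u2 { __le32 a; __le64 b __packed; };    /* alignof == 4: wire-stable */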
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
index 1f5db65a088d..1d5ee4330f29 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
@@ -802,7 +802,7 @@ out:
mvm->nvm_data->bands[0].n_channels = 1;
mvm->nvm_data->bands[0].n_bitrates = 1;
mvm->nvm_data->bands[0].bitrates =
- (void *)((u8 *)mvm->nvm_data->channels + 1);
+ (void *)(mvm->nvm_data->channels + 1);
mvm->nvm_data->bands[0].bitrates->hw_value = 10;
}
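[Editor's note] The fw.c change is pointer arithmetic: channels is a struct ieee80211_channel *, so the old u8 cast advanced a single byte into the first channel instead of stepping past it. Worked out:

    struct ieee80211_channel *ch = mvm->nvm_data->channels;

    (void *)(ch + 1);          /* ch + sizeof(struct ieee80211_channel): past entry 0 */
    (void *)((u8 *)ch + 1);    /* ch + 1 byte: inside entry 0 -- the old, buggy address */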
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
index 8b6c641772ee..b719843e9457 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c
@@ -731,73 +731,78 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm,
mvmvif->associated = vif->cfg.assoc;
- if (!(changes & BSS_CHANGED_ASSOC))
- return;
-
- if (vif->cfg.assoc) {
- /* clear statistics to get clean beacon counter */
- iwl_mvm_request_statistics(mvm, true);
- iwl_mvm_sf_update(mvm, vif, false);
- iwl_mvm_power_vif_assoc(mvm, vif);
-
- for_each_mvm_vif_valid_link(mvmvif, i) {
- memset(&mvmvif->link[i]->beacon_stats, 0,
- sizeof(mvmvif->link[i]->beacon_stats));
+ if (changes & BSS_CHANGED_ASSOC) {
+ if (vif->cfg.assoc) {
+ /* clear statistics to get clean beacon counter */
+ iwl_mvm_request_statistics(mvm, true);
+ iwl_mvm_sf_update(mvm, vif, false);
+ iwl_mvm_power_vif_assoc(mvm, vif);
+
+ for_each_mvm_vif_valid_link(mvmvif, i) {
+ memset(&mvmvif->link[i]->beacon_stats, 0,
+ sizeof(mvmvif->link[i]->beacon_stats));
+
+ if (vif->p2p) {
+ iwl_mvm_update_smps(mvm, vif,
+ IWL_MVM_SMPS_REQ_PROT,
+ IEEE80211_SMPS_DYNAMIC, i);
+ }
+
+ rcu_read_lock();
+ link_conf = rcu_dereference(vif->link_conf[i]);
+ if (link_conf && !link_conf->dtim_period)
+ protect = true;
+ rcu_read_unlock();
+ }
- if (vif->p2p) {
- iwl_mvm_update_smps(mvm, vif,
- IWL_MVM_SMPS_REQ_PROT,
- IEEE80211_SMPS_DYNAMIC, i);
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
+ protect) {
+ /* If we're not restarting and still haven't
+ * heard a beacon (dtim period unknown) then
+ * make sure we still have enough minimum time
+ * remaining in the time event, since the auth
+ * might actually have taken quite a while
+ * (especially for SAE) and so the remaining
+ * time could be small without us having heard
+ * a beacon yet.
+ */
+ iwl_mvm_protect_assoc(mvm, vif, 0);
}
- rcu_read_lock();
- link_conf = rcu_dereference(vif->link_conf[i]);
- if (link_conf && !link_conf->dtim_period)
- protect = true;
- rcu_read_unlock();
- }
+ iwl_mvm_sf_update(mvm, vif, false);
+
+ /* FIXME: need to decide about misbehaving AP handling */
+ iwl_mvm_power_vif_assoc(mvm, vif);
+ } else if (iwl_mvm_mld_vif_have_valid_ap_sta(mvmvif)) {
+ iwl_mvm_mei_host_disassociated(mvm);
- if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) &&
- protect) {
- /* If we're not restarting and still haven't
- * heard a beacon (dtim period unknown) then
- * make sure we still have enough minimum time
- * remaining in the time event, since the auth
- * might actually have taken quite a while
- * (especially for SAE) and so the remaining
- * time could be small without us having heard
- * a beacon yet.
+ /* If update fails - SF might be running in associated
+ * mode while disassociated - which is forbidden.
*/
- iwl_mvm_protect_assoc(mvm, vif, 0);
+ ret = iwl_mvm_sf_update(mvm, vif, false);
+ WARN_ONCE(ret &&
+ !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
+ &mvm->status),
+ "Failed to update SF upon disassociation\n");
+
+ /* If we get an assert during the connection (after the
+ * station has been added, but before the vif is set
+ * to associated), mac80211 will re-add the station and
+ * then configure the vif. Since the vif is not
+ * associated, we would remove the station here and
+ * this would fail the recovery.
+ */
+ iwl_mvm_mld_vif_delete_all_stas(mvm, vif);
}
- iwl_mvm_sf_update(mvm, vif, false);
-
- /* FIXME: need to decide about misbehaving AP handling */
- iwl_mvm_power_vif_assoc(mvm, vif);
- } else if (iwl_mvm_mld_vif_have_valid_ap_sta(mvmvif)) {
- iwl_mvm_mei_host_disassociated(mvm);
-
- /* If update fails - SF might be running in associated
- * mode while disassociated - which is forbidden.
- */
- ret = iwl_mvm_sf_update(mvm, vif, false);
- WARN_ONCE(ret &&
- !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
- &mvm->status),
- "Failed to update SF upon disassociation\n");
-
- /* If we get an assert during the connection (after the
- * station has been added, but before the vif is set
- * to associated), mac80211 will re-add the station and
- * then configure the vif. Since the vif is not
- * associated, we would remove the station here and
- * this would fail the recovery.
- */
- iwl_mvm_mld_vif_delete_all_stas(mvm, vif);
+ iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes);
}
- iwl_mvm_bss_info_changed_station_assoc(mvm, vif, changes);
+ if (changes & BSS_CHANGED_PS) {
+ ret = iwl_mvm_power_update_mac(mvm);
+ if (ret)
+ IWL_ERR(mvm, "failed to update power mode\n");
+ }
}
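[Editor's note] The large mld-mac80211 diff is mostly re-indentation: the early return on !(changes & BSS_CHANGED_ASSOC) is removed so that independent change bits each get their own block, which is what lets the new BSS_CHANGED_PS handling run even when no association change accompanies it. The resulting shape:

    if (changes & BSS_CHANGED_ASSOC) {
            /* assoc / disassoc handling, unchanged in substance */
    }

    if (changes & BSS_CHANGED_PS) {
            ret = iwl_mvm_power_update_mac(mvm);
            if (ret)
                    IWL_ERR(mvm, "failed to update power mode\n");
    }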
static void
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index c1d9ce753468..3cbe2c0b8d6b 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -2342,7 +2342,7 @@ iwl_mvm_scan_umac_fill_general_p_v12(struct iwl_mvm *mvm,
if (gen_flags & IWL_UMAC_SCAN_GEN_FLAGS_V2_FRAGMENTED_LMAC2)
gp->num_of_fragments[SCAN_HB_LMAC_IDX] = IWL_SCAN_NUM_OF_FRAGS;
- if (version < 12) {
+ if (version < 16) {
gp->scan_start_mac_or_link_id = scan_vif->id;
} else {
struct iwl_mvm_vif_link_info *link_info;
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
index 36d70d589aed..898dca393643 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c
@@ -1612,6 +1612,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
memset(&info->status, 0, sizeof(info->status));
+ info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED);
/* inform mac80211 about what happened with the frame */
switch (status & TX_STATUS_MSK) {
@@ -1964,6 +1965,8 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
*/
if (!is_flush)
info->flags |= IEEE80211_TX_STAT_ACK;
+ else
+ info->flags &= ~IEEE80211_TX_STAT_ACK;
}
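[Editor's note] Both tx.c hunks fix the same class of bug: ieee80211_tx_info.flags is not covered by the memset of info->status, so status bits left over from a previous use of the frame (or set optimistically before a flush) leak into what mac80211 sees. The rule the fix enforces:

    memset(&info->status, 0, sizeof(info->status));   /* clears status only */
    info->flags &= ~(IEEE80211_TX_STAT_ACK |          /* stale flag bits must be */
                     IEEE80211_TX_STAT_TX_FILTERED);  /* cleared explicitly */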
/*
diff --git a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
index 391793a16adc..10690e82358b 100644
--- a/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c
@@ -918,9 +918,17 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
event_buf, len);
- while (tlv_buf_left >= sizeof(*tlv_rxba)) {
+ while (tlv_buf_left > sizeof(*tlv_rxba)) {
tlv_type = le16_to_cpu(tlv_rxba->header.type);
tlv_len = le16_to_cpu(tlv_rxba->header.len);
+ if (size_add(sizeof(tlv_rxba->header), tlv_len) > tlv_buf_left) {
+ mwifiex_dbg(priv->adapter, WARN,
+ "TLV size (%zu) overflows event_buf buf_left=%d\n",
+ size_add(sizeof(tlv_rxba->header), tlv_len),
+ tlv_buf_left);
+ return;
+ }
+
if (tlv_type != TLV_TYPE_RXBA_SYNC) {
mwifiex_dbg(priv->adapter, ERROR,
"Wrong TLV id=0x%x\n", tlv_type);
@@ -929,6 +937,14 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
tlv_seq_num = le16_to_cpu(tlv_rxba->seq_num);
tlv_bitmap_len = le16_to_cpu(tlv_rxba->bitmap_len);
+ if (size_add(sizeof(*tlv_rxba), tlv_bitmap_len) > tlv_buf_left) {
+ mwifiex_dbg(priv->adapter, WARN,
+ "TLV size (%zu) overflows event_buf buf_left=%d\n",
+ size_add(sizeof(*tlv_rxba), tlv_bitmap_len),
+ tlv_buf_left);
+ return;
+ }
+
mwifiex_dbg(priv->adapter, INFO,
"%pM tid=%d seq_num=%d bitmap_len=%d\n",
tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
@@ -965,8 +981,8 @@ void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
}
}
- tlv_buf_left -= (sizeof(*tlv_rxba) + tlv_len);
- tmp = (u8 *)tlv_rxba + tlv_len + sizeof(*tlv_rxba);
+ tlv_buf_left -= (sizeof(tlv_rxba->header) + tlv_len);
+ tmp = (u8 *)tlv_rxba + sizeof(tlv_rxba->header) + tlv_len;
tlv_rxba = (struct mwifiex_ie_types_rxba_sync *)tmp;
}
}
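[Editor's note] The rxba_sync walker gains two saturating bounds checks and a corrected stride: it previously advanced by sizeof(*tlv_rxba) + tlv_len, double-counting the fixed fields that tlv_len already covers beyond the TLV header. A self-contained sketch of the hardened pattern, with a hypothetical tlv_hdr standing in for the mwifiex type:

    #include <linux/overflow.h>

    struct tlv_hdr {
            __le16 type;
            __le16 len;    /* payload bytes following this header */
    };

    static void walk_tlvs(const u8 *buf, int buf_left)
    {
            const struct tlv_hdr *tlv = (const void *)buf;

            while (buf_left > (int)sizeof(*tlv)) {
                    size_t tlv_len = le16_to_cpu(tlv->len);

                    /* size_add() saturates instead of wrapping, so a huge
                     * len from a hostile frame cannot defeat the check */
                    if (size_add(sizeof(*tlv), tlv_len) > (size_t)buf_left)
                            return;

                    /* ... validate type, consume payload ... */

                    buf_left -= sizeof(*tlv) + tlv_len;
                    tlv = (const void *)((const u8 *)tlv + sizeof(*tlv) + tlv_len);
            }
    }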
diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
index f2168fac95ed..8e6db904e5b2 100644
--- a/drivers/net/wireless/marvell/mwifiex/fw.h
+++ b/drivers/net/wireless/marvell/mwifiex/fw.h
@@ -779,7 +779,7 @@ struct mwifiex_ie_types_rxba_sync {
u8 reserved;
__le16 seq_num;
__le16 bitmap_len;
- u8 bitmap[1];
+ u8 bitmap[];
} __packed;
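[Editor's note] This flex-array conversion is load-bearing for the walker above: with the old one-element array, sizeof(*tlv_rxba) silently included a bitmap byte; with the flexible array it covers exactly the fixed fields, which is what the corrected size_add(sizeof(*tlv_rxba), tlv_bitmap_len) bound relies on.

    /* before: sizeof(struct mwifiex_ie_types_rxba_sync) == fixed fields + 1
     * after:  sizeof(struct mwifiex_ie_types_rxba_sync) == fixed fields only */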
struct chan_band_param_set {
diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
index 65420ad67416..257737137cd7 100644
--- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c
@@ -86,7 +86,8 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length);
rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off;
- if (sizeof(*rx_pkt_hdr) + rx_pkt_off > skb->len) {
+ if (sizeof(rx_pkt_hdr->eth803_hdr) + sizeof(rfc1042_header) +
+ rx_pkt_off > skb->len) {
mwifiex_dbg(priv->adapter, ERROR,
"wrong rx packet offset: len=%d, rx_pkt_off=%d\n",
skb->len, rx_pkt_off);
@@ -95,12 +96,13 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
return -1;
}
- if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
- sizeof(bridge_tunnel_header))) ||
- (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
- sizeof(rfc1042_header)) &&
- ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
- ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX)) {
+ if (sizeof(*rx_pkt_hdr) + rx_pkt_off <= skb->len &&
+ ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
+ sizeof(bridge_tunnel_header))) ||
+ (!memcmp(&rx_pkt_hdr->rfc1042_hdr, rfc1042_header,
+ sizeof(rfc1042_header)) &&
+ ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_AARP &&
+ ntohs(rx_pkt_hdr->rfc1042_hdr.snap_type) != ETH_P_IPX))) {
/*
* Replace the 803 header and rfc1042 header (llc/snap) with an
* EthernetII header, keep the src/dst and snap_type
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 05d9ab3ce819..dc8f4e157eb2 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -93,13 +93,13 @@ __mt76_get_rxwi(struct mt76_dev *dev)
{
struct mt76_txwi_cache *t = NULL;
- spin_lock(&dev->wed_lock);
+ spin_lock_bh(&dev->wed_lock);
if (!list_empty(&dev->rxwi_cache)) {
t = list_first_entry(&dev->rxwi_cache, struct mt76_txwi_cache,
list);
list_del(&t->list);
}
- spin_unlock(&dev->wed_lock);
+ spin_unlock_bh(&dev->wed_lock);
return t;
}
@@ -145,9 +145,9 @@ mt76_put_rxwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
if (!t)
return;
- spin_lock(&dev->wed_lock);
+ spin_lock_bh(&dev->wed_lock);
list_add(&t->list, &dev->rxwi_cache);
- spin_unlock(&dev->wed_lock);
+ spin_unlock_bh(&dev->wed_lock);
}
EXPORT_SYMBOL_GPL(mt76_put_rxwi);
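[Editor's note] The rxwi cache is touched from both process and softirq context, so a plain spin_lock() risks self-deadlock when a softirq fires on the CPU that already holds the lock; the _bh variants disable bottom halves across the critical section. The hazard the change removes, schematically:

    spin_lock(&dev->wed_lock);     /* process context, BHs still enabled;
                                    * a softirq on this CPU that also takes
                                    * wed_lock now spins forever */

    spin_lock_bh(&dev->wed_lock);  /* BHs disabled first: the softirq is
                                    * deferred until we unlock */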
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
index 0acabba2d1a5..5d402cf2951c 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c
@@ -131,15 +131,8 @@ u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev,
s8 *lna_2g, s8 *lna_5g,
struct ieee80211_channel *chan)
{
- u16 val;
u8 lna;
- val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
- if (val & MT_EE_NIC_CONF_1_LNA_EXT_2G)
- *lna_2g = 0;
- if (val & MT_EE_NIC_CONF_1_LNA_EXT_5G)
- memset(lna_5g, 0, sizeof(s8) * 3);
-
if (chan->band == NL80211_BAND_2GHZ)
lna = *lna_2g;
else if (chan->hw_value <= 64)
diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
index d5809408d1d3..8c01855885ce 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
+++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c
@@ -256,7 +256,8 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
struct ieee80211_channel *chan = dev->mphy.chandef.chan;
int channel = chan->hw_value;
s8 lna_5g[3], lna_2g;
- u8 lna;
+ bool use_lna;
+ u8 lna = 0;
u16 val;
if (chan->band == NL80211_BAND_2GHZ)
@@ -275,7 +276,15 @@ void mt76x2_read_rx_gain(struct mt76x02_dev *dev)
dev->cal.rx.mcu_gain |= (lna_5g[1] & 0xff) << 16;
dev->cal.rx.mcu_gain |= (lna_5g[2] & 0xff) << 24;
- lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
+ val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
+ if (chan->band == NL80211_BAND_2GHZ)
+ use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_2G);
+ else
+ use_lna = !(val & MT_EE_NIC_CONF_1_LNA_EXT_5G);
+
+ if (use_lna)
+ lna = mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan);
+
dev->cal.rx.lna_gain = mt76x02_sign_extend(lna, 8);
}
EXPORT_SYMBOL_GPL(mt76x2_read_rx_gain);
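[Editor's note] These two mt76 hunks move the external-LNA decision out of mt76x02_get_lna_gain(): the helper used to zero lna_2g/lna_5g as a side effect when the EEPROM flagged an external LNA, clobbering values the caller still needs (the mcu_gain words above are built from them). Now the caller reads MT_EE_NIC_CONF_1 itself and simply skips the helper, leaving lna at 0. The decision, restated compactly:

    val = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_1);
    use_lna = (chan->band == NL80211_BAND_2GHZ)
                    ? !(val & MT_EE_NIC_CONF_1_LNA_EXT_2G)
                    : !(val & MT_EE_NIC_CONF_1_LNA_EXT_5G);

    lna = use_lna ? mt76x02_get_lna_gain(dev, &lna_2g, lna_5g, chan) : 0;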
diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.h b/drivers/net/wireless/realtek/rtw88/rtw8723d.h
index 3642a2c7f80c..2434e2480cbe 100644
--- a/drivers/net/wireless/realtek/rtw88/rtw8723d.h
+++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.h
@@ -46,6 +46,7 @@ struct rtw8723du_efuse {
u8 vender_id[2]; /* 0x100 */
u8 product_id[2]; /* 0x102 */
u8 usb_option; /* 0x104 */
+ u8 res5[2]; /* 0x105 */
u8 mac_addr[ETH_ALEN]; /* 0x107 */
};
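[Editor's note] The rtw8723du fix is offset arithmetic in an EEPROM overlay struct: the annotations place usb_option at 0x104 and mac_addr at 0x107, but without padding the compiler lays mac_addr out at 0x105. Two reserved bytes make the declared layout match the annotated offsets:

    u8 usb_option;           /* 0x104 */
    u8 res5[2];              /* 0x105..0x106: hole in the EEPROM map */
    u8 mac_addr[ETH_ALEN];   /* 0x107 == 0x104 + 1 + 2 */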