author     Linus Torvalds <torvalds@linux-foundation.org>  2021-07-14 19:24:32 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-07-14 19:24:32 +0300
commit     8096acd7442e613fad0354fc8dfdb2003cceea0b (patch)
tree       ad8b748475fa87fe7c3b6f9cd00da8d7b8d078bd /drivers/net
parent     d1d488d813703618f0dd93f0e4c4a05928114aa8 (diff)
parent     bcb9928a155444dbd212473e60241ca0a7f641e1 (diff)
download   linux-8096acd7442e613fad0354fc8dfdb2003cceea0b.tar.xz
Merge tag 'net-5.14-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from Jakub Kicinski:
 "Including fixes from bpf and netfilter.

  Current release - regressions:

   - sock: fix parameter order in sock_setsockopt()

  Current release - new code bugs:

   - netfilter: nft_last:
       - fix incorrect arithmetic when restoring last used
       - honor NFTA_LAST_SET on restoration

  Previous releases - regressions:

   - udp: properly flush normal packet at GRO time

   - sfc: ensure correct number of XDP queues; don't allow enabling the
     feature if there isn't sufficient resources to Tx from any CPU

   - dsa: sja1105: fix address learning getting disabled on the CPU port

   - mptcp: addresses a rmem accounting issue that could keep packets in
     subflow receive buffers longer than necessary, delaying MPTCP-level
     ACKs

   - ip_tunnel: fix mtu calculation for ETHER tunnel devices

   - do not reuse skbs allocated from skbuff_fclone_cache in the napi
     skb cache, we'd try to return them to the wrong slab cache

   - tcp: consistently disable header prediction for mptcp

  Previous releases - always broken:

   - bpf: fix subprog poke descriptor tracking use-after-free

   - ipv6:
       - allocate enough headroom in ip6_finish_output2() in case
         iptables TEE is used
       - tcp: drop silly ICMPv6 packet too big messages to avoid
         expensive and pointless lookups (which may serve as a DDOS
         vector)
       - make sure fwmark is copied in SYNACK packets
       - fix 'disable_policy' for forwarded packets (align with IPv4)

   - netfilter: conntrack:
       - do not renew entry stuck in tcp SYN_SENT state
       - do not mark RST in the reply direction coming after SYN packet
         for an out-of-sync entry

   - mptcp: cleanly handle error conditions with MP_JOIN and syncookies

   - mptcp: fix double free when rejecting a join due to port mismatch

   - validate lwtstate->data before returning from skb_tunnel_info()

   - tcp: call sk_wmem_schedule before sk_mem_charge in zerocopy path

   - mt76: mt7921: continue to probe driver when fw already downloaded

   - bonding: fix multiple issues with offloading IPsec to (thru?) bond

   - stmmac: ptp: fix issues around Qbv support and setting time back

   - bcmgenet: always clear wake-up based on energy detection

  Misc:

   - sctp: move 198 addresses from unusable to private scope

   - ptp: support virtual clocks and timestamping

   - openvswitch: optimize operation for key comparison"

* tag 'net-5.14-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (158 commits)
  net: dsa: properly check for the bridge_leave methods in dsa_switch_bridge_leave()
  sfc: add logs explaining XDP_TX/REDIRECT is not available
  sfc: ensure correct number of XDP queues
  sfc: fix lack of XDP TX queues - error XDP TX failed (-22)
  net: fddi: fix UAF in fza_probe
  net: dsa: sja1105: fix address learning getting disabled on the CPU port
  net: ocelot: fix switchdev objects synced for wrong netdev with LAG offload
  net: Use nlmsg_unicast() instead of netlink_unicast()
  octeontx2-pf: Fix uninitialized boolean variable pps
  ipv6: allocate enough headroom in ip6_finish_output2()
  net: hdlc: rename 'mod_init' & 'mod_exit' functions to be module-specific
  net: bridge: multicast: fix MRD advertisement router port marking race
  net: bridge: multicast: fix PIM hello router port marking race
  net: phy: marvell10g: fix differentiation of 88X3310 from 88X3340
  dsa: fix for_each_child.cocci warnings
  virtio_net: check virtqueue_add_sgs() return value
  mptcp: properly account bulk freed memory
  selftests: mptcp: fix case multiple subflows limited by server
  mptcp: avoid processing packet if a subflow reset
  mptcp: fix syncookie process if mptcp can not_accept new subflow
  ...
Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/bonding/bond_main.c | 181
-rw-r--r--  drivers/net/caif/Kconfig | 9
-rw-r--r--  drivers/net/caif/Makefile | 3
-rw-r--r--  drivers/net/caif/caif_hsi.c | 1454
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 4
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 22
-rw-r--r--  drivers/net/dsa/mv88e6xxx/serdes.c | 6
-rw-r--r--  drivers/net/dsa/sja1105/sja1105_main.c | 14
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_hw.c | 5
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet.c | 23
-rw-r--r--  drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 18
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c | 3
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c | 19
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx_dqo.c | 7
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 22
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 1
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 1
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 1
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c | 15
-rw-r--r--  drivers/net/ethernet/intel/igc/igc.h | 2
-rw-r--r--  drivers/net/ethernet/intel/igc/igc_main.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ipsec.c | 20
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c | 20
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.c | 292
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/cgx.h | 10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h | 12
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/mbox.h | 58
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.c | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu.h | 7
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c | 111
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c | 200
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c | 88
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h | 10
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/Makefile | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c | 87
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c | 3
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h | 18
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c | 173
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c | 229
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c | 26
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c | 2
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h | 1
-rw-r--r--  drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c | 12
-rw-r--r--  drivers/net/ethernet/microchip/sparx5/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.c | 4
-rw-r--r--  drivers/net/ethernet/mscc/ocelot_net.c | 9
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/conntrack.c | 13
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.c | 3
-rw-r--r--  drivers/net/ethernet/sfc/efx_channels.c | 22
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 9
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 8
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 41
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 56
-rw-r--r--  drivers/net/ethernet/ti/tlan.c | 3
-rw-r--r--  drivers/net/fddi/defza.c | 3
-rw-r--r--  drivers/net/netdevsim/ipsec.c | 8
-rw-r--r--  drivers/net/phy/marvell10g.c | 40
-rw-r--r--  drivers/net/usb/asix_devices.c | 1
-rw-r--r--  drivers/net/virtio_net.c | 8
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_ethtool.c | 22
-rw-r--r--  drivers/net/wan/hdlc_cisco.c | 8
-rw-r--r--  drivers/net/wan/hdlc_fr.c | 8
-rw-r--r--  drivers/net/wan/hdlc_ppp.c | 8
-rw-r--r--  drivers/net/wan/hdlc_raw.c | 8
-rw-r--r--  drivers/net/wan/hdlc_raw_eth.c | 8
-rw-r--r--  drivers/net/wan/hdlc_x25.c | 8
-rw-r--r--  drivers/net/wireless/mediatek/mt76/mt7921/mcu.c | 3
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_imem_ops.c | 21
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_imem_ops.h | 6
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_mux_codec.c | 2
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_uevent.c | 2
-rw-r--r--  drivers/net/wwan/iosm/iosm_ipc_wwan.c | 11
79 files changed, 1749 insertions, 1810 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 0ff7567bd04f..d22d78303311 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -401,24 +401,85 @@ static int bond_vlan_rx_kill_vid(struct net_device *bond_dev,
static int bond_ipsec_add_sa(struct xfrm_state *xs)
{
struct net_device *bond_dev = xs->xso.dev;
+ struct bond_ipsec *ipsec;
struct bonding *bond;
struct slave *slave;
+ int err;
if (!bond_dev)
return -EINVAL;
+ rcu_read_lock();
bond = netdev_priv(bond_dev);
slave = rcu_dereference(bond->curr_active_slave);
- xs->xso.real_dev = slave->dev;
- bond->xs = xs;
+ if (!slave) {
+ rcu_read_unlock();
+ return -ENODEV;
+ }
- if (!(slave->dev->xfrmdev_ops
- && slave->dev->xfrmdev_ops->xdo_dev_state_add)) {
+ if (!slave->dev->xfrmdev_ops ||
+ !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
+ netif_is_bond_master(slave->dev)) {
slave_warn(bond_dev, slave->dev, "Slave does not support ipsec offload\n");
+ rcu_read_unlock();
return -EINVAL;
}
- return slave->dev->xfrmdev_ops->xdo_dev_state_add(xs);
+ ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
+ if (!ipsec) {
+ rcu_read_unlock();
+ return -ENOMEM;
+ }
+ xs->xso.real_dev = slave->dev;
+
+ err = slave->dev->xfrmdev_ops->xdo_dev_state_add(xs);
+ if (!err) {
+ ipsec->xs = xs;
+ INIT_LIST_HEAD(&ipsec->list);
+ spin_lock_bh(&bond->ipsec_lock);
+ list_add(&ipsec->list, &bond->ipsec_list);
+ spin_unlock_bh(&bond->ipsec_lock);
+ } else {
+ kfree(ipsec);
+ }
+ rcu_read_unlock();
+ return err;
+}
+
+static void bond_ipsec_add_sa_all(struct bonding *bond)
+{
+ struct net_device *bond_dev = bond->dev;
+ struct bond_ipsec *ipsec;
+ struct slave *slave;
+
+ rcu_read_lock();
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (!slave)
+ goto out;
+
+ if (!slave->dev->xfrmdev_ops ||
+ !slave->dev->xfrmdev_ops->xdo_dev_state_add ||
+ netif_is_bond_master(slave->dev)) {
+ spin_lock_bh(&bond->ipsec_lock);
+ if (!list_empty(&bond->ipsec_list))
+ slave_warn(bond_dev, slave->dev,
+ "%s: no slave xdo_dev_state_add\n",
+ __func__);
+ spin_unlock_bh(&bond->ipsec_lock);
+ goto out;
+ }
+
+ spin_lock_bh(&bond->ipsec_lock);
+ list_for_each_entry(ipsec, &bond->ipsec_list, list) {
+ ipsec->xs->xso.real_dev = slave->dev;
+ if (slave->dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs)) {
+ slave_warn(bond_dev, slave->dev, "%s: failed to add SA\n", __func__);
+ ipsec->xs->xso.real_dev = NULL;
+ }
+ }
+ spin_unlock_bh(&bond->ipsec_lock);
+out:
+ rcu_read_unlock();
}
/**
@@ -428,27 +489,77 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs)
static void bond_ipsec_del_sa(struct xfrm_state *xs)
{
struct net_device *bond_dev = xs->xso.dev;
+ struct bond_ipsec *ipsec;
struct bonding *bond;
struct slave *slave;
if (!bond_dev)
return;
+ rcu_read_lock();
bond = netdev_priv(bond_dev);
slave = rcu_dereference(bond->curr_active_slave);
if (!slave)
- return;
+ goto out;
- xs->xso.real_dev = slave->dev;
+ if (!xs->xso.real_dev)
+ goto out;
+
+ WARN_ON(xs->xso.real_dev != slave->dev);
- if (!(slave->dev->xfrmdev_ops
- && slave->dev->xfrmdev_ops->xdo_dev_state_delete)) {
+ if (!slave->dev->xfrmdev_ops ||
+ !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
+ netif_is_bond_master(slave->dev)) {
slave_warn(bond_dev, slave->dev, "%s: no slave xdo_dev_state_delete\n", __func__);
- return;
+ goto out;
}
slave->dev->xfrmdev_ops->xdo_dev_state_delete(xs);
+out:
+ spin_lock_bh(&bond->ipsec_lock);
+ list_for_each_entry(ipsec, &bond->ipsec_list, list) {
+ if (ipsec->xs == xs) {
+ list_del(&ipsec->list);
+ kfree(ipsec);
+ break;
+ }
+ }
+ spin_unlock_bh(&bond->ipsec_lock);
+ rcu_read_unlock();
+}
+
+static void bond_ipsec_del_sa_all(struct bonding *bond)
+{
+ struct net_device *bond_dev = bond->dev;
+ struct bond_ipsec *ipsec;
+ struct slave *slave;
+
+ rcu_read_lock();
+ slave = rcu_dereference(bond->curr_active_slave);
+ if (!slave) {
+ rcu_read_unlock();
+ return;
+ }
+
+ spin_lock_bh(&bond->ipsec_lock);
+ list_for_each_entry(ipsec, &bond->ipsec_list, list) {
+ if (!ipsec->xs->xso.real_dev)
+ continue;
+
+ if (!slave->dev->xfrmdev_ops ||
+ !slave->dev->xfrmdev_ops->xdo_dev_state_delete ||
+ netif_is_bond_master(slave->dev)) {
+ slave_warn(bond_dev, slave->dev,
+ "%s: no slave xdo_dev_state_delete\n",
+ __func__);
+ } else {
+ slave->dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs);
+ }
+ ipsec->xs->xso.real_dev = NULL;
+ }
+ spin_unlock_bh(&bond->ipsec_lock);
+ rcu_read_unlock();
}
/**
@@ -459,21 +570,37 @@ static void bond_ipsec_del_sa(struct xfrm_state *xs)
static bool bond_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
struct net_device *bond_dev = xs->xso.dev;
- struct bonding *bond = netdev_priv(bond_dev);
- struct slave *curr_active = rcu_dereference(bond->curr_active_slave);
- struct net_device *slave_dev = curr_active->dev;
+ struct net_device *real_dev;
+ struct slave *curr_active;
+ struct bonding *bond;
+ int err;
- if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)
- return true;
+ bond = netdev_priv(bond_dev);
+ rcu_read_lock();
+ curr_active = rcu_dereference(bond->curr_active_slave);
+ real_dev = curr_active->dev;
- if (!(slave_dev->xfrmdev_ops
- && slave_dev->xfrmdev_ops->xdo_dev_offload_ok)) {
- slave_warn(bond_dev, slave_dev, "%s: no slave xdo_dev_offload_ok\n", __func__);
- return false;
+ if (BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP) {
+ err = false;
+ goto out;
}
- xs->xso.real_dev = slave_dev;
- return slave_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
+ if (!xs->xso.real_dev) {
+ err = false;
+ goto out;
+ }
+
+ if (!real_dev->xfrmdev_ops ||
+ !real_dev->xfrmdev_ops->xdo_dev_offload_ok ||
+ netif_is_bond_master(real_dev)) {
+ err = false;
+ goto out;
+ }
+
+ err = real_dev->xfrmdev_ops->xdo_dev_offload_ok(skb, xs);
+out:
+ rcu_read_unlock();
+ return err;
}
static const struct xfrmdev_ops bond_xfrmdev_ops = {
@@ -990,8 +1117,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
return;
#ifdef CONFIG_XFRM_OFFLOAD
- if (old_active && bond->xs)
- bond_ipsec_del_sa(bond->xs);
+ bond_ipsec_del_sa_all(bond);
#endif /* CONFIG_XFRM_OFFLOAD */
if (new_active) {
@@ -1066,10 +1192,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
}
#ifdef CONFIG_XFRM_OFFLOAD
- if (new_active && bond->xs) {
- xfrm_dev_state_flush(dev_net(bond->dev), bond->dev, true);
- bond_ipsec_add_sa(bond->xs);
- }
+ bond_ipsec_add_sa_all(bond);
#endif /* CONFIG_XFRM_OFFLOAD */
/* resend IGMP joins since active slave has changed or
@@ -3327,6 +3450,7 @@ static int bond_master_netdev_event(unsigned long event,
return bond_event_changename(event_bond);
case NETDEV_UNREGISTER:
bond_remove_proc_entry(event_bond);
+ xfrm_dev_state_flush(dev_net(bond_dev), bond_dev, true);
break;
case NETDEV_REGISTER:
bond_create_proc_entry(event_bond);
@@ -4894,7 +5018,8 @@ void bond_setup(struct net_device *bond_dev)
#ifdef CONFIG_XFRM_OFFLOAD
/* set up xfrm device ops (only supported in active-backup right now) */
bond_dev->xfrmdev_ops = &bond_xfrmdev_ops;
- bond->xs = NULL;
+ INIT_LIST_HEAD(&bond->ipsec_list);
+ spin_lock_init(&bond->ipsec_lock);
#endif /* CONFIG_XFRM_OFFLOAD */
/* don't acquire bond device's netif_tx_lock when transmitting */
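
Note on the bond_main.c hunks above: the single bond->xs pointer is replaced with per-SA tracking, so every offloaded xfrm state can be torn down on the old active slave and re-added on the new one when the active slave changes. The following is only a rough sketch of that add-side pattern, simplified from the hunks above (the real driver additionally validates xfrmdev_ops, rejects bond-master slaves, and handles the error/rollback paths):

	/* Sketch only: record an offloaded SA so it can be replayed later. */
	struct bond_ipsec {
		struct list_head list;
		struct xfrm_state *xs;
	};

	static int bond_track_sa(struct bonding *bond, struct xfrm_state *xs)
	{
		struct bond_ipsec *ipsec;

		ipsec = kmalloc(sizeof(*ipsec), GFP_ATOMIC);
		if (!ipsec)
			return -ENOMEM;

		ipsec->xs = xs;
		INIT_LIST_HEAD(&ipsec->list);
		spin_lock_bh(&bond->ipsec_lock);
		list_add(&ipsec->list, &bond->ipsec_list);
		spin_unlock_bh(&bond->ipsec_lock);
		return 0;
	}

bond_ipsec_add_sa_all()/bond_ipsec_del_sa_all() then walk bond->ipsec_list under the same lock whenever the active slave changes.
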
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
index a77124bc1f4b..709660cb38f8 100644
--- a/drivers/net/caif/Kconfig
+++ b/drivers/net/caif/Kconfig
@@ -20,15 +20,6 @@ config CAIF_TTY
identified as N_CAIF. When this ldisc is opened from user space
it will redirect the TTY's traffic into the CAIF stack.
-config CAIF_HSI
- tristate "CAIF HSI transport driver"
- depends on CAIF
- default n
- help
- The CAIF low level driver for CAIF over HSI.
- Be aware that if you enable this then you also need to
- enable a low-level HSI driver.
-
config CAIF_VIRTIO
tristate "CAIF virtio transport driver"
depends on CAIF && HAS_DMA
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
index b1918c8c126c..97f664f8016c 100644
--- a/drivers/net/caif/Makefile
+++ b/drivers/net/caif/Makefile
@@ -4,8 +4,5 @@ ccflags-$(CONFIG_CAIF_DEBUG) := -DDEBUG
# Serial interface
obj-$(CONFIG_CAIF_TTY) += caif_serial.o
-# HSI interface
-obj-$(CONFIG_CAIF_HSI) += caif_hsi.o
-
# Virtio interface
obj-$(CONFIG_CAIF_VIRTIO) += caif_virtio.o
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
deleted file mode 100644
index 3d63b15bbaa1..000000000000
--- a/drivers/net/caif/caif_hsi.c
+++ /dev/null
@@ -1,1454 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) ST-Ericsson AB 2010
- * Author: Daniel Martensson
- * Dmitry.Tarnyagin / dmitry.tarnyagin@lockless.no
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME fmt
-
-#include <linux/init.h>
-#include <linux/module.h>
-#include <linux/device.h>
-#include <linux/netdevice.h>
-#include <linux/string.h>
-#include <linux/list.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/if_arp.h>
-#include <linux/timer.h>
-#include <net/rtnetlink.h>
-#include <linux/pkt_sched.h>
-#include <net/caif/caif_layer.h>
-#include <net/caif/caif_hsi.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Daniel Martensson");
-MODULE_DESCRIPTION("CAIF HSI driver");
-
-/* Returns the number of padding bytes for alignment. */
-#define PAD_POW2(x, pow) ((((x)&((pow)-1)) == 0) ? 0 :\
- (((pow)-((x)&((pow)-1)))))
-
-static const struct cfhsi_config hsi_default_config = {
-
- /* Inactivity timeout on HSI, ms */
- .inactivity_timeout = HZ,
-
- /* Aggregation timeout (ms) of zero means no aggregation is done*/
- .aggregation_timeout = 1,
-
- /*
- * HSI link layer flow-control thresholds.
- * Threshold values for the HSI packet queue. Flow-control will be
- * asserted when the number of packets exceeds q_high_mark. It will
- * not be de-asserted before the number of packets drops below
- * q_low_mark.
- * Warning: A high threshold value might increase throughput but it
- * will at the same time prevent channel prioritization and increase
- * the risk of flooding the modem. The high threshold should be above
- * the low.
- */
- .q_high_mark = 100,
- .q_low_mark = 50,
-
- /*
- * HSI padding options.
- * Warning: must be a base of 2 (& operation used) and can not be zero !
- */
- .head_align = 4,
- .tail_align = 4,
-};
-
-#define ON 1
-#define OFF 0
-
-static LIST_HEAD(cfhsi_list);
-
-static void cfhsi_inactivity_tout(struct timer_list *t)
-{
- struct cfhsi *cfhsi = from_timer(cfhsi, t, inactivity_timer);
-
- netdev_dbg(cfhsi->ndev, "%s.\n",
- __func__);
-
- /* Schedule power down work queue. */
- if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- queue_work(cfhsi->wq, &cfhsi->wake_down_work);
-}
-
-static void cfhsi_update_aggregation_stats(struct cfhsi *cfhsi,
- const struct sk_buff *skb,
- int direction)
-{
- struct caif_payload_info *info;
- int hpad, tpad, len;
-
- info = (struct caif_payload_info *)&skb->cb;
- hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
- tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
- len = skb->len + hpad + tpad;
-
- if (direction > 0)
- cfhsi->aggregation_len += len;
- else if (direction < 0)
- cfhsi->aggregation_len -= len;
-}
-
-static bool cfhsi_can_send_aggregate(struct cfhsi *cfhsi)
-{
- int i;
-
- if (cfhsi->cfg.aggregation_timeout == 0)
- return true;
-
- for (i = 0; i < CFHSI_PRIO_BEBK; ++i) {
- if (cfhsi->qhead[i].qlen)
- return true;
- }
-
- /* TODO: Use aggregation_len instead */
- if (cfhsi->qhead[CFHSI_PRIO_BEBK].qlen >= CFHSI_MAX_PKTS)
- return true;
-
- return false;
-}
-
-static struct sk_buff *cfhsi_dequeue(struct cfhsi *cfhsi)
-{
- struct sk_buff *skb;
- int i;
-
- for (i = 0; i < CFHSI_PRIO_LAST; ++i) {
- skb = skb_dequeue(&cfhsi->qhead[i]);
- if (skb)
- break;
- }
-
- return skb;
-}
-
-static int cfhsi_tx_queue_len(struct cfhsi *cfhsi)
-{
- int i, len = 0;
- for (i = 0; i < CFHSI_PRIO_LAST; ++i)
- len += skb_queue_len(&cfhsi->qhead[i]);
- return len;
-}
-
-static void cfhsi_abort_tx(struct cfhsi *cfhsi)
-{
- struct sk_buff *skb;
-
- for (;;) {
- spin_lock_bh(&cfhsi->lock);
- skb = cfhsi_dequeue(cfhsi);
- if (!skb)
- break;
-
- cfhsi->ndev->stats.tx_errors++;
- cfhsi->ndev->stats.tx_dropped++;
- cfhsi_update_aggregation_stats(cfhsi, skb, -1);
- spin_unlock_bh(&cfhsi->lock);
- kfree_skb(skb);
- }
- cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
- if (!test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- mod_timer(&cfhsi->inactivity_timer,
- jiffies + cfhsi->cfg.inactivity_timeout);
- spin_unlock_bh(&cfhsi->lock);
-}
-
-static int cfhsi_flush_fifo(struct cfhsi *cfhsi)
-{
- char buffer[32]; /* Any reasonable value */
- size_t fifo_occupancy;
- int ret;
-
- netdev_dbg(cfhsi->ndev, "%s.\n",
- __func__);
-
- do {
- ret = cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
- &fifo_occupancy);
- if (ret) {
- netdev_warn(cfhsi->ndev,
- "%s: can't get FIFO occupancy: %d.\n",
- __func__, ret);
- break;
- } else if (!fifo_occupancy)
- /* No more data, exitting normally */
- break;
-
- fifo_occupancy = min(sizeof(buffer), fifo_occupancy);
- set_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
- ret = cfhsi->ops->cfhsi_rx(buffer, fifo_occupancy,
- cfhsi->ops);
- if (ret) {
- clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits);
- netdev_warn(cfhsi->ndev,
- "%s: can't read data: %d.\n",
- __func__, ret);
- break;
- }
-
- ret = 5 * HZ;
- ret = wait_event_interruptible_timeout(cfhsi->flush_fifo_wait,
- !test_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits), ret);
-
- if (ret < 0) {
- netdev_warn(cfhsi->ndev,
- "%s: can't wait for flush complete: %d.\n",
- __func__, ret);
- break;
- } else if (!ret) {
- ret = -ETIMEDOUT;
- netdev_warn(cfhsi->ndev,
- "%s: timeout waiting for flush complete.\n",
- __func__);
- break;
- }
- } while (1);
-
- return ret;
-}
-
-static int cfhsi_tx_frm(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
-{
- int nfrms = 0;
- int pld_len = 0;
- struct sk_buff *skb;
- u8 *pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
-
- skb = cfhsi_dequeue(cfhsi);
- if (!skb)
- return 0;
-
- /* Clear offset. */
- desc->offset = 0;
-
- /* Check if we can embed a CAIF frame. */
- if (skb->len < CFHSI_MAX_EMB_FRM_SZ) {
- struct caif_payload_info *info;
- int hpad;
- int tpad;
-
- /* Calculate needed head alignment and tail alignment. */
- info = (struct caif_payload_info *)&skb->cb;
-
- hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
- tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
-
- /* Check if frame still fits with added alignment. */
- if ((skb->len + hpad + tpad) <= CFHSI_MAX_EMB_FRM_SZ) {
- u8 *pemb = desc->emb_frm;
- desc->offset = CFHSI_DESC_SHORT_SZ;
- *pemb = (u8)(hpad - 1);
- pemb += hpad;
-
- /* Update network statistics. */
- spin_lock_bh(&cfhsi->lock);
- cfhsi->ndev->stats.tx_packets++;
- cfhsi->ndev->stats.tx_bytes += skb->len;
- cfhsi_update_aggregation_stats(cfhsi, skb, -1);
- spin_unlock_bh(&cfhsi->lock);
-
- /* Copy in embedded CAIF frame. */
- skb_copy_bits(skb, 0, pemb, skb->len);
-
- /* Consume the SKB */
- consume_skb(skb);
- skb = NULL;
- }
- }
-
- /* Create payload CAIF frames. */
- while (nfrms < CFHSI_MAX_PKTS) {
- struct caif_payload_info *info;
- int hpad;
- int tpad;
-
- if (!skb)
- skb = cfhsi_dequeue(cfhsi);
-
- if (!skb)
- break;
-
- /* Calculate needed head alignment and tail alignment. */
- info = (struct caif_payload_info *)&skb->cb;
-
- hpad = 1 + PAD_POW2((info->hdr_len + 1), cfhsi->cfg.head_align);
- tpad = PAD_POW2((skb->len + hpad), cfhsi->cfg.tail_align);
-
- /* Fill in CAIF frame length in descriptor. */
- desc->cffrm_len[nfrms] = hpad + skb->len + tpad;
-
- /* Fill head padding information. */
- *pfrm = (u8)(hpad - 1);
- pfrm += hpad;
-
- /* Update network statistics. */
- spin_lock_bh(&cfhsi->lock);
- cfhsi->ndev->stats.tx_packets++;
- cfhsi->ndev->stats.tx_bytes += skb->len;
- cfhsi_update_aggregation_stats(cfhsi, skb, -1);
- spin_unlock_bh(&cfhsi->lock);
-
- /* Copy in CAIF frame. */
- skb_copy_bits(skb, 0, pfrm, skb->len);
-
- /* Update payload length. */
- pld_len += desc->cffrm_len[nfrms];
-
- /* Update frame pointer. */
- pfrm += skb->len + tpad;
-
- /* Consume the SKB */
- consume_skb(skb);
- skb = NULL;
-
- /* Update number of frames. */
- nfrms++;
- }
-
- /* Unused length fields should be zero-filled (according to SPEC). */
- while (nfrms < CFHSI_MAX_PKTS) {
- desc->cffrm_len[nfrms] = 0x0000;
- nfrms++;
- }
-
- /* Check if we can piggy-back another descriptor. */
- if (cfhsi_can_send_aggregate(cfhsi))
- desc->header |= CFHSI_PIGGY_DESC;
- else
- desc->header &= ~CFHSI_PIGGY_DESC;
-
- return CFHSI_DESC_SZ + pld_len;
-}
-
-static void cfhsi_start_tx(struct cfhsi *cfhsi)
-{
- struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
- int len, res;
-
- netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
-
- if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- return;
-
- do {
- /* Create HSI frame. */
- len = cfhsi_tx_frm(desc, cfhsi);
- if (!len) {
- spin_lock_bh(&cfhsi->lock);
- if (unlikely(cfhsi_tx_queue_len(cfhsi))) {
- spin_unlock_bh(&cfhsi->lock);
- res = -EAGAIN;
- continue;
- }
- cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
- /* Start inactivity timer. */
- mod_timer(&cfhsi->inactivity_timer,
- jiffies + cfhsi->cfg.inactivity_timeout);
- spin_unlock_bh(&cfhsi->lock);
- break;
- }
-
- /* Set up new transfer. */
- res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
- if (WARN_ON(res < 0))
- netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
- __func__, res);
- } while (res < 0);
-}
-
-static void cfhsi_tx_done(struct cfhsi *cfhsi)
-{
- netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
-
- if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- return;
-
- /*
- * Send flow on if flow off has been previously signalled
- * and number of packets is below low water mark.
- */
- spin_lock_bh(&cfhsi->lock);
- if (cfhsi->flow_off_sent &&
- cfhsi_tx_queue_len(cfhsi) <= cfhsi->cfg.q_low_mark &&
- cfhsi->cfdev.flowctrl) {
-
- cfhsi->flow_off_sent = 0;
- cfhsi->cfdev.flowctrl(cfhsi->ndev, ON);
- }
-
- if (cfhsi_can_send_aggregate(cfhsi)) {
- spin_unlock_bh(&cfhsi->lock);
- cfhsi_start_tx(cfhsi);
- } else {
- mod_timer(&cfhsi->aggregation_timer,
- jiffies + cfhsi->cfg.aggregation_timeout);
- spin_unlock_bh(&cfhsi->lock);
- }
-
- return;
-}
-
-static void cfhsi_tx_done_cb(struct cfhsi_cb_ops *cb_ops)
-{
- struct cfhsi *cfhsi;
-
- cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
- netdev_dbg(cfhsi->ndev, "%s.\n",
- __func__);
-
- if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- return;
- cfhsi_tx_done(cfhsi);
-}
-
-static int cfhsi_rx_desc(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
-{
- int xfer_sz = 0;
- int nfrms = 0;
- u16 *plen = NULL;
- u8 *pfrm = NULL;
-
- if ((desc->header & ~CFHSI_PIGGY_DESC) ||
- (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
- netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
- __func__);
- return -EPROTO;
- }
-
- /* Check for embedded CAIF frame. */
- if (desc->offset) {
- struct sk_buff *skb;
- int len = 0;
- pfrm = ((u8 *)desc) + desc->offset;
-
- /* Remove offset padding. */
- pfrm += *pfrm + 1;
-
- /* Read length of CAIF frame (little endian). */
- len = *pfrm;
- len |= ((*(pfrm+1)) << 8) & 0xFF00;
- len += 2; /* Add FCS fields. */
-
- /* Sanity check length of CAIF frame. */
- if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
- netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
- __func__);
- return -EPROTO;
- }
-
- /* Allocate SKB (OK even in IRQ context). */
- skb = alloc_skb(len + 1, GFP_ATOMIC);
- if (!skb) {
- netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
- __func__);
- return -ENOMEM;
- }
- caif_assert(skb != NULL);
-
- skb_put_data(skb, pfrm, len);
-
- skb->protocol = htons(ETH_P_CAIF);
- skb_reset_mac_header(skb);
- skb->dev = cfhsi->ndev;
-
- netif_rx_any_context(skb);
-
- /* Update network statistics. */
- cfhsi->ndev->stats.rx_packets++;
- cfhsi->ndev->stats.rx_bytes += len;
- }
-
- /* Calculate transfer length. */
- plen = desc->cffrm_len;
- while (nfrms < CFHSI_MAX_PKTS && *plen) {
- xfer_sz += *plen;
- plen++;
- nfrms++;
- }
-
- /* Check for piggy-backed descriptor. */
- if (desc->header & CFHSI_PIGGY_DESC)
- xfer_sz += CFHSI_DESC_SZ;
-
- if ((xfer_sz % 4) || (xfer_sz > (CFHSI_BUF_SZ_RX - CFHSI_DESC_SZ))) {
- netdev_err(cfhsi->ndev,
- "%s: Invalid payload len: %d, ignored.\n",
- __func__, xfer_sz);
- return -EPROTO;
- }
- return xfer_sz;
-}
-
-static int cfhsi_rx_desc_len(struct cfhsi_desc *desc)
-{
- int xfer_sz = 0;
- int nfrms = 0;
- u16 *plen;
-
- if ((desc->header & ~CFHSI_PIGGY_DESC) ||
- (desc->offset > CFHSI_MAX_EMB_FRM_SZ)) {
-
- pr_err("Invalid descriptor. %x %x\n", desc->header,
- desc->offset);
- return -EPROTO;
- }
-
- /* Calculate transfer length. */
- plen = desc->cffrm_len;
- while (nfrms < CFHSI_MAX_PKTS && *plen) {
- xfer_sz += *plen;
- plen++;
- nfrms++;
- }
-
- if (xfer_sz % 4) {
- pr_err("Invalid payload len: %d, ignored.\n", xfer_sz);
- return -EPROTO;
- }
- return xfer_sz;
-}
-
-static int cfhsi_rx_pld(struct cfhsi_desc *desc, struct cfhsi *cfhsi)
-{
- int rx_sz = 0;
- int nfrms = 0;
- u16 *plen = NULL;
- u8 *pfrm = NULL;
-
- /* Sanity check header and offset. */
- if (WARN_ON((desc->header & ~CFHSI_PIGGY_DESC) ||
- (desc->offset > CFHSI_MAX_EMB_FRM_SZ))) {
- netdev_err(cfhsi->ndev, "%s: Invalid descriptor.\n",
- __func__);
- return -EPROTO;
- }
-
- /* Set frame pointer to start of payload. */
- pfrm = desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ;
- plen = desc->cffrm_len;
-
- /* Skip already processed frames. */
- while (nfrms < cfhsi->rx_state.nfrms) {
- pfrm += *plen;
- rx_sz += *plen;
- plen++;
- nfrms++;
- }
-
- /* Parse payload. */
- while (nfrms < CFHSI_MAX_PKTS && *plen) {
- struct sk_buff *skb;
- u8 *pcffrm = NULL;
- int len;
-
- /* CAIF frame starts after head padding. */
- pcffrm = pfrm + *pfrm + 1;
-
- /* Read length of CAIF frame (little endian). */
- len = *pcffrm;
- len |= ((*(pcffrm + 1)) << 8) & 0xFF00;
- len += 2; /* Add FCS fields. */
-
- /* Sanity check length of CAIF frames. */
- if (unlikely(len > CFHSI_MAX_CAIF_FRAME_SZ)) {
- netdev_err(cfhsi->ndev, "%s: Invalid length.\n",
- __func__);
- return -EPROTO;
- }
-
- /* Allocate SKB (OK even in IRQ context). */
- skb = alloc_skb(len + 1, GFP_ATOMIC);
- if (!skb) {
- netdev_err(cfhsi->ndev, "%s: Out of memory !\n",
- __func__);
- cfhsi->rx_state.nfrms = nfrms;
- return -ENOMEM;
- }
- caif_assert(skb != NULL);
-
- skb_put_data(skb, pcffrm, len);
-
- skb->protocol = htons(ETH_P_CAIF);
- skb_reset_mac_header(skb);
- skb->dev = cfhsi->ndev;
-
- netif_rx_any_context(skb);
-
- /* Update network statistics. */
- cfhsi->ndev->stats.rx_packets++;
- cfhsi->ndev->stats.rx_bytes += len;
-
- pfrm += *plen;
- rx_sz += *plen;
- plen++;
- nfrms++;
- }
-
- return rx_sz;
-}
-
-static void cfhsi_rx_done(struct cfhsi *cfhsi)
-{
- int res;
- int desc_pld_len = 0, rx_len, rx_state;
- struct cfhsi_desc *desc = NULL;
- u8 *rx_ptr, *rx_buf;
- struct cfhsi_desc *piggy_desc = NULL;
-
- desc = (struct cfhsi_desc *)cfhsi->rx_buf;
-
- netdev_dbg(cfhsi->ndev, "%s\n", __func__);
-
- if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- return;
-
- /* Update inactivity timer if pending. */
- spin_lock_bh(&cfhsi->lock);
- mod_timer_pending(&cfhsi->inactivity_timer,
- jiffies + cfhsi->cfg.inactivity_timeout);
- spin_unlock_bh(&cfhsi->lock);
-
- if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
- desc_pld_len = cfhsi_rx_desc_len(desc);
-
- if (desc_pld_len < 0)
- goto out_of_sync;
-
- rx_buf = cfhsi->rx_buf;
- rx_len = desc_pld_len;
- if (desc_pld_len > 0 && (desc->header & CFHSI_PIGGY_DESC))
- rx_len += CFHSI_DESC_SZ;
- if (desc_pld_len == 0)
- rx_buf = cfhsi->rx_flip_buf;
- } else {
- rx_buf = cfhsi->rx_flip_buf;
-
- rx_len = CFHSI_DESC_SZ;
- if (cfhsi->rx_state.pld_len > 0 &&
- (desc->header & CFHSI_PIGGY_DESC)) {
-
- piggy_desc = (struct cfhsi_desc *)
- (desc->emb_frm + CFHSI_MAX_EMB_FRM_SZ +
- cfhsi->rx_state.pld_len);
-
- cfhsi->rx_state.piggy_desc = true;
-
- /* Extract payload len from piggy-backed descriptor. */
- desc_pld_len = cfhsi_rx_desc_len(piggy_desc);
- if (desc_pld_len < 0)
- goto out_of_sync;
-
- if (desc_pld_len > 0) {
- rx_len = desc_pld_len;
- if (piggy_desc->header & CFHSI_PIGGY_DESC)
- rx_len += CFHSI_DESC_SZ;
- }
-
- /*
- * Copy needed information from the piggy-backed
- * descriptor to the descriptor in the start.
- */
- memcpy(rx_buf, (u8 *)piggy_desc,
- CFHSI_DESC_SHORT_SZ);
- }
- }
-
- if (desc_pld_len) {
- rx_state = CFHSI_RX_STATE_PAYLOAD;
- rx_ptr = rx_buf + CFHSI_DESC_SZ;
- } else {
- rx_state = CFHSI_RX_STATE_DESC;
- rx_ptr = rx_buf;
- rx_len = CFHSI_DESC_SZ;
- }
-
- /* Initiate next read */
- if (test_bit(CFHSI_AWAKE, &cfhsi->bits)) {
- /* Set up new transfer. */
- netdev_dbg(cfhsi->ndev, "%s: Start RX.\n",
- __func__);
-
- res = cfhsi->ops->cfhsi_rx(rx_ptr, rx_len,
- cfhsi->ops);
- if (WARN_ON(res < 0)) {
- netdev_err(cfhsi->ndev, "%s: RX error %d.\n",
- __func__, res);
- cfhsi->ndev->stats.rx_errors++;
- cfhsi->ndev->stats.rx_dropped++;
- }
- }
-
- if (cfhsi->rx_state.state == CFHSI_RX_STATE_DESC) {
- /* Extract payload from descriptor */
- if (cfhsi_rx_desc(desc, cfhsi) < 0)
- goto out_of_sync;
- } else {
- /* Extract payload */
- if (cfhsi_rx_pld(desc, cfhsi) < 0)
- goto out_of_sync;
- if (piggy_desc) {
- /* Extract any payload in piggyback descriptor. */
- if (cfhsi_rx_desc(piggy_desc, cfhsi) < 0)
- goto out_of_sync;
- /* Mark no embedded frame after extracting it */
- piggy_desc->offset = 0;
- }
- }
-
- /* Update state info */
- memset(&cfhsi->rx_state, 0, sizeof(cfhsi->rx_state));
- cfhsi->rx_state.state = rx_state;
- cfhsi->rx_ptr = rx_ptr;
- cfhsi->rx_len = rx_len;
- cfhsi->rx_state.pld_len = desc_pld_len;
- cfhsi->rx_state.piggy_desc = desc->header & CFHSI_PIGGY_DESC;
-
- if (rx_buf != cfhsi->rx_buf)
- swap(cfhsi->rx_buf, cfhsi->rx_flip_buf);
- return;
-
-out_of_sync:
- netdev_err(cfhsi->ndev, "%s: Out of sync.\n", __func__);
- print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE,
- cfhsi->rx_buf, CFHSI_DESC_SZ);
- schedule_work(&cfhsi->out_of_sync_work);
-}
-
-static void cfhsi_rx_slowpath(struct timer_list *t)
-{
- struct cfhsi *cfhsi = from_timer(cfhsi, t, rx_slowpath_timer);
-
- netdev_dbg(cfhsi->ndev, "%s.\n",
- __func__);
-
- cfhsi_rx_done(cfhsi);
-}
-
-static void cfhsi_rx_done_cb(struct cfhsi_cb_ops *cb_ops)
-{
- struct cfhsi *cfhsi;
-
- cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
- netdev_dbg(cfhsi->ndev, "%s.\n",
- __func__);
-
- if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- return;
-
- if (test_and_clear_bit(CFHSI_FLUSH_FIFO, &cfhsi->bits))
- wake_up_interruptible(&cfhsi->flush_fifo_wait);
- else
- cfhsi_rx_done(cfhsi);
-}
-
-static void cfhsi_wake_up(struct work_struct *work)
-{
- struct cfhsi *cfhsi = NULL;
- int res;
- int len;
- long ret;
-
- cfhsi = container_of(work, struct cfhsi, wake_up_work);
-
- if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- return;
-
- if (unlikely(test_bit(CFHSI_AWAKE, &cfhsi->bits))) {
- /* It happenes when wakeup is requested by
- * both ends at the same time. */
- clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
- clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
- return;
- }
-
- /* Activate wake line. */
- cfhsi->ops->cfhsi_wake_up(cfhsi->ops);
-
- netdev_dbg(cfhsi->ndev, "%s: Start waiting.\n",
- __func__);
-
- /* Wait for acknowledge. */
- ret = CFHSI_WAKE_TOUT;
- ret = wait_event_interruptible_timeout(cfhsi->wake_up_wait,
- test_and_clear_bit(CFHSI_WAKE_UP_ACK,
- &cfhsi->bits), ret);
- if (unlikely(ret < 0)) {
- /* Interrupted by signal. */
- netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
- __func__, ret);
-
- clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
- cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
- return;
- } else if (!ret) {
- bool ca_wake = false;
- size_t fifo_occupancy = 0;
-
- /* Wakeup timeout */
- netdev_dbg(cfhsi->ndev, "%s: Timeout.\n",
- __func__);
-
- /* Check FIFO to check if modem has sent something. */
- WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
- &fifo_occupancy));
-
- netdev_dbg(cfhsi->ndev, "%s: Bytes in FIFO: %u.\n",
- __func__, (unsigned) fifo_occupancy);
-
- /* Check if we misssed the interrupt. */
- WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
- &ca_wake));
-
- if (ca_wake) {
- netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
- __func__);
-
- /* Clear the CFHSI_WAKE_UP_ACK bit to prevent race. */
- clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
-
- /* Continue execution. */
- goto wake_ack;
- }
-
- clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
- cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
- return;
- }
-wake_ack:
- netdev_dbg(cfhsi->ndev, "%s: Woken.\n",
- __func__);
-
- /* Clear power up bit. */
- set_bit(CFHSI_AWAKE, &cfhsi->bits);
- clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
-
- /* Resume read operation. */
- netdev_dbg(cfhsi->ndev, "%s: Start RX.\n", __func__);
- res = cfhsi->ops->cfhsi_rx(cfhsi->rx_ptr, cfhsi->rx_len, cfhsi->ops);
-
- if (WARN_ON(res < 0))
- netdev_err(cfhsi->ndev, "%s: RX err %d.\n", __func__, res);
-
- /* Clear power up acknowledment. */
- clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
-
- spin_lock_bh(&cfhsi->lock);
-
- /* Resume transmit if queues are not empty. */
- if (!cfhsi_tx_queue_len(cfhsi)) {
- netdev_dbg(cfhsi->ndev, "%s: Peer wake, start timer.\n",
- __func__);
- /* Start inactivity timer. */
- mod_timer(&cfhsi->inactivity_timer,
- jiffies + cfhsi->cfg.inactivity_timeout);
- spin_unlock_bh(&cfhsi->lock);
- return;
- }
-
- netdev_dbg(cfhsi->ndev, "%s: Host wake.\n",
- __func__);
-
- spin_unlock_bh(&cfhsi->lock);
-
- /* Create HSI frame. */
- len = cfhsi_tx_frm((struct cfhsi_desc *)cfhsi->tx_buf, cfhsi);
-
- if (likely(len > 0)) {
- /* Set up new transfer. */
- res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
- if (WARN_ON(res < 0)) {
- netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
- __func__, res);
- cfhsi_abort_tx(cfhsi);
- }
- } else {
- netdev_err(cfhsi->ndev,
- "%s: Failed to create HSI frame: %d.\n",
- __func__, len);
- }
-}
-
-static void cfhsi_wake_down(struct work_struct *work)
-{
- long ret;
- struct cfhsi *cfhsi = NULL;
- size_t fifo_occupancy = 0;
- int retry = CFHSI_WAKE_TOUT;
-
- cfhsi = container_of(work, struct cfhsi, wake_down_work);
- netdev_dbg(cfhsi->ndev, "%s.\n", __func__);
-
- if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- return;
-
- /* Deactivate wake line. */
- cfhsi->ops->cfhsi_wake_down(cfhsi->ops);
-
- /* Wait for acknowledge. */
- ret = CFHSI_WAKE_TOUT;
- ret = wait_event_interruptible_timeout(cfhsi->wake_down_wait,
- test_and_clear_bit(CFHSI_WAKE_DOWN_ACK,
- &cfhsi->bits), ret);
- if (ret < 0) {
- /* Interrupted by signal. */
- netdev_err(cfhsi->ndev, "%s: Signalled: %ld.\n",
- __func__, ret);
- return;
- } else if (!ret) {
- bool ca_wake = true;
-
- /* Timeout */
- netdev_err(cfhsi->ndev, "%s: Timeout.\n", __func__);
-
- /* Check if we misssed the interrupt. */
- WARN_ON(cfhsi->ops->cfhsi_get_peer_wake(cfhsi->ops,
- &ca_wake));
- if (!ca_wake)
- netdev_err(cfhsi->ndev, "%s: CA Wake missed !.\n",
- __func__);
- }
-
- /* Check FIFO occupancy. */
- while (retry) {
- WARN_ON(cfhsi->ops->cfhsi_fifo_occupancy(cfhsi->ops,
- &fifo_occupancy));
-
- if (!fifo_occupancy)
- break;
-
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1);
- retry--;
- }
-
- if (!retry)
- netdev_err(cfhsi->ndev, "%s: FIFO Timeout.\n", __func__);
-
- /* Clear AWAKE condition. */
- clear_bit(CFHSI_AWAKE, &cfhsi->bits);
-
- /* Cancel pending RX requests. */
- cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
-}
-
-static void cfhsi_out_of_sync(struct work_struct *work)
-{
- struct cfhsi *cfhsi = NULL;
-
- cfhsi = container_of(work, struct cfhsi, out_of_sync_work);
-
- rtnl_lock();
- dev_close(cfhsi->ndev);
- rtnl_unlock();
-}
-
-static void cfhsi_wake_up_cb(struct cfhsi_cb_ops *cb_ops)
-{
- struct cfhsi *cfhsi = NULL;
-
- cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
- netdev_dbg(cfhsi->ndev, "%s.\n",
- __func__);
-
- set_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
- wake_up_interruptible(&cfhsi->wake_up_wait);
-
- if (test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))
- return;
-
- /* Schedule wake up work queue if the peer initiates. */
- if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
- queue_work(cfhsi->wq, &cfhsi->wake_up_work);
-}
-
-static void cfhsi_wake_down_cb(struct cfhsi_cb_ops *cb_ops)
-{
- struct cfhsi *cfhsi = NULL;
-
- cfhsi = container_of(cb_ops, struct cfhsi, cb_ops);
- netdev_dbg(cfhsi->ndev, "%s.\n",
- __func__);
-
- /* Initiating low power is only permitted by the host (us). */
- set_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
- wake_up_interruptible(&cfhsi->wake_down_wait);
-}
-
-static void cfhsi_aggregation_tout(struct timer_list *t)
-{
- struct cfhsi *cfhsi = from_timer(cfhsi, t, aggregation_timer);
-
- netdev_dbg(cfhsi->ndev, "%s.\n",
- __func__);
-
- cfhsi_start_tx(cfhsi);
-}
-
-static netdev_tx_t cfhsi_xmit(struct sk_buff *skb, struct net_device *dev)
-{
- struct cfhsi *cfhsi = NULL;
- int start_xfer = 0;
- int timer_active;
- int prio;
-
- if (!dev)
- return -EINVAL;
-
- cfhsi = netdev_priv(dev);
-
- switch (skb->priority) {
- case TC_PRIO_BESTEFFORT:
- case TC_PRIO_FILLER:
- case TC_PRIO_BULK:
- prio = CFHSI_PRIO_BEBK;
- break;
- case TC_PRIO_INTERACTIVE_BULK:
- prio = CFHSI_PRIO_VI;
- break;
- case TC_PRIO_INTERACTIVE:
- prio = CFHSI_PRIO_VO;
- break;
- case TC_PRIO_CONTROL:
- default:
- prio = CFHSI_PRIO_CTL;
- break;
- }
-
- spin_lock_bh(&cfhsi->lock);
-
- /* Update aggregation statistics */
- cfhsi_update_aggregation_stats(cfhsi, skb, 1);
-
- /* Queue the SKB */
- skb_queue_tail(&cfhsi->qhead[prio], skb);
-
- /* Sanity check; xmit should not be called after unregister_netdev */
- if (WARN_ON(test_bit(CFHSI_SHUTDOWN, &cfhsi->bits))) {
- spin_unlock_bh(&cfhsi->lock);
- cfhsi_abort_tx(cfhsi);
- return -EINVAL;
- }
-
- /* Send flow off if number of packets is above high water mark. */
- if (!cfhsi->flow_off_sent &&
- cfhsi_tx_queue_len(cfhsi) > cfhsi->cfg.q_high_mark &&
- cfhsi->cfdev.flowctrl) {
- cfhsi->flow_off_sent = 1;
- cfhsi->cfdev.flowctrl(cfhsi->ndev, OFF);
- }
-
- if (cfhsi->tx_state == CFHSI_TX_STATE_IDLE) {
- cfhsi->tx_state = CFHSI_TX_STATE_XFER;
- start_xfer = 1;
- }
-
- if (!start_xfer) {
- /* Send aggregate if it is possible */
- bool aggregate_ready =
- cfhsi_can_send_aggregate(cfhsi) &&
- del_timer(&cfhsi->aggregation_timer) > 0;
- spin_unlock_bh(&cfhsi->lock);
- if (aggregate_ready)
- cfhsi_start_tx(cfhsi);
- return NETDEV_TX_OK;
- }
-
- /* Delete inactivity timer if started. */
- timer_active = del_timer_sync(&cfhsi->inactivity_timer);
-
- spin_unlock_bh(&cfhsi->lock);
-
- if (timer_active) {
- struct cfhsi_desc *desc = (struct cfhsi_desc *)cfhsi->tx_buf;
- int len;
- int res;
-
- /* Create HSI frame. */
- len = cfhsi_tx_frm(desc, cfhsi);
- WARN_ON(!len);
-
- /* Set up new transfer. */
- res = cfhsi->ops->cfhsi_tx(cfhsi->tx_buf, len, cfhsi->ops);
- if (WARN_ON(res < 0)) {
- netdev_err(cfhsi->ndev, "%s: TX error %d.\n",
- __func__, res);
- cfhsi_abort_tx(cfhsi);
- }
- } else {
- /* Schedule wake up work queue if the we initiate. */
- if (!test_and_set_bit(CFHSI_WAKE_UP, &cfhsi->bits))
- queue_work(cfhsi->wq, &cfhsi->wake_up_work);
- }
-
- return NETDEV_TX_OK;
-}
-
-static const struct net_device_ops cfhsi_netdevops;
-
-static void cfhsi_setup(struct net_device *dev)
-{
- int i;
- struct cfhsi *cfhsi = netdev_priv(dev);
- dev->features = 0;
- dev->type = ARPHRD_CAIF;
- dev->flags = IFF_POINTOPOINT | IFF_NOARP;
- dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
- dev->priv_flags |= IFF_NO_QUEUE;
- dev->needs_free_netdev = true;
- dev->netdev_ops = &cfhsi_netdevops;
- for (i = 0; i < CFHSI_PRIO_LAST; ++i)
- skb_queue_head_init(&cfhsi->qhead[i]);
- cfhsi->cfdev.link_select = CAIF_LINK_HIGH_BANDW;
- cfhsi->cfdev.use_frag = false;
- cfhsi->cfdev.use_stx = false;
- cfhsi->cfdev.use_fcs = false;
- cfhsi->ndev = dev;
- cfhsi->cfg = hsi_default_config;
-}
-
-static int cfhsi_open(struct net_device *ndev)
-{
- struct cfhsi *cfhsi = netdev_priv(ndev);
- int res;
-
- clear_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
-
- /* Initialize state vaiables. */
- cfhsi->tx_state = CFHSI_TX_STATE_IDLE;
- cfhsi->rx_state.state = CFHSI_RX_STATE_DESC;
-
- /* Set flow info */
- cfhsi->flow_off_sent = 0;
-
- /*
- * Allocate a TX buffer with the size of a HSI packet descriptors
- * and the necessary room for CAIF payload frames.
- */
- cfhsi->tx_buf = kzalloc(CFHSI_BUF_SZ_TX, GFP_KERNEL);
- if (!cfhsi->tx_buf) {
- res = -ENODEV;
- goto err_alloc_tx;
- }
-
- /*
- * Allocate a RX buffer with the size of two HSI packet descriptors and
- * the necessary room for CAIF payload frames.
- */
- cfhsi->rx_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
- if (!cfhsi->rx_buf) {
- res = -ENODEV;
- goto err_alloc_rx;
- }
-
- cfhsi->rx_flip_buf = kzalloc(CFHSI_BUF_SZ_RX, GFP_KERNEL);
- if (!cfhsi->rx_flip_buf) {
- res = -ENODEV;
- goto err_alloc_rx_flip;
- }
-
- /* Initialize aggregation timeout */
- cfhsi->cfg.aggregation_timeout = hsi_default_config.aggregation_timeout;
-
- /* Initialize recieve vaiables. */
- cfhsi->rx_ptr = cfhsi->rx_buf;
- cfhsi->rx_len = CFHSI_DESC_SZ;
-
- /* Initialize spin locks. */
- spin_lock_init(&cfhsi->lock);
-
- /* Set up the driver. */
- cfhsi->cb_ops.tx_done_cb = cfhsi_tx_done_cb;
- cfhsi->cb_ops.rx_done_cb = cfhsi_rx_done_cb;
- cfhsi->cb_ops.wake_up_cb = cfhsi_wake_up_cb;
- cfhsi->cb_ops.wake_down_cb = cfhsi_wake_down_cb;
-
- /* Initialize the work queues. */
- INIT_WORK(&cfhsi->wake_up_work, cfhsi_wake_up);
- INIT_WORK(&cfhsi->wake_down_work, cfhsi_wake_down);
- INIT_WORK(&cfhsi->out_of_sync_work, cfhsi_out_of_sync);
-
- /* Clear all bit fields. */
- clear_bit(CFHSI_WAKE_UP_ACK, &cfhsi->bits);
- clear_bit(CFHSI_WAKE_DOWN_ACK, &cfhsi->bits);
- clear_bit(CFHSI_WAKE_UP, &cfhsi->bits);
- clear_bit(CFHSI_AWAKE, &cfhsi->bits);
-
- /* Create work thread. */
- cfhsi->wq = alloc_ordered_workqueue(cfhsi->ndev->name, WQ_MEM_RECLAIM);
- if (!cfhsi->wq) {
- netdev_err(cfhsi->ndev, "%s: Failed to create work queue.\n",
- __func__);
- res = -ENODEV;
- goto err_create_wq;
- }
-
- /* Initialize wait queues. */
- init_waitqueue_head(&cfhsi->wake_up_wait);
- init_waitqueue_head(&cfhsi->wake_down_wait);
- init_waitqueue_head(&cfhsi->flush_fifo_wait);
-
- /* Setup the inactivity timer. */
- timer_setup(&cfhsi->inactivity_timer, cfhsi_inactivity_tout, 0);
- /* Setup the slowpath RX timer. */
- timer_setup(&cfhsi->rx_slowpath_timer, cfhsi_rx_slowpath, 0);
- /* Setup the aggregation timer. */
- timer_setup(&cfhsi->aggregation_timer, cfhsi_aggregation_tout, 0);
-
- /* Activate HSI interface. */
- res = cfhsi->ops->cfhsi_up(cfhsi->ops);
- if (res) {
- netdev_err(cfhsi->ndev,
- "%s: can't activate HSI interface: %d.\n",
- __func__, res);
- goto err_activate;
- }
-
- /* Flush FIFO */
- res = cfhsi_flush_fifo(cfhsi);
- if (res) {
- netdev_err(cfhsi->ndev, "%s: Can't flush FIFO: %d.\n",
- __func__, res);
- goto err_net_reg;
- }
- return res;
-
- err_net_reg:
- cfhsi->ops->cfhsi_down(cfhsi->ops);
- err_activate:
- destroy_workqueue(cfhsi->wq);
- err_create_wq:
- kfree(cfhsi->rx_flip_buf);
- err_alloc_rx_flip:
- kfree(cfhsi->rx_buf);
- err_alloc_rx:
- kfree(cfhsi->tx_buf);
- err_alloc_tx:
- return res;
-}
-
-static int cfhsi_close(struct net_device *ndev)
-{
- struct cfhsi *cfhsi = netdev_priv(ndev);
- u8 *tx_buf, *rx_buf, *flip_buf;
-
- /* going to shutdown driver */
- set_bit(CFHSI_SHUTDOWN, &cfhsi->bits);
-
- /* Delete timers if pending */
- del_timer_sync(&cfhsi->inactivity_timer);
- del_timer_sync(&cfhsi->rx_slowpath_timer);
- del_timer_sync(&cfhsi->aggregation_timer);
-
- /* Cancel pending RX request (if any) */
- cfhsi->ops->cfhsi_rx_cancel(cfhsi->ops);
-
- /* Destroy workqueue */
- destroy_workqueue(cfhsi->wq);
-
- /* Store bufferes: will be freed later. */
- tx_buf = cfhsi->tx_buf;
- rx_buf = cfhsi->rx_buf;
- flip_buf = cfhsi->rx_flip_buf;
- /* Flush transmit queues. */
- cfhsi_abort_tx(cfhsi);
-
- /* Deactivate interface */
- cfhsi->ops->cfhsi_down(cfhsi->ops);
-
- /* Free buffers. */
- kfree(tx_buf);
- kfree(rx_buf);
- kfree(flip_buf);
- return 0;
-}
-
-static void cfhsi_uninit(struct net_device *dev)
-{
- struct cfhsi *cfhsi = netdev_priv(dev);
- ASSERT_RTNL();
- symbol_put(cfhsi_get_device);
- list_del(&cfhsi->list);
-}
-
-static const struct net_device_ops cfhsi_netdevops = {
- .ndo_uninit = cfhsi_uninit,
- .ndo_open = cfhsi_open,
- .ndo_stop = cfhsi_close,
- .ndo_start_xmit = cfhsi_xmit
-};
-
-static void cfhsi_netlink_parms(struct nlattr *data[], struct cfhsi *cfhsi)
-{
- int i;
-
- if (!data) {
- pr_debug("no params data found\n");
- return;
- }
-
- i = __IFLA_CAIF_HSI_INACTIVITY_TOUT;
- /*
- * Inactivity timeout in millisecs. Lowest possible value is 1,
- * and highest possible is NEXT_TIMER_MAX_DELTA.
- */
- if (data[i]) {
- u32 inactivity_timeout = nla_get_u32(data[i]);
- /* Pre-calculate inactivity timeout. */
- cfhsi->cfg.inactivity_timeout = inactivity_timeout * HZ / 1000;
- if (cfhsi->cfg.inactivity_timeout == 0)
- cfhsi->cfg.inactivity_timeout = 1;
- else if (cfhsi->cfg.inactivity_timeout > NEXT_TIMER_MAX_DELTA)
- cfhsi->cfg.inactivity_timeout = NEXT_TIMER_MAX_DELTA;
- }
-
- i = __IFLA_CAIF_HSI_AGGREGATION_TOUT;
- if (data[i])
- cfhsi->cfg.aggregation_timeout = nla_get_u32(data[i]);
-
- i = __IFLA_CAIF_HSI_HEAD_ALIGN;
- if (data[i])
- cfhsi->cfg.head_align = nla_get_u32(data[i]);
-
- i = __IFLA_CAIF_HSI_TAIL_ALIGN;
- if (data[i])
- cfhsi->cfg.tail_align = nla_get_u32(data[i]);
-
- i = __IFLA_CAIF_HSI_QHIGH_WATERMARK;
- if (data[i])
- cfhsi->cfg.q_high_mark = nla_get_u32(data[i]);
-
- i = __IFLA_CAIF_HSI_QLOW_WATERMARK;
- if (data[i])
- cfhsi->cfg.q_low_mark = nla_get_u32(data[i]);
-}
-
-static int caif_hsi_changelink(struct net_device *dev, struct nlattr *tb[],
- struct nlattr *data[],
- struct netlink_ext_ack *extack)
-{
- cfhsi_netlink_parms(data, netdev_priv(dev));
- netdev_state_change(dev);
- return 0;
-}
-
-static const struct nla_policy caif_hsi_policy[__IFLA_CAIF_HSI_MAX + 1] = {
- [__IFLA_CAIF_HSI_INACTIVITY_TOUT] = { .type = NLA_U32, .len = 4 },
- [__IFLA_CAIF_HSI_AGGREGATION_TOUT] = { .type = NLA_U32, .len = 4 },
- [__IFLA_CAIF_HSI_HEAD_ALIGN] = { .type = NLA_U32, .len = 4 },
- [__IFLA_CAIF_HSI_TAIL_ALIGN] = { .type = NLA_U32, .len = 4 },
- [__IFLA_CAIF_HSI_QHIGH_WATERMARK] = { .type = NLA_U32, .len = 4 },
- [__IFLA_CAIF_HSI_QLOW_WATERMARK] = { .type = NLA_U32, .len = 4 },
-};
-
-static size_t caif_hsi_get_size(const struct net_device *dev)
-{
- int i;
- size_t s = 0;
- for (i = __IFLA_CAIF_HSI_UNSPEC + 1; i < __IFLA_CAIF_HSI_MAX; i++)
- s += nla_total_size(caif_hsi_policy[i].len);
- return s;
-}
-
-static int caif_hsi_fill_info(struct sk_buff *skb, const struct net_device *dev)
-{
- struct cfhsi *cfhsi = netdev_priv(dev);
-
- if (nla_put_u32(skb, __IFLA_CAIF_HSI_INACTIVITY_TOUT,
- cfhsi->cfg.inactivity_timeout) ||
- nla_put_u32(skb, __IFLA_CAIF_HSI_AGGREGATION_TOUT,
- cfhsi->cfg.aggregation_timeout) ||
- nla_put_u32(skb, __IFLA_CAIF_HSI_HEAD_ALIGN,
- cfhsi->cfg.head_align) ||
- nla_put_u32(skb, __IFLA_CAIF_HSI_TAIL_ALIGN,
- cfhsi->cfg.tail_align) ||
- nla_put_u32(skb, __IFLA_CAIF_HSI_QHIGH_WATERMARK,
- cfhsi->cfg.q_high_mark) ||
- nla_put_u32(skb, __IFLA_CAIF_HSI_QLOW_WATERMARK,
- cfhsi->cfg.q_low_mark))
- return -EMSGSIZE;
-
- return 0;
-}
-
-static int caif_hsi_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[],
- struct netlink_ext_ack *extack)
-{
- struct cfhsi *cfhsi = NULL;
- struct cfhsi_ops *(*get_ops)(void);
-
- ASSERT_RTNL();
-
- cfhsi = netdev_priv(dev);
- cfhsi_netlink_parms(data, cfhsi);
-
- get_ops = symbol_get(cfhsi_get_ops);
- if (!get_ops) {
- pr_err("%s: failed to get the cfhsi_ops\n", __func__);
- return -ENODEV;
- }
-
- /* Assign the HSI device. */
- cfhsi->ops = (*get_ops)();
- if (!cfhsi->ops) {
- pr_err("%s: failed to get the cfhsi_ops\n", __func__);
- goto err;
- }
-
- /* Assign the driver to this HSI device. */
- cfhsi->ops->cb_ops = &cfhsi->cb_ops;
- if (register_netdevice(dev)) {
- pr_warn("%s: caif_hsi device registration failed\n", __func__);
- goto err;
- }
- /* Add CAIF HSI device to list. */
- list_add_tail(&cfhsi->list, &cfhsi_list);
-
- return 0;
-err:
- symbol_put(cfhsi_get_ops);
- return -ENODEV;
-}
-
-static struct rtnl_link_ops caif_hsi_link_ops __read_mostly = {
- .kind = "cfhsi",
- .priv_size = sizeof(struct cfhsi),
- .setup = cfhsi_setup,
- .maxtype = __IFLA_CAIF_HSI_MAX,
- .policy = caif_hsi_policy,
- .newlink = caif_hsi_newlink,
- .changelink = caif_hsi_changelink,
- .get_size = caif_hsi_get_size,
- .fill_info = caif_hsi_fill_info,
-};
-
-static void __exit cfhsi_exit_module(void)
-{
- struct list_head *list_node;
- struct list_head *n;
- struct cfhsi *cfhsi;
-
- rtnl_link_unregister(&caif_hsi_link_ops);
-
- rtnl_lock();
- list_for_each_safe(list_node, n, &cfhsi_list) {
- cfhsi = list_entry(list_node, struct cfhsi, list);
- unregister_netdevice(cfhsi->ndev);
- }
- rtnl_unlock();
-}
-
-static int __init cfhsi_init_module(void)
-{
- return rtnl_link_register(&caif_hsi_link_ops);
-}
-
-module_init(cfhsi_init_module);
-module_exit(cfhsi_exit_module);
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index a7e5ac60baef..1542bfb8b5e5 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -419,8 +419,10 @@ int ksz_switch_register(struct ksz_device *dev,
if (of_property_read_u32(port, "reg",
&port_num))
continue;
- if (!(dev->port_mask & BIT(port_num)))
+ if (!(dev->port_mask & BIT(port_num))) {
+ of_node_put(port);
return -EINVAL;
+ }
of_get_phy_mode(port,
&dev->ports[port_num].interface);
}
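
The ksz_common.c hunk above adds an of_node_put() before the early return because the code iterates device-tree child nodes, and the iterator holds a reference on each child; returning without dropping it would leak the node reference. A simplified, illustrative sketch of that pattern (the for_each_available_child_of_node() loop is implied by the surrounding context, not shown in the hunk):

	/* Sketch only: release the child-node reference on every early exit. */
	for_each_available_child_of_node(ports, port) {
		if (of_property_read_u32(port, "reg", &port_num))
			continue;
		if (!(dev->port_mask & BIT(port_num))) {
			of_node_put(port);	/* iterator took a reference on 'port' */
			return -EINVAL;
		}
		of_get_phy_mode(port, &dev->ports[port_num].interface);
	}
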
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 961fa6b75cad..beb41572d04e 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -3583,6 +3583,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.port_set_speed_duplex = mv88e6341_port_set_speed_duplex,
.port_max_speed_mode = mv88e6341_port_max_speed_mode,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -3596,7 +3597,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.port_set_cmode = mv88e6341_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
- .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
@@ -3606,6 +3607,9 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -3619,6 +3623,11 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.serdes_irq_enable = mv88e6390_serdes_irq_enable,
.serdes_irq_status = mv88e6390_serdes_irq_status,
.gpio_ops = &mv88e6352_gpio_ops,
+ .serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
+ .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6390_serdes_get_regs,
.phylink_validate = mv88e6341_phylink_validate,
};
@@ -4383,6 +4392,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.port_set_speed_duplex = mv88e6341_port_set_speed_duplex,
.port_max_speed_mode = mv88e6341_port_max_speed_mode,
.port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_policy = mv88e6352_port_set_policy,
.port_set_frame_mode = mv88e6351_port_set_frame_mode,
.port_set_ucast_flood = mv88e6352_port_set_ucast_flood,
.port_set_mcast_flood = mv88e6352_port_set_mcast_flood,
@@ -4396,7 +4406,7 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.port_set_cmode = mv88e6341_port_set_cmode,
.port_setup_message_port = mv88e6xxx_setup_message_port,
.stats_snapshot = mv88e6390_g1_stats_snapshot,
- .stats_set_histogram = mv88e6095_g1_stats_set_histogram,
+ .stats_set_histogram = mv88e6390_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6320_stats_get_sset_count,
.stats_get_strings = mv88e6320_stats_get_strings,
.stats_get_stats = mv88e6390_stats_get_stats,
@@ -4406,6 +4416,9 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
.pot_clear = mv88e6xxx_g2_pot_clear,
.reset = mv88e6352_g1_reset,
+ .rmu_disable = mv88e6390_g1_rmu_disable,
+ .atu_get_hash = mv88e6165_g1_atu_get_hash,
+ .atu_set_hash = mv88e6165_g1_atu_set_hash,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
@@ -4421,6 +4434,11 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.gpio_ops = &mv88e6352_gpio_ops,
.avb_ops = &mv88e6390_avb_ops,
.ptp_ops = &mv88e6352_ptp_ops,
+ .serdes_get_sset_count = mv88e6390_serdes_get_sset_count,
+ .serdes_get_strings = mv88e6390_serdes_get_strings,
+ .serdes_get_stats = mv88e6390_serdes_get_stats,
+ .serdes_get_regs_len = mv88e6390_serdes_get_regs_len,
+ .serdes_get_regs = mv88e6390_serdes_get_regs,
.phylink_validate = mv88e6341_phylink_validate,
};
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index e4fbef81bc52..b1d46dd8eaab 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -722,7 +722,7 @@ static struct mv88e6390_serdes_hw_stat mv88e6390_serdes_hw_stats[] = {
int mv88e6390_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port)
{
- if (mv88e6390_serdes_get_lane(chip, port) < 0)
+ if (mv88e6xxx_serdes_get_lane(chip, port) < 0)
return 0;
return ARRAY_SIZE(mv88e6390_serdes_hw_stats);
@@ -734,7 +734,7 @@ int mv88e6390_serdes_get_strings(struct mv88e6xxx_chip *chip,
struct mv88e6390_serdes_hw_stat *stat;
int i;
- if (mv88e6390_serdes_get_lane(chip, port) < 0)
+ if (mv88e6xxx_serdes_get_lane(chip, port) < 0)
return 0;
for (i = 0; i < ARRAY_SIZE(mv88e6390_serdes_hw_stats); i++) {
@@ -770,7 +770,7 @@ int mv88e6390_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
int lane;
int i;
- lane = mv88e6390_serdes_get_lane(chip, port);
+ lane = mv88e6xxx_serdes_get_lane(chip, port);
if (lane < 0)
return 0;
diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c
index 4f0545605f6b..ced8c9cb29c2 100644
--- a/drivers/net/dsa/sja1105/sja1105_main.c
+++ b/drivers/net/dsa/sja1105/sja1105_main.c
@@ -122,14 +122,12 @@ static int sja1105_init_mac_settings(struct sja1105_private *priv)
for (i = 0; i < ds->num_ports; i++) {
mac[i] = default_mac;
- if (i == dsa_upstream_port(priv->ds, i)) {
- /* STP doesn't get called for CPU port, so we need to
- * set the I/O parameters statically.
- */
- mac[i].dyn_learn = true;
- mac[i].ingress = true;
- mac[i].egress = true;
- }
+
+ /* Let sja1105_bridge_stp_state_set() keep address learning
+ * enabled for the CPU port.
+ */
+ if (dsa_is_cpu_port(ds, i))
+ priv->learn_ena |= BIT(i);
}
return 0;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
index 7dff20350865..f19370c33444 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
@@ -594,6 +594,11 @@ int atl1c_phy_init(struct atl1c_hw *hw)
int ret_val;
u16 mii_bmcr_data = BMCR_RESET;
+ if (hw->nic_type == athr_mt) {
+ hw->phy_configured = true;
+ return 0;
+ }
+
if ((atl1c_read_phy_reg(hw, MII_PHYSID1, &hw->phy_id1) != 0) ||
(atl1c_read_phy_reg(hw, MII_PHYSID2, &hw->phy_id2) != 0)) {
dev_err(&pdev->dev, "Error get phy ID\n");
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 41f7f078cd27..db74241935ab 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1640,7 +1640,8 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
switch (mode) {
case GENET_POWER_PASSIVE:
- reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+ reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS |
+ EXT_ENERGY_DET_MASK);
if (GENET_IS_V5(priv)) {
reg &= ~(EXT_PWR_DOWN_PHY_EN |
EXT_PWR_DOWN_PHY_RD |
@@ -3237,15 +3238,21 @@ static void bcmgenet_get_hw_addr(struct bcmgenet_priv *priv,
/* Returns a reusable dma control register value */
static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
{
+ unsigned int i;
u32 reg;
u32 dma_ctrl;
/* disable DMA */
dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+ for (i = 0; i < priv->hw_params->tx_queues; i++)
+ dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
reg &= ~dma_ctrl;
bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+ dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+ for (i = 0; i < priv->hw_params->rx_queues; i++)
+ dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
reg &= ~dma_ctrl;
bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
@@ -3292,7 +3299,6 @@ static int bcmgenet_open(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
unsigned long dma_ctrl;
- u32 reg;
int ret;
netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
@@ -3318,12 +3324,6 @@ static int bcmgenet_open(struct net_device *dev)
bcmgenet_set_hw_addr(priv, dev->dev_addr);
- if (priv->internal_phy) {
- reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- reg |= EXT_ENERGY_DET_MASK;
- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
- }
-
/* Disable RX/TX DMA and flush TX queues */
dma_ctrl = bcmgenet_dma_disable(priv);
@@ -4139,7 +4139,6 @@ static int bcmgenet_resume(struct device *d)
struct bcmgenet_priv *priv = netdev_priv(dev);
struct bcmgenet_rxnfc_rule *rule;
unsigned long dma_ctrl;
- u32 reg;
int ret;
if (!netif_running(dev))
@@ -4176,12 +4175,6 @@ static int bcmgenet_resume(struct device *d)
if (rule->state != BCMGENET_RXNFC_STATE_UNUSED)
bcmgenet_hfb_create_rxnfc_filter(priv, rule);
- if (priv->internal_phy) {
- reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- reg |= EXT_ENERGY_DET_MASK;
- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
- }
-
/* Disable RX/TX DMA and flush TX queues */
dma_ctrl = bcmgenet_dma_disable(priv);
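The two added loops above widen the disable mask beyond the default descriptor ring to every TX/RX priority queue before clearing it from DMA_CTRL. A minimal standalone sketch of that mask construction, not the GENET driver code itself (the constant values mirror the hunk's usage but are assumptions here):

/* Build a control mask covering the default descriptor ring plus every
 * priority queue, which is what the fixed bcmgenet_dma_disable() now does
 * for both the TDMA and RDMA directions.
 */
#include <stdint.h>
#include <stdio.h>

#define DMA_EN                  (1u << 0)
#define DMA_RING_BUF_EN_SHIFT   1
#define DESC_INDEX              16      /* default ring, assumed value */

static uint32_t ring_disable_mask(unsigned int num_queues)
{
	uint32_t mask = (1u << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT)) | DMA_EN;
	unsigned int i;

	for (i = 0; i < num_queues; i++)
		mask |= 1u << (i + DMA_RING_BUF_EN_SHIFT);
	return mask;
}

int main(void)
{
	/* e.g. 4 priority queues -> bits for rings 0..3 plus the default ring */
	printf("0x%08x\n", ring_disable_mask(4));
	return 0;
}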
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
index facde824bcaa..e31a5a397f11 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
@@ -186,12 +186,6 @@ int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
reg |= CMD_RX_EN;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
- if (priv->hw_params->flags & GENET_HAS_EXT) {
- reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
- reg &= ~EXT_ENERGY_DET_MASK;
- bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
- }
-
reg = UMAC_IRQ_MPD_R;
if (hfb_enable)
reg |= UMAC_IRQ_HFB_SM | UMAC_IRQ_HFB_MM;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 9a2b166d651e..dbf9a0e6601d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2643,6 +2643,9 @@ static void detach_ulds(struct adapter *adap)
{
unsigned int i;
+ if (!is_uld(adap))
+ return;
+
mutex_lock(&uld_mutex);
list_del(&adap->list_node);
@@ -7141,10 +7144,13 @@ static void remove_one(struct pci_dev *pdev)
*/
destroy_workqueue(adapter->workq);
- if (is_uld(adapter)) {
- detach_ulds(adapter);
- t4_uld_clean_up(adapter);
- }
+ detach_ulds(adapter);
+
+ for_each_port(adapter, i)
+ if (adapter->port[i]->reg_state == NETREG_REGISTERED)
+ unregister_netdev(adapter->port[i]);
+
+ t4_uld_clean_up(adapter);
adap_free_hma_mem(adapter);
@@ -7152,10 +7158,6 @@ static void remove_one(struct pci_dev *pdev)
cxgb4_free_mps_ref_entries(adapter);
- for_each_port(adapter, i)
- if (adapter->port[i]->reg_state == NETREG_REGISTERED)
- unregister_netdev(adapter->port[i]);
-
debugfs_remove_recursive(adapter->debugfs_root);
if (!is_t4(adapter->params.chip))
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
index 743af9e654aa..17faac715882 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c
@@ -581,6 +581,9 @@ void t4_uld_clean_up(struct adapter *adap)
{
unsigned int i;
+ if (!is_uld(adap))
+ return;
+
mutex_lock(&uld_mutex);
for (i = 0; i < CXGB4_ULD_MAX; i++) {
if (!adap->uld[i].handle)
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 867e87af3432..099a2bc5ae67 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -1469,7 +1469,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = pci_enable_device(pdev);
if (err)
- return -ENXIO;
+ return err;
err = pci_request_regions(pdev, "gvnic-cfg");
if (err)
@@ -1477,19 +1477,12 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
if (err) {
dev_err(&pdev->dev, "Failed to set dma mask: err=%d\n", err);
goto abort_with_pci_region;
}
- err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
- if (err) {
- dev_err(&pdev->dev,
- "Failed to set consistent dma mask: err=%d\n", err);
- goto abort_with_pci_region;
- }
-
reg_bar = pci_iomap(pdev, GVE_REGISTER_BAR, 0);
if (!reg_bar) {
dev_err(&pdev->dev, "Failed to map pci bar!\n");
@@ -1512,6 +1505,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev = alloc_etherdev_mqs(sizeof(*priv), max_tx_queues, max_rx_queues);
if (!dev) {
dev_err(&pdev->dev, "could not allocate netdev\n");
+ err = -ENOMEM;
goto abort_with_db_bar;
}
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -1565,7 +1559,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = register_netdev(dev);
if (err)
- goto abort_with_wq;
+ goto abort_with_gve_init;
dev_info(&pdev->dev, "GVE version %s\n", gve_version_str);
dev_info(&pdev->dev, "GVE queue format %d\n", (int)priv->queue_format);
@@ -1573,6 +1567,9 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
queue_work(priv->gve_wq, &priv->service_task);
return 0;
+abort_with_gve_init:
+ gve_teardown_priv_resources(priv);
+
abort_with_wq:
destroy_workqueue(priv->gve_wq);
@@ -1590,7 +1587,7 @@ abort_with_pci_region:
abort_with_enabled:
pci_disable_device(pdev);
- return -ENXIO;
+ return err;
}
static void gve_remove(struct pci_dev *pdev)
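The probe path above now unwinds through a dedicated abort_with_gve_init label when register_netdev() fails and returns the real error code instead of a blanket -ENXIO. A minimal sketch of that goto-unwind pattern, with hypothetical step/undo helpers rather than the gve functions:

/* Each failure jumps to the label that undoes exactly the work completed
 * so far, and the original error code is propagated to the caller.
 */
#include <errno.h>

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -ENOMEM; }	/* pretend the last step fails */
static void undo_step_b(void) { }
static void undo_step_a(void) { }

static int probe_like(void)
{
	int err;

	err = step_a();
	if (err)
		return err;

	err = step_b();
	if (err)
		goto abort_a;

	err = step_c();
	if (err)
		goto abort_b;	/* unwind b, then a */

	return 0;

abort_b:
	undo_step_b();
abort_a:
	undo_step_a();
	return err;		/* propagate the real error code */
}

int main(void)
{
	return probe_like() == -ENOMEM ? 0 : 1;
}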
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 77bb8227f89b..8500621b2cd4 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -566,13 +566,6 @@ static int gve_rx_dqo(struct napi_struct *napi, struct gve_rx_ring *rx,
return 0;
}
- /* Prefetch the payload header. */
- prefetch((char *)buf_state->addr + buf_state->page_info.page_offset);
-#if L1_CACHE_BYTES < 128
- prefetch((char *)buf_state->addr + buf_state->page_info.page_offset +
- L1_CACHE_BYTES);
-#endif
-
if (eop && buf_len <= priv->rx_copybreak) {
rx->skb_head = gve_rx_copy(priv->dev, napi,
&buf_state->page_info, buf_len, 0);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 374a75d4faea..ed77191d19f4 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -2420,9 +2420,10 @@ out:
static void __ibmvnic_reset(struct work_struct *work)
{
- struct ibmvnic_rwi *rwi;
struct ibmvnic_adapter *adapter;
bool saved_state = false;
+ struct ibmvnic_rwi *tmprwi;
+ struct ibmvnic_rwi *rwi;
unsigned long flags;
u32 reset_state;
int rc = 0;
@@ -2489,7 +2490,7 @@ static void __ibmvnic_reset(struct work_struct *work)
} else {
rc = do_reset(adapter, rwi, reset_state);
}
- kfree(rwi);
+ tmprwi = rwi;
adapter->last_reset_time = jiffies;
if (rc)
@@ -2497,8 +2498,23 @@ static void __ibmvnic_reset(struct work_struct *work)
rwi = get_next_rwi(adapter);
+ /*
+ * If there is another reset queued, free the previous rwi
+ * and process the new reset even if previous reset failed
+ * (the previous reset could have failed because of a fail
+ * over for instance, so process the fail over).
+ *
+ * If there are no resets queued and the previous reset failed,
+ * the adapter would be in an undefined state. So retry the
+ * previous reset as a hard reset.
+ */
+ if (rwi)
+ kfree(tmprwi);
+ else if (rc)
+ rwi = tmprwi;
+
if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER ||
- rwi->reset_reason == VNIC_RESET_MOBILITY))
+ rwi->reset_reason == VNIC_RESET_MOBILITY || rc))
adapter->force_reset_recovery = true;
}
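The comment block in this hunk carries the rationale: free the just-processed work item only when another reset is already queued, otherwise keep it so a failed reset can be retried as a hard reset. A standalone sketch of that decision, simplified to the rc check (the failover/mobility reasons are omitted) and not the ibmvnic code itself:

#include <stdlib.h>
#include <stdbool.h>

struct rwi { int reset_reason; };

static struct rwi *pick_next_rwi(struct rwi *done, struct rwi *queued, int rc,
				 bool *force_hard_reset)
{
	struct rwi *next = queued;

	if (next)
		free(done);	/* newer reset queued: drop the finished item */
	else if (rc)
		next = done;	/* nothing queued and we failed: retry this one */

	if (next && rc)
		*force_hard_reset = true;

	return next;
}

int main(void)
{
	bool force = false;
	struct rwi *done = malloc(sizeof(*done));

	/* reset failed (rc != 0) and nothing else queued: retry the same item */
	struct rwi *next = pick_next_rwi(done, NULL, -1, &force);

	free(next);
	return force ? 0 : 1;
}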
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index d150dade06cf..757a54c39eef 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -7664,6 +7664,7 @@ err_flashmap:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index dbcae92bb18d..adfa2768f024 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -2227,6 +2227,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_netdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index e612c24fa384..44bafedd09f2 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -3798,6 +3798,7 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_regions(pdev);
err_pci_reg:
err_dma:
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 7e6435dc7e80..171a7a629b20 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -931,6 +931,7 @@ static void igb_configure_msix(struct igb_adapter *adapter)
**/
static int igb_request_msix(struct igb_adapter *adapter)
{
+ unsigned int num_q_vectors = adapter->num_q_vectors;
struct net_device *netdev = adapter->netdev;
int i, err = 0, vector = 0, free_vector = 0;
@@ -939,7 +940,13 @@ static int igb_request_msix(struct igb_adapter *adapter)
if (err)
goto err_out;
- for (i = 0; i < adapter->num_q_vectors; i++) {
+ if (num_q_vectors > MAX_Q_VECTORS) {
+ num_q_vectors = MAX_Q_VECTORS;
+ dev_warn(&adapter->pdev->dev,
+ "The number of queue vectors (%d) is higher than max allowed (%d)\n",
+ adapter->num_q_vectors, MAX_Q_VECTORS);
+ }
+ for (i = 0; i < num_q_vectors; i++) {
struct igb_q_vector *q_vector = adapter->q_vector[i];
vector++;
@@ -1678,14 +1685,15 @@ static bool is_any_txtime_enabled(struct igb_adapter *adapter)
**/
static void igb_config_tx_modes(struct igb_adapter *adapter, int queue)
{
- struct igb_ring *ring = adapter->tx_ring[queue];
struct net_device *netdev = adapter->netdev;
struct e1000_hw *hw = &adapter->hw;
+ struct igb_ring *ring;
u32 tqavcc, tqavctrl;
u16 value;
WARN_ON(hw->mac.type != e1000_i210);
WARN_ON(queue < 0 || queue > 1);
+ ring = adapter->tx_ring[queue];
/* If any of the Qav features is enabled, configure queues as SR and
* with HIGH PRIO. If none is, then configure them with LOW PRIO and
@@ -3615,6 +3623,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
@@ -4835,6 +4844,8 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
DMA_TO_DEVICE);
}
+ tx_buffer->next_to_watch = NULL;
+
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
i++;
diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
index 9e0bbb2e55e3..5901ed9fb545 100644
--- a/drivers/net/ethernet/intel/igc/igc.h
+++ b/drivers/net/ethernet/intel/igc/igc.h
@@ -578,7 +578,7 @@ static inline s32 igc_read_phy_reg(struct igc_hw *hw, u32 offset, u16 *data)
if (hw->phy.ops.read_reg)
return hw->phy.ops.read_reg(hw, offset, data);
- return 0;
+ return -EOPNOTSUPP;
}
void igc_reinit_locked(struct igc_adapter *);
diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
index 95323095094d..e29aadbc6744 100644
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
@@ -232,6 +232,8 @@ static void igc_clean_tx_ring(struct igc_ring *tx_ring)
igc_unmap_tx_buffer(tx_ring->dev, tx_buffer);
}
+ tx_buffer->next_to_watch = NULL;
+
/* move us one more past the eop_desc for start of next pkt */
tx_buffer++;
i++;
@@ -6054,6 +6056,7 @@ err_sw_init:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index ffff69efd78a..913253f8ecb4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -11067,6 +11067,7 @@ err_ioremap:
disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
free_netdev(netdev);
err_alloc_etherdev:
+ pci_disable_pcie_error_reporting(pdev);
pci_release_mem_regions(pdev);
err_pci_reg:
err_dma:
diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
index caaea2c920a6..e3e4676af9e4 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c
@@ -211,7 +211,7 @@ struct xfrm_state *ixgbevf_ipsec_find_rx_state(struct ixgbevf_ipsec *ipsec,
static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
u32 *mykey, u32 *mysalt)
{
- struct net_device *dev = xs->xso.dev;
+ struct net_device *dev = xs->xso.real_dev;
unsigned char *key_data;
char *alg_name = NULL;
int key_len;
@@ -260,12 +260,15 @@ static int ixgbevf_ipsec_parse_proto_keys(struct xfrm_state *xs,
**/
static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
{
- struct net_device *dev = xs->xso.dev;
- struct ixgbevf_adapter *adapter = netdev_priv(dev);
- struct ixgbevf_ipsec *ipsec = adapter->ipsec;
+ struct net_device *dev = xs->xso.real_dev;
+ struct ixgbevf_adapter *adapter;
+ struct ixgbevf_ipsec *ipsec;
u16 sa_idx;
int ret;
+ adapter = netdev_priv(dev);
+ ipsec = adapter->ipsec;
+
if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
netdev_err(dev, "Unsupported protocol 0x%04x for IPsec offload\n",
xs->id.proto);
@@ -383,11 +386,14 @@ static int ixgbevf_ipsec_add_sa(struct xfrm_state *xs)
**/
static void ixgbevf_ipsec_del_sa(struct xfrm_state *xs)
{
- struct net_device *dev = xs->xso.dev;
- struct ixgbevf_adapter *adapter = netdev_priv(dev);
- struct ixgbevf_ipsec *ipsec = adapter->ipsec;
+ struct net_device *dev = xs->xso.real_dev;
+ struct ixgbevf_adapter *adapter;
+ struct ixgbevf_ipsec *ipsec;
u16 sa_idx;
+ adapter = netdev_priv(dev);
+ ipsec = adapter->ipsec;
+
if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 361bc4fbe20b..76a7777c746d 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2299,19 +2299,19 @@ mvneta_swbm_add_rx_fragment(struct mvneta_port *pp,
skb_frag_off_set(frag, pp->rx_offset_correction);
skb_frag_size_set(frag, data_len);
__skb_frag_set_page(frag, page);
-
- /* last fragment */
- if (len == *size) {
- struct skb_shared_info *sinfo;
-
- sinfo = xdp_get_shared_info_from_buff(xdp);
- sinfo->nr_frags = xdp_sinfo->nr_frags;
- memcpy(sinfo->frags, xdp_sinfo->frags,
- sinfo->nr_frags * sizeof(skb_frag_t));
- }
} else {
page_pool_put_full_page(rxq->page_pool, page, true);
}
+
+ /* last fragment */
+ if (len == *size) {
+ struct skb_shared_info *sinfo;
+
+ sinfo = xdp_get_shared_info_from_buff(xdp);
+ sinfo->nr_frags = xdp_sinfo->nr_frags;
+ memcpy(sinfo->frags, xdp_sinfo->frags,
+ sinfo->nr_frags * sizeof(skb_frag_t));
+ }
*size -= len;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
index fac6474ad694..9169849881bf 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c
@@ -86,6 +86,22 @@ bool is_lmac_valid(struct cgx *cgx, int lmac_id)
return test_bit(lmac_id, &cgx->lmac_bmap);
}
+/* Helper function to get the sequential index
+ * of an enabled LMAC within a CGX
+ */
+static int get_sequence_id_of_lmac(struct cgx *cgx, int lmac_id)
+{
+ int tmp, id = 0;
+
+ for_each_set_bit(tmp, &cgx->lmac_bmap, MAX_LMAC_PER_CGX) {
+ if (tmp == lmac_id)
+ break;
+ id++;
+ }
+
+ return id;
+}
+
struct mac_ops *get_mac_ops(void *cgxd)
{
if (!cgxd)
@@ -211,37 +227,257 @@ static u64 mac2u64 (u8 *mac_addr)
return mac;
}
+static void cfg2mac(u64 cfg, u8 *mac_addr)
+{
+ int i, index = 0;
+
+ for (i = ETH_ALEN - 1; i >= 0; i--, index++)
+ mac_addr[i] = (cfg >> (8 * index)) & 0xFF;
+}
+
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
struct mac_ops *mac_ops;
+ int index, id;
u64 cfg;
+ /* access mac_ops to know csr_offset */
mac_ops = cgx_dev->mac_ops;
+
/* copy 6bytes from macaddr */
/* memcpy(&cfg, mac_addr, 6); */
cfg = mac2u64 (mac_addr);
- cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (lmac_id * 0x8)),
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max;
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)),
cfg | CGX_DMAC_CAM_ADDR_ENABLE | ((u64)lmac_id << 49));
cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
- cfg |= CGX_DMAC_CTL0_CAM_ENABLE;
+ cfg |= (CGX_DMAC_CTL0_CAM_ENABLE | CGX_DMAC_BCAST_MODE |
+ CGX_DMAC_MCAST_MODE);
cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
return 0;
}
+u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id)
+{
+ struct mac_ops *mac_ops;
+ struct cgx *cgx = cgxd;
+
+ if (!cgxd || !is_lmac_valid(cgxd, lmac_id))
+ return 0;
+
+ cgx = cgxd;
+ /* Get mac_ops to know csr offset */
+ mac_ops = cgx->mac_ops;
+
+ return cgx_read(cgxd, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+}
+
+u64 cgx_read_dmac_entry(void *cgxd, int index)
+{
+ struct mac_ops *mac_ops;
+ struct cgx *cgx;
+
+ if (!cgxd)
+ return 0;
+
+ cgx = cgxd;
+ mac_ops = cgx->mac_ops;
+ return cgx_read(cgx, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 8)));
+}
+
+int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ int index, idx;
+ u64 cfg = 0;
+ int id;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Get available index where entry is to be installed */
+ idx = rvu_alloc_rsrc(&lmac->mac_to_index_bmap);
+ if (idx < 0)
+ return idx;
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + idx;
+
+ cfg = mac2u64 (mac_addr);
+ cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+ cfg |= ((u64)lmac_id << 49);
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
+
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_CAM_ACCEPT);
+
+ if (is_multicast_ether_addr(mac_addr)) {
+ cfg &= ~GENMASK_ULL(2, 1);
+ cfg |= CGX_DMAC_MCAST_MODE_CAM;
+ lmac->mcast_filters_count++;
+ } else if (!lmac->mcast_filters_count) {
+ cfg |= CGX_DMAC_MCAST_MODE;
+ }
+
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ return idx;
+}
+
+int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ u8 index = 0, id;
+ u64 cfg;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Restore index 0 to its default init value as done during
+ * cgx_lmac_init
+ */
+ set_bit(0, lmac->mac_to_index_bmap.bmap);
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
+
+ /* Reset CGXX_CMRX_RX_DMAC_CTL0 register to default state */
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg &= ~CGX_DMAC_CAM_ACCEPT;
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+
+ return 0;
+}
+
+/* Allows caller to change macaddress associated with index
+ * in dmac filter table including index 0 reserved for
+ * interface mac address
+ */
+int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct mac_ops *mac_ops;
+ struct lmac *lmac;
+ u64 cfg;
+ int id;
+
+ lmac = lmac_pdata(lmac_id, cgx_dev);
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Validate the index */
+ if (index >= lmac->mac_to_index_bmap.max)
+ return -EINVAL;
+
+ /* ensure index is already set */
+ if (!test_bit(index, lmac->mac_to_index_bmap.bmap))
+ return -EINVAL;
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+
+ cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
+ cfg &= ~CGX_RX_DMAC_ADR_MASK;
+ cfg |= mac2u64 (mac_addr);
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), cfg);
+ return 0;
+}
+
+int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+ struct mac_ops *mac_ops;
+ u8 mac[ETH_ALEN];
+ u64 cfg;
+ int id;
+
+ if (!lmac)
+ return -ENODEV;
+
+ mac_ops = cgx_dev->mac_ops;
+ /* Validate the index */
+ if (index >= lmac->mac_to_index_bmap.max)
+ return -EINVAL;
+
+ /* Skip deletion for reserved index i.e. index 0 */
+ if (index == 0)
+ return 0;
+
+ rvu_free_rsrc(&lmac->mac_to_index_bmap, index);
+
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max + index;
+
+ /* Read MAC address to check whether it is ucast or mcast */
+ cfg = cgx_read(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)));
+
+ cfg2mac(cfg, mac);
+ if (is_multicast_ether_addr(mac))
+ lmac->mcast_filters_count--;
+
+ if (!lmac->mcast_filters_count) {
+ cfg = cgx_read(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
+ cfg &= ~GENMASK_ULL(2, 1);
+ cfg |= CGX_DMAC_MCAST_MODE;
+ cgx_write(cgx_dev, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
+ }
+
+ cgx_write(cgx_dev, 0, (CGXX_CMRX_RX_DMAC_CAM0 + (index * 0x8)), 0);
+
+ return 0;
+}
+
+int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id)
+{
+ struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
+
+ if (lmac)
+ return lmac->mac_to_index_bmap.max;
+
+ return 0;
+}
+
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id)
{
struct cgx *cgx_dev = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx_dev);
struct mac_ops *mac_ops;
+ int index;
u64 cfg;
+ int id;
mac_ops = cgx_dev->mac_ops;
- cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8);
+ id = get_sequence_id_of_lmac(cgx_dev, lmac_id);
+
+ index = id * lmac->mac_to_index_bmap.max;
+
+ cfg = cgx_read(cgx_dev, 0, CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8);
return cfg & CGX_RX_DMAC_ADR_MASK;
}
@@ -297,35 +533,51 @@ int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable)
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable)
{
struct cgx *cgx = cgx_get_pdata(cgx_id);
+ struct lmac *lmac = lmac_pdata(lmac_id, cgx);
+ u16 max_dmac = lmac->mac_to_index_bmap.max;
struct mac_ops *mac_ops;
+ int index, i;
u64 cfg = 0;
+ int id;
if (!cgx)
return;
+ id = get_sequence_id_of_lmac(cgx, lmac_id);
+
mac_ops = cgx->mac_ops;
if (enable) {
/* Enable promiscuous mode on LMAC */
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
- cfg &= ~(CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE);
- cfg |= CGX_DMAC_BCAST_MODE;
+ cfg &= ~CGX_DMAC_CAM_ACCEPT;
+ cfg |= (CGX_DMAC_BCAST_MODE | CGX_DMAC_MCAST_MODE);
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
- cfg = cgx_read(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
- cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
- cgx_write(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
+ for (i = 0; i < max_dmac; i++) {
+ index = id * max_dmac + i;
+ cfg = cgx_read(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
+ cfg &= ~CGX_DMAC_CAM_ADDR_ENABLE;
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8), cfg);
+ }
} else {
/* Disable promiscuous mode */
cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0);
cfg |= CGX_DMAC_CAM_ACCEPT | CGX_DMAC_MCAST_MODE;
cgx_write(cgx, lmac_id, CGXX_CMRX_RX_DMAC_CTL0, cfg);
- cfg = cgx_read(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8));
- cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
- cgx_write(cgx, 0,
- (CGXX_CMRX_RX_DMAC_CAM0 + lmac_id * 0x8), cfg);
+ for (i = 0; i < max_dmac; i++) {
+ index = id * max_dmac + i;
+ cfg = cgx_read(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 + index * 0x8));
+ if ((cfg & CGX_RX_DMAC_ADR_MASK) != 0) {
+ cfg |= CGX_DMAC_CAM_ADDR_ENABLE;
+ cgx_write(cgx, 0,
+ (CGXX_CMRX_RX_DMAC_CAM0 +
+ index * 0x8),
+ cfg);
+ }
+ }
}
}
@@ -1234,6 +1486,15 @@ static int cgx_lmac_init(struct cgx *cgx)
}
lmac->cgx = cgx;
+ lmac->mac_to_index_bmap.max =
+ MAX_DMAC_ENTRIES_PER_CGX / cgx->lmac_count;
+ err = rvu_alloc_bitmap(&lmac->mac_to_index_bmap);
+ if (err)
+ return err;
+
+ /* Reserve first entry for default MAC address */
+ set_bit(0, lmac->mac_to_index_bmap.bmap);
+
init_waitqueue_head(&lmac->wq_cmd_cmplt);
mutex_init(&lmac->cmd_lock);
spin_lock_init(&lmac->event_cb_lock);
@@ -1274,6 +1535,7 @@ static int cgx_lmac_exit(struct cgx *cgx)
continue;
cgx->mac_ops->mac_pause_frm_config(cgx, lmac->lmac_id, false);
cgx_configure_interrupt(cgx, lmac, lmac->lmac_id, true);
+ kfree(lmac->mac_to_index_bmap.bmap);
kfree(lmac->name);
kfree(lmac);
}
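The new DMAC filter code splits the CGX-wide CAM into equal per-LMAC blocks: the sequential id of an enabled LMAC selects the block and the per-LMAC bitmap index selects the slot, so the CAM entry is id * per_lmac_max + idx. A small standalone sketch of that indexing with made-up values (two enabled LMACs sharing the 32-entry CAM); this is an illustration, not the driver code:

#include <stdio.h>

#define MAX_DMAC_ENTRIES_PER_CGX 32
#define MAX_LMAC_PER_CGX	 4

/* sequential position of lmac_id among the enabled LMACs in the bitmap */
static int seq_id_of_lmac(unsigned long lmac_bmap, int lmac_id)
{
	int bit, id = 0;

	for (bit = 0; bit < MAX_LMAC_PER_CGX; bit++) {
		if (!(lmac_bmap & (1UL << bit)))
			continue;
		if (bit == lmac_id)
			return id;
		id++;
	}
	return -1;
}

int main(void)
{
	unsigned long enabled = 0x5;			/* LMACs 0 and 2 enabled */
	int per_lmac = MAX_DMAC_ENTRIES_PER_CGX / 2;	/* 2 enabled LMACs */
	int idx = 1;		/* slot allocated from the per-LMAC bitmap */

	/* LMAC 2 is the second enabled LMAC, so its block starts at entry 16 */
	printf("CAM entry = %d\n", seq_id_of_lmac(enabled, 2) * per_lmac + idx);
	return 0;
}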
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
index 12521262164a..237ba2b56210 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h
@@ -23,6 +23,7 @@
#define CGX_ID_MASK 0x7
#define MAX_LMAC_PER_CGX 4
+#define MAX_DMAC_ENTRIES_PER_CGX 32
#define CGX_FIFO_LEN 65536 /* 64K for both Rx & Tx */
#define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX)
@@ -46,10 +47,12 @@
#define CGXX_CMRX_RX_DMAC_CTL0 (0x1F8 + mac_ops->csr_offset)
#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3)
#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3)
+#define CGX_DMAC_MCAST_MODE_CAM BIT_ULL(2)
#define CGX_DMAC_MCAST_MODE BIT_ULL(1)
#define CGX_DMAC_BCAST_MODE BIT_ULL(0)
#define CGXX_CMRX_RX_DMAC_CAM0 (0x200 + mac_ops->csr_offset)
#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48)
+#define CGX_DMAC_CAM_ENTRY_LMACID GENMASK_ULL(50, 49)
#define CGXX_CMRX_RX_DMAC_CAM1 0x400
#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0)
#define CGXX_CMRX_TX_STAT0 0x700
@@ -139,7 +142,11 @@ int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat);
int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_tx_enable(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_addr_set(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
+int cgx_lmac_addr_reset(u8 cgx_id, u8 lmac_id);
u64 cgx_lmac_addr_get(u8 cgx_id, u8 lmac_id);
+int cgx_lmac_addr_add(u8 cgx_id, u8 lmac_id, u8 *mac_addr);
+int cgx_lmac_addr_del(u8 cgx_id, u8 lmac_id, u8 index);
+int cgx_lmac_addr_max_entries_get(u8 cgx_id, u8 lmac_id);
void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable);
void cgx_lmac_enadis_rx_pause_fwding(void *cgxd, int lmac_id, bool enable);
int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable);
@@ -165,4 +172,7 @@ u8 cgx_get_lmacid(void *cgxd, u8 lmac_index);
unsigned long cgx_get_lmac_bmap(void *cgxd);
void cgx_lmac_write(int cgx_id, int lmac_id, u64 offset, u64 val);
u64 cgx_lmac_read(int cgx_id, int lmac_id, u64 offset);
+int cgx_lmac_addr_update(u8 cgx_id, u8 lmac_id, u8 *mac_addr, u8 index);
+u64 cgx_read_dmac_ctrl(void *cgxd, int lmac_id);
+u64 cgx_read_dmac_entry(void *cgxd, int index);
#endif /* CGX_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
index 45706fd87120..a8b7b1c7a1d5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h
@@ -10,17 +10,19 @@
#include "rvu.h"
#include "cgx.h"
/**
- * struct lmac
+ * struct lmac - per lmac locks and properties
* @wq_cmd_cmplt: waitq to keep the process blocked until cmd completion
* @cmd_lock: Lock to serialize the command interface
* @resp: command response
* @link_info: link related information
+ * @mac_to_index_bmap: Mac address to CGX table index mapping
* @event_cb: callback for linkchange events
* @event_cb_lock: lock for serializing callback with unregister
- * @cmd_pend: flag set before new command is started
- * flag cleared after command response is received
* @cgx: parent cgx port
+ * @mcast_filters_count: Number of multicast filters installed
* @lmac_id: lmac port id
+ * @cmd_pend: flag set before new command is started
+ * flag cleared after command response is received
* @name: lmac port name
*/
struct lmac {
@@ -29,12 +31,14 @@ struct lmac {
struct mutex cmd_lock;
u64 resp;
struct cgx_link_user_info link_info;
+ struct rsrc_bmap mac_to_index_bmap;
struct cgx_event_cb event_cb;
/* lock for serializing callback with unregister */
spinlock_t event_cb_lock;
- bool cmd_pend;
struct cgx *cgx;
+ u8 mcast_filters_count;
u8 lmac_id;
+ bool cmd_pend;
char *name;
};
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
index 770d86262838..f5ec39de026a 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h
@@ -134,6 +134,8 @@ M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \
M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \
M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \
M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \
+M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \
+ msg_rsp) \
M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \
/* CGX mbox IDs (range 0x200 - 0x3FF) */ \
M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \
@@ -163,7 +165,15 @@ M(CGX_SET_LINK_MODE, 0x214, cgx_set_link_mode, cgx_set_link_mode_req,\
M(CGX_FEATURES_GET, 0x215, cgx_features_get, msg_req, \
cgx_features_info_msg) \
M(RPM_STATS, 0x216, rpm_stats, msg_req, rpm_stats_rsp) \
- /* NPA mbox IDs (range 0x400 - 0x5FF) */ \
+M(CGX_MAC_ADDR_ADD, 0x217, cgx_mac_addr_add, cgx_mac_addr_add_req, \
+ cgx_mac_addr_add_rsp) \
+M(CGX_MAC_ADDR_DEL, 0x218, cgx_mac_addr_del, cgx_mac_addr_del_req, \
+ msg_rsp) \
+M(CGX_MAC_MAX_ENTRIES_GET, 0x219, cgx_mac_max_entries_get, msg_req, \
+ cgx_max_dmac_entries_get_rsp) \
+M(CGX_MAC_ADDR_RESET, 0x21A, cgx_mac_addr_reset, msg_req, msg_rsp) \
+M(CGX_MAC_ADDR_UPDATE, 0x21B, cgx_mac_addr_update, cgx_mac_addr_update_req, \
+ msg_rsp) \
/* NPA mbox IDs (range 0x400 - 0x5FF) */ \
M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \
npa_lf_alloc_req, npa_lf_alloc_rsp) \
@@ -401,6 +411,38 @@ struct cgx_mac_addr_set_or_get {
u8 mac_addr[ETH_ALEN];
};
+/* Structure for requesting the operation to
+ * add DMAC filter entry into CGX interface
+ */
+struct cgx_mac_addr_add_req {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* Structure for response against the operation to
+ * add DMAC filter entry into CGX interface
+ */
+struct cgx_mac_addr_add_rsp {
+ struct mbox_msghdr hdr;
+ u8 index;
+};
+
+/* Structure for requesting the operation to
+ * delete DMAC filter entry from CGX interface
+ */
+struct cgx_mac_addr_del_req {
+ struct mbox_msghdr hdr;
+ u8 index;
+};
+
+/* Structure for response against the operation to
+ * get maximum supported DMAC filter entries
+ */
+struct cgx_max_dmac_entries_get_rsp {
+ struct mbox_msghdr hdr;
+ u8 max_dmac_filters;
+};
+
struct cgx_link_user_info {
uint64_t link_up:1;
uint64_t full_duplex:1;
@@ -499,6 +541,12 @@ struct cgx_set_link_mode_rsp {
int status;
};
+struct cgx_mac_addr_update_req {
+ struct mbox_msghdr hdr;
+ u8 mac_addr[ETH_ALEN];
+ u8 index;
+};
+
#define RVU_LMAC_FEAT_FC BIT_ULL(0) /* pause frames */
#define RVU_LMAC_FEAT_PTP BIT_ULL(1) /* precision time protocol */
#define RVU_MAC_VERSION BIT_ULL(2)
@@ -1278,6 +1326,14 @@ struct set_vf_perm {
u64 flags;
};
+struct lmtst_tbl_setup_req {
+ struct mbox_msghdr hdr;
+ u16 base_pcifunc;
+ u8 use_local_lmt_region;
+ u64 lmt_iova;
+ u64 rsvd[4];
+};
+
/* CPT mailbox error codes
* Range 901 - 1000.
*/
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index 0b092949d7ac..10cddf1ac7b9 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -2333,6 +2333,7 @@ static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc)
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO);
rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA);
+ rvu_reset_lmt_map_tbl(rvu, pcifunc);
rvu_detach_rsrcs(rvu, NULL, pcifunc);
mutex_unlock(&rvu->flr_lock);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 9e5d9ba6f01e..10e58a5d5861 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -243,6 +243,7 @@ struct rvu_pfvf {
u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */
u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */
u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */
+ u64 lmt_base_addr; /* Preserving the pcifunc's lmtst base addr */
unsigned long flags;
};
@@ -656,6 +657,8 @@ void rvu_cgx_enadis_rx_bp(struct rvu *rvu, int pf, bool enable);
int rvu_cgx_start_stop_io(struct rvu *rvu, u16 pcifunc, bool start);
int rvu_cgx_nix_cuml_stats(struct rvu *rvu, void *cgxd, int lmac_id, int index,
int rxtxflag, u64 *stat);
+void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc);
+
/* NPA APIs */
int rvu_npa_init(struct rvu *rvu);
void rvu_npa_freemem(struct rvu *rvu);
@@ -741,6 +744,7 @@ void npc_read_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam,
bool is_mac_feature_supported(struct rvu *rvu, int pf, int feature);
u32 rvu_cgx_get_fifolen(struct rvu *rvu);
void *rvu_first_cgx_pdata(struct rvu *rvu);
+int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id);
int npc_get_nixlf_mcam_index(struct npc_mcam *mcam, u16 pcifunc, int nixlf,
int type);
@@ -754,6 +758,9 @@ int rvu_cpt_lf_teardown(struct rvu *rvu, u16 pcifunc, int lf, int slot);
int rvu_set_channels_base(struct rvu *rvu);
void rvu_program_channels(struct rvu *rvu);
+/* CN10K RVU - LMT*/
+void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc);
+
#ifdef CONFIG_DEBUG_FS
void rvu_dbg_init(struct rvu *rvu);
void rvu_dbg_exit(struct rvu *rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
index 6e2bf4fcd29c..6cc8fbb7190c 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c
@@ -63,7 +63,7 @@ static u16 cgxlmac_to_pfmap(struct rvu *rvu, u8 cgx_id, u8 lmac_id)
return rvu->cgxlmac2pf_map[CGX_OFFSET(cgx_id) + lmac_id];
}
-static int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
+int cgxlmac_to_pf(struct rvu *rvu, int cgx_id, int lmac_id)
{
unsigned long pfmap;
@@ -454,6 +454,31 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start)
return 0;
}
+void rvu_cgx_disable_dmac_entries(struct rvu *rvu, u16 pcifunc)
+{
+ int pf = rvu_get_pf(pcifunc);
+ int i = 0, lmac_count = 0;
+ u8 max_dmac_filters;
+ u8 cgx_id, lmac_id;
+ void *cgx_dev;
+
+ if (!is_cgx_config_permitted(rvu, pcifunc))
+ return;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ cgx_dev = cgx_get_pdata(cgx_id);
+ lmac_count = cgx_get_lmac_cnt(cgx_dev);
+ max_dmac_filters = MAX_DMAC_ENTRIES_PER_CGX / lmac_count;
+
+ for (i = 0; i < max_dmac_filters; i++)
+ cgx_lmac_addr_del(cgx_id, lmac_id, i);
+
+ /* cgx_lmac_addr_del() does not clear the entry for index 0,
+ * so it needs to be done explicitly
+ */
+ cgx_lmac_addr_reset(cgx_id, lmac_id);
+}
+
int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req,
struct msg_rsp *rsp)
{
@@ -557,6 +582,63 @@ int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu,
return 0;
}
+int rvu_mbox_handler_cgx_mac_addr_add(struct rvu *rvu,
+ struct cgx_mac_addr_add_req *req,
+ struct cgx_mac_addr_add_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+ int rc = 0;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rc = cgx_lmac_addr_add(cgx_id, lmac_id, req->mac_addr);
+ if (rc >= 0) {
+ rsp->index = rc;
+ return 0;
+ }
+
+ return rc;
+}
+
+int rvu_mbox_handler_cgx_mac_addr_del(struct rvu *rvu,
+ struct cgx_mac_addr_del_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_lmac_addr_del(cgx_id, lmac_id, req->index);
+}
+
+int rvu_mbox_handler_cgx_mac_max_entries_get(struct rvu *rvu,
+ struct msg_req *req,
+ struct cgx_max_dmac_entries_get_rsp
+ *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ /* If the msg is received from a PF that is not mapped to a CGX LMAC,
+ * or from a VF, then no DMAC filter entries are allocated at the CGX
+ * level, so return zero.
+ */
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) {
+ rsp->max_dmac_filters = 0;
+ return 0;
+ }
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ rsp->max_dmac_filters = cgx_lmac_addr_max_entries_get(cgx_id, lmac_id);
+ return 0;
+}
+
int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu,
struct cgx_mac_addr_set_or_get *req,
struct cgx_mac_addr_set_or_get *rsp)
@@ -953,3 +1035,30 @@ int rvu_mbox_handler_cgx_set_link_mode(struct rvu *rvu,
rsp->status = cgx_set_link_mode(cgxd, req->args, cgx_idx, lmac);
return 0;
}
+
+int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_lmac_addr_reset(cgx_id, lmac_id);
+}
+
+int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu,
+ struct cgx_mac_addr_update_req *req,
+ struct msg_rsp *rsp)
+{
+ int pf = rvu_get_pf(req->hdr.pcifunc);
+ u8 cgx_id, lmac_id;
+
+ if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc))
+ return -EPERM;
+
+ rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id);
+ return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
index 7d9e71c6965f..8d48b64485c6 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c
@@ -10,6 +10,206 @@
#include "cgx.h"
#include "rvu_reg.h"
+/* RVU LMTST */
+#define LMT_TBL_OP_READ 0
+#define LMT_TBL_OP_WRITE 1
+#define LMT_MAP_TABLE_SIZE (128 * 1024)
+#define LMT_MAPTBL_ENTRY_SIZE 16
+
+/* Function to perform operations (read/write) on lmtst map table */
+static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val,
+ int lmt_tbl_op)
+{
+ void __iomem *lmt_map_base;
+ u64 tbl_base;
+
+ tbl_base = rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_MAP_BASE);
+
+ lmt_map_base = ioremap_wc(tbl_base, LMT_MAP_TABLE_SIZE);
+ if (!lmt_map_base) {
+ dev_err(rvu->dev, "Failed to setup lmt map table mapping!!\n");
+ return -ENOMEM;
+ }
+
+ if (lmt_tbl_op == LMT_TBL_OP_READ) {
+ *val = readq(lmt_map_base + index);
+ } else {
+ writeq((*val), (lmt_map_base + index));
+ /* Flush the AP interceptor cache to make APR_LMT_MAP_ENTRY_S
+ * changes effective. Writing 1 triggers the flush; the read back
+ * acts as a barrier and sets up a data dependency. Write 0 after
+ * the write of 1 to complete the flush.
+ */
+ rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, BIT_ULL(0));
+ rvu_read64(rvu, BLKADDR_APR, APR_AF_LMT_CTL);
+ rvu_write64(rvu, BLKADDR_APR, APR_AF_LMT_CTL, 0x00);
+ }
+
+ iounmap(lmt_map_base);
+ return 0;
+}
+
+static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc)
+{
+ return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) +
+ (pcifunc & RVU_PFVF_FUNC_MASK)) * LMT_MAPTBL_ENTRY_SIZE;
+}
+
+static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc,
+ u64 iova, u64 *lmt_addr)
+{
+ u64 pa, val, pf;
+ int err;
+
+ if (!iova) {
+ dev_err(rvu->dev, "%s Requested Null address for translation\n", __func__);
+ return -EINVAL;
+ }
+
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_REQ, iova);
+ pf = rvu_get_pf(pcifunc) & 0x1F;
+ val = BIT_ULL(63) | BIT_ULL(14) | BIT_ULL(13) | pf << 8 |
+ ((pcifunc & RVU_PFVF_FUNC_MASK) & 0xFF);
+ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TXN_REQ, val);
+
+ err = rvu_poll_reg(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS, BIT_ULL(0), false);
+ if (err) {
+ dev_err(rvu->dev, "%s LMTLINE iova translation failed\n", __func__);
+ return err;
+ }
+ val = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_ADDR_RSP_STS);
+ if (val & ~0x1ULL) {
+ dev_err(rvu->dev, "%s LMTLINE iova translation failed err:%llx\n", __func__, val);
+ return -EIO;
+ }
+ /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT1[60:21]
+ * PA[11:0] = IOVA[11:0]
+ */
+ pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT1) >> 21;
+ pa &= GENMASK_ULL(39, 0);
+ *lmt_addr = (pa << 12) | (iova & 0xFFF);
+
+ return 0;
+}
+
+static int rvu_update_lmtaddr(struct rvu *rvu, u16 pcifunc, u64 lmt_addr)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ u32 tbl_idx;
+ int err = 0;
+ u64 val;
+
+ /* Read the current lmt addr of pcifunc */
+ tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
+ err = lmtst_map_table_ops(rvu, tbl_idx, &val, LMT_TBL_OP_READ);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to read LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ return err;
+ }
+
+ /* Storing the secondary's lmt base address as this needs to be
+ * reverted in FLR. Also making sure this default value doesn't
+ * get overwritten on multiple calls to this mailbox.
+ */
+ if (!pfvf->lmt_base_addr)
+ pfvf->lmt_base_addr = val;
+
+ /* Update the LMT table with new addr */
+ err = lmtst_map_table_ops(rvu, tbl_idx, &lmt_addr, LMT_TBL_OP_WRITE);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ return err;
+ }
+ return 0;
+}
+
+int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu,
+ struct lmtst_tbl_setup_req *req,
+ struct msg_rsp *rsp)
+{
+ u64 lmt_addr, val;
+ u32 pri_tbl_idx;
+ int err = 0;
+
+ /* Check if PF_FUNC wants to use its own local memory as LMTLINE
+ * region, if so, convert that IOVA to physical address and
+ * populate LMT table with that address
+ */
+ if (req->use_local_lmt_region) {
+ err = rvu_get_lmtaddr(rvu, req->hdr.pcifunc,
+ req->lmt_iova, &lmt_addr);
+ if (err < 0)
+ return err;
+
+ /* Update the lmt addr for this PFFUNC in the LMT table */
+ err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, lmt_addr);
+ if (err)
+ return err;
+ }
+
+ /* Reconfigure the lmtst map table for shared LMT region mode, i.e.
+ * multiple PF_FUNCs sharing an LMTLINE region: the primary/base
+ * pcifunc (passed as an argument in the mailbox request) is the one
+ * whose lmt base address is shared with the other, secondary
+ * pcifuncs (the ones calling this mailbox).
+ */
+ if (req->base_pcifunc) {
+ /* Calculating the LMT table index equivalent to primary
+ * pcifunc.
+ */
+ pri_tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->base_pcifunc);
+
+ /* Read the base lmt addr of the primary pcifunc */
+ err = lmtst_map_table_ops(rvu, pri_tbl_idx, &val,
+ LMT_TBL_OP_READ);
+ if (err) {
+ dev_err(rvu->dev,
+ "Failed to read LMT map table: index 0x%x err %d\n",
+ pri_tbl_idx, err);
+ return err;
+ }
+
+ /* Update the base lmt addr of secondary with primary's base
+ * lmt addr.
+ */
+ err = rvu_update_lmtaddr(rvu, req->hdr.pcifunc, val);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Resetting the lmtst map table to original base addresses */
+void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc)
+{
+ struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc);
+ u32 tbl_idx;
+ int err;
+
+ if (is_rvu_otx2(rvu))
+ return;
+
+ if (pfvf->lmt_base_addr) {
+ /* This corresponds to lmt map table index */
+ tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc);
+ /* Reverting back original lmt base addr for respective
+ * pcifunc.
+ */
+ err = lmtst_map_table_ops(rvu, tbl_idx, &pfvf->lmt_base_addr,
+ LMT_TBL_OP_WRITE);
+ if (err)
+ dev_err(rvu->dev,
+ "Failed to update LMT map table: index 0x%x err %d\n",
+ tbl_idx, err);
+ pfvf->lmt_base_addr = 0;
+ }
+}
+
int rvu_set_channels_base(struct rvu *rvu)
{
struct rvu_hwinfo *hw = rvu->hw;
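Two computations in the rvu_cn10k.c additions are easy to miss in the diff: the per-pcifunc LMT map table index, and the physical address stitched together from RVU_AF_SMMU_TLN_FLIT1 and the low 12 bits of the IOVA. A standalone sketch of both; the pcifunc bit split and the example values are assumptions, not taken from the hunk:

#include <stdint.h>
#include <stdio.h>

#define LMT_MAPTBL_ENTRY_SIZE	16
#define RVU_PFVF_FUNC_MASK	0x3ff

static uint32_t lmt_tbl_index(uint16_t pcifunc, uint16_t total_vfs)
{
	uint16_t pf = (pcifunc >> 10) & 0x3f;	/* assumed rvu_get_pf() split */
	uint16_t func = pcifunc & RVU_PFVF_FUNC_MASK;

	return ((uint32_t)pf * total_vfs + func) * LMT_MAPTBL_ENTRY_SIZE;
}

static uint64_t lmt_phys_addr(uint64_t flit1, uint64_t iova)
{
	uint64_t pa = (flit1 >> 21) & ((1ULL << 40) - 1);	/* PA[51:12] */

	return (pa << 12) | (iova & 0xfff);			/* keep IOVA[11:0] */
}

int main(void)
{
	/* example pcifunc (PF 2, func 2) and made-up register/iova values */
	printf("tbl idx = %u\n", (unsigned)lmt_tbl_index(0x0802, 128));
	printf("pa      = 0x%llx\n",
	       (unsigned long long)lmt_phys_addr(0x1234500000000ULL, 0x1abcULL));
	return 0;
}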
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
index 3cc3c6fd1d84..370d4ca1e5ed 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c
@@ -1971,10 +1971,9 @@ static int cgx_print_stats(struct seq_file *s, int lmac_id)
return err;
}
-static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+static int rvu_dbg_derive_lmacid(struct seq_file *filp, int *lmac_id)
{
struct dentry *current_dir;
- int err, lmac_id;
char *buf;
current_dir = filp->file->f_path.dentry->d_parent;
@@ -1982,17 +1981,87 @@ static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
if (!buf)
return -EINVAL;
- err = kstrtoint(buf + 1, 10, &lmac_id);
- if (!err) {
- err = cgx_print_stats(filp, lmac_id);
- if (err)
- return err;
- }
+ return kstrtoint(buf + 1, 10, lmac_id);
+}
+
+static int rvu_dbg_cgx_stat_display(struct seq_file *filp, void *unused)
+{
+ int lmac_id, err;
+
+ err = rvu_dbg_derive_lmacid(filp, &lmac_id);
+ if (!err)
+ return cgx_print_stats(filp, lmac_id);
+
return err;
}
RVU_DEBUG_SEQ_FOPS(cgx_stat, cgx_stat_display, NULL);
+static int cgx_print_dmac_flt(struct seq_file *s, int lmac_id)
+{
+ struct pci_dev *pdev = NULL;
+ void *cgxd = s->private;
+ char *bcast, *mcast;
+ u16 index, domain;
+ u8 dmac[ETH_ALEN];
+ struct rvu *rvu;
+ u64 cfg, mac;
+ int pf;
+
+ rvu = pci_get_drvdata(pci_get_device(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVID_OCTEONTX2_RVU_AF, NULL));
+ if (!rvu)
+ return -ENODEV;
+
+ pf = cgxlmac_to_pf(rvu, cgx_get_cgxid(cgxd), lmac_id);
+ domain = 2;
+
+ pdev = pci_get_domain_bus_and_slot(domain, pf + 1, 0);
+ if (!pdev)
+ return 0;
+
+ cfg = cgx_read_dmac_ctrl(cgxd, lmac_id);
+ bcast = cfg & CGX_DMAC_BCAST_MODE ? "ACCEPT" : "REJECT";
+ mcast = cfg & CGX_DMAC_MCAST_MODE ? "ACCEPT" : "REJECT";
+
+ seq_puts(s,
+ "PCI dev RVUPF BROADCAST MULTICAST FILTER-MODE\n");
+ seq_printf(s, "%s PF%d %9s %9s",
+ dev_name(&pdev->dev), pf, bcast, mcast);
+ if (cfg & CGX_DMAC_CAM_ACCEPT)
+ seq_printf(s, "%12s\n\n", "UNICAST");
+ else
+ seq_printf(s, "%16s\n\n", "PROMISCUOUS");
+
+ seq_puts(s, "\nDMAC-INDEX ADDRESS\n");
+
+ for (index = 0 ; index < 32 ; index++) {
+ cfg = cgx_read_dmac_entry(cgxd, index);
+ /* Display enabled dmac entries associated with current lmac */
+ if (lmac_id == FIELD_GET(CGX_DMAC_CAM_ENTRY_LMACID, cfg) &&
+ FIELD_GET(CGX_DMAC_CAM_ADDR_ENABLE, cfg)) {
+ mac = FIELD_GET(CGX_RX_DMAC_ADR_MASK, cfg);
+ u64_to_ether_addr(mac, dmac);
+ seq_printf(s, "%7d %pM\n", index, dmac);
+ }
+ }
+
+ return 0;
+}
+
+static int rvu_dbg_cgx_dmac_flt_display(struct seq_file *filp, void *unused)
+{
+ int err, lmac_id;
+
+ err = rvu_dbg_derive_lmacid(filp, &lmac_id);
+ if (!err)
+ return cgx_print_dmac_flt(filp, lmac_id);
+
+ return err;
+}
+
+RVU_DEBUG_SEQ_FOPS(cgx_dmac_flt, cgx_dmac_flt_display, NULL);
+
static void rvu_dbg_cgx_init(struct rvu *rvu)
{
struct mac_ops *mac_ops;
@@ -2029,6 +2098,9 @@ static void rvu_dbg_cgx_init(struct rvu *rvu)
debugfs_create_file("stats", 0600, rvu->rvu_dbg.lmac,
cgx, &rvu_dbg_cgx_stat_fops);
+ debugfs_create_file("mac_filter", 0600,
+ rvu->rvu_dbg.lmac, cgx,
+ &rvu_dbg_cgx_dmac_flt_fops);
}
}
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
index d6f8210652c5..aeae37704428 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c
@@ -346,6 +346,9 @@ static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf)
/* Free and disable any MCAM entries used by this NIX LF */
rvu_npc_disable_mcam_entries(rvu, pcifunc, nixlf);
+
+ /* Disable DMAC filters used */
+ rvu_cgx_disable_dmac_entries(rvu, pcifunc);
}
int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
index 76837d5e19c6..8b01ef6e2c99 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h
@@ -49,6 +49,11 @@
#define RVU_AF_PFX_VF_BAR4_ADDR (0x5400 | (a) << 4)
#define RVU_AF_PFX_VF_BAR4_CFG (0x5600 | (a) << 4)
#define RVU_AF_PFX_LMTLINE_ADDR (0x5800 | (a) << 4)
+#define RVU_AF_SMMU_ADDR_REQ (0x6000)
+#define RVU_AF_SMMU_TXN_REQ (0x6008)
+#define RVU_AF_SMMU_ADDR_RSP_STS (0x6010)
+#define RVU_AF_SMMU_ADDR_TLN (0x6018)
+#define RVU_AF_SMMU_TLN_FLIT1 (0x6030)
/* Admin function's privileged PF/VF registers */
#define RVU_PRIV_CONST (0x8000000)
@@ -692,4 +697,9 @@
#define LBK_LINK_CFG_ID_MASK GENMASK_ULL(11, 6)
#define LBK_LINK_CFG_BASE_MASK GENMASK_ULL(5, 0)
+/* APR */
+#define APR_AF_LMT_CFG (0x000ull)
+#define APR_AF_LMT_MAP_BASE (0x008ull)
+#define APR_AF_LMT_CTL (0x010ull)
+
#endif /* RVU_REG_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index 14aa8e37ea41..5bbe6727d11d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -35,7 +35,8 @@ enum rvu_block_addr_e {
BLKADDR_NDC_NPA0 = 0xeULL,
BLKADDR_NDC_NIX1_RX = 0x10ULL,
BLKADDR_NDC_NIX1_TX = 0x11ULL,
- BLK_COUNT = 0x12ULL,
+ BLKADDR_APR = 0x16ULL,
+ BLK_COUNT = 0x17ULL,
};
/* RVU Block Type Enumeration */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
index 457c94793e63..3254b02205ca 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile
@@ -7,7 +7,7 @@ obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o
obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o
rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \
- otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o
+ otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o
rvu_nicvf-y := otx2_vf.o
ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
index 1b08896b46d2..184de9466286 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c
@@ -22,69 +22,52 @@ static struct dev_hw_ops cn10k_hw_ops = {
.refill_pool_ptrs = cn10k_refill_pool_ptrs,
};
-int cn10k_pf_lmtst_init(struct otx2_nic *pf)
+int cn10k_lmtst_init(struct otx2_nic *pfvf)
{
- int size, num_lines;
- u64 base;
- if (!test_bit(CN10K_LMTST, &pf->hw.cap_flag)) {
- pf->hw_ops = &otx2_hw_ops;
+ struct lmtst_tbl_setup_req *req;
+ int qcount, err;
+
+ if (!test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
+ pfvf->hw_ops = &otx2_hw_ops;
return 0;
}
- pf->hw_ops = &cn10k_hw_ops;
- base = pci_resource_start(pf->pdev, PCI_MBOX_BAR_NUM) +
- (MBOX_SIZE * (pf->total_vfs + 1));
-
- size = pci_resource_len(pf->pdev, PCI_MBOX_BAR_NUM) -
- (MBOX_SIZE * (pf->total_vfs + 1));
-
- pf->hw.lmt_base = ioremap(base, size);
+ pfvf->hw_ops = &cn10k_hw_ops;
+ qcount = pfvf->hw.max_queues;
+ /* LMTST lines allocation
+ * qcount = num_online_cpus();
+ * NPA = TX + RX + XDP.
+ * NIX = TX * 32 (For Burst SQE flush).
+ */
+ pfvf->tot_lmt_lines = (qcount * 3) + (qcount * 32);
+ pfvf->npa_lmt_lines = qcount * 3;
+ pfvf->nix_lmt_size = LMT_BURST_SIZE * LMT_LINE_SIZE;
- if (!pf->hw.lmt_base) {
- dev_err(pf->dev, "Unable to map PF LMTST region\n");
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_lmtst_tbl_setup(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
return -ENOMEM;
}
- /* FIXME: Get the num of LMTST lines from LMT table */
- pf->tot_lmt_lines = size / LMT_LINE_SIZE;
- num_lines = (pf->tot_lmt_lines - NIX_LMTID_BASE) /
- pf->hw.tx_queues;
- /* Number of LMT lines per SQ queues */
- pf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines;
-
- pf->nix_lmt_size = pf->nix_lmt_lines * LMT_LINE_SIZE;
- return 0;
-}
+ req->use_local_lmt_region = true;
-int cn10k_vf_lmtst_init(struct otx2_nic *vf)
-{
- int size, num_lines;
-
- if (!test_bit(CN10K_LMTST, &vf->hw.cap_flag)) {
- vf->hw_ops = &otx2_hw_ops;
- return 0;
+ err = qmem_alloc(pfvf->dev, &pfvf->dync_lmt, pfvf->tot_lmt_lines,
+ LMT_LINE_SIZE);
+ if (err) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return err;
}
+ pfvf->hw.lmt_base = (u64 *)pfvf->dync_lmt->base;
+ req->lmt_iova = (u64)pfvf->dync_lmt->iova;
- vf->hw_ops = &cn10k_hw_ops;
- size = pci_resource_len(vf->pdev, PCI_MBOX_BAR_NUM);
- vf->hw.lmt_base = ioremap_wc(pci_resource_start(vf->pdev,
- PCI_MBOX_BAR_NUM),
- size);
- if (!vf->hw.lmt_base) {
- dev_err(vf->dev, "Unable to map VF LMTST region\n");
- return -ENOMEM;
- }
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
- vf->tot_lmt_lines = size / LMT_LINE_SIZE;
- /* LMTST lines per SQ */
- num_lines = (vf->tot_lmt_lines - NIX_LMTID_BASE) /
- vf->hw.tx_queues;
- vf->nix_lmt_lines = num_lines > 32 ? 32 : num_lines;
- vf->nix_lmt_size = vf->nix_lmt_lines * LMT_LINE_SIZE;
return 0;
}
-EXPORT_SYMBOL(cn10k_vf_lmtst_init);
+EXPORT_SYMBOL(cn10k_lmtst_init);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
{
@@ -93,9 +76,11 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura)
struct otx2_snd_queue *sq;
sq = &pfvf->qset.sq[qidx];
- sq->lmt_addr = (__force u64 *)((u64)pfvf->hw.nix_lmt_base +
+ sq->lmt_addr = (u64 *)((u64)pfvf->hw.nix_lmt_base +
(qidx * pfvf->nix_lmt_size));
+ sq->lmt_id = pfvf->npa_lmt_lines + (qidx * LMT_BURST_SIZE);
+
/* Get memory to put this msg */
aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
if (!aq)
@@ -158,15 +143,13 @@ void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq)
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx)
{
- struct otx2_nic *pfvf = dev;
- int lmt_id = NIX_LMTID_BASE + (qidx * pfvf->nix_lmt_lines);
u64 val = 0, tar_addr = 0;
/* FIXME: val[0:10] LMT_ID.
* [12:15] no of LMTST - 1 in the burst.
* [19:63] data size of each LMTST in the burst except first.
*/
- val = (lmt_id & 0x7FF);
+ val = (sq->lmt_id & 0x7FF);
/* Target address for LMTST flush tells HW how many 128bit
* words are present.
* tar_addr[6:4] size of first LMTST - 1 in units of 128b.
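
The comment in cn10k_lmtst_init() above budgets LMTST lines per queue: three NPA lines (RX, TX, XDP) plus 32 NIX lines for burst SQE flush, with each SQ's LMT id starting right after the NPA block. The standalone sketch below only replays that arithmetic outside the kernel; the queue count and the helper are made up for illustration and this is not driver code.

#include <stdio.h>

#define LMT_LINE_SIZE  128
#define LMT_BURST_SIZE 32   /* NIX lines reserved per SQ for burst SQE flush */

/* Illustrative only: replays the budgeting done in cn10k_lmtst_init(). */
static void lmt_budget(unsigned int qcount)
{
	unsigned int npa_lines = qcount * 3;              /* TX + RX + XDP   */
	unsigned int nix_lines = qcount * LMT_BURST_SIZE; /* burst SQE flush */
	unsigned int total     = npa_lines + nix_lines;

	printf("queues=%u npa=%u nix=%u total=%u bytes=%u\n",
	       qcount, npa_lines, nix_lines, total, total * LMT_LINE_SIZE);

	/* Per-SQ LMT id as in cn10k_sq_aq_init(): the NPA block comes first. */
	for (unsigned int qidx = 0; qidx < qcount; qidx++)
		printf("  sq%u lmt_id=%u\n", qidx,
		       npa_lines + qidx * LMT_BURST_SIZE);
}

int main(void)
{
	lmt_budget(4);	/* 4 queues -> 12 NPA + 128 NIX = 140 lines */
	return 0;
}

For four queues this prints a 140-line budget (17920 bytes), matching tot_lmt_lines = (qcount * 3) + (qcount * 32) in the hunk above.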
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
index 71292a4cf1f3..1a1ae334477d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h
@@ -12,8 +12,7 @@
void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq);
void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx);
int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura);
-int cn10k_pf_lmtst_init(struct otx2_nic *pf);
-int cn10k_vf_lmtst_init(struct otx2_nic *vf);
+int cn10k_lmtst_init(struct otx2_nic *pfvf);
int cn10k_free_all_ipolicers(struct otx2_nic *pfvf);
int cn10k_alloc_matchall_ipolicer(struct otx2_nic *pfvf);
int cn10k_free_matchall_ipolicer(struct otx2_nic *pfvf);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
index cf7875d51d87..7cccd802c4ed 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c
@@ -210,6 +210,9 @@ int otx2_set_mac_address(struct net_device *netdev, void *p)
/* update dmac field in vlan offload rule */
if (pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT)
otx2_install_rxvlan_offload_flow(pfvf);
+ /* update dmac address in ntuple and DMAC filter list */
+ if (pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
+ otx2_dmacflt_update_pfmac_flow(pfvf);
} else {
return -EPERM;
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
index 234b330f3183..8fd58cd07f50 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h
@@ -218,8 +218,8 @@ struct otx2_hw {
unsigned long cap_flag;
#define LMT_LINE_SIZE 128
-#define NIX_LMTID_BASE 72 /* RX + TX + XDP */
- void __iomem *lmt_base;
+#define LMT_BURST_SIZE 32 /* 32 LMTST lines for burst SQE flush */
+ u64 *lmt_base;
u64 *npa_lmt_base;
u64 *nix_lmt_base;
};
@@ -288,6 +288,9 @@ struct otx2_flow_config {
u16 tc_flower_offset;
u16 ntuple_max_flows;
u16 tc_max_flows;
+ u8 dmacflt_max_flows;
+ u8 *bmap_to_dmacindex;
+ unsigned long dmacflt_bmap;
struct list_head flow_list;
};
@@ -329,6 +332,7 @@ struct otx2_nic {
#define OTX2_FLAG_TC_FLOWER_SUPPORT BIT_ULL(11)
#define OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED BIT_ULL(12)
#define OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED BIT_ULL(13)
+#define OTX2_FLAG_DMACFLTR_SUPPORT BIT_ULL(14)
u64 flags;
struct otx2_qset qset;
@@ -363,8 +367,9 @@ struct otx2_nic {
/* Block address of NIX either BLKADDR_NIX0 or BLKADDR_NIX1 */
int nix_blkaddr;
/* LMTST Lines info */
+ struct qmem *dync_lmt;
u16 tot_lmt_lines;
- u16 nix_lmt_lines;
+ u16 npa_lmt_lines;
u32 nix_lmt_size;
struct otx2_ptp *ptp;
@@ -833,4 +838,11 @@ int otx2_init_tc(struct otx2_nic *nic);
void otx2_shutdown_tc(struct otx2_nic *nic);
int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
void *type_data);
+/* CGX/RPM DMAC filters support */
+int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf);
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
+int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac, u8 bit_pos);
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos);
+void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf);
+void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf);
#endif /* OTX2_COMMON_H */
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
new file mode 100644
index 000000000000..383a6b5cb698
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Physical Function ethernet driver
+ *
+ * Copyright (C) 2021 Marvell.
+ */
+
+#include "otx2_common.h"
+
+static int otx2_dmacflt_do_add(struct otx2_nic *pf, const u8 *mac,
+ u8 *dmac_index)
+{
+ struct cgx_mac_addr_add_req *req;
+ struct cgx_mac_addr_add_rsp *rsp;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_add(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, mac);
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ if (!err) {
+ rsp = (struct cgx_mac_addr_add_rsp *)
+ otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
+ *dmac_index = rsp->index;
+ }
+
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+static int otx2_dmacflt_add_pfmac(struct otx2_nic *pf)
+{
+ struct cgx_mac_addr_set_or_get *req;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_set(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, pf->netdev->dev_addr);
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos)
+{
+ u8 *dmacindex;
+
+ /* Store dmacindex returned by CGX/RPM driver which will
+ * be used for macaddr update/remove
+ */
+ dmacindex = &pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+
+ if (ether_addr_equal(mac, pf->netdev->dev_addr))
+ return otx2_dmacflt_add_pfmac(pf);
+ else
+ return otx2_dmacflt_do_add(pf, mac, dmacindex);
+}
+
+static int otx2_dmacflt_do_remove(struct otx2_nic *pfvf, const u8 *mac,
+ u8 dmac_index)
+{
+ struct cgx_mac_addr_del_req *req;
+ int err;
+
+ mutex_lock(&pfvf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_del(&pfvf->mbox);
+ if (!req) {
+ mutex_unlock(&pfvf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ req->index = dmac_index;
+
+ err = otx2_sync_mbox_msg(&pfvf->mbox);
+ mutex_unlock(&pfvf->mbox.lock);
+
+ return err;
+}
+
+static int otx2_dmacflt_remove_pfmac(struct otx2_nic *pf)
+{
+ struct msg_req *req;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_reset(&pf->mbox);
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+int otx2_dmacflt_remove(struct otx2_nic *pf, const u8 *mac,
+ u8 bit_pos)
+{
+ u8 dmacindex = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+
+ if (ether_addr_equal(mac, pf->netdev->dev_addr))
+ return otx2_dmacflt_remove_pfmac(pf);
+ else
+ return otx2_dmacflt_do_remove(pf, mac, dmacindex);
+}
+
+/* CGX/RPM blocks support a maximum of 32 unicast entries.
+ * In a typical configuration the MAC block is associated
+ * with 4 lmacs, so each lmac gets 8 dmac entries.
+ */
+int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf)
+{
+ struct cgx_max_dmac_entries_get_rsp *rsp;
+ struct msg_req *msg;
+ int err;
+
+ mutex_lock(&pf->mbox.lock);
+ msg = otx2_mbox_alloc_msg_cgx_mac_max_entries_get(&pf->mbox);
+
+ if (!msg) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ err = otx2_sync_mbox_msg(&pf->mbox);
+ if (err)
+ goto out;
+
+ rsp = (struct cgx_max_dmac_entries_get_rsp *)
+ otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &msg->hdr);
+ pf->flow_cfg->dmacflt_max_flows = rsp->max_dmac_filters;
+
+out:
+ mutex_unlock(&pf->mbox.lock);
+ return err;
+}
+
+int otx2_dmacflt_update(struct otx2_nic *pf, u8 *mac, u8 bit_pos)
+{
+ struct cgx_mac_addr_update_req *req;
+ int rc;
+
+ mutex_lock(&pf->mbox.lock);
+
+ req = otx2_mbox_alloc_msg_cgx_mac_addr_update(&pf->mbox);
+
+ if (!req) {
+ mutex_unlock(&pf->mbox.lock);
+ return -ENOMEM;
+ }
+
+ ether_addr_copy(req->mac_addr, mac);
+ req->index = pf->flow_cfg->bmap_to_dmacindex[bit_pos];
+ rc = otx2_sync_mbox_msg(&pf->mbox);
+
+ mutex_unlock(&pf->mbox.lock);
+ return rc;
+}
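
Every operation in the new otx2_dmac_flt.c keeps one small piece of state: the bit position handed in by the flow code is mapped to the hardware index returned by the CGX/RPM mailbox, so later update/remove calls can address the right slot. A minimal userspace sketch of that bookkeeping follows, with a fake "hardware add" standing in for the real mailbox round trip; the table size and helper names are invented for the example.

#include <stdint.h>
#include <stdio.h>

#define MAX_DMAC_FILTERS 8	/* made-up table size for the example */

/* Software slot -> hardware index map, mirroring bmap_to_dmacindex[]. */
static uint8_t bmap_to_dmacindex[MAX_DMAC_FILTERS];
static unsigned long dmacflt_bmap;	/* one bit per software slot */

/* Stand-in for the CGX/RPM mailbox add; just hands back the next index. */
static int fake_hw_add(const uint8_t *mac)
{
	static int next_hw_index;

	(void)mac;
	return next_hw_index++;
}

static void dmacflt_add(const uint8_t *mac, unsigned int bit_pos)
{
	bmap_to_dmacindex[bit_pos] = (uint8_t)fake_hw_add(mac);
	dmacflt_bmap |= 1UL << bit_pos;
	printf("slot %u -> hw index %u\n",
	       bit_pos, (unsigned int)bmap_to_dmacindex[bit_pos]);
}

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };

	dmacflt_add(mac, 0);	/* later update/remove use the stored index */
	dmacflt_add(mac, 3);
	return 0;
}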
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
index 8c97106bdd1c..4d9de525802d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c
@@ -18,6 +18,12 @@ struct otx2_flow {
bool is_vf;
u8 rss_ctx_id;
int vf;
+ bool dmac_filter;
+};
+
+enum dmac_req {
+ DMAC_ADDR_UPDATE,
+ DMAC_ADDR_DEL
};
static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
@@ -219,6 +225,22 @@ int otx2_mcam_flow_init(struct otx2_nic *pf)
if (!pf->mac_table)
return -ENOMEM;
+ otx2_dmacflt_get_max_cnt(pf);
+
+ /* DMAC filters are not allocated */
+ if (!pf->flow_cfg->dmacflt_max_flows)
+ return 0;
+
+ pf->flow_cfg->bmap_to_dmacindex =
+ devm_kzalloc(pf->dev, sizeof(u8) *
+ pf->flow_cfg->dmacflt_max_flows,
+ GFP_KERNEL);
+
+ if (!pf->flow_cfg->bmap_to_dmacindex)
+ return -ENOMEM;
+
+ pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;
+
return 0;
}
@@ -280,6 +302,12 @@ int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
{
struct otx2_nic *pf = netdev_priv(netdev);
+ if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+ pf->flow_cfg->dmacflt_max_flows))
+ netdev_warn(netdev,
+ "Add %pM to CGX/RPM DMAC filters list as well\n",
+ mac);
+
return otx2_do_add_macfilter(pf, mac);
}
@@ -351,12 +379,22 @@ static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
list_add(&flow->list, head);
}
+static int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
+{
+ if (flow_cfg->nr_flows == flow_cfg->ntuple_max_flows ||
+ bitmap_weight(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows))
+ return flow_cfg->ntuple_max_flows + flow_cfg->dmacflt_max_flows;
+ else
+ return flow_cfg->ntuple_max_flows;
+}
+
int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
u32 location)
{
struct otx2_flow *iter;
- if (location >= pfvf->flow_cfg->ntuple_max_flows)
+ if (location >= otx2_get_maxflows(pfvf->flow_cfg))
return -EINVAL;
list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
@@ -378,7 +416,7 @@ int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
int idx = 0;
int err = 0;
- nfc->data = pfvf->flow_cfg->ntuple_max_flows;
+ nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
while ((!err || err == -ENOENT) && idx < rule_cnt) {
err = otx2_get_flow(pfvf, nfc, location);
if (!err)
@@ -760,6 +798,32 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
return 0;
}
+static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
+ struct ethtool_rx_flow_spec *fsp)
+{
+ struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
+ struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
+ u64 ring_cookie = fsp->ring_cookie;
+ u32 flow_type;
+
+ if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
+ return false;
+
+ flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
+
+ /* CGX/RPM block dmac filtering is configured for white listing;
+ * check for an action other than DROP
+ */
+ if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
+ !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
+ if (is_zero_ether_addr(eth_mask->h_dest) &&
+ is_valid_ether_addr(eth_hdr->h_dest))
+ return true;
+ }
+
+ return false;
+}
+
static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
{
u64 ring_cookie = flow->flow_spec.ring_cookie;
@@ -818,14 +882,46 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
return err;
}
+static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
+ struct otx2_flow *flow)
+{
+ struct otx2_flow *pf_mac;
+ struct ethhdr *eth_hdr;
+
+ pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
+ if (!pf_mac)
+ return -ENOMEM;
+
+ pf_mac->entry = 0;
+ pf_mac->dmac_filter = true;
+ pf_mac->location = pfvf->flow_cfg->ntuple_max_flows;
+ memcpy(&pf_mac->flow_spec, &flow->flow_spec,
+ sizeof(struct ethtool_rx_flow_spec));
+ pf_mac->flow_spec.location = pf_mac->location;
+
+ /* Copy PF mac address */
+ eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
+ ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);
+
+ /* Install DMAC filter with PF mac address */
+ otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);
+
+ otx2_add_flow_to_list(pfvf, pf_mac);
+ pfvf->flow_cfg->nr_flows++;
+ set_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
+
+ return 0;
+}
+
int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct ethtool_rx_flow_spec *fsp = &nfc->fs;
struct otx2_flow *flow;
+ struct ethhdr *eth_hdr;
bool new = false;
+ int err = 0;
u32 ring;
- int err;
ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
@@ -834,16 +930,15 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
return -EINVAL;
- if (fsp->location >= flow_cfg->ntuple_max_flows)
+ if (fsp->location >= otx2_get_maxflows(flow_cfg))
return -EINVAL;
flow = otx2_find_flow(pfvf, fsp->location);
if (!flow) {
- flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
+ flow = kzalloc(sizeof(*flow), GFP_KERNEL);
if (!flow)
return -ENOMEM;
flow->location = fsp->location;
- flow->entry = flow_cfg->flow_ent[flow->location];
new = true;
}
/* struct copy */
@@ -852,7 +947,54 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
if (fsp->flow_type & FLOW_RSS)
flow->rss_ctx_id = nfc->rss_context;
- err = otx2_add_flow_msg(pfvf, flow);
+ if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
+ eth_hdr = &flow->flow_spec.h_u.ether_spec;
+
+ /* Sync dmac filter table with updated fields */
+ if (flow->dmac_filter)
+ return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
+ flow->entry);
+
+ if (bitmap_full(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows)) {
+ netdev_warn(pfvf->netdev,
+ "Can't insert the rule %d as max allowed dmac filters are %d\n",
+ flow->location +
+ flow_cfg->dmacflt_max_flows,
+ flow_cfg->dmacflt_max_flows);
+ err = -EINVAL;
+ if (new)
+ kfree(flow);
+ return err;
+ }
+
+ /* Install PF mac address to DMAC filter list */
+ if (!test_bit(0, &flow_cfg->dmacflt_bmap))
+ otx2_add_flow_with_pfmac(pfvf, flow);
+
+ flow->dmac_filter = true;
+ flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows);
+ fsp->location = flow_cfg->ntuple_max_flows + flow->entry;
+ flow->flow_spec.location = fsp->location;
+ flow->location = fsp->location;
+
+ set_bit(flow->entry, &flow_cfg->dmacflt_bmap);
+ otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);
+
+ } else {
+ if (flow->location >= pfvf->flow_cfg->ntuple_max_flows) {
+ netdev_warn(pfvf->netdev,
+ "Can't insert non dmac ntuple rule at %d, allowed range %d-0\n",
+ flow->location,
+ flow_cfg->ntuple_max_flows - 1);
+ err = -EINVAL;
+ } else {
+ flow->entry = flow_cfg->flow_ent[flow->location];
+ err = otx2_add_flow_msg(pfvf, flow);
+ }
+ }
+
if (err) {
if (new)
kfree(flow);
@@ -890,20 +1032,70 @@ static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
return err;
}
+static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
+{
+ struct otx2_flow *iter;
+ struct ethhdr *eth_hdr;
+ bool found = false;
+
+ list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
+ if (iter->dmac_filter && iter->entry == 0) {
+ eth_hdr = &iter->flow_spec.h_u.ether_spec;
+ if (req == DMAC_ADDR_DEL) {
+ otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
+ 0);
+ clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
+ found = true;
+ } else {
+ ether_addr_copy(eth_hdr->h_dest,
+ pfvf->netdev->dev_addr);
+ otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
+ }
+ break;
+ }
+ }
+
+ if (found) {
+ list_del(&iter->list);
+ kfree(iter);
+ pfvf->flow_cfg->nr_flows--;
+ }
+}
+
int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
{
struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
struct otx2_flow *flow;
int err;
- if (location >= flow_cfg->ntuple_max_flows)
+ if (location >= otx2_get_maxflows(flow_cfg))
return -EINVAL;
flow = otx2_find_flow(pfvf, location);
if (!flow)
return -ENOENT;
- err = otx2_remove_flow_msg(pfvf, flow->entry, false);
+ if (flow->dmac_filter) {
+ struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;
+
+ /* The user is not allowed to remove a dmac filter for the interface mac */
+ if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
+ return -EPERM;
+
+ err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
+ flow->entry);
+ clear_bit(flow->entry, &flow_cfg->dmacflt_bmap);
+ /* If all dmac filters are removed, delete the macfilter with the
+ * interface mac address and configure the CGX/RPM block in
+ * promiscuous mode
+ */
+ if (bitmap_weight(&flow_cfg->dmacflt_bmap,
+ flow_cfg->dmacflt_max_flows) == 1)
+ otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
+ } else {
+ err = otx2_remove_flow_msg(pfvf, flow->entry, false);
+ }
+
if (err)
return err;
@@ -1100,3 +1292,22 @@ int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
mutex_unlock(&pf->mbox.lock);
return rsp_hdr->rc;
}
+
+void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
+{
+ struct otx2_flow *iter;
+ struct ethhdr *eth_hdr;
+
+ list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
+ if (iter->dmac_filter) {
+ eth_hdr = &iter->flow_spec.h_u.ether_spec;
+ otx2_dmacflt_add(pf, eth_hdr->h_dest,
+ iter->entry);
+ }
+ }
+}
+
+void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
+{
+ otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);
+}
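
The flow changes above split the ethtool location space in two: locations below ntuple_max_flows stay NPC ntuple rules, while DMAC filter rules are parked above that boundary and get a free slot from dmacflt_bmap. Below is a hedged sketch of that allocation using made-up limits and a simplified find-first-zero helper rather than the kernel bitmap API; the driver reads the real limits from the AF at probe time.

#include <stdio.h>

/* Made-up limits for the example only. */
#define NTUPLE_MAX_FLOWS  16
#define DMACFLT_MAX_FLOWS 8

static unsigned long dmacflt_bmap;

/* Simplified stand-in for find_first_zero_bit(). */
static int first_zero_bit(unsigned long map, int nbits)
{
	for (int i = 0; i < nbits; i++)
		if (!(map & (1UL << i)))
			return i;
	return nbits;
}

/* Returns the user-visible ethtool location of the new DMAC rule. */
static int add_dmac_rule(void)
{
	int entry = first_zero_bit(dmacflt_bmap, DMACFLT_MAX_FLOWS);

	if (entry == DMACFLT_MAX_FLOWS)
		return -1;			/* table full */
	dmacflt_bmap |= 1UL << entry;
	return NTUPLE_MAX_FLOWS + entry;	/* parked above the ntuple range */
}

int main(void)
{
	printf("first DMAC rule  -> location %d\n", add_dmac_rule());
	printf("second DMAC rule -> location %d\n", add_dmac_rule());
	return 0;
}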
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 59912f73417b..f300b807a85b 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1110,6 +1110,11 @@ static int otx2_cgx_config_loopback(struct otx2_nic *pf, bool enable)
struct msg_req *msg;
int err;
+ if (enable && bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
+ pf->flow_cfg->dmacflt_max_flows))
+ netdev_warn(pf->netdev,
+ "CGX/RPM internal loopback might not work as DMAC filters are active\n");
+
mutex_lock(&pf->mbox.lock);
if (enable)
msg = otx2_mbox_alloc_msg_cgx_intlbk_enable(&pf->mbox);
@@ -1533,10 +1538,10 @@ int otx2_open(struct net_device *netdev)
if (test_bit(CN10K_LMTST, &pf->hw.cap_flag)) {
/* Reserve LMT lines for NPA AURA batch free */
- pf->hw.npa_lmt_base = (__force u64 *)pf->hw.lmt_base;
+ pf->hw.npa_lmt_base = pf->hw.lmt_base;
/* Reserve LMT lines for NIX TX */
- pf->hw.nix_lmt_base = (__force u64 *)((u64)pf->hw.npa_lmt_base +
- (NIX_LMTID_BASE * LMT_LINE_SIZE));
+ pf->hw.nix_lmt_base = (u64 *)((u64)pf->hw.npa_lmt_base +
+ (pf->npa_lmt_lines * LMT_LINE_SIZE));
}
err = otx2_init_hw_resources(pf);
@@ -1644,6 +1649,10 @@ int otx2_open(struct net_device *netdev)
/* Restore pause frame settings */
otx2_config_pause_frm(pf);
+ /* Install DMAC Filters */
+ if (pf->flags & OTX2_FLAG_DMACFLTR_SUPPORT)
+ otx2_dmacflt_reinstall_flows(pf);
+
err = otx2_rxtx_enable(pf, true);
if (err)
goto err_tx_stop_queues;
@@ -2526,7 +2535,7 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_detach_rsrc;
- err = cn10k_pf_lmtst_init(pf);
+ err = cn10k_lmtst_init(pf);
if (err)
goto err_detach_rsrc;
@@ -2630,8 +2639,8 @@ err_del_mcam_entries:
err_ptp_destroy:
otx2_ptp_destroy(pf);
err_detach_rsrc:
- if (hw->lmt_base)
- iounmap(hw->lmt_base);
+ if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
+ qmem_free(pf->dev, pf->dync_lmt);
otx2_detach_resources(&pf->mbox);
err_disable_mbox_intr:
otx2_disable_mbox_intr(pf);
@@ -2772,9 +2781,8 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_mcam_flow_del(pf);
otx2_shutdown_tc(pf);
otx2_detach_resources(&pf->mbox);
- if (pf->hw.lmt_base)
- iounmap(pf->hw.lmt_base);
-
+ if (test_bit(CN10K_LMTST, &pf->hw.cap_flag))
+ qmem_free(pf->dev, pf->dync_lmt);
otx2_disable_mbox_intr(pf);
otx2_pfaf_mbox_destroy(pf);
pci_free_irq_vectors(pf->pdev);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
index 905fc02a7dfe..972b202b9884 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
@@ -288,7 +288,7 @@ static int otx2_tc_parse_actions(struct otx2_nic *nic,
struct otx2_nic *priv;
u32 burst, mark = 0;
u8 nr_police = 0;
- bool pps;
+ bool pps = false;
u64 rate;
int i;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
index 52486c1f0973..2f144e2cf436 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h
@@ -83,6 +83,7 @@ struct otx2_snd_queue {
u16 num_sqbs;
u16 sqe_thresh;
u8 sqe_per_sqb;
+ u32 lmt_id;
u64 io_addr;
u64 *aura_fc_addr;
u64 *lmt_addr;
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
index 13a908f75ba0..a8bee5aefec1 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
@@ -609,7 +609,7 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_detach_rsrc;
- err = cn10k_vf_lmtst_init(vf);
+ err = cn10k_lmtst_init(vf);
if (err)
goto err_detach_rsrc;
@@ -667,8 +667,8 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
err_unreg_netdev:
unregister_netdev(netdev);
err_detach_rsrc:
- if (hw->lmt_base)
- iounmap(hw->lmt_base);
+ if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+ qmem_free(vf->dev, vf->dync_lmt);
otx2_detach_resources(&vf->mbox);
err_disable_mbox_intr:
otx2vf_disable_mbox_intr(vf);
@@ -700,10 +700,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
destroy_workqueue(vf->otx2_wq);
otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);
-
- if (vf->hw.lmt_base)
- iounmap(vf->hw.lmt_base);
-
+ if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+ qmem_free(vf->dev, vf->dync_lmt);
otx2vf_vfaf_mbox_destroy(vf);
pci_free_irq_vectors(vf->pdev);
pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig
index a80419d8d4b5..ac403d43c74c 100644
--- a/drivers/net/ethernet/microchip/sparx5/Kconfig
+++ b/drivers/net/ethernet/microchip/sparx5/Kconfig
@@ -2,6 +2,7 @@ config SPARX5_SWITCH
tristate "Sparx5 switch driver"
depends on NET_SWITCHDEV
depends on HAS_IOMEM
+ depends on OF
select PHYLINK
select PHY_SPARX5_SERDES
select RESET_CONTROLLER
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 5249b64f4fc5..49def6934cad 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -540,10 +540,8 @@ static int moxart_mac_probe(struct platform_device *pdev)
SET_NETDEV_DEV(ndev, &pdev->dev);
ret = register_netdev(ndev);
- if (ret) {
- free_netdev(ndev);
+ if (ret)
goto init_fail;
- }
netdev_dbg(ndev, "%s: IRQ=%d address=%pM\n",
__func__, ndev->irq, ndev->dev_addr);
diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c
index 3e89e34f86d5..e9d260d84bf3 100644
--- a/drivers/net/ethernet/mscc/ocelot_net.c
+++ b/drivers/net/ethernet/mscc/ocelot_net.c
@@ -1298,6 +1298,7 @@ static int ocelot_netdevice_lag_leave(struct net_device *dev,
}
static int ocelot_netdevice_changeupper(struct net_device *dev,
+ struct net_device *brport_dev,
struct netdev_notifier_changeupper_info *info)
{
struct netlink_ext_ack *extack;
@@ -1307,11 +1308,11 @@ static int ocelot_netdevice_changeupper(struct net_device *dev,
if (netif_is_bridge_master(info->upper_dev)) {
if (info->linking)
- err = ocelot_netdevice_bridge_join(dev, dev,
+ err = ocelot_netdevice_bridge_join(dev, brport_dev,
info->upper_dev,
extack);
else
- err = ocelot_netdevice_bridge_leave(dev, dev,
+ err = ocelot_netdevice_bridge_leave(dev, brport_dev,
info->upper_dev);
}
if (netif_is_lag_master(info->upper_dev)) {
@@ -1346,7 +1347,7 @@ ocelot_netdevice_lag_changeupper(struct net_device *dev,
if (ocelot_port->bond != dev)
return NOTIFY_OK;
- err = ocelot_netdevice_changeupper(lower, info);
+ err = ocelot_netdevice_changeupper(lower, dev, info);
if (err)
return notifier_from_errno(err);
}
@@ -1385,7 +1386,7 @@ static int ocelot_netdevice_event(struct notifier_block *unused,
struct netdev_notifier_changeupper_info *info = ptr;
if (ocelot_netdevice_dev_check(dev))
- return ocelot_netdevice_changeupper(dev, info);
+ return ocelot_netdevice_changeupper(dev, dev, info);
if (netif_is_lag_master(dev))
return ocelot_netdevice_lag_changeupper(dev, info);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
index 273d529d43c2..128020b1573e 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c
@@ -1141,20 +1141,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent)
nfp_fl_ct_clean_flow_entry(ct_entry);
kfree(ct_map_ent);
- /* If this is the last pre_ct_rule it means that it is
- * very likely that the nft table will be cleaned up next,
- * as this happens on the removal of the last act_ct flow.
- * However we cannot deregister the callback on the removal
- * of the last nft flow as this runs into a deadlock situation.
- * So deregister the callback on removal of the last pre_ct flow
- * and remove any remaining nft flow entries. We also cannot
- * save this state and delete the callback later since the
- * nft table would already have been freed at that time.
- */
if (!zt->pre_ct_count) {
- nf_flow_table_offload_del_cb(zt->nft,
- nfp_fl_ct_handle_nft_flow,
- zt);
zt->nft = NULL;
nfp_fl_ct_clean_nft_entries(zt);
}
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 8543bf3c3484..ad655f0a4965 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -735,12 +735,13 @@ static int emac_remove(struct platform_device *pdev)
put_device(&adpt->phydev->mdio.dev);
mdiobus_unregister(adpt->mii_bus);
- free_netdev(netdev);
if (adpt->phy.digital)
iounmap(adpt->phy.digital);
iounmap(adpt->phy.base);
+ free_netdev(netdev);
+
return 0;
}
diff --git a/drivers/net/ethernet/sfc/efx_channels.c b/drivers/net/ethernet/sfc/efx_channels.c
index a3ca406a3561..e5b0d795c301 100644
--- a/drivers/net/ethernet/sfc/efx_channels.c
+++ b/drivers/net/ethernet/sfc/efx_channels.c
@@ -152,6 +152,7 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
* maximum size.
*/
tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx);
+ tx_per_ev = min(tx_per_ev, EFX_MAX_TXQ_PER_CHANNEL);
n_xdp_tx = num_possible_cpus();
n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);
@@ -169,6 +170,8 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
netif_err(efx, drv, efx->net_dev,
"Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
n_xdp_ev, n_channels, max_channels);
+ netif_err(efx, drv, efx->net_dev,
+ "XDP_TX and XDP_REDIRECT will not work on this interface");
efx->n_xdp_channels = 0;
efx->xdp_tx_per_channel = 0;
efx->xdp_tx_queue_count = 0;
@@ -176,12 +179,14 @@ static int efx_allocate_msix_channels(struct efx_nic *efx,
netif_err(efx, drv, efx->net_dev,
"Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
n_xdp_tx, n_channels, efx->max_vis);
+ netif_err(efx, drv, efx->net_dev,
+ "XDP_TX and XDP_REDIRECT will not work on this interface");
efx->n_xdp_channels = 0;
efx->xdp_tx_per_channel = 0;
efx->xdp_tx_queue_count = 0;
} else {
efx->n_xdp_channels = n_xdp_ev;
- efx->xdp_tx_per_channel = EFX_MAX_TXQ_PER_CHANNEL;
+ efx->xdp_tx_per_channel = tx_per_ev;
efx->xdp_tx_queue_count = n_xdp_tx;
n_channels += n_xdp_ev;
netif_dbg(efx, drv, efx->net_dev,
@@ -891,18 +896,20 @@ int efx_set_channels(struct efx_nic *efx)
if (efx_channel_is_xdp_tx(channel)) {
efx_for_each_channel_tx_queue(tx_queue, channel) {
tx_queue->queue = next_queue++;
- netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
- channel->channel, tx_queue->label,
- xdp_queue_number, tx_queue->queue);
+
/* We may have a few left-over XDP TX
* queues owing to xdp_tx_queue_count
* not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL.
* We still allocate and probe those
* TXQs, but never use them.
*/
- if (xdp_queue_number < efx->xdp_tx_queue_count)
+ if (xdp_queue_number < efx->xdp_tx_queue_count) {
+ netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
+ channel->channel, tx_queue->label,
+ xdp_queue_number, tx_queue->queue);
efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
- xdp_queue_number++;
+ xdp_queue_number++;
+ }
}
} else {
efx_for_each_channel_tx_queue(tx_queue, channel) {
@@ -914,8 +921,7 @@ int efx_set_channels(struct efx_nic *efx)
}
}
}
- if (xdp_queue_number)
- efx->xdp_tx_queue_count = xdp_queue_number;
+ WARN_ON(xdp_queue_number != efx->xdp_tx_queue_count);
rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
if (rc)
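
The sfc fix above caps how many TX queues are packed into one event queue and then derives the number of XDP event queues from the possible-CPU count. A small worked example of that arithmetic with stand-in numbers (not real hardware limits):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Stand-in numbers only, not the real sfc limits. */
	unsigned int evq_size   = 32768;	/* EFX_MAX_EVQ_SIZE stand-in        */
	unsigned int txq_ents   = 2048;		/* EFX_TXQ_MAX_ENT() stand-in       */
	unsigned int max_per_ch = 8;		/* EFX_MAX_TXQ_PER_CHANNEL stand-in */
	unsigned int n_cpus     = 12;		/* num_possible_cpus() stand-in     */

	unsigned int tx_per_ev = evq_size / txq_ents;	/* 16 before the clamp */
	if (tx_per_ev > max_per_ch)
		tx_per_ev = max_per_ch;			/* the new clamp -> 8  */

	unsigned int n_xdp_tx = n_cpus;
	unsigned int n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);	/* ceil(12/8) = 2 */

	printf("tx_per_ev=%u xdp_txqs=%u xdp_evqs=%u\n",
	       tx_per_ev, n_xdp_tx, n_xdp_ev);
	return 0;
}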
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
index e108b0d2bd28..4c9a37dd0d3f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -49,9 +49,9 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
{
struct plat_stmmacenet_data *plat;
struct stmmac_resources res;
- bool mdio = false;
- int ret, i;
struct device_node *np;
+ int ret, i, phy_mode;
+ bool mdio = false;
np = dev_of_node(&pdev->dev);
@@ -108,10 +108,11 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id
if (plat->bus_id < 0)
plat->bus_id = pci_dev_id(pdev);
- plat->phy_interface = device_get_phy_mode(&pdev->dev);
- if (plat->phy_interface < 0)
+ phy_mode = device_get_phy_mode(&pdev->dev);
+ if (phy_mode < 0)
dev_err(&pdev->dev, "phy_mode not found\n");
+ plat->phy_interface = phy_mode;
plat->interface = PHY_INTERFACE_MODE_GMII;
pci_set_master(pdev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index e735134e8487..fcdb1d20389b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -349,6 +349,9 @@ void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue);
void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue);
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue);
int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags);
+struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
+ ktime_t current_time,
+ u64 cycle_time);
#if IS_ENABLED(CONFIG_STMMAC_SELFTESTS)
void stmmac_selftest_run(struct net_device *dev,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8d9d6ecf8c63..7b8404a21544 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -7171,6 +7171,7 @@ int stmmac_suspend(struct device *dev)
priv->plat->rx_queues_to_use, false);
stmmac_fpe_handshake(priv, false);
+ stmmac_fpe_stop_wq(priv);
}
priv->speed = SPEED_UNKNOWN;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 072eff8079d0..5ca710844cc1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -397,6 +397,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
struct device_node *np = pdev->dev.of_node;
struct plat_stmmacenet_data *plat;
struct stmmac_dma_cfg *dma_cfg;
+ int phy_mode;
void *ret;
int rc;
@@ -412,10 +413,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
eth_zero_addr(mac);
}
- plat->phy_interface = device_get_phy_mode(&pdev->dev);
- if (plat->phy_interface < 0)
- return ERR_PTR(plat->phy_interface);
+ phy_mode = device_get_phy_mode(&pdev->dev);
+ if (phy_mode < 0)
+ return ERR_PTR(phy_mode);
+ plat->phy_interface = phy_mode;
plat->interface = stmmac_of_get_mac_mode(np);
if (plat->interface < 0)
plat->interface = plat->phy_interface;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index 4e86cdf2bc9f..580cc035536b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -62,7 +62,8 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
u32 sec, nsec;
u32 quotient, reminder;
int neg_adj = 0;
- bool xmac;
+ bool xmac, est_rst = false;
+ int ret;
xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
@@ -75,10 +76,48 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
sec = quotient;
nsec = reminder;
+ /* If EST is enabled, disable it before adjusting the ptp time. */
+ if (priv->plat->est && priv->plat->est->enable) {
+ est_rst = true;
+ mutex_lock(&priv->plat->est->lock);
+ priv->plat->est->enable = false;
+ stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ priv->plat->clk_ptp_rate);
+ mutex_unlock(&priv->plat->est->lock);
+ }
+
spin_lock_irqsave(&priv->ptp_lock, flags);
stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj, xmac);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
+ /* Calculate the new basetime and re-configure EST after the PTP time adjustment. */
+ if (est_rst) {
+ struct timespec64 current_time, time;
+ ktime_t current_time_ns, basetime;
+ u64 cycle_time;
+
+ mutex_lock(&priv->plat->est->lock);
+ priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
+ current_time_ns = timespec64_to_ktime(current_time);
+ time.tv_nsec = priv->plat->est->btr_reserve[0];
+ time.tv_sec = priv->plat->est->btr_reserve[1];
+ basetime = timespec64_to_ktime(time);
+ cycle_time = priv->plat->est->ctr[1] * NSEC_PER_SEC +
+ priv->plat->est->ctr[0];
+ time = stmmac_calc_tas_basetime(basetime,
+ current_time_ns,
+ cycle_time);
+
+ priv->plat->est->btr[0] = (u32)time.tv_nsec;
+ priv->plat->est->btr[1] = (u32)time.tv_sec;
+ priv->plat->est->enable = true;
+ ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ priv->plat->clk_ptp_rate);
+ mutex_unlock(&priv->plat->est->lock);
+ if (ret)
+ netdev_err(priv->dev, "failed to configure EST\n");
+ }
+
return 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
index 92dab609d4f8..4f3b6437b114 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -711,12 +711,35 @@ static int tc_setup_cls(struct stmmac_priv *priv,
return ret;
}
+struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
+ ktime_t current_time,
+ u64 cycle_time)
+{
+ struct timespec64 time;
+
+ if (ktime_after(old_base_time, current_time)) {
+ time = ktime_to_timespec64(old_base_time);
+ } else {
+ s64 n;
+ ktime_t base_time;
+
+ n = div64_s64(ktime_sub_ns(current_time, old_base_time),
+ cycle_time);
+ base_time = ktime_add_ns(old_base_time,
+ (n + 1) * cycle_time);
+
+ time = ktime_to_timespec64(base_time);
+ }
+
+ return time;
+}
+
static int tc_setup_taprio(struct stmmac_priv *priv,
struct tc_taprio_qopt_offload *qopt)
{
u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
struct plat_stmmacenet_data *plat = priv->plat;
- struct timespec64 time, current_time;
+ struct timespec64 time, current_time, qopt_time;
ktime_t current_time_ns;
bool fpe = false;
int i, ret = 0;
@@ -773,14 +796,18 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
GFP_KERNEL);
if (!plat->est)
return -ENOMEM;
+
+ mutex_init(&priv->plat->est->lock);
} else {
memset(plat->est, 0, sizeof(*plat->est));
}
size = qopt->num_entries;
+ mutex_lock(&priv->plat->est->lock);
priv->plat->est->gcl_size = size;
priv->plat->est->enable = qopt->enable;
+ mutex_unlock(&priv->plat->est->lock);
for (i = 0; i < size; i++) {
s64 delta_ns = qopt->entries[i].interval;
@@ -811,32 +838,28 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
priv->plat->est->gcl[i] = delta_ns | (gates << wid);
}
+ mutex_lock(&priv->plat->est->lock);
/* Adjust for real system time */
priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
current_time_ns = timespec64_to_ktime(current_time);
- if (ktime_after(qopt->base_time, current_time_ns)) {
- time = ktime_to_timespec64(qopt->base_time);
- } else {
- ktime_t base_time;
- s64 n;
-
- n = div64_s64(ktime_sub_ns(current_time_ns, qopt->base_time),
- qopt->cycle_time);
- base_time = ktime_add_ns(qopt->base_time,
- (n + 1) * qopt->cycle_time);
-
- time = ktime_to_timespec64(base_time);
- }
+ time = stmmac_calc_tas_basetime(qopt->base_time, current_time_ns,
+ qopt->cycle_time);
priv->plat->est->btr[0] = (u32)time.tv_nsec;
priv->plat->est->btr[1] = (u32)time.tv_sec;
+ qopt_time = ktime_to_timespec64(qopt->base_time);
+ priv->plat->est->btr_reserve[0] = (u32)qopt_time.tv_nsec;
+ priv->plat->est->btr_reserve[1] = (u32)qopt_time.tv_sec;
+
ctr = qopt->cycle_time;
priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
priv->plat->est->ctr[1] = (u32)ctr;
- if (fpe && !priv->dma_cap.fpesel)
+ if (fpe && !priv->dma_cap.fpesel) {
+ mutex_unlock(&priv->plat->est->lock);
return -EOPNOTSUPP;
+ }
/* Actual FPE register configuration will be done after FPE handshake
* is success.
@@ -845,6 +868,7 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
priv->plat->clk_ptp_rate);
+ mutex_unlock(&priv->plat->est->lock);
if (ret) {
netdev_err(priv->dev, "failed to configure EST\n");
goto disable;
@@ -860,9 +884,11 @@ static int tc_setup_taprio(struct stmmac_priv *priv,
return 0;
disable:
+ mutex_lock(&priv->plat->est->lock);
priv->plat->est->enable = false;
stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
priv->plat->clk_ptp_rate);
+ mutex_unlock(&priv->plat->est->lock);
priv->plat->fpe_cfg->enable = false;
stmmac_fpe_configure(priv, priv->ioaddr,
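
stmmac_calc_tas_basetime(), factored out above so the taprio setup and the PTP adjust path can share it, keeps a base time that is still in the future and otherwise advances it by whole cycles until it lands after the current time. A self-contained restatement with example nanosecond values (illustrative only, not the driver's code):

#include <stdint.h>
#include <stdio.h>

/* Same rule as stmmac_calc_tas_basetime(), on plain signed nanoseconds. */
static int64_t calc_tas_basetime(int64_t old_base, int64_t now, int64_t cycle)
{
	if (old_base > now)
		return old_base;
	return old_base + ((now - old_base) / cycle + 1) * cycle;
}

int main(void)
{
	/* Example values in nanoseconds. */
	int64_t base = 1000, now = 10500, cycle = 2000;

	/* (10500 - 1000) / 2000 = 4 whole cycles -> 1000 + 5 * 2000 = 11000 */
	printf("new base time = %lld ns\n",
	       (long long)calc_tas_basetime(base, now, cycle));
	return 0;
}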
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
index 0b2ce4bdc2c3..e0cb713193ea 100644
--- a/drivers/net/ethernet/ti/tlan.c
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -313,9 +313,8 @@ static void tlan_remove_one(struct pci_dev *pdev)
pci_release_regions(pdev);
#endif
- free_netdev(dev);
-
cancel_work_sync(&priv->tlan_tqueue);
+ free_netdev(dev);
}
static void tlan_start(struct net_device *dev)
diff --git a/drivers/net/fddi/defza.c b/drivers/net/fddi/defza.c
index 14f07050b6b1..0de2c4552f5e 100644
--- a/drivers/net/fddi/defza.c
+++ b/drivers/net/fddi/defza.c
@@ -1504,9 +1504,8 @@ err_out_resource:
release_mem_region(start, len);
err_out_kfree:
- free_netdev(dev);
-
pr_err("%s: initialization failure, aborting!\n", fp->name);
+ free_netdev(dev);
return ret;
}
diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c
index 3811f1bde84e..b80ed2ffd45e 100644
--- a/drivers/net/netdevsim/ipsec.c
+++ b/drivers/net/netdevsim/ipsec.c
@@ -85,7 +85,7 @@ static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs,
u32 *mykey, u32 *mysalt)
{
const char aes_gcm_name[] = "rfc4106(gcm(aes))";
- struct net_device *dev = xs->xso.dev;
+ struct net_device *dev = xs->xso.real_dev;
unsigned char *key_data;
char *alg_name = NULL;
int key_len;
@@ -134,7 +134,7 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs)
u16 sa_idx;
int ret;
- dev = xs->xso.dev;
+ dev = xs->xso.real_dev;
ns = netdev_priv(dev);
ipsec = &ns->ipsec;
@@ -194,7 +194,7 @@ static int nsim_ipsec_add_sa(struct xfrm_state *xs)
static void nsim_ipsec_del_sa(struct xfrm_state *xs)
{
- struct netdevsim *ns = netdev_priv(xs->xso.dev);
+ struct netdevsim *ns = netdev_priv(xs->xso.real_dev);
struct nsim_ipsec *ipsec = &ns->ipsec;
u16 sa_idx;
@@ -211,7 +211,7 @@ static void nsim_ipsec_del_sa(struct xfrm_state *xs)
static bool nsim_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
{
- struct netdevsim *ns = netdev_priv(xs->xso.dev);
+ struct netdevsim *ns = netdev_priv(xs->xso.real_dev);
struct nsim_ipsec *ipsec = &ns->ipsec;
ipsec->ok++;
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index bbbc6ac8fa82..53a433442803 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -78,6 +78,11 @@ enum {
/* Temperature read register (88E2110 only) */
MV_PCS_TEMP = 0x8042,
+ /* Number of ports on the device */
+ MV_PCS_PORT_INFO = 0xd00d,
+ MV_PCS_PORT_INFO_NPORTS_MASK = 0x0380,
+ MV_PCS_PORT_INFO_NPORTS_SHIFT = 7,
+
/* These registers appear at 0x800X and 0xa00X - the 0xa00X control
* registers appear to set themselves to the 0x800X when AN is
* restarted, but status registers appear readable from either.
@@ -966,6 +971,30 @@ static const struct mv3310_chip mv2111_type = {
#endif
};
+static int mv3310_get_number_of_ports(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MV_PCS_PORT_INFO);
+ if (ret < 0)
+ return ret;
+
+ ret &= MV_PCS_PORT_INFO_NPORTS_MASK;
+ ret >>= MV_PCS_PORT_INFO_NPORTS_SHIFT;
+
+ return ret + 1;
+}
+
+static int mv3310_match_phy_device(struct phy_device *phydev)
+{
+ return mv3310_get_number_of_ports(phydev) == 1;
+}
+
+static int mv3340_match_phy_device(struct phy_device *phydev)
+{
+ return mv3310_get_number_of_ports(phydev) == 4;
+}
+
static int mv211x_match_phy_device(struct phy_device *phydev, bool has_5g)
{
int val;
@@ -994,7 +1023,8 @@ static int mv2111_match_phy_device(struct phy_device *phydev)
static struct phy_driver mv3310_drivers[] = {
{
.phy_id = MARVELL_PHY_ID_88X3310,
- .phy_id_mask = MARVELL_PHY_ID_88X33X0_MASK,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .match_phy_device = mv3310_match_phy_device,
.name = "mv88x3310",
.driver_data = &mv3310_type,
.get_features = mv3310_get_features,
@@ -1011,8 +1041,9 @@ static struct phy_driver mv3310_drivers[] = {
.set_loopback = genphy_c45_loopback,
},
{
- .phy_id = MARVELL_PHY_ID_88X3340,
- .phy_id_mask = MARVELL_PHY_ID_88X33X0_MASK,
+ .phy_id = MARVELL_PHY_ID_88X3310,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
+ .match_phy_device = mv3340_match_phy_device,
.name = "mv88x3340",
.driver_data = &mv3340_type,
.get_features = mv3310_get_features,
@@ -1069,8 +1100,7 @@ static struct phy_driver mv3310_drivers[] = {
module_phy_driver(mv3310_drivers);
static struct mdio_device_id __maybe_unused mv3310_tbl[] = {
- { MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_88X33X0_MASK },
- { MARVELL_PHY_ID_88X3340, MARVELL_PHY_ID_88X33X0_MASK },
+ { MARVELL_PHY_ID_88X3310, MARVELL_PHY_ID_MASK },
{ MARVELL_PHY_ID_88E2110, MARVELL_PHY_ID_MASK },
{ },
};
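
The marvell10g change above stops keying on the PHY id alone and instead reads a port-count field to tell the single-port 88X3310 from the four-port 88X3340. The decode of that field is shown here as a plain helper fed with hypothetical register values rather than a real MDIO read:

#include <stdio.h>

#define MV_PCS_PORT_INFO_NPORTS_MASK  0x0380
#define MV_PCS_PORT_INFO_NPORTS_SHIFT 7

/* Decode bits [9:7] of a (hypothetical) MV_PCS_PORT_INFO read: nports - 1. */
static int nports_from_reg(unsigned int reg)
{
	return ((reg & MV_PCS_PORT_INFO_NPORTS_MASK) >>
		MV_PCS_PORT_INFO_NPORTS_SHIFT) + 1;
}

int main(void)
{
	/* Example encodings: 0x0000 -> 1 port (88X3310), 0x0180 -> 4 ports (88X3340). */
	printf("%d ports, %d ports\n",
	       nports_from_reg(0x0000), nports_from_reg(0x0180));
	return 0;
}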
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index aec97b021a73..2c115216420a 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -701,6 +701,7 @@ static int ax88772_init_phy(struct usbnet *dev)
return ret;
}
+ phy_suspend(priv->phydev);
priv->phydev->mac_managed_pm = 1;
phy_attached_info(priv->phydev);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 8a58a2f013af..56c3f8519093 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1771,6 +1771,7 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
{
struct scatterlist *sgs[4], hdr, stat;
unsigned out_num = 0, tmp;
+ int ret;
/* Caller should know better */
BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
@@ -1790,7 +1791,12 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
sgs[out_num] = &stat;
BUG_ON(out_num + 1 > ARRAY_SIZE(sgs));
- virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
+ ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
+ if (ret < 0) {
+ dev_warn(&vi->vdev->dev,
+ "Failed to add sgs for command vq: %d\n.", ret);
+ return false;
+ }
if (unlikely(!virtqueue_kick(vi->cvq)))
return vi->ctrl->status == VIRTIO_NET_OK;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index c0bd9cbc43b1..1b483cf2b1ca 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -1,7 +1,7 @@
/*
* Linux driver for VMware's vmxnet3 ethernet NIC.
*
- * Copyright (C) 2008-2020, VMware, Inc. All Rights Reserved.
+ * Copyright (C) 2008-2021, VMware, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -26,6 +26,10 @@
#include "vmxnet3_int.h"
+#include <net/vxlan.h>
+#include <net/geneve.h>
+
+#define VXLAN_UDP_PORT 8472
struct vmxnet3_stat_desc {
char desc[ETH_GSTRING_LEN];
@@ -262,6 +266,8 @@ netdev_features_t vmxnet3_features_check(struct sk_buff *skb,
if (VMXNET3_VERSION_GE_4(adapter) &&
skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL) {
u8 l4_proto = 0;
+ u16 port;
+ struct udphdr *udph;
switch (vlan_get_protocol(skb)) {
case htons(ETH_P_IP):
@@ -274,8 +280,20 @@ netdev_features_t vmxnet3_features_check(struct sk_buff *skb,
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
- if (l4_proto != IPPROTO_UDP)
+ switch (l4_proto) {
+ case IPPROTO_UDP:
+ udph = udp_hdr(skb);
+ port = be16_to_cpu(udph->dest);
+ /* Check if offloaded port is supported */
+ if (port != GENEVE_UDP_PORT &&
+ port != IANA_VXLAN_UDP_PORT &&
+ port != VXLAN_UDP_PORT) {
+ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+ }
+ break;
+ default:
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+ }
}
return features;
}
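
The vmxnet3 hunk above only keeps checksum/TSO offload for encapsulated packets whose outer UDP destination port is one the device understands: Geneve (6081), IANA VXLAN (4789) or the legacy VXLAN port 8472 defined locally as VXLAN_UDP_PORT. A tiny sketch of that gate, detached from the skb plumbing and with the port constants restated for the example:

#include <stdbool.h>
#include <stdio.h>

/* Well-known defaults for Geneve and IANA VXLAN; 8472 is the legacy
 * (pre-IANA) VXLAN port the hunk above also accepts. */
#define GENEVE_UDP_PORT     6081
#define IANA_VXLAN_UDP_PORT 4789
#define VXLAN_UDP_PORT      8472

static bool tunnel_offload_ok(unsigned int udp_dport)
{
	return udp_dport == GENEVE_UDP_PORT ||
	       udp_dport == IANA_VXLAN_UDP_PORT ||
	       udp_dport == VXLAN_UDP_PORT;
}

int main(void)
{
	printf("port 4789 offload ok: %d, port 4790 offload ok: %d\n",
	       tunnel_offload_ok(4789), tunnel_offload_ok(4790));
	return 0;
}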
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index 349ca18088e8..c54fdae950fb 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -364,19 +364,19 @@ static int cisco_ioctl(struct net_device *dev, struct ifreq *ifr)
return -EINVAL;
}
-static int __init mod_init(void)
+static int __init hdlc_cisco_init(void)
{
register_hdlc_protocol(&proto);
return 0;
}
-static void __exit mod_exit(void)
+static void __exit hdlc_cisco_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_cisco_init);
+module_exit(hdlc_cisco_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Cisco HDLC protocol support for generic HDLC");
diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c
index 72250fe0a1df..25e3564ce118 100644
--- a/drivers/net/wan/hdlc_fr.c
+++ b/drivers/net/wan/hdlc_fr.c
@@ -1279,19 +1279,19 @@ static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
return -EINVAL;
}
-static int __init mod_init(void)
+static int __init hdlc_fr_init(void)
{
register_hdlc_protocol(&proto);
return 0;
}
-static void __exit mod_exit(void)
+static void __exit hdlc_fr_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_fr_init);
+module_exit(hdlc_fr_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC");
diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c
index 834be2ae3e9e..b81ecf432a0c 100644
--- a/drivers/net/wan/hdlc_ppp.c
+++ b/drivers/net/wan/hdlc_ppp.c
@@ -705,20 +705,20 @@ static int ppp_ioctl(struct net_device *dev, struct ifreq *ifr)
return -EINVAL;
}
-static int __init mod_init(void)
+static int __init hdlc_ppp_init(void)
{
skb_queue_head_init(&tx_queue);
register_hdlc_protocol(&proto);
return 0;
}
-static void __exit mod_exit(void)
+static void __exit hdlc_ppp_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_ppp_init);
+module_exit(hdlc_ppp_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("PPP protocol support for generic HDLC");
diff --git a/drivers/net/wan/hdlc_raw.c b/drivers/net/wan/hdlc_raw.c
index 388fcc09b4dd..54d28496fefd 100644
--- a/drivers/net/wan/hdlc_raw.c
+++ b/drivers/net/wan/hdlc_raw.c
@@ -90,7 +90,7 @@ static int raw_ioctl(struct net_device *dev, struct ifreq *ifr)
}
-static int __init mod_init(void)
+static int __init hdlc_raw_init(void)
{
register_hdlc_protocol(&proto);
return 0;
@@ -98,14 +98,14 @@ static int __init mod_init(void)
-static void __exit mod_exit(void)
+static void __exit hdlc_raw_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_raw_init);
+module_exit(hdlc_raw_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Raw HDLC protocol support for generic HDLC");
diff --git a/drivers/net/wan/hdlc_raw_eth.c b/drivers/net/wan/hdlc_raw_eth.c
index c70a518b8b47..927596276a07 100644
--- a/drivers/net/wan/hdlc_raw_eth.c
+++ b/drivers/net/wan/hdlc_raw_eth.c
@@ -110,7 +110,7 @@ static int raw_eth_ioctl(struct net_device *dev, struct ifreq *ifr)
}
-static int __init mod_init(void)
+static int __init hdlc_eth_init(void)
{
register_hdlc_protocol(&proto);
return 0;
@@ -118,14 +118,14 @@ static int __init mod_init(void)
-static void __exit mod_exit(void)
+static void __exit hdlc_eth_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_eth_init);
+module_exit(hdlc_eth_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Ethernet encapsulation support for generic HDLC");
diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
index d2bf72bf3bd7..9b7ebf8bd85c 100644
--- a/drivers/net/wan/hdlc_x25.c
+++ b/drivers/net/wan/hdlc_x25.c
@@ -365,19 +365,19 @@ static int x25_ioctl(struct net_device *dev, struct ifreq *ifr)
return -EINVAL;
}
-static int __init mod_init(void)
+static int __init hdlc_x25_init(void)
{
register_hdlc_protocol(&proto);
return 0;
}
-static void __exit mod_exit(void)
+static void __exit hdlc_x25_exit(void)
{
unregister_hdlc_protocol(&proto);
}
-module_init(mod_init);
-module_exit(mod_exit);
+module_init(hdlc_x25_init);
+module_exit(hdlc_x25_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("X.25 protocol support for generic HDLC");
diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
index c2c4dc196802..cd690c64f65b 100644
--- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
+++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c
@@ -931,7 +931,7 @@ static int mt7921_load_firmware(struct mt7921_dev *dev)
ret = mt76_get_field(dev, MT_CONN_ON_MISC, MT_TOP_MISC2_FW_N9_RDY);
if (ret) {
dev_dbg(dev->mt76.dev, "Firmware is already download\n");
- return -EIO;
+ goto fw_loaded;
}
ret = mt7921_load_patch(dev);
@@ -949,6 +949,7 @@ static int mt7921_load_firmware(struct mt7921_dev *dev)
return -EIO;
}
+fw_loaded:
mt76_queue_tx_cleanup(dev, dev->mt76.q_mcu[MT_MCUQ_FWDL], false);
#ifdef CONFIG_PM
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
index 46f76e8aae92..0a472ce77370 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c
@@ -24,15 +24,7 @@ int ipc_imem_sys_wwan_open(struct iosm_imem *ipc_imem, int if_id)
return -EIO;
}
- /* check for the interafce id
- * if if_id 1 to 8 then create IP MUX channel sessions.
- * To start MUX session from 0 as network interface id would start
- * from 1 so map it to if_id = if_id - 1
- */
- if (if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END)
- return ipc_mux_open_session(ipc_imem->mux, if_id - 1);
-
- return -EINVAL;
+ return ipc_mux_open_session(ipc_imem->mux, if_id);
}
/* Release a net link to CP. */
@@ -41,7 +33,7 @@ void ipc_imem_sys_wwan_close(struct iosm_imem *ipc_imem, int if_id,
{
if (ipc_imem->mux && if_id >= IP_MUX_SESSION_START &&
if_id <= IP_MUX_SESSION_END)
- ipc_mux_close_session(ipc_imem->mux, if_id - 1);
+ ipc_mux_close_session(ipc_imem->mux, if_id);
}
/* Tasklet call to do uplink transfer. */
@@ -83,13 +75,8 @@ int ipc_imem_sys_wwan_transmit(struct iosm_imem *ipc_imem,
goto out;
}
- if (if_id >= IP_MUX_SESSION_START && if_id <= IP_MUX_SESSION_END)
- /* Route the UL packet through IP MUX Layer */
- ret = ipc_mux_ul_trigger_encode(ipc_imem->mux,
- if_id - 1, skb);
- else
- dev_err(ipc_imem->dev,
- "invalid if_id %d: ", if_id);
+ /* Route the UL packet through IP MUX Layer */
+ ret = ipc_mux_ul_trigger_encode(ipc_imem->mux, if_id, skb);
out:
return ret;
}
diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
index fd356dafbdd6..2007fe23e9a5 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
+++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.h
@@ -27,11 +27,11 @@
#define BOOT_CHECK_DEFAULT_TIMEOUT 400
/* IP MUX channel range */
-#define IP_MUX_SESSION_START 1
-#define IP_MUX_SESSION_END 8
+#define IP_MUX_SESSION_START 0
+#define IP_MUX_SESSION_END 7
/* Default IP MUX channel */
-#define IP_MUX_SESSION_DEFAULT 1
+#define IP_MUX_SESSION_DEFAULT 0
/**
* ipc_imem_sys_port_open - Open a port link to CP.
diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
index e634ffc6ec08..562de275797a 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_mux_codec.c
@@ -288,7 +288,7 @@ static int ipc_mux_net_receive(struct iosm_mux *ipc_mux, int if_id,
/* Pass the packet to the netif layer. */
dest_skb->priority = service_class;
- return ipc_wwan_receive(wwan, dest_skb, false, if_id + 1);
+ return ipc_wwan_receive(wwan, dest_skb, false, if_id);
}
/* Decode Flow Credit Table in the block */
diff --git a/drivers/net/wwan/iosm/iosm_ipc_uevent.c b/drivers/net/wwan/iosm/iosm_ipc_uevent.c
index 2229d752926c..d12188ffed7e 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_uevent.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_uevent.c
@@ -37,7 +37,7 @@ void ipc_uevent_send(struct device *dev, char *uevent)
/* Store the device and event information */
info->dev = dev;
- snprintf(info->uevent, MAX_UEVENT_LEN, "%s: %s", dev_name(dev), uevent);
+ snprintf(info->uevent, MAX_UEVENT_LEN, "IOSM_EVENT=%s", uevent);
/* Schedule uevent in process context using work queue */
schedule_work(&info->work);
diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.c b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
index c999c64001f4..b2357ad5d517 100644
--- a/drivers/net/wwan/iosm/iosm_ipc_wwan.c
+++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.c
@@ -107,6 +107,7 @@ static int ipc_wwan_link_transmit(struct sk_buff *skb,
{
struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
+ unsigned int len = skb->len;
int if_id = priv->if_id;
int ret;
@@ -123,6 +124,8 @@ static int ipc_wwan_link_transmit(struct sk_buff *skb,
/* Return code of zero is success */
if (ret == 0) {
+ netdev->stats.tx_packets++;
+ netdev->stats.tx_bytes += len;
ret = NETDEV_TX_OK;
} else if (ret == -EBUSY) {
ret = NETDEV_TX_BUSY;
@@ -140,7 +143,8 @@ exit:
ret);
dev_kfree_skb_any(skb);
- return ret;
+ netdev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
}
/* Ops structure for wwan net link */
@@ -158,6 +162,7 @@ static void ipc_wwan_setup(struct net_device *iosm_dev)
iosm_dev->priv_flags |= IFF_NO_QUEUE;
iosm_dev->type = ARPHRD_NONE;
+ iosm_dev->mtu = ETH_DATA_LEN;
iosm_dev->min_mtu = ETH_MIN_MTU;
iosm_dev->max_mtu = ETH_MAX_MTU;
@@ -252,8 +257,8 @@ int ipc_wwan_receive(struct iosm_wwan *ipc_wwan, struct sk_buff *skb_arg,
skb->pkt_type = PACKET_HOST;
- if (if_id < (IP_MUX_SESSION_START - 1) ||
- if_id > (IP_MUX_SESSION_END - 1)) {
+ if (if_id < IP_MUX_SESSION_START ||
+ if_id > IP_MUX_SESSION_END) {
ret = -EINVAL;
goto free;
}