Diffstat (limited to 'drivers')
-rw-r--r--  drivers/atm/idt77252.c | 12
-rw-r--r--  drivers/bluetooth/ath3k.c | 28
-rw-r--r--  drivers/bluetooth/btmrvl_main.c | 2
-rw-r--r--  drivers/bluetooth/btrtl.c | 119
-rw-r--r--  drivers/bluetooth/btusb.c | 10
-rw-r--r--  drivers/bluetooth/hci_ath.c | 4
-rw-r--r--  drivers/bluetooth/hci_ll.c | 2
-rw-r--r--  drivers/infiniband/core/cma.c | 3
-rw-r--r--  drivers/infiniband/hw/mlx5/Makefile | 1
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c | 64
-rw-r--r--  drivers/infiniband/hw/mlx5/ib_rep.c | 189
-rw-r--r--  drivers/infiniband/hw/mlx5/ib_rep.h | 72
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 361
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h | 38
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c | 5
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 24
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_transport.c | 5
-rw-r--r--  drivers/isdn/mISDN/socket.c | 5
-rw-r--r--  drivers/net/Kconfig | 4
-rw-r--r--  drivers/net/Space.c | 6
-rw-r--r--  drivers/net/bonding/bond_main.c | 1
-rw-r--r--  drivers/net/dsa/b53/b53_common.c | 2
-rw-r--r--  drivers/net/dsa/b53/b53_priv.h | 2
-rw-r--r--  drivers/net/dsa/dsa_loop.c | 2
-rw-r--r--  drivers/net/dsa/lan9303-core.c | 2
-rw-r--r--  drivers/net/dsa/microchip/ksz_common.c | 2
-rw-r--r--  drivers/net/dsa/mt7530.c | 2
-rw-r--r--  drivers/net/dsa/mv88e6xxx/Kconfig | 10
-rw-r--r--  drivers/net/dsa/mv88e6xxx/Makefile | 4
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c | 336
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.h | 129
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2.c | 9
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2.h | 99
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2_avb.c | 193
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2_scratch.c | 291
-rw-r--r--  drivers/net/dsa/mv88e6xxx/hwtstamp.c | 576
-rw-r--r--  drivers/net/dsa/mv88e6xxx/hwtstamp.h | 172
-rw-r--r--  drivers/net/dsa/mv88e6xxx/ptp.c | 381
-rw-r--r--  drivers/net/dsa/mv88e6xxx/ptp.h | 108
-rw-r--r--  drivers/net/dsa/mv88e6xxx/serdes.c | 106
-rw-r--r--  drivers/net/dsa/mv88e6xxx/serdes.h | 6
-rw-r--r--  drivers/net/dsa/qca8k.c | 2
-rw-r--r--  drivers/net/ethernet/8390/Makefile | 6
-rw-r--r--  drivers/net/ethernet/8390/ax88796.c | 3
-rw-r--r--  drivers/net/ethernet/8390/axnet_cs.c | 2
-rw-r--r--  drivers/net/ethernet/8390/etherh.c | 17
-rw-r--r--  drivers/net/ethernet/8390/hydra.c | 4
-rw-r--r--  drivers/net/ethernet/8390/lib8390.c | 2
-rw-r--r--  drivers/net/ethernet/8390/mac8390.c | 171
-rw-r--r--  drivers/net/ethernet/8390/mcf8390.c | 4
-rw-r--r--  drivers/net/ethernet/8390/ne.c | 2
-rw-r--r--  drivers/net/ethernet/8390/pcnet_cs.c | 4
-rw-r--r--  drivers/net/ethernet/8390/wd.c | 2
-rw-r--r--  drivers/net/ethernet/8390/zorro8390.c | 5
-rw-r--r--  drivers/net/ethernet/amd/amd8111e.c | 2
-rw-r--r--  drivers/net/ethernet/apple/macmace.c | 25
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/Makefile | 2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_core.c | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c | 86
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c | 6
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 194
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 14
-rw-r--r--  drivers/net/ethernet/cirrus/mac89x0.c | 158
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic.h | 3
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_ethtool.c | 36
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c | 81
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_dev.c | 22
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_dev.h | 3
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_devcmd.h | 5
-rw-r--r--  drivers/net/ethernet/cisco/enic/vnic_nic.h | 1
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c | 2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.h | 2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 65
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 2
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_dtsec.c | 19
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_dtsec.h | 1
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_memac.c | 32
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_memac.h | 1
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_tgec.c | 33
-rw-r--r--  drivers/net/ethernet/freescale/fman/fman_tgec.h | 1
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.c | 3
-rw-r--r--  drivers/net/ethernet/freescale/fman/mac.h | 2
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c | 236
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.h | 8
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_common.c | 5
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_main.c | 4
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_netdev.c | 10
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pci.c | 13
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_pf.c | 4
-rw-r--r--  drivers/net/ethernet/intel/fm10k/fm10k_tlv.c | 7
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 37
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 52
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 91
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 233
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 437
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 70
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 1000
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 20
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 427
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 67
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf.h | 75
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 52
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_main.c | 934
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 338
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 4
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 3
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ethtool.c | 48
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf.h | 72
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 825
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c | 649
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_port.c | 38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 37
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cq.c | 116
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/dev.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 92
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 42
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 104
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 28
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.h | 22
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Kconfig | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/Makefile | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h | 145
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 497
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 42
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c | 34
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c | 174
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c | 189
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c | 206
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 46
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c | 796
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h | 104
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 16
-rw-r--r--  drivers/net/ethernet/natsemi/jazzsonic.c | 32
-rw-r--r--  drivers/net/ethernet/natsemi/macsonic.c | 244
-rw-r--r--  drivers/net/ethernet/natsemi/sonic.c | 99
-rw-r--r--  drivers/net/ethernet/natsemi/sonic.h | 2
-rw-r--r--  drivers/net/ethernet/natsemi/xtsonic.c | 30
-rw-r--r--  drivers/net/ethernet/netronome/nfp/bpf/Makefile | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/Makefile | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/cmsg.h | 11
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/main.h | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/match.c | 20
-rw-r--r--  drivers/net/ethernet/netronome/nfp/flower/offload.c | 34
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_main.c | 1
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h | 280
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/Makefile | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/Makefile | 2
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nic/Makefile | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_iwarp.c | 2
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c | 1013
-rw-r--r--  drivers/net/ethernet/renesas/ravb.h | 1
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c | 34
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c | 37
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h | 5
-rw-r--r--  drivers/net/ethernet/sfc/falcon/enum.h | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 208
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 41
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 34
-rw-r--r--  drivers/net/geneve.c | 1
-rw-r--r--  drivers/net/gtp.c | 1
-rw-r--r--  drivers/net/ieee802154/Kconfig | 11
-rw-r--r--  drivers/net/ieee802154/Makefile | 1
-rw-r--r--  drivers/net/ieee802154/mcr20a.c | 1413
-rw-r--r--  drivers/net/ieee802154/mcr20a.h | 498
-rw-r--r--  drivers/net/ipvlan/ipvlan.h | 1
-rw-r--r--  drivers/net/ipvlan/ipvlan_core.c | 103
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c | 109
-rw-r--r--  drivers/net/loopback.c | 1
-rw-r--r--  drivers/net/phy/aquantia.c | 20
-rw-r--r--  drivers/net/phy/bcm7xxx.c | 2
-rw-r--r--  drivers/net/phy/cortina.c | 18
-rw-r--r--  drivers/net/phy/dp83867.c | 19
-rw-r--r--  drivers/net/phy/marvell.c | 2
-rw-r--r--  drivers/net/phy/marvell10g.c | 13
-rw-r--r--  drivers/net/phy/phy-c45.c | 28
-rw-r--r--  drivers/net/phy/phy-core.c | 4
-rw-r--r--  drivers/net/phy/phylink.c | 45
-rw-r--r--  drivers/net/phy/sfp-bus.c | 162
-rw-r--r--  drivers/net/phy/sfp.c | 150
-rw-r--r--  drivers/net/phy/teranetics.c | 32
-rw-r--r--  drivers/net/ppp/ppp_generic.c | 1
-rw-r--r--  drivers/net/ppp/pppoe.c | 7
-rw-r--r--  drivers/net/ppp/pptp.c | 6
-rw-r--r--  drivers/net/team/team.c | 16
-rw-r--r--  drivers/net/tun.c | 72
-rw-r--r--  drivers/net/usb/kalmia.c | 8
-rw-r--r--  drivers/net/vrf.c | 13
-rw-r--r--  drivers/net/vxlan.c | 1
-rw-r--r--  drivers/net/wireless/ath/wil6210/cfg80211.c | 3
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.c | 96
-rw-r--r--  drivers/net/wireless/mac80211_hwsim.h | 9
-rw-r--r--  drivers/net/xen-netback/rx.c | 2
-rw-r--r--  drivers/scsi/iscsi_tcp.c | 14
-rw-r--r--  drivers/soc/qcom/qmi_interface.c | 3
-rw-r--r--  drivers/staging/ipx/af_ipx.c | 6
-rw-r--r--  drivers/staging/irda/net/af_irda.c | 8
-rw-r--r--  drivers/staging/lustre/lnet/lnet/lib-socket.c | 7
-rw-r--r--  drivers/target/iscsi/iscsi_target_login.c | 18
-rw-r--r--  drivers/vhost/net.c | 7
221 files changed, 13762 insertions, 4410 deletions
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index 0277f36be85b..6e737142ceaa 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3173,14 +3173,10 @@ static void init_sram(struct idt77252_dev *card)
(u32) 0xffffffff);
}
- writel((SAR_FBQ0_LOW << 28) | 0x00000000 | 0x00000000 |
- (SAR_FB_SIZE_0 / 48), SAR_REG_FBQS0);
- writel((SAR_FBQ1_LOW << 28) | 0x00000000 | 0x00000000 |
- (SAR_FB_SIZE_1 / 48), SAR_REG_FBQS1);
- writel((SAR_FBQ2_LOW << 28) | 0x00000000 | 0x00000000 |
- (SAR_FB_SIZE_2 / 48), SAR_REG_FBQS2);
- writel((SAR_FBQ3_LOW << 28) | 0x00000000 | 0x00000000 |
- (SAR_FB_SIZE_3 / 48), SAR_REG_FBQS3);
+ writel((SAR_FBQ0_LOW << 28) | (SAR_FB_SIZE_0 / 48), SAR_REG_FBQS0);
+ writel((SAR_FBQ1_LOW << 28) | (SAR_FB_SIZE_1 / 48), SAR_REG_FBQS1);
+ writel((SAR_FBQ2_LOW << 28) | (SAR_FB_SIZE_2 / 48), SAR_REG_FBQS2);
+ writel((SAR_FBQ3_LOW << 28) | (SAR_FB_SIZE_3 / 48), SAR_REG_FBQS3);
/* Initialize rate table */
for (i = 0; i < 256; i++) {
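[The four rewritten writel() lines above only drop OR-with-zero terms, so the written register values are bit-identical. A quick userspace check of the identity; the constants here are hypothetical stand-ins for the SAR_* macros, not driver values:]

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t low = 1, size = 8192;	/* stand-ins for SAR_FBQ0_LOW, SAR_FB_SIZE_0 */

	/* OR-ing in 0x00000000 never changes a value */
	assert(((low << 28) | 0x00000000 | 0x00000000 | (size / 48)) ==
	       ((low << 28) | (size / 48)));
	return 0;
}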
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 204afe66de92..3d7a5c149af3 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -203,6 +203,12 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ } /* Terminating entry */
};
+static inline void ath3k_log_failed_loading(int err, int len, int size)
+{
+ BT_ERR("Error in firmware loading err = %d, len = %d, size = %d",
+ err, len, size);
+}
+
#define USB_REQ_DFU_DNLOAD 1
#define BULK_SIZE 4096
#define FW_HDR_SIZE 20
@@ -227,15 +233,16 @@ static int ath3k_load_firmware(struct usb_device *udev,
return -ENOMEM;
}
- memcpy(send_buf, firmware->data, 20);
+ memcpy(send_buf, firmware->data, FW_HDR_SIZE);
err = usb_control_msg(udev, pipe, USB_REQ_DFU_DNLOAD, USB_TYPE_VENDOR,
- 0, 0, send_buf, 20, USB_CTRL_SET_TIMEOUT);
+ 0, 0, send_buf, FW_HDR_SIZE,
+ USB_CTRL_SET_TIMEOUT);
if (err < 0) {
BT_ERR("Can't change to loading configuration err");
goto error;
}
- sent += 20;
- count -= 20;
+ sent += FW_HDR_SIZE;
+ count -= FW_HDR_SIZE;
pipe = usb_sndbulkpipe(udev, 0x02);
@@ -250,8 +257,7 @@ static int ath3k_load_firmware(struct usb_device *udev,
&len, 3000);
if (err || (len != size)) {
- BT_ERR("Error in firmware loading err = %d,"
- "len = %d, size = %d", err, len, size);
+ ath3k_log_failed_loading(err, len, size);
goto error;
}
@@ -350,8 +356,7 @@ static int ath3k_load_fwfile(struct usb_device *udev,
err = usb_bulk_msg(udev, pipe, send_buf, size,
&len, 3000);
if (err || (len != size)) {
- BT_ERR("Error in firmware loading err = %d,"
- "len = %d, size = %d", err, len, size);
+ ath3k_log_failed_loading(err, len, size);
kfree(send_buf);
return err;
}
@@ -398,7 +403,7 @@ static int ath3k_set_normal_mode(struct usb_device *udev)
static int ath3k_load_patch(struct usb_device *udev)
{
unsigned char fw_state;
- char filename[ATH3K_NAME_LEN] = {0};
+ char filename[ATH3K_NAME_LEN];
const struct firmware *firmware;
struct ath3k_version fw_version;
__u32 pt_rom_version, pt_build_version;
@@ -451,7 +456,7 @@ static int ath3k_load_patch(struct usb_device *udev)
static int ath3k_load_syscfg(struct usb_device *udev)
{
unsigned char fw_state;
- char filename[ATH3K_NAME_LEN] = {0};
+ char filename[ATH3K_NAME_LEN];
const struct firmware *firmware;
struct ath3k_version fw_version;
int clk_value, ret;
@@ -522,7 +527,6 @@ static int ath3k_probe(struct usb_interface *intf,
/* load patch and sysconfig files for AR3012 */
if (id->driver_info & BTUSB_ATH3012) {
-
/* New firmware with patch and sysconfig files already loaded */
if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x0001)
return -ENODEV;
@@ -565,7 +569,7 @@ static int ath3k_probe(struct usb_interface *intf,
static void ath3k_disconnect(struct usb_interface *intf)
{
- BT_DBG("ath3k_disconnect intf %p", intf);
+ BT_DBG("%s intf %p", __func__, intf);
}
static struct usb_driver ath3k_driver = {
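[The ath3k hunks above are mechanical cleanups: one inline helper replaces two duplicated BT_ERR() call sites, the magic number 20 becomes FW_HDR_SIZE, a redundant zero-initializer of filename is dropped (presumably safe because snprintf() fully writes the buffer before use), and a hard-coded function name in BT_DBG() becomes __func__. A minimal userspace sketch of the __func__ idiom, with printf() standing in for BT_DBG():]

#include <stdio.h>

static void ath3k_disconnect(void *intf)
{
	/* __func__ tracks renames automatically; a string literal
	 * would silently go stale */
	printf("%s intf %p\n", __func__, intf);
}

int main(void)
{
	ath3k_disconnect(NULL);	/* prints: "ath3k_disconnect intf ..." */
	return 0;
}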
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index b280d466f05b..f6c694a1b9b0 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -183,7 +183,7 @@ static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 opcode,
return -EFAULT;
}
- skb = bt_skb_alloc(HCI_COMMAND_HDR_SIZE + len, GFP_ATOMIC);
+ skb = bt_skb_alloc(HCI_COMMAND_HDR_SIZE + len, GFP_KERNEL);
if (!skb) {
BT_ERR("No free skb");
return -ENOMEM;
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 6e2ad748abba..437f080deaab 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -35,6 +35,60 @@
#define RTL_ROM_LMP_8761A 0x8761
#define RTL_ROM_LMP_8822B 0x8822
+#define IC_MATCH_FL_LMPSUBV (1 << 0)
+#define IC_MATCH_FL_HCIREV (1 << 1)
+#define IC_INFO(lmps, hcir) \
+ .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV, \
+ .lmp_subver = (lmps), \
+ .hci_rev = (hcir)
+
+struct id_table {
+ __u16 match_flags;
+ __u16 lmp_subver;
+ __u16 hci_rev;
+ bool config_needed;
+ char *fw_name;
+ char *cfg_name;
+};
+
+static const struct id_table ic_id_table[] = {
+ /* 8723B */
+ { IC_INFO(RTL_ROM_LMP_8723B, 0xb),
+ .config_needed = false,
+ .fw_name = "rtl_bt/rtl8723b_fw.bin",
+ .cfg_name = "rtl_bt/rtl8723b_config.bin" },
+
+ /* 8723D */
+ { IC_INFO(RTL_ROM_LMP_8723B, 0xd),
+ .config_needed = true,
+ .fw_name = "rtl_bt/rtl8723d_fw.bin",
+ .cfg_name = "rtl_bt/rtl8723d_config.bin" },
+
+ /* 8821A */
+ { IC_INFO(RTL_ROM_LMP_8821A, 0xa),
+ .config_needed = false,
+ .fw_name = "rtl_bt/rtl8821a_fw.bin",
+ .cfg_name = "rtl_bt/rtl8821a_config.bin" },
+
+ /* 8821C */
+ { IC_INFO(RTL_ROM_LMP_8821A, 0xc),
+ .config_needed = false,
+ .fw_name = "rtl_bt/rtl8821c_fw.bin",
+ .cfg_name = "rtl_bt/rtl8821c_config.bin" },
+
+ /* 8761A */
+ { IC_MATCH_FL_LMPSUBV, RTL_ROM_LMP_8761A, 0x0,
+ .config_needed = false,
+ .fw_name = "rtl_bt/rtl8761a_fw.bin",
+ .cfg_name = "rtl_bt/rtl8761a_config.bin" },
+
+ /* 8822B */
+ { IC_INFO(RTL_ROM_LMP_8822B, 0xb),
+ .config_needed = true,
+ .fw_name = "rtl_bt/rtl8822b_fw.bin",
+ .cfg_name = "rtl_bt/rtl8822b_config.bin" },
+ };
+
static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
{
struct rtl_rom_version_evt *rom_version;
@@ -64,9 +118,9 @@ static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
return 0;
}
-static int rtl8723b_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
- const struct firmware *fw,
- unsigned char **_buf)
+static int rtlbt_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
+ const struct firmware *fw,
+ unsigned char **_buf)
{
const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 };
struct rtl_epatch_header *epatch_info;
@@ -88,6 +142,8 @@ static int rtl8723b_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
{ RTL_ROM_LMP_8821A, 2 },
{ RTL_ROM_LMP_8761A, 3 },
{ RTL_ROM_LMP_8822B, 8 },
+ { RTL_ROM_LMP_8723B, 9 }, /* 8723D */
+ { RTL_ROM_LMP_8821A, 10 }, /* 8821C */
};
ret = rtl_read_rom_version(hdev, &rom_version);
@@ -320,8 +376,8 @@ out:
return ret;
}
-static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver,
- const char *fw_name)
+static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 hci_rev,
+ u16 lmp_subver)
{
unsigned char *fw_data = NULL;
const struct firmware *fw;
@@ -330,39 +386,40 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver,
u8 *cfg_buff = NULL;
u8 *tbuff;
char *cfg_name = NULL;
- bool config_needed = false;
+ char *fw_name = NULL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(ic_id_table); i++) {
+ if ((ic_id_table[i].match_flags & IC_MATCH_FL_LMPSUBV) &&
+ (ic_id_table[i].lmp_subver != lmp_subver))
+ continue;
+ if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIREV) &&
+ (ic_id_table[i].hci_rev != hci_rev))
+ continue;
- switch (lmp_subver) {
- case RTL_ROM_LMP_8723B:
- cfg_name = "rtl_bt/rtl8723b_config.bin";
- break;
- case RTL_ROM_LMP_8821A:
- cfg_name = "rtl_bt/rtl8821a_config.bin";
- break;
- case RTL_ROM_LMP_8761A:
- cfg_name = "rtl_bt/rtl8761a_config.bin";
- break;
- case RTL_ROM_LMP_8822B:
- cfg_name = "rtl_bt/rtl8822b_config.bin";
- config_needed = true;
- break;
- default:
- BT_ERR("%s: rtl: no config according to lmp_subver %04x",
- hdev->name, lmp_subver);
break;
}
+ if (i >= ARRAY_SIZE(ic_id_table)) {
+ BT_ERR("%s: unknown IC info, lmp subver %04x, hci rev %04x",
+ hdev->name, lmp_subver, hci_rev);
+ return -EINVAL;
+ }
+
+ cfg_name = ic_id_table[i].cfg_name;
+
if (cfg_name) {
cfg_sz = rtl_load_config(hdev, cfg_name, &cfg_buff);
if (cfg_sz < 0) {
cfg_sz = 0;
- if (config_needed)
+ if (ic_id_table[i].config_needed)
BT_ERR("Necessary config file %s not found\n",
cfg_name);
}
} else
cfg_sz = 0;
+ fw_name = ic_id_table[i].fw_name;
bt_dev_info(hdev, "rtl: loading %s", fw_name);
ret = request_firmware(&fw, fw_name, &hdev->dev);
if (ret < 0) {
@@ -370,7 +427,7 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver,
goto err_req_fw;
}
- ret = rtl8723b_parse_firmware(hdev, lmp_subver, fw, &fw_data);
+ ret = rtlbt_parse_firmware(hdev, lmp_subver, fw, &fw_data);
if (ret < 0)
goto out;
@@ -429,7 +486,7 @@ int btrtl_setup_realtek(struct hci_dev *hdev)
{
struct sk_buff *skb;
struct hci_rp_read_local_version *resp;
- u16 lmp_subver;
+ u16 hci_rev, lmp_subver;
skb = btrtl_read_local_version(hdev);
if (IS_ERR(skb))
@@ -441,6 +498,7 @@ int btrtl_setup_realtek(struct hci_dev *hdev)
resp->hci_ver, resp->hci_rev,
resp->lmp_ver, resp->lmp_subver);
+ hci_rev = le16_to_cpu(resp->hci_rev);
lmp_subver = le16_to_cpu(resp->lmp_subver);
kfree_skb(skb);
@@ -455,17 +513,10 @@ int btrtl_setup_realtek(struct hci_dev *hdev)
case RTL_ROM_LMP_3499:
return btrtl_setup_rtl8723a(hdev);
case RTL_ROM_LMP_8723B:
- return btrtl_setup_rtl8723b(hdev, lmp_subver,
- "rtl_bt/rtl8723b_fw.bin");
case RTL_ROM_LMP_8821A:
- return btrtl_setup_rtl8723b(hdev, lmp_subver,
- "rtl_bt/rtl8821a_fw.bin");
case RTL_ROM_LMP_8761A:
- return btrtl_setup_rtl8723b(hdev, lmp_subver,
- "rtl_bt/rtl8761a_fw.bin");
case RTL_ROM_LMP_8822B:
- return btrtl_setup_rtl8723b(hdev, lmp_subver,
- "rtl_bt/rtl8822b_fw.bin");
+ return btrtl_setup_rtl8723b(hdev, hci_rev, lmp_subver);
default:
bt_dev_info(hdev, "rtl: assuming no firmware upload needed");
return 0;
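[The btrtl rework above replaces a switch on lmp_subver with a data-driven lookup: each ic_id_table entry declares, via match_flags, which fields it cares about, so two chips that share an LMP subversion (8723B/8723D, 8821A/8821C) can be told apart by HCI revision. A self-contained userspace analogue of the lookup, with simplified names and values rather than the driver code:]

#include <stdio.h>

#define MATCH_LMPSUBV (1 << 0)
#define MATCH_HCIREV  (1 << 1)
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct id_entry {
	unsigned int match_flags, lmp_subver, hci_rev;
	const char *fw_name;
};

static const struct id_entry table[] = {
	{ MATCH_LMPSUBV | MATCH_HCIREV, 0x8723, 0xb, "rtl8723b_fw.bin" },
	{ MATCH_LMPSUBV | MATCH_HCIREV, 0x8723, 0xd, "rtl8723d_fw.bin" },
	{ MATCH_LMPSUBV,                0x8761, 0x0, "rtl8761a_fw.bin" },
};

static const char *lookup(unsigned int lmp_subver, unsigned int hci_rev)
{
	for (size_t i = 0; i < ARRAY_SIZE(table); i++) {
		if ((table[i].match_flags & MATCH_LMPSUBV) &&
		    table[i].lmp_subver != lmp_subver)
			continue;
		if ((table[i].match_flags & MATCH_HCIREV) &&
		    table[i].hci_rev != hci_rev)
			continue;
		return table[i].fw_name;	/* first full match wins */
	}
	return NULL;
}

int main(void)
{
	puts(lookup(0x8723, 0xd));	/* rtl8723d_fw.bin */
	puts(lookup(0x8761, 0x42));	/* rtl8761a_fw.bin: hci_rev not matched on */
	return 0;
}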
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 60bf04b8f103..fa4ce83893bb 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -340,6 +340,7 @@ static const struct usb_device_id blacklist_table[] = {
/* Intel Bluetooth devices */
{ USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW },
+ { USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW },
{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
@@ -374,6 +375,9 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK },
+ /* Additional Realtek 8822BE Bluetooth devices */
+ { USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK },
+
/* Silicon Wave based devices */
{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
@@ -2073,6 +2077,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
case 0x0c: /* WsP */
case 0x11: /* JfP */
case 0x12: /* ThP */
+ case 0x13: /* HrP */
+ case 0x14: /* QnJ, IcP */
break;
default:
BT_ERR("%s: Unsupported Intel hardware variant (%u)",
@@ -2165,6 +2171,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
break;
case 0x11: /* JfP */
case 0x12: /* ThP */
+ case 0x13: /* HrP */
+ case 0x14: /* QnJ, IcP */
snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi",
le16_to_cpu(ver.hw_variant),
le16_to_cpu(ver.hw_revision),
@@ -2196,6 +2204,8 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
break;
case 0x11: /* JfP */
case 0x12: /* ThP */
+ case 0x13: /* HrP */
+ case 0x14: /* QnJ, IcP */
snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc",
le16_to_cpu(ver.hw_variant),
le16_to_cpu(ver.hw_revision),
diff --git a/drivers/bluetooth/hci_ath.c b/drivers/bluetooth/hci_ath.c
index 14ae7ee88acb..d568fbd94d6c 100644
--- a/drivers/bluetooth/hci_ath.c
+++ b/drivers/bluetooth/hci_ath.c
@@ -71,12 +71,12 @@ static int ath_wakeup_ar3k(struct tty_struct *tty)
/* Clear RTS first */
tty->driver->ops->tiocmget(tty);
tty->driver->ops->tiocmset(tty, 0x00, TIOCM_RTS);
- mdelay(20);
+ msleep(20);
/* Set RTS, wake up board */
tty->driver->ops->tiocmget(tty);
tty->driver->ops->tiocmset(tty, TIOCM_RTS, 0x00);
- mdelay(20);
+ msleep(20);
status = tty->driver->ops->tiocmget(tty);
return status;
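[Here, and in hci_ll below, mdelay() becomes msleep(): these paths run in process context, so busy-waiting for 20 ms only burns CPU. A kernel-context sketch of the distinction; the function is hypothetical, not driver code:]

#include <linux/delay.h>
#include <linux/types.h>

static void pause_20ms(bool atomic_ctx)
{
	if (atomic_ctx)
		mdelay(20);	/* spins; the only option when sleeping is illegal */
	else
		msleep(20);	/* yields the CPU; may sleep slightly longer */
}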
diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c
index 1b4417a623a4..2f30dcad96bd 100644
--- a/drivers/bluetooth/hci_ll.c
+++ b/drivers/bluetooth/hci_ll.c
@@ -650,7 +650,7 @@ static int download_firmware(struct ll_device *lldev)
break;
case ACTION_DELAY: /* sleep */
bt_dev_info(lldev->hu.hdev, "sleep command in scr");
- mdelay(((struct bts_action_delay *)action_ptr)->msec);
+ msleep(((struct bts_action_delay *)action_ptr)->msec);
break;
}
len -= (sizeof(struct bts_action) +
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index e66963ca58bd..915bbd867b61 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1334,7 +1334,7 @@ static bool validate_ipv6_net_dev(struct net_device *net_dev,
IPV6_ADDR_LINKLOCAL;
struct rt6_info *rt = rt6_lookup(dev_net(net_dev), &dst_addr->sin6_addr,
&src_addr->sin6_addr, net_dev->ifindex,
- strict);
+ NULL, strict);
bool ret;
if (!rt)
@@ -4549,6 +4549,7 @@ static struct pernet_operations cma_pernet_operations = {
.exit = cma_exit_net,
.id = &cma_pernet_id,
.size = sizeof(struct cma_pernet),
+ .async = true,
};
static int __init cma_init(void)
diff --git a/drivers/infiniband/hw/mlx5/Makefile b/drivers/infiniband/hw/mlx5/Makefile
index bc6299697dda..d42b922bede8 100644
--- a/drivers/infiniband/hw/mlx5/Makefile
+++ b/drivers/infiniband/hw/mlx5/Makefile
@@ -2,3 +2,4 @@ obj-$(CONFIG_MLX5_INFINIBAND) += mlx5_ib.o
mlx5_ib-y := main.o cq.o doorbell.o qp.o mem.o srq.o mr.o ah.o mad.o gsi.o ib_virt.o cmd.o cong.o
mlx5_ib-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += odp.o
+mlx5_ib-$(CONFIG_MLX5_ESWITCH) += ib_rep.o
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 5b974fb97611..c4c7b82f4ac1 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -64,14 +64,9 @@ static void mlx5_ib_cq_event(struct mlx5_core_cq *mcq, enum mlx5_event type)
}
}
-static void *get_cqe_from_buf(struct mlx5_ib_cq_buf *buf, int n, int size)
-{
- return mlx5_buf_offset(&buf->buf, n * size);
-}
-
static void *get_cqe(struct mlx5_ib_cq *cq, int n)
{
- return get_cqe_from_buf(&cq->buf, n, cq->mcq.cqe_sz);
+ return mlx5_frag_buf_get_wqe(&cq->buf.fbc, n);
}
static u8 sw_ownership_bit(int n, int nent)
@@ -403,7 +398,7 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
{
- mlx5_buf_free(dev->mdev, &buf->buf);
+ mlx5_frag_buf_free(dev->mdev, &buf->fbc.frag_buf);
}
static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
@@ -724,12 +719,25 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
return ret;
}
-static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
- int nent, int cqe_size)
+static int alloc_cq_frag_buf(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_cq_buf *buf,
+ int nent,
+ int cqe_size)
{
+ struct mlx5_frag_buf_ctrl *c = &buf->fbc;
+ struct mlx5_frag_buf *frag_buf = &c->frag_buf;
+ u32 cqc_buff[MLX5_ST_SZ_DW(cqc)] = {0};
int err;
- err = mlx5_buf_alloc(dev->mdev, nent * cqe_size, &buf->buf);
+ MLX5_SET(cqc, cqc_buff, log_cq_size, ilog2(cqe_size));
+ MLX5_SET(cqc, cqc_buff, cqe_sz, (cqe_size == 128) ? 1 : 0);
+
+ mlx5_core_init_cq_frag_buf(&buf->fbc, cqc_buff);
+
+ err = mlx5_frag_buf_alloc_node(dev->mdev,
+ nent * cqe_size,
+ frag_buf,
+ dev->mdev->priv.numa_node);
if (err)
return err;
@@ -862,14 +870,15 @@ static void destroy_cq_user(struct mlx5_ib_cq *cq, struct ib_ucontext *context)
ib_umem_release(cq->buf.umem);
}
-static void init_cq_buf(struct mlx5_ib_cq *cq, struct mlx5_ib_cq_buf *buf)
+static void init_cq_frag_buf(struct mlx5_ib_cq *cq,
+ struct mlx5_ib_cq_buf *buf)
{
int i;
void *cqe;
struct mlx5_cqe64 *cqe64;
for (i = 0; i < buf->nent; i++) {
- cqe = get_cqe_from_buf(buf, i, buf->cqe_size);
+ cqe = get_cqe(cq, i);
cqe64 = buf->cqe_size == 64 ? cqe : cqe + 64;
cqe64->op_own = MLX5_CQE_INVALID << 4;
}
@@ -891,14 +900,15 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
cq->mcq.arm_db = cq->db.db + 1;
cq->mcq.cqe_sz = cqe_size;
- err = alloc_cq_buf(dev, &cq->buf, entries, cqe_size);
+ err = alloc_cq_frag_buf(dev, &cq->buf, entries, cqe_size);
if (err)
goto err_db;
- init_cq_buf(cq, &cq->buf);
+ init_cq_frag_buf(cq, &cq->buf);
*inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
- MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * cq->buf.buf.npages;
+ MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) *
+ cq->buf.fbc.frag_buf.npages;
*cqb = kvzalloc(*inlen, GFP_KERNEL);
if (!*cqb) {
err = -ENOMEM;
@@ -906,11 +916,12 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
}
pas = (__be64 *)MLX5_ADDR_OF(create_cq_in, *cqb, pas);
- mlx5_fill_page_array(&cq->buf.buf, pas);
+ mlx5_fill_page_frag_array(&cq->buf.fbc.frag_buf, pas);
cqc = MLX5_ADDR_OF(create_cq_in, *cqb, cq_context);
MLX5_SET(cqc, cqc, log_page_size,
- cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
+ cq->buf.fbc.frag_buf.page_shift -
+ MLX5_ADAPTER_PAGE_SHIFT);
*index = dev->mdev->priv.uar->index;
@@ -1207,11 +1218,11 @@ static int resize_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
if (!cq->resize_buf)
return -ENOMEM;
- err = alloc_cq_buf(dev, cq->resize_buf, entries, cqe_size);
+ err = alloc_cq_frag_buf(dev, cq->resize_buf, entries, cqe_size);
if (err)
goto ex;
- init_cq_buf(cq, cq->resize_buf);
+ init_cq_frag_buf(cq, cq->resize_buf);
return 0;
@@ -1256,9 +1267,8 @@ static int copy_resize_cqes(struct mlx5_ib_cq *cq)
}
while ((scqe64->op_own >> 4) != MLX5_CQE_RESIZE_CQ) {
- dcqe = get_cqe_from_buf(cq->resize_buf,
- (i + 1) & (cq->resize_buf->nent),
- dsize);
+ dcqe = mlx5_frag_buf_get_wqe(&cq->resize_buf->fbc,
+ (i + 1) & cq->resize_buf->nent);
dcqe64 = dsize == 64 ? dcqe : dcqe + 64;
sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
memcpy(dcqe, scqe, dsize);
@@ -1324,8 +1334,11 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
cqe_size = 64;
err = resize_kernel(dev, cq, entries, cqe_size);
if (!err) {
- npas = cq->resize_buf->buf.npages;
- page_shift = cq->resize_buf->buf.page_shift;
+ struct mlx5_frag_buf_ctrl *c;
+
+ c = &cq->resize_buf->fbc;
+ npas = c->frag_buf.npages;
+ page_shift = c->frag_buf.page_shift;
}
}
@@ -1346,7 +1359,8 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
mlx5_ib_populate_pas(dev, cq->resize_umem, page_shift,
pas, 0);
else
- mlx5_fill_page_array(&cq->resize_buf->buf, pas);
+ mlx5_fill_page_frag_array(&cq->resize_buf->fbc.frag_buf,
+ pas);
MLX5_SET(modify_cq_in, in,
modify_field_select_resize_field_select.resize_field_select.resize_field_select,
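[The cq.c conversion above moves CQ buffers from one physically contiguous allocation (mlx5_buf / mlx5_buf_alloc()) to a fragmented one (mlx5_frag_buf_ctrl / mlx5_frag_buf_alloc_node()), which is far easier to satisfy when memory is fragmented. Entry lookup then splits a byte offset into a fragment index plus an offset within the fragment. A userspace analogue of that indexing, not the mlx5 API:]

#include <stdio.h>
#include <stdlib.h>

#define FRAG_SIZE  4096		/* bytes per fragment (page-sized) */
#define ENTRY_SIZE 64		/* bytes per entry (64-byte CQE) */

struct frag_buf {
	char **frags;		/* array of fragment pointers */
	size_t nfrags;
};

/* Entry n lives at byte offset n * ENTRY_SIZE, which may land in any
 * fragment. */
static void *get_entry(const struct frag_buf *b, size_t n)
{
	size_t off = n * ENTRY_SIZE;

	return b->frags[off / FRAG_SIZE] + off % FRAG_SIZE;
}

int main(void)
{
	struct frag_buf b = { .nfrags = 2 };

	b.frags = calloc(b.nfrags, sizeof(*b.frags));
	for (size_t i = 0; i < b.nfrags; i++)
		b.frags[i] = calloc(1, FRAG_SIZE);

	/* entry 64 is the first entry of the second fragment */
	printf("%d\n", get_entry(&b, 64) == (void *)b.frags[1]);

	for (size_t i = 0; i < b.nfrags; i++)
		free(b.frags[i]);
	free(b.frags);
	return 0;
}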
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.c b/drivers/infiniband/hw/mlx5/ib_rep.c
new file mode 100644
index 000000000000..61cc3d7db257
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/ib_rep.c
@@ -0,0 +1,189 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ */
+
+#include "ib_rep.h"
+
+static const struct mlx5_ib_profile rep_profile = {
+ STAGE_CREATE(MLX5_IB_STAGE_INIT,
+ mlx5_ib_stage_init_init,
+ mlx5_ib_stage_init_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
+ mlx5_ib_stage_rep_flow_db_init,
+ NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_CAPS,
+ mlx5_ib_stage_caps_init,
+ NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
+ mlx5_ib_stage_rep_non_default_cb,
+ NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_ROCE,
+ mlx5_ib_stage_rep_roce_init,
+ mlx5_ib_stage_rep_roce_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
+ mlx5_ib_stage_dev_res_init,
+ mlx5_ib_stage_dev_res_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
+ mlx5_ib_stage_counters_init,
+ mlx5_ib_stage_counters_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_BFREG,
+ mlx5_ib_stage_bfrag_init,
+ mlx5_ib_stage_bfrag_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
+ mlx5_ib_stage_ib_reg_init,
+ mlx5_ib_stage_ib_reg_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
+ mlx5_ib_stage_umr_res_init,
+ mlx5_ib_stage_umr_res_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
+ mlx5_ib_stage_class_attr_init,
+ NULL),
+};
+
+static int
+mlx5_ib_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
+{
+ return 0;
+}
+
+static void
+mlx5_ib_nic_rep_unload(struct mlx5_eswitch_rep *rep)
+{
+ rep->rep_if[REP_IB].priv = NULL;
+}
+
+static int
+mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5_ib_dev *ibdev;
+
+ ibdev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*ibdev));
+ if (!ibdev)
+ return -ENOMEM;
+
+ ibdev->rep = rep;
+ ibdev->mdev = dev;
+ ibdev->num_ports = max(MLX5_CAP_GEN(dev, num_ports),
+ MLX5_CAP_GEN(dev, num_vhca_ports));
+ if (!__mlx5_ib_add(ibdev, &rep_profile))
+ return -EINVAL;
+
+ rep->rep_if[REP_IB].priv = ibdev;
+
+ return 0;
+}
+
+static void
+mlx5_ib_vport_rep_unload(struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5_ib_dev *dev;
+
+ if (!rep->rep_if[REP_IB].priv)
+ return;
+
+ dev = mlx5_ib_rep_to_dev(rep);
+ __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX);
+ rep->rep_if[REP_IB].priv = NULL;
+}
+
+static void *mlx5_ib_vport_get_proto_dev(struct mlx5_eswitch_rep *rep)
+{
+ return mlx5_ib_rep_to_dev(rep);
+}
+
+static void mlx5_ib_rep_register_vf_vports(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
+ int total_vfs = MLX5_TOTAL_VPORTS(dev->mdev);
+ int vport;
+
+ for (vport = 1; vport < total_vfs; vport++) {
+ struct mlx5_eswitch_rep_if rep_if = {};
+
+ rep_if.load = mlx5_ib_vport_rep_load;
+ rep_if.unload = mlx5_ib_vport_rep_unload;
+ rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
+ mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_IB);
+ }
+}
+
+static void mlx5_ib_rep_unregister_vf_vports(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
+ int total_vfs = MLX5_TOTAL_VPORTS(dev->mdev);
+ int vport;
+
+ for (vport = 1; vport < total_vfs; vport++)
+ mlx5_eswitch_unregister_vport_rep(esw, vport, REP_IB);
+}
+
+void mlx5_ib_register_vport_reps(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
+ struct mlx5_eswitch_rep_if rep_if = {};
+
+ rep_if.load = mlx5_ib_nic_rep_load;
+ rep_if.unload = mlx5_ib_nic_rep_unload;
+ rep_if.get_proto_dev = mlx5_ib_vport_get_proto_dev;
+ rep_if.priv = dev;
+
+ mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_IB);
+
+ mlx5_ib_rep_register_vf_vports(dev);
+}
+
+void mlx5_ib_unregister_vport_reps(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
+
+ mlx5_ib_rep_unregister_vf_vports(dev); /* VFs vports */
+ mlx5_eswitch_unregister_vport_rep(esw, 0, REP_IB); /* UPLINK PF*/
+}
+
+u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
+{
+ return mlx5_eswitch_mode(esw);
+}
+
+struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
+ int vport_index)
+{
+ return mlx5_eswitch_get_proto_dev(esw, vport_index, REP_IB);
+}
+
+struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
+ int vport_index)
+{
+ return mlx5_eswitch_get_proto_dev(esw, vport_index, REP_ETH);
+}
+
+struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw)
+{
+ return mlx5_eswitch_uplink_get_proto_dev(esw, REP_IB);
+}
+
+struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw, int vport)
+{
+ return mlx5_eswitch_vport_rep(esw, vport);
+}
+
+int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_sq *sq)
+{
+ struct mlx5_flow_handle *flow_rule;
+ struct mlx5_eswitch *esw = dev->mdev->priv.eswitch;
+
+ if (!dev->rep)
+ return 0;
+
+ flow_rule =
+ mlx5_eswitch_add_send_to_vport_rule(esw,
+ dev->rep->vport,
+ sq->base.mqp.qpn);
+ if (IS_ERR(flow_rule))
+ return PTR_ERR(flow_rule);
+ sq->flow_rule = flow_rule;
+
+ return 0;
+}
diff --git a/drivers/infiniband/hw/mlx5/ib_rep.h b/drivers/infiniband/hw/mlx5/ib_rep.h
new file mode 100644
index 000000000000..046fd942fd46
--- /dev/null
+++ b/drivers/infiniband/hw/mlx5/ib_rep.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef __MLX5_IB_REP_H__
+#define __MLX5_IB_REP_H__
+
+#include <linux/mlx5/eswitch.h>
+#include "mlx5_ib.h"
+
+#ifdef CONFIG_MLX5_ESWITCH
+u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw);
+struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
+ int vport_index);
+struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw);
+struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw,
+ int vport_index);
+void mlx5_ib_register_vport_reps(struct mlx5_ib_dev *dev);
+void mlx5_ib_unregister_vport_reps(struct mlx5_ib_dev *dev);
+int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_sq *sq);
+struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
+ int vport_index);
+#else /* CONFIG_MLX5_ESWITCH */
+static inline u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw)
+{
+ return SRIOV_NONE;
+}
+
+static inline
+struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
+ int vport_index)
+{
+ return NULL;
+}
+
+static inline
+struct mlx5_ib_dev *mlx5_ib_get_uplink_ibdev(struct mlx5_eswitch *esw)
+{
+ return NULL;
+}
+
+static inline
+struct mlx5_eswitch_rep *mlx5_ib_vport_rep(struct mlx5_eswitch *esw,
+ int vport_index)
+{
+ return NULL;
+}
+
+static inline void mlx5_ib_register_vport_reps(struct mlx5_ib_dev *dev) {}
+static inline void mlx5_ib_unregister_vport_reps(struct mlx5_ib_dev *dev) {}
+static inline int create_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_sq *sq)
+{
+ return 0;
+}
+
+static inline
+struct net_device *mlx5_ib_get_rep_netdev(struct mlx5_eswitch *esw,
+ int vport_index)
+{
+ return NULL;
+}
+#endif
+
+static inline
+struct mlx5_ib_dev *mlx5_ib_rep_to_dev(struct mlx5_eswitch_rep *rep)
+{
+ return (struct mlx5_ib_dev *)rep->rep_if[REP_IB].priv;
+}
+#endif /* __MLX5_IB_REP_H__ */
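[The new header uses the standard Kconfig stub idiom: with CONFIG_MLX5_ESWITCH set, the prototypes resolve to the real symbols in ib_rep.c; without it, they collapse to static inline no-ops, so callers compile unchanged either way. The general shape of the idiom, with hypothetical names:]

#ifdef CONFIG_FOO
int foo_register(struct foo_dev *dev);	/* implemented in foo.c */
#else
static inline int foo_register(struct foo_dev *dev)
{
	return 0;	/* feature compiled out: succeed as a no-op */
}
#endif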
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 4236c8086820..ee55d7d64554 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -57,6 +57,7 @@
#include <linux/in.h>
#include <linux/etherdevice.h>
#include "mlx5_ib.h"
+#include "ib_rep.h"
#include "cmd.h"
#define DRIVER_NAME "mlx5_ib"
@@ -130,7 +131,7 @@ static int get_port_state(struct ib_device *ibdev,
int ret;
memset(&attr, 0, sizeof(attr));
- ret = mlx5_ib_query_port(ibdev, port_num, &attr);
+ ret = ibdev->query_port(ibdev, port_num, &attr);
if (!ret)
*state = attr.state;
return ret;
@@ -154,10 +155,19 @@ static int mlx5_netdev_event(struct notifier_block *this,
case NETDEV_REGISTER:
case NETDEV_UNREGISTER:
write_lock(&roce->netdev_lock);
-
- if (ndev->dev.parent == &mdev->pdev->dev)
- roce->netdev = (event == NETDEV_UNREGISTER) ?
+ if (ibdev->rep) {
+ struct mlx5_eswitch *esw = ibdev->mdev->priv.eswitch;
+ struct net_device *rep_ndev;
+
+ rep_ndev = mlx5_ib_get_rep_netdev(esw,
+ ibdev->rep->vport);
+ if (rep_ndev == ndev)
+ roce->netdev = (event == NETDEV_UNREGISTER) ?
NULL : ndev;
+ } else if (ndev->dev.parent == &ibdev->mdev->pdev->dev) {
+ roce->netdev = (event == NETDEV_UNREGISTER) ?
+ NULL : ndev;
+ }
write_unlock(&roce->netdev_lock);
break;
@@ -1268,6 +1278,22 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
return ret;
}
+static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u8 port,
+ struct ib_port_attr *props)
+{
+ int ret;
+
+ /* Only link layer == ethernet is valid for representors */
+ ret = mlx5_query_port_roce(ibdev, port, props);
+ if (ret || !props)
+ return ret;
+
+ /* We don't support GIDS */
+ props->gid_tbl_len = 0;
+
+ return ret;
+}
+
static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
union ib_gid *gid)
{
@@ -2631,7 +2657,7 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
ibflow);
struct mlx5_ib_flow_handler *iter, *tmp;
- mutex_lock(&dev->flow_db.lock);
+ mutex_lock(&dev->flow_db->lock);
list_for_each_entry_safe(iter, tmp, &handler->list, list) {
mlx5_del_flow_rules(iter->rule);
@@ -2642,7 +2668,7 @@ static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
mlx5_del_flow_rules(handler->rule);
put_flow_table(dev, handler->prio, true);
- mutex_unlock(&dev->flow_db.lock);
+ mutex_unlock(&dev->flow_db->lock);
kfree(handler);
@@ -2691,7 +2717,7 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
MLX5_FLOW_NAMESPACE_BYPASS);
num_entries = MLX5_FS_MAX_ENTRIES;
num_groups = MLX5_FS_MAX_TYPES;
- prio = &dev->flow_db.prios[priority];
+ prio = &dev->flow_db->prios[priority];
} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
ns = mlx5_get_flow_namespace(dev->mdev,
@@ -2699,7 +2725,7 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
build_leftovers_ft_param(&priority,
&num_entries,
&num_groups);
- prio = &dev->flow_db.prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
+ prio = &dev->flow_db->prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
} else if (flow_attr->type == IB_FLOW_ATTR_SNIFFER) {
if (!MLX5_CAP_FLOWTABLE(dev->mdev,
allow_sniffer_and_nic_rx_shared_tir))
@@ -2709,7 +2735,7 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
MLX5_FLOW_NAMESPACE_SNIFFER_RX :
MLX5_FLOW_NAMESPACE_SNIFFER_TX);
- prio = &dev->flow_db.sniffer[ft_type];
+ prio = &dev->flow_db->sniffer[ft_type];
priority = 0;
num_entries = 1;
num_groups = 1;
@@ -2802,6 +2828,18 @@ static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
if (!flow_is_multicast_only(flow_attr))
set_underlay_qp(dev, spec, underlay_qpn);
+ if (dev->rep) {
+ void *misc;
+
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+ MLX5_SET(fte_match_set_misc, misc, source_port,
+ dev->rep->vport);
+ misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ misc_parameters);
+ MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
+ }
+
spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
if (is_drop) {
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
@@ -2999,7 +3037,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
if (!dst)
return ERR_PTR(-ENOMEM);
- mutex_lock(&dev->flow_db.lock);
+ mutex_lock(&dev->flow_db->lock);
ft_prio = get_flow_table(dev, flow_attr, MLX5_IB_FT_RX);
if (IS_ERR(ft_prio)) {
@@ -3048,7 +3086,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
goto destroy_ft;
}
- mutex_unlock(&dev->flow_db.lock);
+ mutex_unlock(&dev->flow_db->lock);
kfree(dst);
return &handler->ibflow;
@@ -3058,7 +3096,7 @@ destroy_ft:
if (ft_prio_tx)
put_flow_table(dev, ft_prio_tx, false);
unlock:
- mutex_unlock(&dev->flow_db.lock);
+ mutex_unlock(&dev->flow_db->lock);
kfree(dst);
kfree(handler);
return ERR_PTR(err);
@@ -3772,6 +3810,25 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
return 0;
}
+static int mlx5_port_rep_immutable(struct ib_device *ibdev, u8 port_num,
+ struct ib_port_immutable *immutable)
+{
+ struct ib_port_attr attr;
+ int err;
+
+ immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
+
+ err = ib_query_port(ibdev, port_num, &attr);
+ if (err)
+ return err;
+
+ immutable->pkey_tbl_len = attr.pkey_tbl_len;
+ immutable->gid_tbl_len = attr.gid_tbl_len;
+ immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET;
+
+ return 0;
+}
+
static void get_dev_fw_str(struct ib_device *ibdev, char *str)
{
struct mlx5_ib_dev *dev =
@@ -3802,7 +3859,7 @@ static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev)
goto err_destroy_vport_lag;
}
- dev->flow_db.lag_demux_ft = ft;
+ dev->flow_db->lag_demux_ft = ft;
return 0;
err_destroy_vport_lag:
@@ -3814,9 +3871,9 @@ static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
- if (dev->flow_db.lag_demux_ft) {
- mlx5_destroy_flow_table(dev->flow_db.lag_demux_ft);
- dev->flow_db.lag_demux_ft = NULL;
+ if (dev->flow_db->lag_demux_ft) {
+ mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft);
+ dev->flow_db->lag_demux_ft = NULL;
mlx5_cmd_destroy_vport_lag(mdev);
}
@@ -3848,14 +3905,10 @@ static int mlx5_enable_eth(struct mlx5_ib_dev *dev, u8 port_num)
{
int err;
- err = mlx5_add_netdev_notifier(dev, port_num);
- if (err)
- return err;
-
if (MLX5_CAP_GEN(dev->mdev, roce)) {
err = mlx5_nic_vport_enable_roce(dev->mdev);
if (err)
- goto err_unregister_netdevice_notifier;
+ return err;
}
err = mlx5_eth_lag_init(dev);
@@ -3868,8 +3921,6 @@ err_disable_roce:
if (MLX5_CAP_GEN(dev->mdev, roce))
mlx5_nic_vport_disable_roce(dev->mdev);
-err_unregister_netdevice_notifier:
- mlx5_remove_netdev_notifier(dev, port_num);
return err;
}
@@ -4503,7 +4554,7 @@ static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev)
mlx5_nic_vport_disable_roce(dev->mdev);
}
-static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
+void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
{
mlx5_ib_cleanup_multiport_master(dev);
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
@@ -4512,7 +4563,7 @@ static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev)
kfree(dev->port);
}
-static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
+int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
const char *name;
@@ -4564,7 +4615,6 @@ static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev)
dev->mdev->priv.eq_table.num_comp_vectors;
dev->ib_dev.dev.parent = &mdev->pdev->dev;
- mutex_init(&dev->flow_db.lock);
mutex_init(&dev->cap_mask_mutex);
INIT_LIST_HEAD(&dev->qp_list);
spin_lock_init(&dev->reset_flow_resource_lock);
@@ -4585,7 +4635,38 @@ err_free_port:
return -ENOMEM;
}
-static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_flow_db_init(struct mlx5_ib_dev *dev)
+{
+ dev->flow_db = kzalloc(sizeof(*dev->flow_db), GFP_KERNEL);
+
+ if (!dev->flow_db)
+ return -ENOMEM;
+
+ mutex_init(&dev->flow_db->lock);
+
+ return 0;
+}
+
+int mlx5_ib_stage_rep_flow_db_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_ib_dev *nic_dev;
+
+ nic_dev = mlx5_ib_get_uplink_ibdev(dev->mdev->priv.eswitch);
+
+ if (!nic_dev)
+ return -EINVAL;
+
+ dev->flow_db = nic_dev->flow_db;
+
+ return 0;
+}
+
+static void mlx5_ib_stage_flow_db_cleanup(struct mlx5_ib_dev *dev)
+{
+ kfree(dev->flow_db);
+}
+
+int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
int err;
@@ -4626,7 +4707,6 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
(1ull << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
dev->ib_dev.query_device = mlx5_ib_query_device;
- dev->ib_dev.query_port = mlx5_ib_query_port;
dev->ib_dev.get_link_layer = mlx5_ib_port_link_layer;
dev->ib_dev.query_gid = mlx5_ib_query_gid;
dev->ib_dev.add_gid = mlx5_ib_add_gid;
@@ -4669,7 +4749,6 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
dev->ib_dev.alloc_mr = mlx5_ib_alloc_mr;
dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg;
dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
- dev->ib_dev.get_port_immutable = mlx5_port_immutable;
dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
dev->ib_dev.get_vector_affinity = mlx5_ib_get_vector_affinity;
if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads))
@@ -4720,6 +4799,80 @@ static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
return 0;
}
+static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev)
+{
+ dev->ib_dev.get_port_immutable = mlx5_port_immutable;
+ dev->ib_dev.query_port = mlx5_ib_query_port;
+
+ return 0;
+}
+
+int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
+{
+ dev->ib_dev.get_port_immutable = mlx5_port_rep_immutable;
+ dev->ib_dev.query_port = mlx5_ib_rep_query_port;
+
+ return 0;
+}
+
+static int mlx5_ib_stage_common_roce_init(struct mlx5_ib_dev *dev,
+ u8 port_num)
+{
+ int i;
+
+ for (i = 0; i < dev->num_ports; i++) {
+ dev->roce[i].dev = dev;
+ dev->roce[i].native_port_num = i + 1;
+ dev->roce[i].last_port_state = IB_PORT_DOWN;
+ }
+
+ dev->ib_dev.get_netdev = mlx5_ib_get_netdev;
+ dev->ib_dev.create_wq = mlx5_ib_create_wq;
+ dev->ib_dev.modify_wq = mlx5_ib_modify_wq;
+ dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq;
+ dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
+ dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
+
+ dev->ib_dev.uverbs_ex_cmd_mask |=
+ (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
+ (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
+ (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
+ (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
+ (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
+
+ return mlx5_add_netdev_notifier(dev, port_num);
+}
+
+static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev)
+{
+ u8 port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+
+ mlx5_remove_netdev_notifier(dev, port_num);
+}
+
+int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
+{
+ struct mlx5_core_dev *mdev = dev->mdev;
+ enum rdma_link_layer ll;
+ int port_type_cap;
+ int err = 0;
+ u8 port_num;
+
+ port_num = mlx5_core_native_port_num(dev->mdev) - 1;
+ port_type_cap = MLX5_CAP_GEN(mdev, port_type);
+ ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
+
+ if (ll == IB_LINK_LAYER_ETHERNET)
+ err = mlx5_ib_stage_common_roce_init(dev, port_num);
+
+ return err;
+}
+
+void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_ib_stage_common_roce_cleanup(dev);
+}
+
static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
{
struct mlx5_core_dev *mdev = dev->mdev;
@@ -4727,37 +4880,26 @@ static int mlx5_ib_stage_roce_init(struct mlx5_ib_dev *dev)
int port_type_cap;
u8 port_num;
int err;
- int i;
port_num = mlx5_core_native_port_num(dev->mdev) - 1;
port_type_cap = MLX5_CAP_GEN(mdev, port_type);
ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
if (ll == IB_LINK_LAYER_ETHERNET) {
- for (i = 0; i < dev->num_ports; i++) {
- dev->roce[i].dev = dev;
- dev->roce[i].native_port_num = i + 1;
- dev->roce[i].last_port_state = IB_PORT_DOWN;
- }
+ err = mlx5_ib_stage_common_roce_init(dev, port_num);
+ if (err)
+ return err;
- dev->ib_dev.get_netdev = mlx5_ib_get_netdev;
- dev->ib_dev.create_wq = mlx5_ib_create_wq;
- dev->ib_dev.modify_wq = mlx5_ib_modify_wq;
- dev->ib_dev.destroy_wq = mlx5_ib_destroy_wq;
- dev->ib_dev.create_rwq_ind_table = mlx5_ib_create_rwq_ind_table;
- dev->ib_dev.destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table;
- dev->ib_dev.uverbs_ex_cmd_mask |=
- (1ull << IB_USER_VERBS_EX_CMD_CREATE_WQ) |
- (1ull << IB_USER_VERBS_EX_CMD_MODIFY_WQ) |
- (1ull << IB_USER_VERBS_EX_CMD_DESTROY_WQ) |
- (1ull << IB_USER_VERBS_EX_CMD_CREATE_RWQ_IND_TBL) |
- (1ull << IB_USER_VERBS_EX_CMD_DESTROY_RWQ_IND_TBL);
err = mlx5_enable_eth(dev, port_num);
if (err)
- return err;
+ goto cleanup;
}
return 0;
+cleanup:
+ mlx5_ib_stage_common_roce_cleanup(dev);
+
+ return err;
}
static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
@@ -4773,16 +4915,16 @@ static void mlx5_ib_stage_roce_cleanup(struct mlx5_ib_dev *dev)
if (ll == IB_LINK_LAYER_ETHERNET) {
mlx5_disable_eth(dev);
- mlx5_remove_netdev_notifier(dev, port_num);
+ mlx5_ib_stage_common_roce_cleanup(dev);
}
}
-static int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
+int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev)
{
return create_dev_resources(&dev->devr);
}
-static void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
+void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev)
{
destroy_dev_resources(&dev->devr);
}
@@ -4794,7 +4936,7 @@ static int mlx5_ib_stage_odp_init(struct mlx5_ib_dev *dev)
return mlx5_ib_odp_init_one(dev);
}
-static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
+int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
{
if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt)) {
dev->ib_dev.get_hw_stats = mlx5_ib_get_hw_stats;
@@ -4806,7 +4948,7 @@ static int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev)
return 0;
}
-static void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
+void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev)
{
if (MLX5_CAP_GEN(dev->mdev, max_qp_cnt))
mlx5_ib_dealloc_counters(dev);
@@ -4837,7 +4979,7 @@ static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev)
mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar);
}
-static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
+int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
{
int err;
@@ -4852,28 +4994,28 @@ static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev)
return err;
}
-static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
+void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev)
{
mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg);
mlx5_free_bfreg(dev->mdev, &dev->bfreg);
}
-static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
+int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev)
{
return ib_register_device(&dev->ib_dev, NULL);
}
-static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
+void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev)
{
ib_unregister_device(&dev->ib_dev);
}
-static int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev)
+int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev)
{
return create_umr_res(dev);
}
-static void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev)
+void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev)
{
destroy_umrc_res(dev);
}
@@ -4890,7 +5032,7 @@ static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev)
cancel_delay_drop(dev);
}
-static int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev)
+int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev)
{
int err;
int i;
@@ -4905,9 +5047,21 @@ static int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev)
return 0;
}
-static void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
- const struct mlx5_ib_profile *profile,
- int stage)
+static int mlx5_ib_stage_rep_reg_init(struct mlx5_ib_dev *dev)
+{
+ mlx5_ib_register_vport_reps(dev);
+
+ return 0;
+}
+
+static void mlx5_ib_stage_rep_reg_cleanup(struct mlx5_ib_dev *dev)
+{
+ mlx5_ib_unregister_vport_reps(dev);
+}
+
+void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
+ const struct mlx5_ib_profile *profile,
+ int stage)
{
/* Number of stages to cleanup */
while (stage) {
@@ -4921,23 +5075,14 @@ static void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num);
-static void *__mlx5_ib_add(struct mlx5_core_dev *mdev,
- const struct mlx5_ib_profile *profile)
+void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
+ const struct mlx5_ib_profile *profile)
{
- struct mlx5_ib_dev *dev;
int err;
int i;
printk_once(KERN_INFO "%s", mlx5_version);
- dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
- if (!dev)
- return NULL;
-
- dev->mdev = mdev;
- dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
- MLX5_CAP_GEN(mdev, num_vhca_ports));
-
for (i = 0; i < MLX5_IB_STAGE_MAX; i++) {
if (profile->stage[i].init) {
err = profile->stage[i].init(dev);
@@ -4961,9 +5106,15 @@ static const struct mlx5_ib_profile pf_profile = {
STAGE_CREATE(MLX5_IB_STAGE_INIT,
mlx5_ib_stage_init_init,
mlx5_ib_stage_init_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
+ mlx5_ib_stage_flow_db_init,
+ mlx5_ib_stage_flow_db_cleanup),
STAGE_CREATE(MLX5_IB_STAGE_CAPS,
mlx5_ib_stage_caps_init,
NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
+ mlx5_ib_stage_non_default_cb,
+ NULL),
STAGE_CREATE(MLX5_IB_STAGE_ROCE,
mlx5_ib_stage_roce_init,
mlx5_ib_stage_roce_cleanup),
@@ -4999,6 +5150,48 @@ static const struct mlx5_ib_profile pf_profile = {
NULL),
};
+static const struct mlx5_ib_profile nic_rep_profile = {
+ STAGE_CREATE(MLX5_IB_STAGE_INIT,
+ mlx5_ib_stage_init_init,
+ mlx5_ib_stage_init_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_FLOW_DB,
+ mlx5_ib_stage_flow_db_init,
+ mlx5_ib_stage_flow_db_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CAPS,
+ mlx5_ib_stage_caps_init,
+ NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
+ mlx5_ib_stage_rep_non_default_cb,
+ NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_ROCE,
+ mlx5_ib_stage_rep_roce_init,
+ mlx5_ib_stage_rep_roce_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES,
+ mlx5_ib_stage_dev_res_init,
+ mlx5_ib_stage_dev_res_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_COUNTERS,
+ mlx5_ib_stage_counters_init,
+ mlx5_ib_stage_counters_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_UAR,
+ mlx5_ib_stage_uar_init,
+ mlx5_ib_stage_uar_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_BFREG,
+ mlx5_ib_stage_bfrag_init,
+ mlx5_ib_stage_bfrag_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_IB_REG,
+ mlx5_ib_stage_ib_reg_init,
+ mlx5_ib_stage_ib_reg_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_UMR_RESOURCES,
+ mlx5_ib_stage_umr_res_init,
+ mlx5_ib_stage_umr_res_cleanup),
+ STAGE_CREATE(MLX5_IB_STAGE_CLASS_ATTR,
+ mlx5_ib_stage_class_attr_init,
+ NULL),
+ STAGE_CREATE(MLX5_IB_STAGE_REP_REG,
+ mlx5_ib_stage_rep_reg_init,
+ mlx5_ib_stage_rep_reg_cleanup),
+};
+
static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num)
{
struct mlx5_ib_multiport_info *mpi;
@@ -5044,8 +5237,11 @@ static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev, u8 port_num)
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
enum rdma_link_layer ll;
+ struct mlx5_ib_dev *dev;
int port_type_cap;
+ printk_once(KERN_INFO "%s", mlx5_version);
+
port_type_cap = MLX5_CAP_GEN(mdev, port_type);
ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
@@ -5055,7 +5251,22 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
return mlx5_ib_add_slave_port(mdev, port_num);
}
- return __mlx5_ib_add(mdev, &pf_profile);
+ dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
+ if (!dev)
+ return NULL;
+
+ dev->mdev = mdev;
+ dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
+ MLX5_CAP_GEN(mdev, num_vhca_ports));
+
+ if (MLX5_VPORT_MANAGER(mdev) &&
+ mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
+ dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
+
+ return __mlx5_ib_add(dev, &nic_rep_profile);
+ }
+
+ return __mlx5_ib_add(dev, &pf_profile);
}
static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
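The staged profile mechanism above is what both pf_profile and nic_rep_profile plug into: __mlx5_ib_add() walks the stage array forward, and on failure (or on removal) __mlx5_ib_remove() unwinds only the stages that completed, in reverse order. A minimal sketch of that contract, using illustrative names rather than the real mlx5 types:

/* Sketch of the init/teardown pattern used by the mlx5_ib profiles.
 * "struct dev", "profile_add" and "profile_remove" are placeholders,
 * not kernel API.
 */
struct dev;

struct stage {
	int (*init)(struct dev *dev);
	void (*cleanup)(struct dev *dev);
};

static void profile_remove(struct dev *dev, const struct stage *s, int stage)
{
	/* Unwind the completed stages in reverse order */
	while (stage) {
		stage--;
		if (s[stage].cleanup)
			s[stage].cleanup(dev);
	}
}

static int profile_add(struct dev *dev, const struct stage *s, int max)
{
	int err, i;

	for (i = 0; i < max; i++) {
		if (s[i].init) {
			err = s[i].init(dev);
			if (err) {
				/* only the i stages that ran are undone */
				profile_remove(dev, s, i);
				return err;
			}
		}
	}

	return 0;
}

This is also why a profile may omit a stage entirely or supply an init hook with a NULL cleanup, as several STAGE_CREATE() entries above do.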
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 139385129973..e0bad28e0f09 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -343,6 +343,7 @@ struct mlx5_ib_sq {
struct mlx5_ib_wq *sq;
struct mlx5_ib_ubuffer ubuffer;
struct mlx5_db *doorbell;
+ struct mlx5_flow_handle *flow_rule;
u32 tisn;
u8 state;
};
@@ -371,7 +372,7 @@ struct mlx5_ib_qp {
struct mlx5_ib_rss_qp rss_qp;
struct mlx5_ib_dct dct;
};
- struct mlx5_buf buf;
+ struct mlx5_frag_buf buf;
struct mlx5_db db;
struct mlx5_ib_wq rq;
@@ -413,7 +414,7 @@ struct mlx5_ib_qp {
};
struct mlx5_ib_cq_buf {
- struct mlx5_buf buf;
+ struct mlx5_frag_buf_ctrl fbc;
struct ib_umem *umem;
int cqe_size;
int nent;
@@ -495,7 +496,7 @@ struct mlx5_ib_wc {
struct mlx5_ib_srq {
struct ib_srq ibsrq;
struct mlx5_core_srq msrq;
- struct mlx5_buf buf;
+ struct mlx5_frag_buf buf;
struct mlx5_db db;
u64 *wrid;
/* protect SRQ handling
@@ -731,7 +732,9 @@ struct mlx5_ib_delay_drop {
enum mlx5_ib_stages {
MLX5_IB_STAGE_INIT,
+ MLX5_IB_STAGE_FLOW_DB,
MLX5_IB_STAGE_CAPS,
+ MLX5_IB_STAGE_NON_DEFAULT_CB,
MLX5_IB_STAGE_ROCE,
MLX5_IB_STAGE_DEVICE_RESOURCES,
MLX5_IB_STAGE_ODP,
@@ -743,6 +746,7 @@ enum mlx5_ib_stages {
MLX5_IB_STAGE_UMR_RESOURCES,
MLX5_IB_STAGE_DELAY_DROP,
MLX5_IB_STAGE_CLASS_ATTR,
+ MLX5_IB_STAGE_REP_REG,
MLX5_IB_STAGE_MAX,
};
@@ -797,7 +801,7 @@ struct mlx5_ib_dev {
struct srcu_struct mr_srcu;
u32 null_mkey;
#endif
- struct mlx5_ib_flow_db flow_db;
+ struct mlx5_ib_flow_db *flow_db;
/* protect resources needed as part of reset flow */
spinlock_t reset_flow_resource_lock;
struct list_head qp_list;
@@ -807,6 +811,7 @@ struct mlx5_ib_dev {
struct mlx5_sq_bfreg fp_bfreg;
struct mlx5_ib_delay_drop delay_drop;
const struct mlx5_ib_profile *profile;
+ struct mlx5_eswitch_rep *rep;
/* protect the user_td */
struct mutex lb_mutex;
@@ -1049,6 +1054,31 @@ static inline void mlx5_odp_populate_klm(struct mlx5_klm *pklm, size_t offset,
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
+/* Needed for rep profile */
+int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev);
+void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev);
+int mlx5_ib_stage_rep_flow_db_init(struct mlx5_ib_dev *dev);
+int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev);
+int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev);
+int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev);
+void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev);
+int mlx5_ib_stage_dev_res_init(struct mlx5_ib_dev *dev);
+void mlx5_ib_stage_dev_res_cleanup(struct mlx5_ib_dev *dev);
+int mlx5_ib_stage_counters_init(struct mlx5_ib_dev *dev);
+void mlx5_ib_stage_counters_cleanup(struct mlx5_ib_dev *dev);
+int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev);
+void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev);
+int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev);
+void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev);
+int mlx5_ib_stage_umr_res_init(struct mlx5_ib_dev *dev);
+void mlx5_ib_stage_umr_res_cleanup(struct mlx5_ib_dev *dev);
+int mlx5_ib_stage_class_attr_init(struct mlx5_ib_dev *dev);
+void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
+ const struct mlx5_ib_profile *profile,
+ int stage);
+void *__mlx5_ib_add(struct mlx5_ib_dev *dev,
+ const struct mlx5_ib_profile *profile);
+
int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
u8 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 556e015678de..a5fad3e87ff7 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -587,7 +587,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev)
{
- if (!mlx5_debugfs_root)
+ if (!mlx5_debugfs_root || dev->rep)
return;
debugfs_remove_recursive(dev->cache.root);
@@ -600,7 +600,7 @@ static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
struct mlx5_cache_ent *ent;
int i;
- if (!mlx5_debugfs_root)
+ if (!mlx5_debugfs_root || dev->rep)
return 0;
cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
@@ -690,6 +690,7 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
MLX5_IB_UMR_OCTOWORD;
ent->access_mode = MLX5_MKC_ACCESS_MODE_MTT;
if ((dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE) &&
+ !dev->rep &&
mlx5_core_is_pf(dev->mdev))
ent->limit = dev->mdev->profile->mr_cache[i].limit;
else
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 39d24bf694a8..5663530ea5fd 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -36,6 +36,7 @@
#include <rdma/ib_user_verbs.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
+#include "ib_rep.h"
/* not supported currently */
static int wq_signature;
@@ -1082,6 +1083,13 @@ static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
mlx5_core_destroy_tis(dev->mdev, sq->tisn);
}
+static void destroy_flow_rule_vport_sq(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_sq *sq)
+{
+ if (sq->flow_rule)
+ mlx5_del_flow_rules(sq->flow_rule);
+}
+
static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq, void *qpin,
struct ib_pd *pd)
@@ -1145,8 +1153,15 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
if (err)
goto err_umem;
+ err = create_flow_rule_vport_sq(dev, sq);
+ if (err)
+ goto err_flow;
+
return 0;
+err_flow:
+ mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);
+
err_umem:
ib_umem_release(sq->ubuffer.umem);
sq->ubuffer.umem = NULL;
@@ -1157,6 +1172,7 @@ err_umem:
static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq)
{
+ destroy_flow_rule_vport_sq(dev, sq);
mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);
ib_umem_release(sq->ubuffer.umem);
}
@@ -1263,6 +1279,10 @@ static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
if (tunnel_offload_en)
MLX5_SET(tirc, tirc, tunneled_offload_en, 1);
+ if (dev->rep)
+ MLX5_SET(tirc, tirc, self_lb_block,
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_);
+
err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn);
kvfree(in);
@@ -1554,6 +1574,10 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);
create_tir:
+ if (dev->rep)
+ MLX5_SET(tirc, tirc, self_lb_block,
+ MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST_);
+
err = mlx5_core_create_tir(dev->mdev, in, inlen, &qp->rss_qp.tirn);
if (err)
diff --git a/drivers/infiniband/hw/usnic/usnic_transport.c b/drivers/infiniband/hw/usnic/usnic_transport.c
index de318389a301..67de94343cb4 100644
--- a/drivers/infiniband/hw/usnic/usnic_transport.c
+++ b/drivers/infiniband/hw/usnic/usnic_transport.c
@@ -174,14 +174,13 @@ void usnic_transport_put_socket(struct socket *sock)
int usnic_transport_sock_get_addr(struct socket *sock, int *proto,
uint32_t *addr, uint16_t *port)
{
- int len;
int err;
struct sockaddr_in sock_addr;
err = sock->ops->getname(sock,
(struct sockaddr *)&sock_addr,
- &len, 0);
- if (err)
+ 0);
+ if (err < 0)
return err;
if (sock_addr.sin_family != AF_INET)
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index c5603d1a07d6..1f8f489b4167 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -560,7 +560,7 @@ done:
static int
data_sock_getname(struct socket *sock, struct sockaddr *addr,
- int *addr_len, int peer)
+ int peer)
{
struct sockaddr_mISDN *maddr = (struct sockaddr_mISDN *) addr;
struct sock *sk = sock->sk;
@@ -570,14 +570,13 @@ data_sock_getname(struct socket *sock, struct sockaddr *addr,
lock_sock(sk);
- *addr_len = sizeof(*maddr);
maddr->family = AF_ISDN;
maddr->dev = _pms(sk)->dev->id;
maddr->channel = _pms(sk)->ch.nr;
maddr->sapi = _pms(sk)->ch.addr & 0xff;
maddr->tei = (_pms(sk)->ch.addr >> 8) & 0xff;
release_sock(sk);
- return 0;
+ return sizeof(*maddr);
}
static const struct proto_ops data_sock_ops = {
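The usnic and mISDN hunks above are both fallout from the same tree-wide change to the socket getname() operation: rather than returning 0 and storing the address length through an int pointer, getname() now returns the length itself (positive) or a negative errno. A brief sketch of a caller under the new convention; the function and variable names here are illustrative only:

/* Sketch: consuming the new-style getname() return value. The third
 * argument is the peer flag (0 = local address).
 */
static int example_local_ipv4_port(struct socket *sock, u16 *port)
{
	struct sockaddr_in addr;
	int len;

	len = sock->ops->getname(sock, (struct sockaddr *)&addr, 0);
	if (len < 0)
		return len;	/* negative errno, as in the usnic hunk */

	if (addr.sin_family != AF_INET)
		return -EINVAL;

	*port = ntohs(addr.sin_port);
	return 0;
}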
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 944ec3c9282c..08b85215c2be 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -149,9 +149,9 @@ config MACVTAP
config IPVLAN
tristate "IP-VLAN support"
depends on INET
- depends on IPV6
+ depends on IPV6 || !IPV6
depends on NETFILTER
- depends on NET_L3_MASTER_DEV
+ select NET_L3_MASTER_DEV
---help---
This allows one to create virtual devices off of a main interface
and packets will be delivered based on the dest L3 (IPv6/IPv4 addr)
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 11fe71278f40..3afda6561434 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -114,12 +114,6 @@ static struct devprobe2 m68k_probes[] __initdata = {
#ifdef CONFIG_MVME147_NET /* MVME147 internal Ethernet */
{mvme147lance_probe, 0},
#endif
-#ifdef CONFIG_MAC8390 /* NuBus NS8390-based cards */
- {mac8390_probe, 0},
-#endif
-#ifdef CONFIG_MAC89x0
- {mac89x0_probe, 0},
-#endif
{NULL, 0},
};
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index c669554d70bb..4c19d23dd282 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4791,6 +4791,7 @@ static struct pernet_operations bond_net_ops = {
.exit = bond_net_exit,
.id = &bond_net_id,
.size = sizeof(struct bond_net),
+ .async = true,
};
static int __init bonding_init(void)
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index db830a1141d9..cd16067265dd 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -852,7 +852,7 @@ void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data)
}
EXPORT_SYMBOL(b53_get_ethtool_stats);
-int b53_get_sset_count(struct dsa_switch *ds)
+int b53_get_sset_count(struct dsa_switch *ds, int port)
{
struct b53_device *dev = ds->priv;
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index d954cf36ecd8..1187ebd79287 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -288,7 +288,7 @@ void b53_imp_vlan_setup(struct dsa_switch *ds, int cpu_port);
int b53_configure_vlan(struct dsa_switch *ds);
void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data);
void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
-int b53_get_sset_count(struct dsa_switch *ds);
+int b53_get_sset_count(struct dsa_switch *ds, int port);
int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge);
void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge);
void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
diff --git a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c
index 7aa84ee4e771..f77be9f85cb3 100644
--- a/drivers/net/dsa/dsa_loop.c
+++ b/drivers/net/dsa/dsa_loop.c
@@ -86,7 +86,7 @@ static int dsa_loop_setup(struct dsa_switch *ds)
return 0;
}
-static int dsa_loop_get_sset_count(struct dsa_switch *ds)
+static int dsa_loop_get_sset_count(struct dsa_switch *ds, int port)
{
return __DSA_LOOP_CNT_MAX;
}
diff --git a/drivers/net/dsa/lan9303-core.c b/drivers/net/dsa/lan9303-core.c
index 6171c0853ff1..fefa454f3e56 100644
--- a/drivers/net/dsa/lan9303-core.c
+++ b/drivers/net/dsa/lan9303-core.c
@@ -1007,7 +1007,7 @@ static void lan9303_get_ethtool_stats(struct dsa_switch *ds, int port,
}
}
-static int lan9303_get_sset_count(struct dsa_switch *ds)
+static int lan9303_get_sset_count(struct dsa_switch *ds, int port)
{
return ARRAY_SIZE(lan9303_mib);
}
diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
index 663b0d5b982b..bcb3e6c734f2 100644
--- a/drivers/net/dsa/microchip/ksz_common.c
+++ b/drivers/net/dsa/microchip/ksz_common.c
@@ -439,7 +439,7 @@ static void ksz_disable_port(struct dsa_switch *ds, int port,
ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, true);
}
-static int ksz_sset_count(struct dsa_switch *ds)
+static int ksz_sset_count(struct dsa_switch *ds, int port)
{
return TOTAL_SWITCH_COUNTER_NUM;
}
diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c
index 8a0bb000d056..511ca134f13f 100644
--- a/drivers/net/dsa/mt7530.c
+++ b/drivers/net/dsa/mt7530.c
@@ -604,7 +604,7 @@ mt7530_get_ethtool_stats(struct dsa_switch *ds, int port,
}
static int
-mt7530_get_sset_count(struct dsa_switch *ds)
+mt7530_get_sset_count(struct dsa_switch *ds, int port)
{
return ARRAY_SIZE(mt7530_mib);
}
diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig
index 1aaa7a95ebc4..ae9e7f7cb31c 100644
--- a/drivers/net/dsa/mv88e6xxx/Kconfig
+++ b/drivers/net/dsa/mv88e6xxx/Kconfig
@@ -18,3 +18,13 @@ config NET_DSA_MV88E6XXX_GLOBAL2
It is required on most chips. If the chip you are compiling support for
doesn't have such registers, say N here. If in doubt, say Y.
+
+config NET_DSA_MV88E6XXX_PTP
+ bool "PTP support for Marvell 88E6xxx"
+ default n
+ depends on NET_DSA_MV88E6XXX_GLOBAL2
+ imply NETWORK_PHY_TIMESTAMPING
+ imply PTP_1588_CLOCK
+ help
+ Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch
+ chips that support it.
diff --git a/drivers/net/dsa/mv88e6xxx/Makefile b/drivers/net/dsa/mv88e6xxx/Makefile
index 58a4a0014e59..50de304abe2f 100644
--- a/drivers/net/dsa/mv88e6xxx/Makefile
+++ b/drivers/net/dsa/mv88e6xxx/Makefile
@@ -5,6 +5,10 @@ mv88e6xxx-objs += global1.o
mv88e6xxx-objs += global1_atu.o
mv88e6xxx-objs += global1_vtu.o
mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_GLOBAL2) += global2.o
+mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_GLOBAL2) += global2_avb.o
+mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_GLOBAL2) += global2_scratch.o
+mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += hwtstamp.o
mv88e6xxx-objs += phy.o
mv88e6xxx-objs += port.o
+mv88e6xxx-$(CONFIG_NET_DSA_MV88E6XXX_PTP) += ptp.o
mv88e6xxx-objs += serdes.o
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index eb328bade225..cfd53632a655 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -36,8 +36,10 @@
#include "chip.h"
#include "global1.h"
#include "global2.h"
+#include "hwtstamp.h"
#include "phy.h"
#include "port.h"
+#include "ptp.h"
#include "serdes.h"
static void assert_reg_lock(struct mv88e6xxx_chip *chip)
@@ -251,9 +253,8 @@ static void mv88e6xxx_g1_irq_unmask(struct irq_data *d)
chip->g1_irq.masked &= ~(1 << n);
}
-static irqreturn_t mv88e6xxx_g1_irq_thread_fn(int irq, void *dev_id)
+static irqreturn_t mv88e6xxx_g1_irq_thread_work(struct mv88e6xxx_chip *chip)
{
- struct mv88e6xxx_chip *chip = dev_id;
unsigned int nhandled = 0;
unsigned int sub_irq;
unsigned int n;
@@ -278,6 +279,13 @@ out:
return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
}
+static irqreturn_t mv88e6xxx_g1_irq_thread_fn(int irq, void *dev_id)
+{
+ struct mv88e6xxx_chip *chip = dev_id;
+
+ return mv88e6xxx_g1_irq_thread_work(chip);
+}
+
static void mv88e6xxx_g1_irq_bus_lock(struct irq_data *d)
{
struct mv88e6xxx_chip *chip = irq_data_get_irq_chip_data(d);
@@ -333,7 +341,7 @@ static const struct irq_domain_ops mv88e6xxx_g1_irq_domain_ops = {
.xlate = irq_domain_xlate_twocell,
};
-static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
+static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)
{
int irq, virq;
u16 mask;
@@ -342,8 +350,6 @@ static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
mask &= ~GENMASK(chip->g1_irq.nirqs, 0);
mv88e6xxx_g1_write(chip, MV88E6XXX_G1_CTL1, mask);
- free_irq(chip->irq, chip);
-
for (irq = 0; irq < chip->g1_irq.nirqs; irq++) {
virq = irq_find_mapping(chip->g1_irq.domain, irq);
irq_dispose_mapping(virq);
@@ -352,7 +358,14 @@ static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
irq_domain_remove(chip->g1_irq.domain);
}
-static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip)
+static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
+{
+ mv88e6xxx_g1_irq_free_common(chip);
+
+ free_irq(chip->irq, chip);
+}
+
+static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip)
{
int err, irq, virq;
u16 reg, mask;
@@ -385,13 +398,6 @@ static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip)
if (err)
goto out_disable;
- err = request_threaded_irq(chip->irq, NULL,
- mv88e6xxx_g1_irq_thread_fn,
- IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
- dev_name(chip->dev), chip);
- if (err)
- goto out_disable;
-
return 0;
out_disable:
@@ -409,6 +415,62 @@ out_mapping:
return err;
}
+static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ err = mv88e6xxx_g1_irq_setup_common(chip);
+ if (err)
+ return err;
+
+ err = request_threaded_irq(chip->irq, NULL,
+ mv88e6xxx_g1_irq_thread_fn,
+ IRQF_ONESHOT | IRQF_TRIGGER_FALLING,
+ dev_name(chip->dev), chip);
+ if (err)
+ mv88e6xxx_g1_irq_free_common(chip);
+
+ return err;
+}
+
+static void mv88e6xxx_irq_poll(struct kthread_work *work)
+{
+ struct mv88e6xxx_chip *chip = container_of(work,
+ struct mv88e6xxx_chip,
+ irq_poll_work.work);
+ mv88e6xxx_g1_irq_thread_work(chip);
+
+ kthread_queue_delayed_work(chip->kworker, &chip->irq_poll_work,
+ msecs_to_jiffies(100));
+}
+
+static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip)
+{
+ int err;
+
+ err = mv88e6xxx_g1_irq_setup_common(chip);
+ if (err)
+ return err;
+
+ kthread_init_delayed_work(&chip->irq_poll_work,
+ mv88e6xxx_irq_poll);
+
+ chip->kworker = kthread_create_worker(0, dev_name(chip->dev));
+ if (IS_ERR(chip->kworker))
+ return PTR_ERR(chip->kworker);
+
+ kthread_queue_delayed_work(chip->kworker, &chip->irq_poll_work,
+ msecs_to_jiffies(100));
+
+ return 0;
+}
+
+static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip)
+{
+ kthread_cancel_delayed_work_sync(&chip->irq_poll_work);
+ kthread_destroy_worker(chip->kworker);
+}
+
int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask)
{
int i;
@@ -604,7 +666,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
return UINT64_MAX;
low = reg;
- if (s->sizeof_stat == 4) {
+ if (s->size == 4) {
err = mv88e6xxx_port_read(chip, port, s->reg + 1, &reg);
if (err)
return UINT64_MAX;
@@ -617,7 +679,7 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
case STATS_TYPE_BANK0:
reg |= s->reg | histogram;
mv88e6xxx_g1_stats_read(chip, reg, &low);
- if (s->sizeof_stat == 8)
+ if (s->size == 8)
mv88e6xxx_g1_stats_read(chip, reg + 1, &high);
break;
default:
@@ -627,8 +689,8 @@ static uint64_t _mv88e6xxx_get_ethtool_stat(struct mv88e6xxx_chip *chip,
return value;
}
-static void mv88e6xxx_stats_get_strings(struct mv88e6xxx_chip *chip,
- uint8_t *data, int types)
+static int mv88e6xxx_stats_get_strings(struct mv88e6xxx_chip *chip,
+ uint8_t *data, int types)
{
struct mv88e6xxx_hw_stat *stat;
int i, j;
@@ -641,29 +703,41 @@ static void mv88e6xxx_stats_get_strings(struct mv88e6xxx_chip *chip,
j++;
}
}
+
+ return j;
}
-static void mv88e6095_stats_get_strings(struct mv88e6xxx_chip *chip,
- uint8_t *data)
+static int mv88e6095_stats_get_strings(struct mv88e6xxx_chip *chip,
+ uint8_t *data)
{
- mv88e6xxx_stats_get_strings(chip, data,
- STATS_TYPE_BANK0 | STATS_TYPE_PORT);
+ return mv88e6xxx_stats_get_strings(chip, data,
+ STATS_TYPE_BANK0 | STATS_TYPE_PORT);
}
-static void mv88e6320_stats_get_strings(struct mv88e6xxx_chip *chip,
- uint8_t *data)
+static int mv88e6320_stats_get_strings(struct mv88e6xxx_chip *chip,
+ uint8_t *data)
{
- mv88e6xxx_stats_get_strings(chip, data,
- STATS_TYPE_BANK0 | STATS_TYPE_BANK1);
+ return mv88e6xxx_stats_get_strings(chip, data,
+ STATS_TYPE_BANK0 | STATS_TYPE_BANK1);
}
static void mv88e6xxx_get_strings(struct dsa_switch *ds, int port,
uint8_t *data)
{
struct mv88e6xxx_chip *chip = ds->priv;
+ int count = 0;
+
+ mutex_lock(&chip->reg_lock);
if (chip->info->ops->stats_get_strings)
- chip->info->ops->stats_get_strings(chip, data);
+ count = chip->info->ops->stats_get_strings(chip, data);
+
+ if (chip->info->ops->serdes_get_strings) {
+ data += count * ETH_GSTRING_LEN;
+ chip->info->ops->serdes_get_strings(chip, port, data);
+ }
+
+ mutex_unlock(&chip->reg_lock);
}
static int mv88e6xxx_stats_get_sset_count(struct mv88e6xxx_chip *chip,
@@ -692,19 +766,34 @@ static int mv88e6320_stats_get_sset_count(struct mv88e6xxx_chip *chip)
STATS_TYPE_BANK1);
}
-static int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
+static int mv88e6xxx_get_sset_count(struct dsa_switch *ds, int port)
{
struct mv88e6xxx_chip *chip = ds->priv;
+ int serdes_count = 0;
+ int count = 0;
+ mutex_lock(&chip->reg_lock);
if (chip->info->ops->stats_get_sset_count)
- return chip->info->ops->stats_get_sset_count(chip);
+ count = chip->info->ops->stats_get_sset_count(chip);
+ if (count < 0)
+ goto out;
- return 0;
+ if (chip->info->ops->serdes_get_sset_count)
+ serdes_count = chip->info->ops->serdes_get_sset_count(chip,
+ port);
+ if (serdes_count < 0)
+ count = serdes_count;
+ else
+ count += serdes_count;
+out:
+ mutex_unlock(&chip->reg_lock);
+
+ return count;
}
-static void mv88e6xxx_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
- uint64_t *data, int types,
- u16 bank1_select, u16 histogram)
+static int mv88e6xxx_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data, int types,
+ u16 bank1_select, u16 histogram)
{
struct mv88e6xxx_hw_stat *stat;
int i, j;
@@ -712,24 +801,28 @@ static void mv88e6xxx_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
for (i = 0, j = 0; i < ARRAY_SIZE(mv88e6xxx_hw_stats); i++) {
stat = &mv88e6xxx_hw_stats[i];
if (stat->type & types) {
+ mutex_lock(&chip->reg_lock);
data[j] = _mv88e6xxx_get_ethtool_stat(chip, stat, port,
bank1_select,
histogram);
+ mutex_unlock(&chip->reg_lock);
+
j++;
}
}
+ return j;
}
-static void mv88e6095_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
- uint64_t *data)
+static int mv88e6095_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
{
return mv88e6xxx_stats_get_stats(chip, port, data,
STATS_TYPE_BANK0 | STATS_TYPE_PORT,
0, MV88E6XXX_G1_STATS_OP_HIST_RX_TX);
}
-static void mv88e6320_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
- uint64_t *data)
+static int mv88e6320_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
{
return mv88e6xxx_stats_get_stats(chip, port, data,
STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
@@ -737,8 +830,8 @@ static void mv88e6320_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
MV88E6XXX_G1_STATS_OP_HIST_RX_TX);
}
-static void mv88e6390_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
- uint64_t *data)
+static int mv88e6390_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
{
return mv88e6xxx_stats_get_stats(chip, port, data,
STATS_TYPE_BANK0 | STATS_TYPE_BANK1,
@@ -749,8 +842,15 @@ static void mv88e6390_stats_get_stats(struct mv88e6xxx_chip *chip, int port,
static void mv88e6xxx_get_stats(struct mv88e6xxx_chip *chip, int port,
uint64_t *data)
{
+ int count = 0;
+
if (chip->info->ops->stats_get_stats)
- chip->info->ops->stats_get_stats(chip, port, data);
+ count = chip->info->ops->stats_get_stats(chip, port, data);
+
+ if (chip->info->ops->serdes_get_stats) {
+ data += count;
+ chip->info->ops->serdes_get_stats(chip, port, data);
+ }
}
static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
@@ -762,14 +862,13 @@ static void mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds, int port,
mutex_lock(&chip->reg_lock);
ret = mv88e6xxx_stats_snapshot(chip, port);
- if (ret < 0) {
- mutex_unlock(&chip->reg_lock);
+ mutex_unlock(&chip->reg_lock);
+
+ if (ret < 0)
return;
- }
mv88e6xxx_get_stats(chip, port, data);
- mutex_unlock(&chip->reg_lock);
}
static int mv88e6xxx_stats_set_histogram(struct mv88e6xxx_chip *chip)
@@ -1433,7 +1532,9 @@ static int mv88e6xxx_port_db_dump_fid(struct mv88e6xxx_chip *chip,
eth_broadcast_addr(addr.mac);
do {
+ mutex_lock(&chip->reg_lock);
err = mv88e6xxx_g1_atu_getnext(chip, fid, &addr);
+ mutex_unlock(&chip->reg_lock);
if (err)
return err;
@@ -1466,7 +1567,10 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port,
int err;
/* Dump port's default Filtering Information Database (VLAN ID 0) */
+ mutex_lock(&chip->reg_lock);
err = mv88e6xxx_port_get_fid(chip, port, &fid);
+ mutex_unlock(&chip->reg_lock);
+
if (err)
return err;
@@ -1476,7 +1580,9 @@ static int mv88e6xxx_port_db_dump(struct mv88e6xxx_chip *chip, int port,
/* Dump VLANs' Filtering Information Databases */
do {
+ mutex_lock(&chip->reg_lock);
err = mv88e6xxx_vtu_getnext(chip, &vlan);
+ mutex_unlock(&chip->reg_lock);
if (err)
return err;
@@ -1496,13 +1602,8 @@ static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
dsa_fdb_dump_cb_t *cb, void *data)
{
struct mv88e6xxx_chip *chip = ds->priv;
- int err;
- mutex_lock(&chip->reg_lock);
- err = mv88e6xxx_port_db_dump(chip, port, cb, data);
- mutex_unlock(&chip->reg_lock);
-
- return err;
+ return mv88e6xxx_port_db_dump(chip, port, cb, data);
}
static int mv88e6xxx_bridge_map(struct mv88e6xxx_chip *chip,
@@ -2092,6 +2193,17 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
if (err)
goto unlock;
+ /* Setup PTP Hardware Clock and timestamping */
+ if (chip->info->ptp_support) {
+ err = mv88e6xxx_ptp_setup(chip);
+ if (err)
+ goto unlock;
+
+ err = mv88e6xxx_hwtstamp_setup(chip);
+ if (err)
+ goto unlock;
+ }
+
unlock:
mutex_unlock(&chip->reg_lock);
@@ -2148,6 +2260,15 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
struct mii_bus *bus;
int err;
+ if (external) {
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_g2_scratch_gpio_set_smi(chip, true);
+ mutex_unlock(&chip->reg_lock);
+
+ if (err)
+ return err;
+ }
+
bus = devm_mdiobus_alloc_size(chip->dev, sizeof(*mdio_bus));
if (!bus)
return -ENOMEM;
@@ -2472,6 +2593,7 @@ static const struct mv88e6xxx_ops mv88e6141_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .gpio_ops = &mv88e6352_gpio_ops,
};
static const struct mv88e6xxx_ops mv88e6161_ops = {
@@ -2602,6 +2724,7 @@ static const struct mv88e6xxx_ops mv88e6172_ops = {
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6352_serdes_power,
+ .gpio_ops = &mv88e6352_gpio_ops,
};
static const struct mv88e6xxx_ops mv88e6175_ops = {
@@ -2673,6 +2796,7 @@ static const struct mv88e6xxx_ops mv88e6176_ops = {
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6352_serdes_power,
+ .gpio_ops = &mv88e6352_gpio_ops,
};
static const struct mv88e6xxx_ops mv88e6185_ops = {
@@ -2736,6 +2860,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
+ .gpio_ops = &mv88e6352_gpio_ops,
};
static const struct mv88e6xxx_ops mv88e6190x_ops = {
@@ -2771,6 +2896,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
+ .gpio_ops = &mv88e6352_gpio_ops,
};
static const struct mv88e6xxx_ops mv88e6191_ops = {
@@ -2843,6 +2969,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6352_serdes_power,
+ .gpio_ops = &mv88e6352_gpio_ops,
+ .avb_ops = &mv88e6352_avb_ops,
};
static const struct mv88e6xxx_ops mv88e6290_ops = {
@@ -2879,6 +3007,8 @@ static const struct mv88e6xxx_ops mv88e6290_ops = {
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
+ .gpio_ops = &mv88e6352_gpio_ops,
+ .avb_ops = &mv88e6390_avb_ops,
};
static const struct mv88e6xxx_ops mv88e6320_ops = {
@@ -2913,6 +3043,8 @@ static const struct mv88e6xxx_ops mv88e6320_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+ .gpio_ops = &mv88e6352_gpio_ops,
+ .avb_ops = &mv88e6352_avb_ops,
};
static const struct mv88e6xxx_ops mv88e6321_ops = {
@@ -2945,6 +3077,8 @@ static const struct mv88e6xxx_ops mv88e6321_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6185_g1_vtu_getnext,
.vtu_loadpurge = mv88e6185_g1_vtu_loadpurge,
+ .gpio_ops = &mv88e6352_gpio_ops,
+ .avb_ops = &mv88e6352_avb_ops,
};
static const struct mv88e6xxx_ops mv88e6341_ops = {
@@ -2981,6 +3115,8 @@ static const struct mv88e6xxx_ops mv88e6341_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .gpio_ops = &mv88e6352_gpio_ops,
+ .avb_ops = &mv88e6390_avb_ops,
};
static const struct mv88e6xxx_ops mv88e6350_ops = {
@@ -3049,6 +3185,7 @@ static const struct mv88e6xxx_ops mv88e6351_ops = {
.reset = mv88e6352_g1_reset,
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
+ .avb_ops = &mv88e6352_avb_ops,
};
static const struct mv88e6xxx_ops mv88e6352_ops = {
@@ -3086,6 +3223,11 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.vtu_getnext = mv88e6352_g1_vtu_getnext,
.vtu_loadpurge = mv88e6352_g1_vtu_loadpurge,
.serdes_power = mv88e6352_serdes_power,
+ .gpio_ops = &mv88e6352_gpio_ops,
+ .avb_ops = &mv88e6352_avb_ops,
+ .serdes_get_sset_count = mv88e6352_serdes_get_sset_count,
+ .serdes_get_strings = mv88e6352_serdes_get_strings,
+ .serdes_get_stats = mv88e6352_serdes_get_stats,
};
static const struct mv88e6xxx_ops mv88e6390_ops = {
@@ -3124,6 +3266,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
+ .gpio_ops = &mv88e6352_gpio_ops,
+ .avb_ops = &mv88e6390_avb_ops,
};
static const struct mv88e6xxx_ops mv88e6390x_ops = {
@@ -3162,6 +3306,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
.vtu_getnext = mv88e6390_g1_vtu_getnext,
.vtu_loadpurge = mv88e6390_g1_vtu_loadpurge,
.serdes_power = mv88e6390_serdes_power,
+ .gpio_ops = &mv88e6352_gpio_ops,
+ .avb_ops = &mv88e6390_avb_ops,
};
static const struct mv88e6xxx_info mv88e6xxx_table[] = {
@@ -3267,6 +3413,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6341",
.num_databases = 4096,
.num_ports = 6,
+ .num_gpio = 11,
.max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
@@ -3346,6 +3493,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6172",
.num_databases = 4096,
.num_ports = 7,
+ .num_gpio = 15,
.max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
@@ -3386,6 +3534,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6176",
.num_databases = 4096,
.num_ports = 7,
+ .num_gpio = 15,
.max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
@@ -3424,6 +3573,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6190",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
+ .num_gpio = 16,
.max_vid = 8191,
.port_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -3444,6 +3594,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6190X",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
+ .num_gpio = 16,
.max_vid = 8191,
.port_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -3475,6 +3626,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.pvt = true,
.multi_chip = true,
.tag_protocol = DSA_TAG_PROTO_DSA,
+ .ptp_support = true,
.ops = &mv88e6191_ops,
},
@@ -3484,6 +3636,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6240",
.num_databases = 4096,
.num_ports = 7,
+ .num_gpio = 15,
.max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
@@ -3495,6 +3648,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.pvt = true,
.multi_chip = true,
.tag_protocol = DSA_TAG_PROTO_EDSA,
+ .ptp_support = true,
.ops = &mv88e6240_ops,
},
@@ -3504,6 +3658,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6290",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
+ .num_gpio = 16,
.max_vid = 8191,
.port_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -3515,6 +3670,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.pvt = true,
.multi_chip = true,
.tag_protocol = DSA_TAG_PROTO_DSA,
+ .ptp_support = true,
.ops = &mv88e6290_ops,
},
@@ -3524,6 +3680,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6320",
.num_databases = 4096,
.num_ports = 7,
+ .num_gpio = 15,
.max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
@@ -3534,6 +3691,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.pvt = true,
.multi_chip = true,
.tag_protocol = DSA_TAG_PROTO_EDSA,
+ .ptp_support = true,
.ops = &mv88e6320_ops,
},
@@ -3543,6 +3701,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6321",
.num_databases = 4096,
.num_ports = 7,
+ .num_gpio = 15,
.max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
@@ -3552,6 +3711,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.atu_move_port_mask = 0xf,
.multi_chip = true,
.tag_protocol = DSA_TAG_PROTO_EDSA,
+ .ptp_support = true,
.ops = &mv88e6321_ops,
},
@@ -3561,6 +3721,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6341",
.num_databases = 4096,
.num_ports = 6,
+ .num_gpio = 11,
.max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
@@ -3571,6 +3732,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.pvt = true,
.multi_chip = true,
.tag_protocol = DSA_TAG_PROTO_EDSA,
+ .ptp_support = true,
.ops = &mv88e6341_ops,
},
@@ -3620,6 +3782,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6352",
.num_databases = 4096,
.num_ports = 7,
+ .num_gpio = 15,
.max_vid = 4095,
.port_base_addr = 0x10,
.global1_addr = 0x1b,
@@ -3631,6 +3794,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.pvt = true,
.multi_chip = true,
.tag_protocol = DSA_TAG_PROTO_EDSA,
+ .ptp_support = true,
.ops = &mv88e6352_ops,
},
[MV88E6390] = {
@@ -3639,6 +3803,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6390",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
+ .num_gpio = 16,
.max_vid = 8191,
.port_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -3650,6 +3815,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.pvt = true,
.multi_chip = true,
.tag_protocol = DSA_TAG_PROTO_DSA,
+ .ptp_support = true,
.ops = &mv88e6390_ops,
},
[MV88E6390X] = {
@@ -3658,6 +3824,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.name = "Marvell 88E6390X",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
+ .num_gpio = 16,
.max_vid = 8191,
.port_base_addr = 0x0,
.global1_addr = 0x1b,
@@ -3669,6 +3836,7 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.pvt = true,
.multi_chip = true,
.tag_protocol = DSA_TAG_PROTO_DSA,
+ .ptp_support = true,
.ops = &mv88e6390x_ops,
},
};
@@ -3880,6 +4048,11 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.port_mdb_del = mv88e6xxx_port_mdb_del,
.crosschip_bridge_join = mv88e6xxx_crosschip_bridge_join,
.crosschip_bridge_leave = mv88e6xxx_crosschip_bridge_leave,
+ .port_hwtstamp_set = mv88e6xxx_port_hwtstamp_set,
+ .port_hwtstamp_get = mv88e6xxx_port_hwtstamp_get,
+ .port_txtstamp = mv88e6xxx_port_txtstamp,
+ .port_rxtstamp = mv88e6xxx_port_rxtstamp,
+ .get_ts_info = mv88e6xxx_get_ts_info,
};
static struct dsa_switch_driver mv88e6xxx_switch_drv = {
@@ -3959,33 +4132,34 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
goto out;
}
- if (chip->irq > 0) {
- /* Has to be performed before the MDIO bus is created,
- * because the PHYs will link there interrupts to these
- * interrupt controllers
- */
- mutex_lock(&chip->reg_lock);
+ /* Has to be performed before the MDIO bus is created, because
+ * the PHYs will link their interrupts to these interrupt
+ * controllers
+ */
+ mutex_lock(&chip->reg_lock);
+ if (chip->irq > 0)
err = mv88e6xxx_g1_irq_setup(chip);
- mutex_unlock(&chip->reg_lock);
-
- if (err)
- goto out;
-
- if (chip->info->g2_irqs > 0) {
- err = mv88e6xxx_g2_irq_setup(chip);
- if (err)
- goto out_g1_irq;
- }
+ else
+ err = mv88e6xxx_irq_poll_setup(chip);
+ mutex_unlock(&chip->reg_lock);
- err = mv88e6xxx_g1_atu_prob_irq_setup(chip);
- if (err)
- goto out_g2_irq;
+ if (err)
+ goto out;
- err = mv88e6xxx_g1_vtu_prob_irq_setup(chip);
+ if (chip->info->g2_irqs > 0) {
+ err = mv88e6xxx_g2_irq_setup(chip);
if (err)
- goto out_g1_atu_prob_irq;
+ goto out_g1_irq;
}
+ err = mv88e6xxx_g1_atu_prob_irq_setup(chip);
+ if (err)
+ goto out_g2_irq;
+
+ err = mv88e6xxx_g1_vtu_prob_irq_setup(chip);
+ if (err)
+ goto out_g1_atu_prob_irq;
+
err = mv88e6xxx_mdios_register(chip, np);
if (err)
goto out_g1_vtu_prob_irq;
@@ -3999,20 +4173,19 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
out_mdio:
mv88e6xxx_mdios_unregister(chip);
out_g1_vtu_prob_irq:
- if (chip->irq > 0)
- mv88e6xxx_g1_vtu_prob_irq_free(chip);
+ mv88e6xxx_g1_vtu_prob_irq_free(chip);
out_g1_atu_prob_irq:
- if (chip->irq > 0)
- mv88e6xxx_g1_atu_prob_irq_free(chip);
+ mv88e6xxx_g1_atu_prob_irq_free(chip);
out_g2_irq:
- if (chip->info->g2_irqs > 0 && chip->irq > 0)
+ if (chip->info->g2_irqs > 0)
mv88e6xxx_g2_irq_free(chip);
out_g1_irq:
- if (chip->irq > 0) {
- mutex_lock(&chip->reg_lock);
+ mutex_lock(&chip->reg_lock);
+ if (chip->irq > 0)
mv88e6xxx_g1_irq_free(chip);
- mutex_unlock(&chip->reg_lock);
- }
+ else
+ mv88e6xxx_irq_poll_free(chip);
+ mutex_unlock(&chip->reg_lock);
out:
return err;
}
@@ -4022,6 +4195,11 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
struct dsa_switch *ds = dev_get_drvdata(&mdiodev->dev);
struct mv88e6xxx_chip *chip = ds->priv;
+ if (chip->info->ptp_support) {
+ mv88e6xxx_hwtstamp_free(chip);
+ mv88e6xxx_ptp_free(chip);
+ }
+
mv88e6xxx_phy_destroy(chip);
mv88e6xxx_unregister_switch(chip);
mv88e6xxx_mdios_unregister(chip);
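Several of the chip.c changes above share one theme: the ethtool stats callbacks now return how many entries they produced, so that SERDES statistics can be appended after the MIB counters in the same string and data arrays. A minimal sketch of that layout contract; the fill_* helpers are illustrative placeholders, ETH_GSTRING_LEN is the real fixed width of an ethtool name slot:

#include <linux/ethtool.h>

/* Sketch: two stats sources sharing ethtool's single string/data
 * arrays. Only the offsets matter; the helpers are hypothetical.
 */
void fill_mib_strings(u8 *strings);
void fill_mib_stats(u64 *data);
void fill_serdes_strings(u8 *strings);
void fill_serdes_stats(u64 *data);

static void example_fill(u8 *strings, u64 *data,
			 int mib_count, int serdes_count)
{
	fill_mib_strings(strings);	/* slots [0, mib_count) */
	fill_mib_stats(data);

	/* the second source continues where the first stopped; each
	 * string slot is ETH_GSTRING_LEN bytes wide
	 */
	fill_serdes_strings(strings + mib_count * ETH_GSTRING_LEN);
	fill_serdes_stats(data + mib_count);
}

The sset count reported to ethtool must then be mib_count + serdes_count, which is what the reworked mv88e6xxx_get_sset_count() computes.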
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index 3dba6e90adcf..26b9a618cdee 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -15,7 +15,10 @@
#include <linux/if_vlan.h>
#include <linux/irq.h>
#include <linux/gpio/consumer.h>
+#include <linux/kthread.h>
#include <linux/phy.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
#include <net/dsa.h>
#ifndef UINT64_MAX
@@ -39,6 +42,8 @@
#define MV88E6XXX_MAX_PVT_SWITCHES 32
#define MV88E6XXX_MAX_PVT_PORTS 16
+#define MV88E6XXX_MAX_GPIO 16
+
enum mv88e6xxx_egress_mode {
MV88E6XXX_EGRESS_MODE_UNMODIFIED,
MV88E6XXX_EGRESS_MODE_UNTAGGED,
@@ -105,6 +110,7 @@ struct mv88e6xxx_info {
const char *name;
unsigned int num_databases;
unsigned int num_ports;
+ unsigned int num_gpio;
unsigned int max_vid;
unsigned int port_base_addr;
unsigned int global1_addr;
@@ -126,6 +132,9 @@ struct mv88e6xxx_info {
*/
u8 atu_move_port_mask;
const struct mv88e6xxx_ops *ops;
+
+ /* Supports PTP */
+ bool ptp_support;
};
struct mv88e6xxx_atu_entry {
@@ -146,6 +155,8 @@ struct mv88e6xxx_vtu_entry {
struct mv88e6xxx_bus_ops;
struct mv88e6xxx_irq_ops;
+struct mv88e6xxx_gpio_ops;
+struct mv88e6xxx_avb_ops;
struct mv88e6xxx_irq {
u16 masked;
@@ -154,6 +165,36 @@ struct mv88e6xxx_irq {
unsigned int nirqs;
};
+/* state flags for mv88e6xxx_port_hwtstamp::state */
+enum {
+ MV88E6XXX_HWTSTAMP_ENABLED,
+ MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS,
+};
+
+struct mv88e6xxx_port_hwtstamp {
+ /* Port index */
+ int port_id;
+
+ /* Timestamping state */
+ unsigned long state;
+
+ /* Resources for receive timestamping */
+ struct sk_buff_head rx_queue;
+ struct sk_buff_head rx_queue2;
+
+ /* Resources for transmit timestamping */
+ unsigned long tx_tstamp_start;
+ struct sk_buff *tx_skb;
+ u16 tx_seq_id;
+
+ /* Current timestamp configuration */
+ struct hwtstamp_config tstamp_config;
+};
+
+struct mv88e6xxx_port {
+ u64 serdes_stats[2];
+};
+
struct mv88e6xxx_chip {
const struct mv88e6xxx_info *info;
@@ -207,8 +248,34 @@ struct mv88e6xxx_chip {
int irq;
int device_irq;
int watchdog_irq;
+
int atu_prob_irq;
int vtu_prob_irq;
+ struct kthread_worker *kworker;
+ struct kthread_delayed_work irq_poll_work;
+
+ /* GPIO resources */
+ u8 gpio_data[2];
+
+ /* This cyclecounter abstracts the switch PTP time.
+ * reg_lock must be held for any operation that read()s.
+ */
+ struct cyclecounter tstamp_cc;
+ struct timecounter tstamp_tc;
+ struct delayed_work overflow_work;
+
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_info;
+ struct delayed_work tai_event_work;
+ struct ptp_pin_desc pin_config[MV88E6XXX_MAX_GPIO];
+ u16 trig_config;
+ u16 evcap_config;
+
+ /* Per-port timestamping resources. */
+ struct mv88e6xxx_port_hwtstamp port_hwtstamp[DSA_MAX_PORTS];
+
+ /* Array of port structures. */
+ struct mv88e6xxx_port ports[DSA_MAX_PORTS];
};
struct mv88e6xxx_bus_ops {
@@ -327,9 +394,9 @@ struct mv88e6xxx_ops {
/* Return the number of strings describing statistics */
int (*stats_get_sset_count)(struct mv88e6xxx_chip *chip);
- void (*stats_get_strings)(struct mv88e6xxx_chip *chip, uint8_t *data);
- void (*stats_get_stats)(struct mv88e6xxx_chip *chip, int port,
- uint64_t *data);
+ int (*stats_get_strings)(struct mv88e6xxx_chip *chip, uint8_t *data);
+ int (*stats_get_stats)(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data);
int (*set_cpu_port)(struct mv88e6xxx_chip *chip, int port);
int (*set_egress_port)(struct mv88e6xxx_chip *chip, int port);
const struct mv88e6xxx_irq_ops *watchdog_ops;
@@ -339,11 +406,24 @@ struct mv88e6xxx_ops {
/* Power on/off a SERDES interface */
int (*serdes_power)(struct mv88e6xxx_chip *chip, int port, bool on);
+ /* Statistics from the SERDES interface */
+ int (*serdes_get_sset_count)(struct mv88e6xxx_chip *chip, int port);
+ void (*serdes_get_strings)(struct mv88e6xxx_chip *chip, int port,
+ uint8_t *data);
+ void (*serdes_get_stats)(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data);
+
/* VLAN Translation Unit operations */
int (*vtu_getnext)(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
int (*vtu_loadpurge)(struct mv88e6xxx_chip *chip,
struct mv88e6xxx_vtu_entry *entry);
+
+ /* GPIO operations */
+ const struct mv88e6xxx_gpio_ops *gpio_ops;
+
+ /* Interface to the AVB/PTP registers */
+ const struct mv88e6xxx_avb_ops *avb_ops;
};
struct mv88e6xxx_irq_ops {
@@ -355,13 +435,49 @@ struct mv88e6xxx_irq_ops {
void (*irq_free)(struct mv88e6xxx_chip *chip);
};
+struct mv88e6xxx_gpio_ops {
+ /* Get/set data on GPIO pin */
+ int (*get_data)(struct mv88e6xxx_chip *chip, unsigned int pin);
+ int (*set_data)(struct mv88e6xxx_chip *chip, unsigned int pin,
+ int value);
+
+ /* get/set GPIO direction */
+ int (*get_dir)(struct mv88e6xxx_chip *chip, unsigned int pin);
+ int (*set_dir)(struct mv88e6xxx_chip *chip, unsigned int pin,
+ bool input);
+
+ /* get/set GPIO pin control */
+ int (*get_pctl)(struct mv88e6xxx_chip *chip, unsigned int pin,
+ int *func);
+ int (*set_pctl)(struct mv88e6xxx_chip *chip, unsigned int pin,
+ int func);
+};
+
+struct mv88e6xxx_avb_ops {
+ /* Access port-scoped Precision Time Protocol registers */
+ int (*port_ptp_read)(struct mv88e6xxx_chip *chip, int port, int addr,
+ u16 *data, int len);
+ int (*port_ptp_write)(struct mv88e6xxx_chip *chip, int port, int addr,
+ u16 data);
+
+ /* Access global Precision Time Protocol registers */
+ int (*ptp_read)(struct mv88e6xxx_chip *chip, int addr, u16 *data,
+ int len);
+ int (*ptp_write)(struct mv88e6xxx_chip *chip, int addr, u16 data);
+
+ /* Access global Time Application Interface registers */
+ int (*tai_read)(struct mv88e6xxx_chip *chip, int addr, u16 *data,
+ int len);
+ int (*tai_write)(struct mv88e6xxx_chip *chip, int addr, u16 data);
+};
+
#define STATS_TYPE_PORT BIT(0)
#define STATS_TYPE_BANK0 BIT(1)
#define STATS_TYPE_BANK1 BIT(2)
struct mv88e6xxx_hw_stat {
char string[ETH_GSTRING_LEN];
- int sizeof_stat;
+ size_t size;
int reg;
int type;
};
@@ -386,6 +502,11 @@ static inline u16 mv88e6xxx_port_mask(struct mv88e6xxx_chip *chip)
return GENMASK(mv88e6xxx_num_ports(chip) - 1, 0);
}
+static inline unsigned int mv88e6xxx_num_gpio(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->num_gpio;
+}
+
int mv88e6xxx_read(struct mv88e6xxx_chip *chip, int addr, int reg, u16 *val);
int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
int mv88e6xxx_update(struct mv88e6xxx_chip *chip, int addr, int reg,
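The tstamp_cc/tstamp_tc pair added to struct mv88e6xxx_chip above follows the standard kernel pattern for exposing a free-running hardware counter as nanosecond time. A minimal sketch of that pattern; the cyclecounter/timecounter API is from <linux/timecounter.h>, while read_hw_counter() and the mult/shift values are stand-ins for the chip-specific parts:

#include <linux/timecounter.h>

/* Sketch: wrapping a raw 32-bit free-running counter in a
 * cyclecounter + timecounter, as the switch PTP support does.
 */
static u64 read_hw_counter(const struct cyclecounter *cc)
{
	return 0;	/* would read the switch's PTP global time here */
}

static struct cyclecounter example_cc = {
	.read	= read_hw_counter,
	.mask	= CYCLECOUNTER_MASK(32),	/* counter width */
	.mult	= 1 << 28,			/* mult/shift convert */
	.shift	= 28,				/* raw ticks to ns */
};

static struct timecounter example_tc;

static void example_init(u64 start_ns)
{
	/* the timecounter accumulates counter deltas into 64-bit ns */
	timecounter_init(&example_tc, &example_cc, start_ns);
}

static u64 example_gettime_ns(void)
{
	/* must run often enough that the 32-bit counter cannot wrap
	 * twice between reads; the overflow_work above ensures this
	 */
	return timecounter_read(&example_tc);
}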
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index af0727877825..5f370f1fc7c4 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -20,22 +20,22 @@
#include "global1.h" /* for MV88E6XXX_G1_STS_IRQ_DEVICE */
#include "global2.h"
-static int mv88e6xxx_g2_read(struct mv88e6xxx_chip *chip, int reg, u16 *val)
+int mv88e6xxx_g2_read(struct mv88e6xxx_chip *chip, int reg, u16 *val)
{
return mv88e6xxx_read(chip, chip->info->global2_addr, reg, val);
}
-static int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val)
+int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val)
{
return mv88e6xxx_write(chip, chip->info->global2_addr, reg, val);
}
-static int mv88e6xxx_g2_update(struct mv88e6xxx_chip *chip, int reg, u16 update)
+int mv88e6xxx_g2_update(struct mv88e6xxx_chip *chip, int reg, u16 update)
{
return mv88e6xxx_update(chip, chip->info->global2_addr, reg, update);
}
-static int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask)
+int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask)
{
return mv88e6xxx_wait(chip, chip->info->global2_addr, reg, mask);
}
@@ -798,6 +798,7 @@ int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, struct mii_bus *bus,
val);
}
+/* Offset 0x1B: Watchdog Control */
static int mv88e6097_watchdog_action(struct mv88e6xxx_chip *chip, int irq)
{
u16 reg;
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 669f59017b12..aa3f0a736966 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -149,7 +149,26 @@
#define MV88E6390_G2_EEPROM_ADDR_MASK 0xffff
/* Offset 0x16: AVB Command Register */
-#define MV88E6352_G2_AVB_CMD 0x16
+#define MV88E6352_G2_AVB_CMD 0x16
+#define MV88E6352_G2_AVB_CMD_BUSY 0x8000
+#define MV88E6352_G2_AVB_CMD_OP_READ 0x4000
+#define MV88E6352_G2_AVB_CMD_OP_READ_INCR 0x6000
+#define MV88E6352_G2_AVB_CMD_OP_WRITE 0x3000
+#define MV88E6390_G2_AVB_CMD_OP_READ 0x0000
+#define MV88E6390_G2_AVB_CMD_OP_READ_INCR 0x4000
+#define MV88E6390_G2_AVB_CMD_OP_WRITE 0x6000
+#define MV88E6352_G2_AVB_CMD_PORT_MASK 0x0f00
+#define MV88E6352_G2_AVB_CMD_PORT_TAIGLOBAL 0xe
+#define MV88E6352_G2_AVB_CMD_PORT_PTPGLOBAL 0xf
+#define MV88E6390_G2_AVB_CMD_PORT_MASK 0x1f00
+#define MV88E6390_G2_AVB_CMD_PORT_TAIGLOBAL 0x1e
+#define MV88E6390_G2_AVB_CMD_PORT_PTPGLOBAL 0x1f
+#define MV88E6352_G2_AVB_CMD_BLOCK_PTP 0
+#define MV88E6352_G2_AVB_CMD_BLOCK_AVB 1
+#define MV88E6352_G2_AVB_CMD_BLOCK_QAV 2
+#define MV88E6352_G2_AVB_CMD_BLOCK_QVB 3
+#define MV88E6352_G2_AVB_CMD_BLOCK_MASK 0x00e0
+#define MV88E6352_G2_AVB_CMD_ADDR_MASK 0x001f
/* Offset 0x17: AVB Data Register */
#define MV88E6352_G2_AVB_DATA 0x17
@@ -223,6 +242,40 @@
#define MV88E6352_G2_NOEGR_POLICY 0x2000
#define MV88E6390_G2_LAG_ID_4 0x2000
+/* Scratch/Misc registers accessed through MV88E6XXX_G2_SCRATCH_MISC */
+/* Offset 0x02: Misc Configuration */
+#define MV88E6352_G2_SCRATCH_MISC_CFG 0x02
+#define MV88E6352_G2_SCRATCH_MISC_CFG_NORMALSMI 0x80
+/* Offset 0x60-0x61: GPIO Configuration */
+#define MV88E6352_G2_SCRATCH_GPIO_CFG0 0x60
+#define MV88E6352_G2_SCRATCH_GPIO_CFG1 0x61
+/* Offset 0x62-0x63: GPIO Direction */
+#define MV88E6352_G2_SCRATCH_GPIO_DIR0 0x62
+#define MV88E6352_G2_SCRATCH_GPIO_DIR1 0x63
+#define MV88E6352_G2_SCRATCH_GPIO_DIR_OUT 0
+#define MV88E6352_G2_SCRATCH_GPIO_DIR_IN 1
+/* Offset 0x64-0x65: GPIO Data */
+#define MV88E6352_G2_SCRATCH_GPIO_DATA0 0x64
+#define MV88E6352_G2_SCRATCH_GPIO_DATA1 0x65
+/* Offset 0x68-0x6F: GPIO Pin Control */
+#define MV88E6352_G2_SCRATCH_GPIO_PCTL0 0x68
+#define MV88E6352_G2_SCRATCH_GPIO_PCTL1 0x69
+#define MV88E6352_G2_SCRATCH_GPIO_PCTL2 0x6A
+#define MV88E6352_G2_SCRATCH_GPIO_PCTL3 0x6B
+#define MV88E6352_G2_SCRATCH_GPIO_PCTL4 0x6C
+#define MV88E6352_G2_SCRATCH_GPIO_PCTL5 0x6D
+#define MV88E6352_G2_SCRATCH_GPIO_PCTL6 0x6E
+#define MV88E6352_G2_SCRATCH_GPIO_PCTL7 0x6F
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA0 0x70
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA1 0x71
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA1_NO_CPU BIT(2)
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA2 0x72
+#define MV88E6352_G2_SCRATCH_CONFIG_DATA2_P0_MODE_MASK 0x3
+
+#define MV88E6352_G2_SCRATCH_GPIO_PCTL_GPIO 0
+#define MV88E6352_G2_SCRATCH_GPIO_PCTL_TRIG 1
+#define MV88E6352_G2_SCRATCH_GPIO_PCTL_EVREQ 2
+
#ifdef CONFIG_NET_DSA_MV88E6XXX_GLOBAL2
static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip)
@@ -230,6 +283,11 @@ static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip)
return 0;
}
+int mv88e6xxx_g2_read(struct mv88e6xxx_chip *chip, int reg, u16 *val);
+int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val);
+int mv88e6xxx_g2_update(struct mv88e6xxx_chip *chip, int reg, u16 update);
+int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask);
+
int mv88e6352_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port);
int mv88e6390_g2_irl_init_all(struct mv88e6xxx_chip *chip, int port);
@@ -267,6 +325,14 @@ int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip);
extern const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops;
extern const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops;
+extern const struct mv88e6xxx_avb_ops mv88e6352_avb_ops;
+extern const struct mv88e6xxx_avb_ops mv88e6390_avb_ops;
+
+extern const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops;
+
+int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+ bool external);
+
#else /* !CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip)
@@ -279,6 +345,26 @@ static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip)
return 0;
}
+static inline int mv88e6xxx_g2_read(struct mv88e6xxx_chip *chip, int reg, u16 *val)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mv88e6xxx_g2_write(struct mv88e6xxx_chip *chip, int reg, u16 val)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mv88e6xxx_g2_update(struct mv88e6xxx_chip *chip, int reg, u16 update)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mv88e6xxx_g2_wait(struct mv88e6xxx_chip *chip, int reg, u16 mask)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int mv88e6352_g2_irl_init_all(struct mv88e6xxx_chip *chip,
int port)
{
@@ -382,6 +468,17 @@ static inline int mv88e6xxx_g2_pot_clear(struct mv88e6xxx_chip *chip)
static const struct mv88e6xxx_irq_ops mv88e6097_watchdog_ops = {};
static const struct mv88e6xxx_irq_ops mv88e6390_watchdog_ops = {};
+static const struct mv88e6xxx_avb_ops mv88e6352_avb_ops = {};
+static const struct mv88e6xxx_avb_ops mv88e6390_avb_ops = {};
+
+static const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops = {};
+
+static inline int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+ bool external)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_NET_DSA_MV88E6XXX_GLOBAL2 */
#endif /* _MV88E6XXX_GLOBAL2_H */
diff --git a/drivers/net/dsa/mv88e6xxx/global2_avb.c b/drivers/net/dsa/mv88e6xxx/global2_avb.c
new file mode 100644
index 000000000000..2e398ccb88ca
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/global2_avb.c
@@ -0,0 +1,193 @@
+/*
+ * Marvell 88E6xxx Switch Global 2 Registers support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2016-2017 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
+ * Copyright (c) 2017 National Instruments
+ * Brandon Streiff <brandon.streiff@ni.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "global2.h"
+
+/* Offset 0x16: AVB Command Register
+ * Offset 0x17: AVB Data Register
+ *
+ * There are two different versions of this register interface:
+ * "6352": 3-bit "op" field, 4-bit "port" field.
+ * "6390": 2-bit "op" field, 5-bit "port" field.
+ *
+ * The "op" codes are different between the two, as well as the special
+ * port fields for global PTP and TAI configuration.
+ */
+
+/* mv88e6xxx_g2_avb_read -- Read one or multiple 16-bit words.
+ * The hardware supports snapshotting up to four contiguous registers.
+ */
+static int mv88e6xxx_g2_avb_read(struct mv88e6xxx_chip *chip, u16 readop,
+ u16 *data, int len)
+{
+ int err;
+ int i;
+
+ /* Hardware can only snapshot four words. */
+ if (len > 4)
+ return -E2BIG;
+
+ err = mv88e6xxx_g2_update(chip, MV88E6352_G2_AVB_CMD, readop);
+ if (err)
+ return err;
+
+ for (i = 0; i < len; ++i) {
+ err = mv88e6xxx_g2_read(chip, MV88E6352_G2_AVB_DATA,
+ &data[i]);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* mv88e6xxx_g2_avb_write -- Write one 16-bit word. */
+static int mv88e6xxx_g2_avb_write(struct mv88e6xxx_chip *chip, u16 writeop,
+ u16 data)
+{
+ int err;
+
+ err = mv88e6xxx_g2_write(chip, MV88E6352_G2_AVB_DATA, data);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_update(chip, MV88E6352_G2_AVB_CMD, writeop);
+}
+
+static int mv88e6352_g2_avb_port_ptp_read(struct mv88e6xxx_chip *chip,
+ int port, int addr, u16 *data,
+ int len)
+{
+ u16 readop = (len == 1 ? MV88E6352_G2_AVB_CMD_OP_READ :
+ MV88E6352_G2_AVB_CMD_OP_READ_INCR) |
+ (port << 8) | (MV88E6352_G2_AVB_CMD_BLOCK_PTP << 5) |
+ addr;
+
+ return mv88e6xxx_g2_avb_read(chip, readop, data, len);
+}
+
+static int mv88e6352_g2_avb_port_ptp_write(struct mv88e6xxx_chip *chip,
+ int port, int addr, u16 data)
+{
+ u16 writeop = MV88E6352_G2_AVB_CMD_OP_WRITE | (port << 8) |
+ (MV88E6352_G2_AVB_CMD_BLOCK_PTP << 5) | addr;
+
+ return mv88e6xxx_g2_avb_write(chip, writeop, data);
+}
+
+static int mv88e6352_g2_avb_ptp_read(struct mv88e6xxx_chip *chip, int addr,
+ u16 *data, int len)
+{
+ return mv88e6352_g2_avb_port_ptp_read(chip,
+ MV88E6352_G2_AVB_CMD_PORT_PTPGLOBAL,
+ addr, data, len);
+}
+
+static int mv88e6352_g2_avb_ptp_write(struct mv88e6xxx_chip *chip, int addr,
+ u16 data)
+{
+ return mv88e6352_g2_avb_port_ptp_write(chip,
+ MV88E6352_G2_AVB_CMD_PORT_PTPGLOBAL,
+ addr, data);
+}
+
+static int mv88e6352_g2_avb_tai_read(struct mv88e6xxx_chip *chip, int addr,
+ u16 *data, int len)
+{
+ return mv88e6352_g2_avb_port_ptp_read(chip,
+ MV88E6352_G2_AVB_CMD_PORT_TAIGLOBAL,
+ addr, data, len);
+}
+
+static int mv88e6352_g2_avb_tai_write(struct mv88e6xxx_chip *chip, int addr,
+ u16 data)
+{
+ return mv88e6352_g2_avb_port_ptp_write(chip,
+ MV88E6352_G2_AVB_CMD_PORT_TAIGLOBAL,
+ addr, data);
+}
+
+const struct mv88e6xxx_avb_ops mv88e6352_avb_ops = {
+ .port_ptp_read = mv88e6352_g2_avb_port_ptp_read,
+ .port_ptp_write = mv88e6352_g2_avb_port_ptp_write,
+ .ptp_read = mv88e6352_g2_avb_ptp_read,
+ .ptp_write = mv88e6352_g2_avb_ptp_write,
+ .tai_read = mv88e6352_g2_avb_tai_read,
+ .tai_write = mv88e6352_g2_avb_tai_write,
+};
+
+static int mv88e6390_g2_avb_port_ptp_read(struct mv88e6xxx_chip *chip,
+ int port, int addr, u16 *data,
+ int len)
+{
+ u16 readop = (len == 1 ? MV88E6390_G2_AVB_CMD_OP_READ :
+ MV88E6390_G2_AVB_CMD_OP_READ_INCR) |
+ (port << 8) | (MV88E6352_G2_AVB_CMD_BLOCK_PTP << 5) |
+ addr;
+
+ return mv88e6xxx_g2_avb_read(chip, readop, data, len);
+}
+
+static int mv88e6390_g2_avb_port_ptp_write(struct mv88e6xxx_chip *chip,
+ int port, int addr, u16 data)
+{
+ u16 writeop = MV88E6390_G2_AVB_CMD_OP_WRITE | (port << 8) |
+ (MV88E6352_G2_AVB_CMD_BLOCK_PTP << 5) | addr;
+
+ return mv88e6xxx_g2_avb_write(chip, writeop, data);
+}
+
+static int mv88e6390_g2_avb_ptp_read(struct mv88e6xxx_chip *chip, int addr,
+ u16 *data, int len)
+{
+ return mv88e6390_g2_avb_port_ptp_read(chip,
+ MV88E6390_G2_AVB_CMD_PORT_PTPGLOBAL,
+ addr, data, len);
+}
+
+static int mv88e6390_g2_avb_ptp_write(struct mv88e6xxx_chip *chip, int addr,
+ u16 data)
+{
+ return mv88e6390_g2_avb_port_ptp_write(chip,
+ MV88E6390_G2_AVB_CMD_PORT_PTPGLOBAL,
+ addr, data);
+}
+
+static int mv88e6390_g2_avb_tai_read(struct mv88e6xxx_chip *chip, int addr,
+ u16 *data, int len)
+{
+ return mv88e6390_g2_avb_port_ptp_read(chip,
+ MV88E6390_G2_AVB_CMD_PORT_TAIGLOBAL,
+ addr, data, len);
+}
+
+static int mv88e6390_g2_avb_tai_write(struct mv88e6xxx_chip *chip, int addr,
+ u16 data)
+{
+ return mv88e6390_g2_avb_port_ptp_write(chip,
+ MV88E6390_G2_AVB_CMD_PORT_TAIGLOBAL,
+ addr, data);
+}
+
+const struct mv88e6xxx_avb_ops mv88e6390_avb_ops = {
+ .port_ptp_read = mv88e6390_g2_avb_port_ptp_read,
+ .port_ptp_write = mv88e6390_g2_avb_port_ptp_write,
+ .ptp_read = mv88e6390_g2_avb_ptp_read,
+ .ptp_write = mv88e6390_g2_avb_ptp_write,
+ .tai_read = mv88e6390_g2_avb_tai_read,
+ .tai_write = mv88e6390_g2_avb_tai_write,
+};
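
The two ops tables differ only in their opcode encodings; the command word itself is packed the same way on both families. A minimal sketch of that packing, mirroring the readop/writeop composition above (the helper name is illustrative, not part of the patch):

	/* Pack an AVB command word: opcode | port | register block | address,
	 * exactly as the mv88e6352/mv88e6390 helpers above compose it.
	 */
	static u16 avb_cmd_pack(u16 op, int port, int block, int addr)
	{
		return op | (port << 8) | (block << 5) | addr;
	}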
diff --git a/drivers/net/dsa/mv88e6xxx/global2_scratch.c b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
new file mode 100644
index 000000000000..3f92b8892dc7
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/global2_scratch.c
@@ -0,0 +1,291 @@
+/*
+ * Marvell 88E6xxx Switch Global 2 Scratch & Misc Registers support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 National Instruments
+ * Brandon Streiff <brandon.streiff@ni.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "chip.h"
+#include "global2.h"
+
+/* Offset 0x1A: Scratch and Misc. Register */
+static int mv88e6xxx_g2_scratch_read(struct mv88e6xxx_chip *chip, int reg,
+ u8 *data)
+{
+ u16 value;
+ int err;
+
+ err = mv88e6xxx_g2_write(chip, MV88E6XXX_G2_SCRATCH_MISC_MISC,
+ reg << 8);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_read(chip, MV88E6XXX_G2_SCRATCH_MISC_MISC, &value);
+ if (err)
+ return err;
+
+ *data = (value & MV88E6XXX_G2_SCRATCH_MISC_DATA_MASK);
+
+ return 0;
+}
+
+static int mv88e6xxx_g2_scratch_write(struct mv88e6xxx_chip *chip, int reg,
+ u8 data)
+{
+ u16 value = (reg << 8) | data;
+
+ return mv88e6xxx_g2_update(chip, MV88E6XXX_G2_SCRATCH_MISC_MISC, value);
+}
+
+/**
+ * mv88e6xxx_g2_scratch_get_bit - get a bit
+ * @chip: chip private data
+ * @base_reg: base of scratch bits
+ * @offset: index of bit within the register set
+ * @set: is bit set?
+ */
+static int mv88e6xxx_g2_scratch_get_bit(struct mv88e6xxx_chip *chip,
+ int base_reg, unsigned int offset,
+ int *set)
+{
+ int reg = base_reg + (offset / 8);
+ u8 mask = (1 << (offset & 0x7));
+ u8 val;
+ int err;
+
+ err = mv88e6xxx_g2_scratch_read(chip, reg, &val);
+ if (err)
+ return err;
+
+ *set = !!(mask & val);
+
+ return 0;
+}
+
+/**
+ * mv88e6xxx_g2_scratch_set_bit - set (or clear) a bit
+ * @chip: chip private data
+ * @base_reg: base of scratch bits
+ * @offset: index of bit within the register set
+ * @set: set if true, clear if false
+ *
+ * Helper function for dealing with the direction and data registers.
+ */
+static int mv88e6xxx_g2_scratch_set_bit(struct mv88e6xxx_chip *chip,
+ int base_reg, unsigned int offset,
+ int set)
+{
+ int reg = base_reg + (offset / 8);
+ u8 mask = (1 << (offset & 0x7));
+ u8 val;
+ int err;
+
+ err = mv88e6xxx_g2_scratch_read(chip, reg, &val);
+ if (err)
+ return err;
+
+ if (set)
+ val |= mask;
+ else
+ val &= ~mask;
+
+ return mv88e6xxx_g2_scratch_write(chip, reg, val);
+}
+
+/**
+ * mv88e6352_g2_scratch_gpio_get_data - get data on gpio pin
+ * @chip: chip private data
+ * @pin: gpio index
+ *
+ * Return: 0 for low, 1 for high, negative error
+ */
+static int mv88e6352_g2_scratch_gpio_get_data(struct mv88e6xxx_chip *chip,
+ unsigned int pin)
+{
+ int val = 0;
+ int err;
+
+ err = mv88e6xxx_g2_scratch_get_bit(chip,
+ MV88E6352_G2_SCRATCH_GPIO_DATA0,
+ pin, &val);
+ if (err)
+ return err;
+
+ return val;
+}
+
+/**
+ * mv88e6352_g2_scratch_gpio_set_data - set data on gpio pin
+ * @chip: chip private data
+ * @pin: gpio index
+ * @value: value to set
+ */
+static int mv88e6352_g2_scratch_gpio_set_data(struct mv88e6xxx_chip *chip,
+ unsigned int pin, int value)
+{
+ u8 mask = (1 << (pin & 0x7));
+ int offset = (pin / 8);
+ int reg;
+
+ reg = MV88E6352_G2_SCRATCH_GPIO_DATA0 + offset;
+
+ if (value)
+ chip->gpio_data[offset] |= mask;
+ else
+ chip->gpio_data[offset] &= ~mask;
+
+ return mv88e6xxx_g2_scratch_write(chip, reg, chip->gpio_data[offset]);
+}
+
+/**
+ * mv88e6352_g2_scratch_gpio_get_dir - get direction of gpio pin
+ * @chip: chip private data
+ * @pin: gpio index
+ *
+ * Return: 0 for output, 1 for input (same as GPIOF_DIR_XXX).
+ */
+static int mv88e6352_g2_scratch_gpio_get_dir(struct mv88e6xxx_chip *chip,
+ unsigned int pin)
+{
+ int val = 0;
+ int err;
+
+ err = mv88e6xxx_g2_scratch_get_bit(chip,
+ MV88E6352_G2_SCRATCH_GPIO_DIR0,
+ pin, &val);
+ if (err)
+ return err;
+
+ return val;
+}
+
+/**
+ * mv88e6352_g2_scratch_gpio_set_dir - set direction of gpio pin
+ * @chip: chip private data
+ * @pin: gpio index
+ * @input: should the gpio be an input, or an output?
+ */
+static int mv88e6352_g2_scratch_gpio_set_dir(struct mv88e6xxx_chip *chip,
+ unsigned int pin, bool input)
+{
+ int value = (input ? MV88E6352_G2_SCRATCH_GPIO_DIR_IN :
+ MV88E6352_G2_SCRATCH_GPIO_DIR_OUT);
+
+ return mv88e6xxx_g2_scratch_set_bit(chip,
+ MV88E6352_G2_SCRATCH_GPIO_DIR0,
+ pin, value);
+}
+
+/**
+ * mv88e6352_g2_scratch_gpio_get_pctl - get pin control setting
+ * @chip: chip private data
+ * @pin: gpio index
+ * @func: function number
+ *
+ * Note that the function numbers themselves may vary by chipset.
+ */
+static int mv88e6352_g2_scratch_gpio_get_pctl(struct mv88e6xxx_chip *chip,
+ unsigned int pin, int *func)
+{
+ int reg = MV88E6352_G2_SCRATCH_GPIO_PCTL0 + (pin / 2);
+ int offset = (pin & 0x1) ? 4 : 0;
+ u8 mask = (0x7 << offset);
+ int err;
+ u8 val;
+
+ err = mv88e6xxx_g2_scratch_read(chip, reg, &val);
+ if (err)
+ return err;
+
+ *func = (val & mask) >> offset;
+
+ return 0;
+}
+
+/**
+ * mv88e6352_g2_scratch_gpio_set_pctl - set pin control setting
+ * @chip: chip private data
+ * @pin: gpio index
+ * @func: function number
+ */
+static int mv88e6352_g2_scratch_gpio_set_pctl(struct mv88e6xxx_chip *chip,
+ unsigned int pin, int func)
+{
+ int reg = MV88E6352_G2_SCRATCH_GPIO_PCTL0 + (pin / 2);
+ int offset = (pin & 0x1) ? 4 : 0;
+ u8 mask = (0x7 << offset);
+ int err;
+ u8 val;
+
+ err = mv88e6xxx_g2_scratch_read(chip, reg, &val);
+ if (err)
+ return err;
+
+ val = (val & ~mask) | ((func << offset) & mask);
+
+ return mv88e6xxx_g2_scratch_write(chip, reg, val);
+}
+
+const struct mv88e6xxx_gpio_ops mv88e6352_gpio_ops = {
+ .get_data = mv88e6352_g2_scratch_gpio_get_data,
+ .set_data = mv88e6352_g2_scratch_gpio_set_data,
+ .get_dir = mv88e6352_g2_scratch_gpio_get_dir,
+ .set_dir = mv88e6352_g2_scratch_gpio_set_dir,
+ .get_pctl = mv88e6352_g2_scratch_gpio_get_pctl,
+ .set_pctl = mv88e6352_g2_scratch_gpio_set_pctl,
+};
+
+/**
+ * mv88e6xxx_g2_scratch_gpio_set_smi - set gpio muxing for external smi
+ * @chip: chip private data
+ * @external: set mux for external smi, or free for gpio usage
+ *
+ * Some mv88e6xxx models have GPIO pins that may be configured as
+ * an external SMI interface, or they may be made free for other
+ * GPIO uses.
+ */
+int mv88e6xxx_g2_scratch_gpio_set_smi(struct mv88e6xxx_chip *chip,
+ bool external)
+{
+ int misc_cfg = MV88E6352_G2_SCRATCH_MISC_CFG;
+ int config_data1 = MV88E6352_G2_SCRATCH_CONFIG_DATA1;
+ int config_data2 = MV88E6352_G2_SCRATCH_CONFIG_DATA2;
+ bool no_cpu;
+ u8 p0_mode;
+ int err;
+ u8 val;
+
+ err = mv88e6xxx_g2_scratch_read(chip, config_data2, &val);
+ if (err)
+ return err;
+
+ p0_mode = val & MV88E6352_G2_SCRATCH_CONFIG_DATA2_P0_MODE_MASK;
+
+ if (p0_mode == 0x01 || p0_mode == 0x02)
+ return -EBUSY;
+
+ err = mv88e6xxx_g2_scratch_read(chip, config_data1, &val);
+ if (err)
+ return err;
+
+ no_cpu = !!(val & MV88E6352_G2_SCRATCH_CONFIG_DATA1_NO_CPU);
+
+ err = mv88e6xxx_g2_scratch_read(chip, misc_cfg, &val);
+ if (err)
+ return err;
+
+ /* NO_CPU being 0 inverts the meaning of the bit */
+ if (!no_cpu)
+ external = !external;
+
+ if (external)
+ val |= MV88E6352_G2_SCRATCH_MISC_CFG_NORMALSMI;
+ else
+ val &= ~MV88E6352_G2_SCRATCH_MISC_CFG_NORMALSMI;
+
+ return mv88e6xxx_g2_scratch_write(chip, misc_cfg, val);
+}
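
Taken together, the mv88e6352_gpio_ops table gives the rest of the driver a small GPIO accessor surface. A hedged usage sketch, assuming a valid chip pointer and pin number (error handling abbreviated):

	const struct mv88e6xxx_gpio_ops *ops = chip->info->ops->gpio_ops;
	int err;

	/* Route pin 3 to plain GPIO, make it an output, drive it low. */
	err = ops->set_pctl(chip, 3, MV88E6352_G2_SCRATCH_GPIO_PCTL_GPIO);
	if (!err)
		err = ops->set_dir(chip, 3, false);
	if (!err)
		err = ops->set_data(chip, 3, 0);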
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
new file mode 100644
index 000000000000..ac7694c71266
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c
@@ -0,0 +1,576 @@
+/*
+ * Marvell 88E6xxx Switch hardware timestamping support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 National Instruments
+ * Erik Hons <erik.hons@ni.com>
+ * Brandon Streiff <brandon.streiff@ni.com>
+ * Dane Wagner <dane.wagner@ni.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "chip.h"
+#include "global2.h"
+#include "hwtstamp.h"
+#include "ptp.h"
+#include <linux/ptp_classify.h>
+
+#define SKB_PTP_TYPE(__skb) (*(unsigned int *)((__skb)->cb))
+
+static int mv88e6xxx_port_ptp_read(struct mv88e6xxx_chip *chip, int port,
+ int addr, u16 *data, int len)
+{
+ if (!chip->info->ops->avb_ops->port_ptp_read)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->avb_ops->port_ptp_read(chip, port, addr,
+ data, len);
+}
+
+static int mv88e6xxx_port_ptp_write(struct mv88e6xxx_chip *chip, int port,
+ int addr, u16 data)
+{
+ if (!chip->info->ops->avb_ops->port_ptp_write)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->avb_ops->port_ptp_write(chip, port, addr,
+ data);
+}
+
+static int mv88e6xxx_ptp_write(struct mv88e6xxx_chip *chip, int addr,
+ u16 data)
+{
+ if (!chip->info->ops->avb_ops->ptp_write)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->avb_ops->ptp_write(chip, addr, data);
+}
+
+/* TX_TSTAMP_TIMEOUT: This limits the time spent polling for a TX
+ * timestamp. When working properly, hardware will produce a timestamp
+ * within 1ms. Software may encounter delays due to MDIO contention, so
+ * the timeout is set accordingly.
+ */
+#define TX_TSTAMP_TIMEOUT msecs_to_jiffies(20)
+
+int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *info)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+
+ if (!chip->info->ptp_support)
+ return -EOPNOTSUPP;
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = ptp_clock_index(chip->ptp_clock);
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ);
+
+ return 0;
+}
+
+static int mv88e6xxx_set_hwtstamp_config(struct mv88e6xxx_chip *chip, int port,
+ struct hwtstamp_config *config)
+{
+ struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
+ bool tstamp_enable = false;
+ u16 port_config0;
+ int err;
+
+ /* Prevent the TX/RX paths from trying to interact with the
+ * timestamp hardware while we reconfigure it.
+ */
+ clear_bit_unlock(MV88E6XXX_HWTSTAMP_ENABLED, &ps->state);
+
+ /* reserved for future extensions */
+ if (config->flags)
+ return -EINVAL;
+
+ switch (config->tx_type) {
+ case HWTSTAMP_TX_OFF:
+ tstamp_enable = false;
+ break;
+ case HWTSTAMP_TX_ON:
+ tstamp_enable = true;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ /* The switch supports timestamping both L2 and L4; one cannot be
+ * disabled independently of the other.
+ */
+ switch (config->rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ tstamp_enable = false;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ default:
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
+ return -ERANGE;
+ }
+
+ if (tstamp_enable) {
+ /* Disable transportSpecific value matching, so that packets
+ * with either 1588 (0) or 802.1AS (1) will be timestamped.
+ */
+ port_config0 = MV88E6XXX_PORT_PTP_CFG0_DISABLE_TSPEC_MATCH;
+ } else {
+ /* Disable PTP. This disables both RX and TX timestamping. */
+ port_config0 = MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP;
+ }
+
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0,
+ port_config0);
+ mutex_unlock(&chip->reg_lock);
+
+ if (err < 0)
+ return err;
+
+ /* Once hardware has been configured, enable timestamp checks
+ * in the RX/TX paths.
+ */
+ if (tstamp_enable)
+ set_bit(MV88E6XXX_HWTSTAMP_ENABLED, &ps->state);
+
+ return 0;
+}
+
+int mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct ifreq *ifr)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
+ struct hwtstamp_config config;
+ int err;
+
+ if (!chip->info->ptp_support)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = mv88e6xxx_set_hwtstamp_config(chip, port, &config);
+ if (err)
+ return err;
+
+ /* Save the chosen configuration to be returned later. */
+ memcpy(&ps->tstamp_config, &config, sizeof(config));
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+ -EFAULT : 0;
+}
+
+int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct ifreq *ifr)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
+ struct hwtstamp_config *config = &ps->tstamp_config;
+
+ if (!chip->info->ptp_support)
+ return -EOPNOTSUPP;
+
+ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ -EFAULT : 0;
+}
+
+/* Get the start of the PTP header in this skb */
+static u8 *parse_ptp_header(struct sk_buff *skb, unsigned int type)
+{
+ u8 *data = skb_mac_header(skb);
+ unsigned int offset = 0;
+
+ if (type & PTP_CLASS_VLAN)
+ offset += VLAN_HLEN;
+
+ switch (type & PTP_CLASS_PMASK) {
+ case PTP_CLASS_IPV4:
+ offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
+ break;
+ case PTP_CLASS_IPV6:
+ offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
+ break;
+ case PTP_CLASS_L2:
+ offset += ETH_HLEN;
+ break;
+ default:
+ return NULL;
+ }
+
+ /* Ensure that the entire 34-byte PTP header is present in this packet. */
+ if (skb->len + ETH_HLEN < offset + 34)
+ return NULL;
+
+ return data + offset;
+}
+
+/* Returns a pointer to the PTP header if the caller should time stamp,
+ * or NULL if the caller should not.
+ */
+static u8 *mv88e6xxx_should_tstamp(struct mv88e6xxx_chip *chip, int port,
+ struct sk_buff *skb, unsigned int type)
+{
+ struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
+ u8 *hdr;
+
+ if (!chip->info->ptp_support)
+ return NULL;
+
+ hdr = parse_ptp_header(skb, type);
+ if (!hdr)
+ return NULL;
+
+ if (!test_bit(MV88E6XXX_HWTSTAMP_ENABLED, &ps->state))
+ return NULL;
+
+ return hdr;
+}
+
+static int mv88e6xxx_ts_valid(u16 status)
+{
+ if (!(status & MV88E6XXX_PTP_TS_VALID))
+ return 0;
+ if (status & MV88E6XXX_PTP_TS_STATUS_MASK)
+ return 0;
+ return 1;
+}
+
+static int seq_match(struct sk_buff *skb, u16 ts_seqid)
+{
+ unsigned int type = SKB_PTP_TYPE(skb);
+ u8 *hdr = parse_ptp_header(skb, type);
+ __be16 *seqid;
+
+ /* The header was parsed once already on the RX path, but guard
+ * against a malformed frame all the same.
+ */
+ if (!hdr)
+ return 0;
+
+ seqid = (__be16 *)(hdr + OFF_PTP_SEQUENCE_ID);
+
+ return ts_seqid == ntohs(*seqid);
+}
+
+static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_port_hwtstamp *ps,
+ struct sk_buff *skb, u16 reg,
+ struct sk_buff_head *rxq)
+{
+ u16 buf[4] = { 0 }, status, seq_id;
+ u64 ns, timelo, timehi;
+ struct skb_shared_hwtstamps *shwt;
+ int err;
+
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_port_ptp_read(chip, ps->port_id,
+ reg, buf, ARRAY_SIZE(buf));
+ mutex_unlock(&chip->reg_lock);
+ if (err)
+ pr_err("failed to get the receive time stamp\n");
+
+ status = buf[0];
+ timelo = buf[1];
+ timehi = buf[2];
+ seq_id = buf[3];
+
+ if (status & MV88E6XXX_PTP_TS_VALID) {
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_port_ptp_write(chip, ps->port_id, reg, 0);
+ mutex_unlock(&chip->reg_lock);
+ if (err)
+ pr_err("failed to clear the receive status\n");
+ }
+ /* Since the device can only handle one time stamp at a time,
+ * we purge any extra frames from the queue.
+ */
+ for ( ; skb; skb = skb_dequeue(rxq)) {
+ if (mv88e6xxx_ts_valid(status) && seq_match(skb, seq_id)) {
+ ns = timehi << 16 | timelo;
+
+ mutex_lock(&chip->reg_lock);
+ ns = timecounter_cyc2time(&chip->tstamp_tc, ns);
+ mutex_unlock(&chip->reg_lock);
+ shwt = skb_hwtstamps(skb);
+ memset(shwt, 0, sizeof(*shwt));
+ shwt->hwtstamp = ns_to_ktime(ns);
+ status &= ~MV88E6XXX_PTP_TS_VALID;
+ }
+ netif_rx_ni(skb);
+ }
+}
+
+static void mv88e6xxx_rxtstamp_work(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_port_hwtstamp *ps)
+{
+ struct sk_buff *skb;
+
+ skb = skb_dequeue(&ps->rx_queue);
+
+ if (skb)
+ mv88e6xxx_get_rxts(chip, ps, skb, MV88E6XXX_PORT_PTP_ARR0_STS,
+ &ps->rx_queue);
+
+ skb = skb_dequeue(&ps->rx_queue2);
+ if (skb)
+ mv88e6xxx_get_rxts(chip, ps, skb, MV88E6XXX_PORT_PTP_ARR1_STS,
+ &ps->rx_queue2);
+}
+
+static int is_pdelay_resp(u8 *msgtype)
+{
+ return (*msgtype & 0xf) == 3;
+}
+
+bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *skb, unsigned int type)
+{
+ struct mv88e6xxx_port_hwtstamp *ps;
+ struct mv88e6xxx_chip *chip;
+ u8 *hdr;
+
+ chip = ds->priv;
+ ps = &chip->port_hwtstamp[port];
+
+ if (ps->tstamp_config.rx_filter != HWTSTAMP_FILTER_PTP_V2_EVENT)
+ return false;
+
+ hdr = mv88e6xxx_should_tstamp(chip, port, skb, type);
+ if (!hdr)
+ return false;
+
+ SKB_PTP_TYPE(skb) = type;
+
+ if (is_pdelay_resp(hdr))
+ skb_queue_tail(&ps->rx_queue2, skb);
+ else
+ skb_queue_tail(&ps->rx_queue, skb);
+
+ ptp_schedule_worker(chip->ptp_clock, 0);
+
+ return true;
+}
+
+static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip,
+ struct mv88e6xxx_port_hwtstamp *ps)
+{
+ struct skb_shared_hwtstamps shhwtstamps;
+ u16 departure_block[4], status;
+ struct sk_buff *tmp_skb;
+ u32 time_raw;
+ int err;
+ u64 ns;
+
+ if (!ps->tx_skb)
+ return 0;
+
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_port_ptp_read(chip, ps->port_id,
+ MV88E6XXX_PORT_PTP_DEP_STS,
+ departure_block,
+ ARRAY_SIZE(departure_block));
+ mutex_unlock(&chip->reg_lock);
+
+ if (err)
+ goto free_and_clear_skb;
+
+ if (!(departure_block[0] & MV88E6XXX_PTP_TS_VALID)) {
+ if (time_is_before_jiffies(ps->tx_tstamp_start +
+ TX_TSTAMP_TIMEOUT)) {
+ dev_warn(chip->dev, "p%d: clearing tx timestamp hang\n",
+ ps->port_id);
+ goto free_and_clear_skb;
+ }
+ /* The timestamp should be available quickly. Getting it is
+ * high priority, and the wait is bounded by TX_TSTAMP_TIMEOUT,
+ * so polling is warranted; restart the work.
+ */
+ return 1;
+ }
+
+ /* We have the timestamp; go ahead and clear valid now */
+ mutex_lock(&chip->reg_lock);
+ mv88e6xxx_port_ptp_write(chip, ps->port_id,
+ MV88E6XXX_PORT_PTP_DEP_STS, 0);
+ mutex_unlock(&chip->reg_lock);
+
+ status = departure_block[0] & MV88E6XXX_PTP_TS_STATUS_MASK;
+ if (status != MV88E6XXX_PTP_TS_STATUS_NORMAL) {
+ dev_warn(chip->dev, "p%d: tx timestamp overrun\n", ps->port_id);
+ goto free_and_clear_skb;
+ }
+
+ if (departure_block[3] != ps->tx_seq_id) {
+ dev_warn(chip->dev, "p%d: unexpected seq. id\n", ps->port_id);
+ goto free_and_clear_skb;
+ }
+
+ memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+ time_raw = ((u32)departure_block[2] << 16) | departure_block[1];
+ mutex_lock(&chip->reg_lock);
+ ns = timecounter_cyc2time(&chip->tstamp_tc, time_raw);
+ mutex_unlock(&chip->reg_lock);
+ shhwtstamps.hwtstamp = ns_to_ktime(ns);
+
+ dev_dbg(chip->dev,
+ "p%d: txtstamp %llx status 0x%04x skb ID 0x%04x hw ID 0x%04x\n",
+ ps->port_id, ktime_to_ns(shhwtstamps.hwtstamp),
+ departure_block[0], ps->tx_seq_id, departure_block[3]);
+
+ /* skb_complete_tx_timestamp() frees up the client to make another
+ * timestamp-able transmit, so clear the ps->tx_skb "flag" before
+ * calling it.
+ */
+
+ tmp_skb = ps->tx_skb;
+ ps->tx_skb = NULL;
+ clear_bit_unlock(MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS, &ps->state);
+ skb_complete_tx_timestamp(tmp_skb, &shhwtstamps);
+
+ return 0;
+
+free_and_clear_skb:
+ dev_kfree_skb_any(ps->tx_skb);
+ ps->tx_skb = NULL;
+ clear_bit_unlock(MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS, &ps->state);
+
+ return 0;
+}
+
+long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp)
+{
+ struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
+ struct dsa_switch *ds = chip->ds;
+ struct mv88e6xxx_port_hwtstamp *ps;
+ int i, restart = 0;
+
+ for (i = 0; i < ds->num_ports; i++) {
+ if (!dsa_is_user_port(ds, i))
+ continue;
+
+ ps = &chip->port_hwtstamp[i];
+ if (test_bit(MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS, &ps->state))
+ restart |= mv88e6xxx_txtstamp_work(chip, ps);
+
+ mv88e6xxx_rxtstamp_work(chip, ps);
+ }
+
+ return restart ? 1 : -1;
+}
+
+bool mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *clone, unsigned int type)
+{
+ struct mv88e6xxx_chip *chip = ds->priv;
+ struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
+ __be16 *seq_ptr;
+ u8 *hdr;
+
+ if (!(skb_shinfo(clone)->tx_flags & SKBTX_HW_TSTAMP))
+ return false;
+
+ hdr = mv88e6xxx_should_tstamp(chip, port, clone, type);
+ if (!hdr)
+ return false;
+
+ seq_ptr = (__be16 *)(hdr + OFF_PTP_SEQUENCE_ID);
+
+ if (test_and_set_bit_lock(MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS,
+ &ps->state))
+ return false;
+
+ ps->tx_skb = clone;
+ ps->tx_tstamp_start = jiffies;
+ ps->tx_seq_id = be16_to_cpup(seq_ptr);
+
+ ptp_schedule_worker(chip->ptp_clock, 0);
+ return true;
+}
+
+static int mv88e6xxx_hwtstamp_port_setup(struct mv88e6xxx_chip *chip, int port)
+{
+ struct mv88e6xxx_port_hwtstamp *ps = &chip->port_hwtstamp[port];
+
+ ps->port_id = port;
+
+ skb_queue_head_init(&ps->rx_queue);
+ skb_queue_head_init(&ps->rx_queue2);
+
+ return mv88e6xxx_port_ptp_write(chip, port, MV88E6XXX_PORT_PTP_CFG0,
+ MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP);
+}
+
+int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip)
+{
+ int err;
+ int i;
+
+ /* Disable timestamping on all ports. */
+ for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
+ err = mv88e6xxx_hwtstamp_port_setup(chip, i);
+ if (err)
+ return err;
+ }
+
+ /* MV88E6XXX_PTP_MSG_TYPE is a mask of PTP message types to
+ * timestamp. This affects all ports that have timestamping enabled,
+ * but the timestamp config is per-port; thus we configure all events
+ * here and only support the HWTSTAMP_FILTER_*_EVENT filter types.
+ */
+ err = mv88e6xxx_ptp_write(chip, MV88E6XXX_PTP_MSGTYPE,
+ MV88E6XXX_PTP_MSGTYPE_ALL_EVENT);
+ if (err)
+ return err;
+
+ /* Use ARRIVAL1 for peer delay response messages. */
+ err = mv88e6xxx_ptp_write(chip, MV88E6XXX_PTP_TS_ARRIVAL_PTR,
+ MV88E6XXX_PTP_MSGTYPE_PDLAY_RES);
+ if (err)
+ return err;
+
+ /* 88E6341 devices default to timestamping at the PHY, but this has
+ * a hardware issue that results in unreliable timestamps. Force
+ * these devices to timestamp at the MAC.
+ */
+ if (chip->info->family == MV88E6XXX_FAMILY_6341) {
+ u16 val = MV88E6341_PTP_CFG_UPDATE |
+ MV88E6341_PTP_CFG_MODE_IDX |
+ MV88E6341_PTP_CFG_MODE_TS_AT_MAC;
+ err = mv88e6xxx_ptp_write(chip, MV88E6341_PTP_CFG, val);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+void mv88e6xxx_hwtstamp_free(struct mv88e6xxx_chip *chip)
+{
+}
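
For context, the hwtstamp_config marshalled above is the same structure a userspace SIOCSHWTSTAMP request carries. A minimal sketch of a client enabling the one RX filter this driver supports (the socket and interface name are assumptions for illustration):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <net/if.h>
	#include <linux/net_tstamp.h>
	#include <linux/sockios.h>

	static int enable_hwtstamp(int sock, const char *ifname)
	{
		struct hwtstamp_config cfg = {
			.tx_type = HWTSTAMP_TX_ON,
			.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
		};
		struct ifreq ifr;

		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
		ifr.ifr_data = (void *)&cfg;

		/* On success the driver writes back the filter it applied. */
		return ioctl(sock, SIOCSHWTSTAMP, &ifr);
	}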
diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.h b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
new file mode 100644
index 000000000000..bc71c9212a08
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.h
@@ -0,0 +1,172 @@
+/*
+ * Marvell 88E6xxx Switch hardware timestamping support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 National Instruments
+ * Erik Hons <erik.hons@ni.com>
+ * Brandon Streiff <brandon.streiff@ni.com>
+ * Dane Wagner <dane.wagner@ni.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _MV88E6XXX_HWTSTAMP_H
+#define _MV88E6XXX_HWTSTAMP_H
+
+#include "chip.h"
+
+/* Global PTP registers */
+/* Offset 0x00: PTP EtherType */
+#define MV88E6XXX_PTP_ETHERTYPE 0x00
+
+/* Offset 0x01: Message Type Timestamp Enables */
+#define MV88E6XXX_PTP_MSGTYPE 0x01
+#define MV88E6XXX_PTP_MSGTYPE_SYNC 0x0001
+#define MV88E6XXX_PTP_MSGTYPE_DELAY_REQ 0x0002
+#define MV88E6XXX_PTP_MSGTYPE_PDLAY_REQ 0x0004
+#define MV88E6XXX_PTP_MSGTYPE_PDLAY_RES 0x0008
+#define MV88E6XXX_PTP_MSGTYPE_ALL_EVENT 0x000f
+
+/* Offset 0x02: Timestamp Arrival Capture Pointers */
+#define MV88E6XXX_PTP_TS_ARRIVAL_PTR 0x02
+
+/* Offset 0x07: PTP Global Configuration */
+#define MV88E6341_PTP_CFG 0x07
+#define MV88E6341_PTP_CFG_UPDATE 0x8000
+#define MV88E6341_PTP_CFG_IDX_MASK 0x7f00
+#define MV88E6341_PTP_CFG_DATA_MASK 0x00ff
+#define MV88E6341_PTP_CFG_MODE_IDX 0x0
+#define MV88E6341_PTP_CFG_MODE_TS_AT_PHY 0x00
+#define MV88E6341_PTP_CFG_MODE_TS_AT_MAC 0x80
+
+/* Offset 0x08: PTP Interrupt Status */
+#define MV88E6XXX_PTP_IRQ_STATUS 0x08
+
+/* Per-Port PTP Registers */
+/* Offset 0x00: PTP Configuration 0 */
+#define MV88E6XXX_PORT_PTP_CFG0 0x00
+#define MV88E6XXX_PORT_PTP_CFG0_TSPEC_SHIFT 12
+#define MV88E6XXX_PORT_PTP_CFG0_TSPEC_MASK 0xf000
+#define MV88E6XXX_PORT_PTP_CFG0_TSPEC_1588 0x0000
+#define MV88E6XXX_PORT_PTP_CFG0_TSPEC_8021AS 0x1000
+#define MV88E6XXX_PORT_PTP_CFG0_DISABLE_TSPEC_MATCH 0x0800
+#define MV88E6XXX_PORT_PTP_CFG0_DISABLE_OVERWRITE 0x0002
+#define MV88E6XXX_PORT_PTP_CFG0_DISABLE_PTP 0x0001
+
+/* Offset 0x01: PTP Configuration 1 */
+#define MV88E6XXX_PORT_PTP_CFG1 0x01
+
+/* Offset 0x02: PTP Configuration 2 */
+#define MV88E6XXX_PORT_PTP_CFG2 0x02
+#define MV88E6XXX_PORT_PTP_CFG2_EMBED_ARRIVAL 0x1000
+#define MV88E6XXX_PORT_PTP_CFG2_DEP_IRQ_EN 0x0002
+#define MV88E6XXX_PORT_PTP_CFG2_ARR_IRQ_EN 0x0001
+
+/* Offset 0x03: PTP LED Configuration */
+#define MV88E6XXX_PORT_PTP_LED_CFG 0x03
+
+/* Offset 0x08: PTP Arrival 0 Status */
+#define MV88E6XXX_PORT_PTP_ARR0_STS 0x08
+
+/* Offset 0x09/0x0A: PTP Arrival 0 Time */
+#define MV88E6XXX_PORT_PTP_ARR0_TIME_LO 0x09
+#define MV88E6XXX_PORT_PTP_ARR0_TIME_HI 0x0a
+
+/* Offset 0x0B: PTP Arrival 0 Sequence ID */
+#define MV88E6XXX_PORT_PTP_ARR0_SEQID 0x0b
+
+/* Offset 0x0C: PTP Arrival 1 Status */
+#define MV88E6XXX_PORT_PTP_ARR1_STS 0x0c
+
+/* Offset 0x0D/0x0E: PTP Arrival 1 Time */
+#define MV88E6XXX_PORT_PTP_ARR1_TIME_LO 0x0d
+#define MV88E6XXX_PORT_PTP_ARR1_TIME_HI 0x0e
+
+/* Offset 0x0F: PTP Arrival 1 Sequence ID */
+#define MV88E6XXX_PORT_PTP_ARR1_SEQID 0x0f
+
+/* Offset 0x10: PTP Departure Status */
+#define MV88E6XXX_PORT_PTP_DEP_STS 0x10
+
+/* Offset 0x11/0x12: PTP Departure Time */
+#define MV88E6XXX_PORT_PTP_DEP_TIME_LO 0x11
+#define MV88E6XXX_PORT_PTP_DEP_TIME_HI 0x12
+
+/* Offset 0x13: PTP Departure Sequence ID */
+#define MV88E6XXX_PORT_PTP_DEP_SEQID 0x13
+
+/* Status fields for arrival and departure timestamp status registers */
+#define MV88E6XXX_PTP_TS_STATUS_MASK 0x0006
+#define MV88E6XXX_PTP_TS_STATUS_NORMAL 0x0000
+#define MV88E6XXX_PTP_TS_STATUS_OVERWRITTEN 0x0002
+#define MV88E6XXX_PTP_TS_STATUS_DISCARDED 0x0004
+#define MV88E6XXX_PTP_TS_VALID 0x0001
+
+#ifdef CONFIG_NET_DSA_MV88E6XXX_PTP
+
+int mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds, int port,
+ struct ifreq *ifr);
+int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds, int port,
+ struct ifreq *ifr);
+
+bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *clone, unsigned int type);
+bool mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *clone, unsigned int type);
+
+int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *info);
+
+int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip);
+void mv88e6xxx_hwtstamp_free(struct mv88e6xxx_chip *chip);
+
+#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */
+
+static inline int mv88e6xxx_port_hwtstamp_set(struct dsa_switch *ds,
+ int port, struct ifreq *ifr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mv88e6xxx_port_hwtstamp_get(struct dsa_switch *ds,
+ int port, struct ifreq *ifr)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline bool mv88e6xxx_port_rxtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *clone,
+ unsigned int type)
+{
+ return false;
+}
+
+static inline bool mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port,
+ struct sk_buff *clone,
+ unsigned int type)
+{
+ return false;
+}
+
+static inline int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port,
+ struct ethtool_ts_info *info)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip)
+{
+ return 0;
+}
+
+static inline void mv88e6xxx_hwtstamp_free(struct mv88e6xxx_chip *chip)
+{
+}
+
+#endif /* CONFIG_NET_DSA_MV88E6XXX_PTP */
+
+#endif /* _MV88E6XXX_HWTSTAMP_H */
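
The stub half of this header lets the rest of the driver call these handlers unconditionally; with CONFIG_NET_DSA_MV88E6XXX_PTP disabled they collapse to -EOPNOTSUPP or false. The dsa_switch_ops wiring itself lives in chip.c (elsewhere in this patch, outside this excerpt); roughly, it amounts to:

	.port_hwtstamp_set	= mv88e6xxx_port_hwtstamp_set,
	.port_hwtstamp_get	= mv88e6xxx_port_hwtstamp_get,
	.port_txtstamp		= mv88e6xxx_port_txtstamp,
	.port_rxtstamp		= mv88e6xxx_port_rxtstamp,
	.get_ts_info		= mv88e6xxx_get_ts_info,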
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.c b/drivers/net/dsa/mv88e6xxx/ptp.c
new file mode 100644
index 000000000000..bd85e2c390e1
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/ptp.c
@@ -0,0 +1,381 @@
+/*
+ * Marvell 88E6xxx Switch PTP support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 National Instruments
+ * Erik Hons <erik.hons@ni.com>
+ * Brandon Streiff <brandon.streiff@ni.com>
+ * Dane Wagner <dane.wagner@ni.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include "chip.h"
+#include "global2.h"
+#include "ptp.h"
+
+/* Raw timestamps are in units of 8-ns clock periods. */
+#define CC_SHIFT 28
+#define CC_MULT (8 << CC_SHIFT)
+#define CC_MULT_NUM (1 << 9)
+#define CC_MULT_DEM 15625ULL
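+
+/* Sanity check of the constants above: adjfine scales the multiplier by
+ * scaled_ppm / (10^6 * 2^16), and
+ *   CC_MULT / (10^6 * 2^16) = (8 << 28) / (10^6 * 2^16)
+ *                           = 2^15 / 10^6
+ *                           = (1 << 9) / 15625,
+ * which is where CC_MULT_NUM and CC_MULT_DEM come from.
+ */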
+
+#define TAI_EVENT_WORK_INTERVAL msecs_to_jiffies(100)
+
+#define cc_to_chip(cc) container_of(cc, struct mv88e6xxx_chip, tstamp_cc)
+#define dw_overflow_to_chip(dw) container_of(dw, struct mv88e6xxx_chip, \
+ overflow_work)
+#define dw_tai_event_to_chip(dw) container_of(dw, struct mv88e6xxx_chip, \
+ tai_event_work)
+
+static int mv88e6xxx_tai_read(struct mv88e6xxx_chip *chip, int addr,
+ u16 *data, int len)
+{
+ if (!chip->info->ops->avb_ops->tai_read)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->avb_ops->tai_read(chip, addr, data, len);
+}
+
+static int mv88e6xxx_tai_write(struct mv88e6xxx_chip *chip, int addr, u16 data)
+{
+ if (!chip->info->ops->avb_ops->tai_write)
+ return -EOPNOTSUPP;
+
+ return chip->info->ops->avb_ops->tai_write(chip, addr, data);
+}
+
+/* TODO: places where this is called should be using pinctrl */
+static int mv88e6xxx_set_gpio_func(struct mv88e6xxx_chip *chip, int pin,
+ int func, int input)
+{
+ int err;
+
+ if (!chip->info->ops->gpio_ops)
+ return -EOPNOTSUPP;
+
+ err = chip->info->ops->gpio_ops->set_dir(chip, pin, input);
+ if (err)
+ return err;
+
+ return chip->info->ops->gpio_ops->set_pctl(chip, pin, func);
+}
+
+static u64 mv88e6xxx_ptp_clock_read(const struct cyclecounter *cc)
+{
+ struct mv88e6xxx_chip *chip = cc_to_chip(cc);
+ u16 phc_time[2];
+ int err;
+
+ err = mv88e6xxx_tai_read(chip, MV88E6XXX_TAI_TIME_LO, phc_time,
+ ARRAY_SIZE(phc_time));
+ if (err)
+ return 0;
+
+ return ((u32)phc_time[1] << 16) | phc_time[0];
+}
+
+/* mv88e6xxx_config_eventcap - configure TAI event capture
+ * @chip: chip private data
+ * @event: PTP_CLOCK_PPS (internal) or PTP_CLOCK_EXTTS (external)
+ * @rising: zero for falling-edge trigger, else rising-edge trigger
+ *
+ * This will also reset the capture sequence counter.
+ */
+static int mv88e6xxx_config_eventcap(struct mv88e6xxx_chip *chip, int event,
+ int rising)
+{
+ u16 global_config;
+ u16 cap_config;
+ int err;
+
+ chip->evcap_config = MV88E6XXX_TAI_CFG_CAP_OVERWRITE |
+ MV88E6XXX_TAI_CFG_CAP_CTR_START;
+ if (!rising)
+ chip->evcap_config |= MV88E6XXX_TAI_CFG_EVREQ_FALLING;
+
+ global_config = (chip->evcap_config | chip->trig_config);
+ err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_CFG, global_config);
+ if (err)
+ return err;
+
+ if (event == PTP_CLOCK_PPS) {
+ cap_config = MV88E6XXX_TAI_EVENT_STATUS_CAP_TRIG;
+ } else if (event == PTP_CLOCK_EXTTS) {
+ /* if STATUS_CAP_TRIG is unset we capture PTP_EVREQ events */
+ cap_config = 0;
+ } else {
+ return -EINVAL;
+ }
+
+ /* Write the capture config; this also clears the capture counter */
+ err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_EVENT_STATUS,
+ cap_config);
+
+ return err;
+}
+
+static void mv88e6xxx_tai_event_work(struct work_struct *ugly)
+{
+ struct delayed_work *dw = to_delayed_work(ugly);
+ struct mv88e6xxx_chip *chip = dw_tai_event_to_chip(dw);
+ struct ptp_clock_event ev;
+ u16 status[4];
+ u32 raw_ts;
+ int err;
+
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_tai_read(chip, MV88E6XXX_TAI_EVENT_STATUS,
+ status, ARRAY_SIZE(status));
+ mutex_unlock(&chip->reg_lock);
+
+ if (err) {
+ dev_err(chip->dev, "failed to read TAI status register\n");
+ return;
+ }
+ if (status[0] & MV88E6XXX_TAI_EVENT_STATUS_ERROR) {
+ dev_warn(chip->dev, "missed event capture\n");
+ return;
+ }
+ if (!(status[0] & MV88E6XXX_TAI_EVENT_STATUS_VALID))
+ goto out;
+
+ raw_ts = ((u32)status[2] << 16) | status[1];
+
+ /* Clear the valid bit so the next timestamp can come in */
+ status[0] &= ~MV88E6XXX_TAI_EVENT_STATUS_VALID;
+ mutex_lock(&chip->reg_lock);
+ err = mv88e6xxx_tai_write(chip, MV88E6XXX_TAI_EVENT_STATUS, status[0]);
+ mutex_unlock(&chip->reg_lock);
+
+ /* This is an external timestamp */
+ ev.type = PTP_CLOCK_EXTTS;
+
+ /* We only have one timestamping channel. */
+ ev.index = 0;
+ mutex_lock(&chip->reg_lock);
+ ev.timestamp = timecounter_cyc2time(&chip->tstamp_tc, raw_ts);
+ mutex_unlock(&chip->reg_lock);
+
+ ptp_clock_event(chip->ptp_clock, &ev);
+out:
+ schedule_delayed_work(&chip->tai_event_work, TAI_EVENT_WORK_INTERVAL);
+}
+
+static int mv88e6xxx_ptp_adjfine(struct ptp_clock_info *ptp, long scaled_ppm)
+{
+ struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
+ int neg_adj = 0;
+ u32 diff, mult;
+ u64 adj;
+
+ if (scaled_ppm < 0) {
+ neg_adj = 1;
+ scaled_ppm = -scaled_ppm;
+ }
+ mult = CC_MULT;
+ adj = CC_MULT_NUM;
+ adj *= scaled_ppm;
+ diff = div_u64(adj, CC_MULT_DEM);
+
+ mutex_lock(&chip->reg_lock);
+
+ timecounter_read(&chip->tstamp_tc);
+ chip->tstamp_cc.mult = neg_adj ? mult - diff : mult + diff;
+
+ mutex_unlock(&chip->reg_lock);
+
+ return 0;
+}
+
+static int mv88e6xxx_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
+
+ mutex_lock(&chip->reg_lock);
+ timecounter_adjtime(&chip->tstamp_tc, delta);
+ mutex_unlock(&chip->reg_lock);
+
+ return 0;
+}
+
+static int mv88e6xxx_ptp_gettime(struct ptp_clock_info *ptp,
+ struct timespec64 *ts)
+{
+ struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
+ u64 ns;
+
+ mutex_lock(&chip->reg_lock);
+ ns = timecounter_read(&chip->tstamp_tc);
+ mutex_unlock(&chip->reg_lock);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int mv88e6xxx_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
+ u64 ns;
+
+ ns = timespec64_to_ns(ts);
+
+ mutex_lock(&chip->reg_lock);
+ timecounter_init(&chip->tstamp_tc, &chip->tstamp_cc, ns);
+ mutex_unlock(&chip->reg_lock);
+
+ return 0;
+}
+
+static int mv88e6xxx_ptp_enable_extts(struct mv88e6xxx_chip *chip,
+ struct ptp_clock_request *rq, int on)
+{
+ int rising = (rq->extts.flags & PTP_RISING_EDGE);
+ int func;
+ int pin;
+ int err;
+
+ pin = ptp_find_pin(chip->ptp_clock, PTP_PF_EXTTS, rq->extts.index);
+
+ if (pin < 0)
+ return -EBUSY;
+
+ mutex_lock(&chip->reg_lock);
+
+ if (on) {
+ func = MV88E6352_G2_SCRATCH_GPIO_PCTL_EVREQ;
+
+ err = mv88e6xxx_set_gpio_func(chip, pin, func, true);
+ if (err)
+ goto out;
+
+ schedule_delayed_work(&chip->tai_event_work,
+ TAI_EVENT_WORK_INTERVAL);
+
+ err = mv88e6xxx_config_eventcap(chip, PTP_CLOCK_EXTTS, rising);
+ } else {
+ func = MV88E6352_G2_SCRATCH_GPIO_PCTL_GPIO;
+
+ err = mv88e6xxx_set_gpio_func(chip, pin, func, true);
+
+ cancel_delayed_work_sync(&chip->tai_event_work);
+ }
+
+out:
+ mutex_unlock(&chip->reg_lock);
+
+ return err;
+}
+
+static int mv88e6xxx_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct mv88e6xxx_chip *chip = ptp_to_chip(ptp);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return mv88e6xxx_ptp_enable_extts(chip, rq, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mv88e6xxx_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ switch (func) {
+ case PTP_PF_NONE:
+ case PTP_PF_EXTTS:
+ break;
+ case PTP_PF_PEROUT:
+ case PTP_PF_PHYSYNC:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+/* With a 125MHz input clock, the 32-bit timestamp counter overflows in ~34.3
+ * seconds; this task forces periodic reads so that we don't miss any.
+ */
+#define MV88E6XXX_TAI_OVERFLOW_PERIOD (HZ * 16)
+static void mv88e6xxx_ptp_overflow_check(struct work_struct *work)
+{
+ struct delayed_work *dw = to_delayed_work(work);
+ struct mv88e6xxx_chip *chip = dw_overflow_to_chip(dw);
+ struct timespec64 ts;
+
+ mv88e6xxx_ptp_gettime(&chip->ptp_clock_info, &ts);
+
+ schedule_delayed_work(&chip->overflow_work,
+ MV88E6XXX_TAI_OVERFLOW_PERIOD);
+}
+
+int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
+{
+ int i;
+
+ /* Set up the cycle counter */
+ memset(&chip->tstamp_cc, 0, sizeof(chip->tstamp_cc));
+ chip->tstamp_cc.read = mv88e6xxx_ptp_clock_read;
+ chip->tstamp_cc.mask = CYCLECOUNTER_MASK(32);
+ chip->tstamp_cc.mult = CC_MULT;
+ chip->tstamp_cc.shift = CC_SHIFT;
+
+ timecounter_init(&chip->tstamp_tc, &chip->tstamp_cc,
+ ktime_to_ns(ktime_get_real()));
+
+ INIT_DELAYED_WORK(&chip->overflow_work, mv88e6xxx_ptp_overflow_check);
+ INIT_DELAYED_WORK(&chip->tai_event_work, mv88e6xxx_tai_event_work);
+
+ chip->ptp_clock_info.owner = THIS_MODULE;
+ snprintf(chip->ptp_clock_info.name, sizeof(chip->ptp_clock_info.name),
+ "%s", dev_name(chip->dev));
+ chip->ptp_clock_info.max_adj = 1000000;
+
+ chip->ptp_clock_info.n_ext_ts = 1;
+ chip->ptp_clock_info.n_per_out = 0;
+ chip->ptp_clock_info.n_pins = mv88e6xxx_num_gpio(chip);
+ chip->ptp_clock_info.pps = 0;
+
+ for (i = 0; i < chip->ptp_clock_info.n_pins; ++i) {
+ struct ptp_pin_desc *ppd = &chip->pin_config[i];
+
+ snprintf(ppd->name, sizeof(ppd->name), "mv88e6xxx_gpio%d", i);
+ ppd->index = i;
+ ppd->func = PTP_PF_NONE;
+ }
+ chip->ptp_clock_info.pin_config = chip->pin_config;
+
+ chip->ptp_clock_info.adjfine = mv88e6xxx_ptp_adjfine;
+ chip->ptp_clock_info.adjtime = mv88e6xxx_ptp_adjtime;
+ chip->ptp_clock_info.gettime64 = mv88e6xxx_ptp_gettime;
+ chip->ptp_clock_info.settime64 = mv88e6xxx_ptp_settime;
+ chip->ptp_clock_info.enable = mv88e6xxx_ptp_enable;
+ chip->ptp_clock_info.verify = mv88e6xxx_ptp_verify;
+ chip->ptp_clock_info.do_aux_work = mv88e6xxx_hwtstamp_work;
+
+ chip->ptp_clock = ptp_clock_register(&chip->ptp_clock_info, chip->dev);
+ if (IS_ERR(chip->ptp_clock))
+ return PTR_ERR(chip->ptp_clock);
+
+ schedule_delayed_work(&chip->overflow_work,
+ MV88E6XXX_TAI_OVERFLOW_PERIOD);
+
+ return 0;
+}
+
+void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip)
+{
+ if (chip->ptp_clock) {
+ cancel_delayed_work_sync(&chip->overflow_work);
+ cancel_delayed_work_sync(&chip->tai_event_work);
+
+ ptp_clock_unregister(chip->ptp_clock);
+ chip->ptp_clock = NULL;
+ }
+}
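
Once ptp_clock_register() succeeds, the clock appears to userspace as /dev/ptpN and can be read through the dynamic POSIX clock interface. A minimal sketch, assuming the switch registered as /dev/ptp0:

	#include <fcntl.h>
	#include <stdio.h>
	#include <time.h>
	#include <unistd.h>

	#define CLOCKFD 3
	#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

	int main(void)
	{
		struct timespec ts;
		int fd = open("/dev/ptp0", O_RDONLY);

		if (fd < 0)
			return 1;

		/* This ends up in mv88e6xxx_ptp_gettime() via the PTP core. */
		if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
			printf("phc time: %lld.%09ld\n",
			       (long long)ts.tv_sec, ts.tv_nsec);

		close(fd);
		return 0;
	}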
diff --git a/drivers/net/dsa/mv88e6xxx/ptp.h b/drivers/net/dsa/mv88e6xxx/ptp.h
new file mode 100644
index 000000000000..10f271ab650d
--- /dev/null
+++ b/drivers/net/dsa/mv88e6xxx/ptp.h
@@ -0,0 +1,108 @@
+/*
+ * Marvell 88E6xxx Switch PTP support
+ *
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * Copyright (c) 2017 National Instruments
+ * Erik Hons <erik.hons@ni.com>
+ * Brandon Streiff <brandon.streiff@ni.com>
+ * Dane Wagner <dane.wagner@ni.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _MV88E6XXX_PTP_H
+#define _MV88E6XXX_PTP_H
+
+#include "chip.h"
+
+/* Offset 0x00: TAI Global Config */
+#define MV88E6XXX_TAI_CFG 0x00
+#define MV88E6XXX_TAI_CFG_CAP_OVERWRITE 0x8000
+#define MV88E6XXX_TAI_CFG_CAP_CTR_START 0x4000
+#define MV88E6XXX_TAI_CFG_EVREQ_FALLING 0x2000
+#define MV88E6XXX_TAI_CFG_TRIG_ACTIVE_LO 0x1000
+#define MV88E6XXX_TAI_CFG_IRL_ENABLE 0x0400
+#define MV88E6XXX_TAI_CFG_TRIG_IRQ_EN 0x0200
+#define MV88E6XXX_TAI_CFG_EVREQ_IRQ_EN 0x0100
+#define MV88E6XXX_TAI_CFG_TRIG_LOCK 0x0080
+#define MV88E6XXX_TAI_CFG_BLOCK_UPDATE 0x0008
+#define MV88E6XXX_TAI_CFG_MULTI_PTP 0x0004
+#define MV88E6XXX_TAI_CFG_TRIG_MODE_ONESHOT 0x0002
+#define MV88E6XXX_TAI_CFG_TRIG_ENABLE 0x0001
+
+/* Offset 0x01: Timestamp Clock Period (ps) */
+#define MV88E6XXX_TAI_CLOCK_PERIOD 0x01
+
+/* Offset 0x02/0x03: Trigger Generation Amount */
+#define MV88E6XXX_TAI_TRIG_GEN_AMOUNT_LO 0x02
+#define MV88E6XXX_TAI_TRIG_GEN_AMOUNT_HI 0x03
+
+/* Offset 0x04: Clock Compensation */
+#define MV88E6XXX_TAI_TRIG_CLOCK_COMP 0x04
+
+/* Offset 0x05: Trigger Configuration */
+#define MV88E6XXX_TAI_TRIG_CFG 0x05
+
+/* Offset 0x06: Ingress Rate Limiter Clock Generation Amount */
+#define MV88E6XXX_TAI_IRL_AMOUNT 0x06
+
+/* Offset 0x07: Ingress Rate Limiter Compensation */
+#define MV88E6XXX_TAI_IRL_COMP 0x07
+
+/* Offset 0x08: Ingress Rate Limiter Compensation (ps) */
+#define MV88E6XXX_TAI_IRL_COMP_PS 0x08
+
+/* Offset 0x09: Event Status */
+#define MV88E6XXX_TAI_EVENT_STATUS 0x09
+#define MV88E6XXX_TAI_EVENT_STATUS_CAP_TRIG 0x4000
+#define MV88E6XXX_TAI_EVENT_STATUS_ERROR 0x0200
+#define MV88E6XXX_TAI_EVENT_STATUS_VALID 0x0100
+#define MV88E6XXX_TAI_EVENT_STATUS_CTR_MASK 0x00ff
+
+/* Offset 0x0A/0x0B: Event Time */
+#define MV88E6XXX_TAI_EVENT_TIME_LO 0x0a
+#define MV88E6XXX_TAI_EVENT_TIME_HI 0x0b
+
+/* Offset 0x0E/0x0F: PTP Global Time */
+#define MV88E6XXX_TAI_TIME_LO 0x0e
+#define MV88E6XXX_TAI_TIME_HI 0x0f
+
+/* Offset 0x10/0x11: Trig Generation Time */
+#define MV88E6XXX_TAI_TRIG_TIME_LO 0x10
+#define MV88E6XXX_TAI_TRIG_TIME_HI 0x11
+
+/* Offset 0x12: Lock Status */
+#define MV88E6XXX_TAI_LOCK_STATUS 0x12
+
+#ifdef CONFIG_NET_DSA_MV88E6XXX_PTP
+
+long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp);
+int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip);
+void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip);
+
+#define ptp_to_chip(ptp) container_of(ptp, struct mv88e6xxx_chip, \
+ ptp_clock_info)
+
+#else /* !CONFIG_NET_DSA_MV88E6XXX_PTP */
+
+static inline long mv88e6xxx_hwtstamp_work(struct ptp_clock_info *ptp)
+{
+ return -1;
+}
+
+static inline int mv88e6xxx_ptp_setup(struct mv88e6xxx_chip *chip)
+{
+ return 0;
+}
+
+static inline void mv88e6xxx_ptp_free(struct mv88e6xxx_chip *chip)
+{
+}
+
+#endif /* CONFIG_NET_DSA_MV88E6XXX_PTP */
+
+#endif /* _MV88E6XXX_PTP_H */
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c
index f3c01119b3d1..4756969c1546 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.c
+++ b/drivers/net/dsa/mv88e6xxx/serdes.c
@@ -55,18 +55,30 @@ static int mv88e6352_serdes_power_set(struct mv88e6xxx_chip *chip, bool on)
return err;
}
-int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
+static bool mv88e6352_port_has_serdes(struct mv88e6xxx_chip *chip, int port)
{
- int err;
u8 cmode;
+ int err;
err = mv88e6xxx_port_get_cmode(chip, port, &cmode);
- if (err)
- return err;
+ if (err) {
+ dev_err(chip->dev, "failed to read cmode\n");
+ return false;
+ }
if ((cmode == MV88E6XXX_PORT_STS_CMODE_100BASE_X) ||
(cmode == MV88E6XXX_PORT_STS_CMODE_1000BASE_X) ||
- (cmode == MV88E6XXX_PORT_STS_CMODE_SGMII)) {
+ (cmode == MV88E6XXX_PORT_STS_CMODE_SGMII))
+ return true;
+
+ return false;
+}
+
+int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
+{
+ int err;
+
+ if (mv88e6352_port_has_serdes(chip, port)) {
err = mv88e6352_serdes_power_set(chip, on);
if (err < 0)
return err;
@@ -75,6 +87,90 @@ int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on)
return 0;
}
+struct mv88e6352_serdes_hw_stat {
+ char string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int reg;
+};
+
+static struct mv88e6352_serdes_hw_stat mv88e6352_serdes_hw_stats[] = {
+ { "serdes_fibre_rx_error", 16, 21 },
+ { "serdes_PRBS_error", 32, 24 },
+};
+
+int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port)
+{
+ if (mv88e6352_port_has_serdes(chip, port))
+ return ARRAY_SIZE(mv88e6352_serdes_hw_stats);
+
+ return 0;
+}
+
+void mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
+ int port, uint8_t *data)
+{
+ struct mv88e6352_serdes_hw_stat *stat;
+ int i;
+
+ if (!mv88e6352_port_has_serdes(chip, port))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6352_serdes_hw_stats); i++) {
+ stat = &mv88e6352_serdes_hw_stats[i];
+ memcpy(data + i * ETH_GSTRING_LEN, stat->string,
+ ETH_GSTRING_LEN);
+ }
+}
+
+static uint64_t mv88e6352_serdes_get_stat(struct mv88e6xxx_chip *chip,
+ struct mv88e6352_serdes_hw_stat *stat)
+{
+ u64 val = 0;
+ u16 reg;
+ int err;
+
+ err = mv88e6352_serdes_read(chip, stat->reg, &reg);
+ if (err) {
+ dev_err(chip->dev, "failed to read statistic\n");
+ return 0;
+ }
+
+ val = reg;
+
+ if (stat->sizeof_stat == 32) {
+ err = mv88e6352_serdes_read(chip, stat->reg + 1, &reg);
+ if (err) {
+ dev_err(chip->dev, "failed to read statistic\n");
+ return 0;
+ }
+ val = val << 16 | reg;
+ }
+
+ return val;
+}
+
+void mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data)
+{
+ struct mv88e6xxx_port *mv88e6xxx_port = &chip->ports[port];
+ struct mv88e6352_serdes_hw_stat *stat;
+ u64 value;
+ int i;
+
+ if (!mv88e6352_port_has_serdes(chip, port))
+ return;
+
+ BUILD_BUG_ON(ARRAY_SIZE(mv88e6352_serdes_hw_stats) >
+ ARRAY_SIZE(mv88e6xxx_port->serdes_stats));
+
+ for (i = 0; i < ARRAY_SIZE(mv88e6352_serdes_hw_stats); i++) {
+ stat = &mv88e6352_serdes_hw_stats[i];
+ value = mv88e6352_serdes_get_stat(chip, stat);
+ mv88e6xxx_port->serdes_stats[i] += value;
+ data[i] = mv88e6xxx_port->serdes_stats[i];
+ }
+}
+
/* Set the power on/off for 10GBASE-R and 10GBASE-X4/X2 */
static int mv88e6390_serdes_10g(struct mv88e6xxx_chip *chip, int addr, bool on)
{
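
These hooks feed DSA's standard ethtool statistics path, so the new counters show up next to the port's MIB statistics; for illustration (the port name is an assumption), `ethtool -S lan0` would now also list serdes_fibre_rx_error and serdes_PRBS_error, accumulated in software in mv88e6xxx_port->serdes_stats across reads.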
diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h
index 5c1cd6d8e9a5..641baa75f910 100644
--- a/drivers/net/dsa/mv88e6xxx/serdes.h
+++ b/drivers/net/dsa/mv88e6xxx/serdes.h
@@ -44,5 +44,9 @@
int mv88e6352_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on);
-
+int mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port);
+void mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip,
+ int port, uint8_t *data);
+void mv88e6352_serdes_get_stats(struct mv88e6xxx_chip *chip, int port,
+ uint64_t *data);
#endif
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 9df22ebee822..600d5ad1fbde 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -631,7 +631,7 @@ qca8k_get_ethtool_stats(struct dsa_switch *ds, int port,
}
static int
-qca8k_get_sset_count(struct dsa_switch *ds)
+qca8k_get_sset_count(struct dsa_switch *ds, int port)
{
return ARRAY_SIZE(ar8327_mib);
}
diff --git a/drivers/net/ethernet/8390/Makefile b/drivers/net/ethernet/8390/Makefile
index f975c2fc88a3..1d650e66cc6e 100644
--- a/drivers/net/ethernet/8390/Makefile
+++ b/drivers/net/ethernet/8390/Makefile
@@ -7,8 +7,8 @@ obj-$(CONFIG_MAC8390) += mac8390.o
obj-$(CONFIG_APNE) += apne.o 8390.o
obj-$(CONFIG_ARM_ETHERH) += etherh.o
obj-$(CONFIG_AX88796) += ax88796.o
-obj-$(CONFIG_HYDRA) += hydra.o 8390.o
-obj-$(CONFIG_MCF8390) += mcf8390.o 8390.o
+obj-$(CONFIG_HYDRA) += hydra.o
+obj-$(CONFIG_MCF8390) += mcf8390.o
obj-$(CONFIG_NE2000) += ne.o 8390p.o
obj-$(CONFIG_NE2K_PCI) += ne2k-pci.o 8390.o
obj-$(CONFIG_PCMCIA_AXNET) += axnet_cs.o 8390.o
@@ -16,4 +16,4 @@ obj-$(CONFIG_PCMCIA_PCNET) += pcnet_cs.o 8390.o
obj-$(CONFIG_STNIC) += stnic.o 8390.o
obj-$(CONFIG_ULTRA) += smc-ultra.o 8390.o
obj-$(CONFIG_WD80x3) += wd.o 8390.o
-obj-$(CONFIG_ZORRO8390) += zorro8390.o 8390.o
+obj-$(CONFIG_ZORRO8390) += zorro8390.o
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 245554707163..da61cf3cb3a9 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -77,8 +77,6 @@ static unsigned char version[] = "ax88796.c: Copyright 2005,2007 Simtec Electron
#define AX_GPOC_PPDSET BIT(6)
-static u32 ax_msg_enable;
-
/* device private data */
struct ax_device {
@@ -747,7 +745,6 @@ static int ax_init_dev(struct net_device *dev)
ei_local->block_output = &ax_block_output;
ei_local->get_8390_hdr = &ax_get_8390_hdr;
ei_local->priv = 0;
- ei_local->msg_enable = ax_msg_enable;
dev->netdev_ops = &ax_netdev_ops;
dev->ethtool_ops = &ax_ethtool_ops;
diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c
index 7bddb8efb6d5..d422a124cd7c 100644
--- a/drivers/net/ethernet/8390/axnet_cs.c
+++ b/drivers/net/ethernet/8390/axnet_cs.c
@@ -104,7 +104,6 @@ static void AX88190_init(struct net_device *dev, int startp);
static int ax_open(struct net_device *dev);
static int ax_close(struct net_device *dev);
static irqreturn_t ax_interrupt(int irq, void *dev_id);
-static u32 axnet_msg_enable;
/*====================================================================*/
@@ -151,7 +150,6 @@ static int axnet_probe(struct pcmcia_device *link)
return -ENOMEM;
ei_local = netdev_priv(dev);
- ei_local->msg_enable = axnet_msg_enable;
spin_lock_init(&ei_local->page_lock);
info = PRIV(dev);
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index 11cbf22ad201..32e9627e3880 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -64,8 +64,6 @@ static char version[] =
#include "lib8390.c"
-static u32 etherh_msg_enable;
-
struct etherh_priv {
void __iomem *ioc_fast;
void __iomem *memc;
@@ -502,18 +500,6 @@ etherh_close(struct net_device *dev)
}
/*
- * Initialisation
- */
-
-static void __init etherh_banner(void)
-{
- static int version_printed;
-
- if ((etherh_msg_enable & NETIF_MSG_DRV) && (version_printed++ == 0))
- pr_info("%s", version);
-}
-
-/*
* Read the ethernet address string from the on board rom.
* This is an ascii string...
*/
@@ -671,8 +657,6 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
struct etherh_priv *eh;
int ret;
- etherh_banner();
-
ret = ecard_request_resources(ec);
if (ret)
goto out;
@@ -757,7 +741,6 @@ etherh_probe(struct expansion_card *ec, const struct ecard_id *id)
ei_local->block_output = etherh_block_output;
ei_local->get_8390_hdr = etherh_get_header;
ei_local->interface_num = 0;
- ei_local->msg_enable = etherh_msg_enable;
etherh_reset(dev);
__NS8390_init(dev, 0);
diff --git a/drivers/net/ethernet/8390/hydra.c b/drivers/net/ethernet/8390/hydra.c
index 8ae249195301..941754ea78ec 100644
--- a/drivers/net/ethernet/8390/hydra.c
+++ b/drivers/net/ethernet/8390/hydra.c
@@ -66,7 +66,6 @@ static void hydra_block_input(struct net_device *dev, int count,
static void hydra_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
static void hydra_remove_one(struct zorro_dev *z);
-static u32 hydra_msg_enable;
static struct zorro_device_id hydra_zorro_tbl[] = {
{ ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET },
@@ -119,7 +118,6 @@ static int hydra_init(struct zorro_dev *z)
int start_page, stop_page;
int j;
int err;
- struct ei_device *ei_local;
static u32 hydra_offsets[16] = {
0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
@@ -138,8 +136,6 @@ static int hydra_init(struct zorro_dev *z)
start_page = NESM_START_PG;
stop_page = NESM_STOP_PG;
- ei_local = netdev_priv(dev);
- ei_local->msg_enable = hydra_msg_enable;
dev->base_addr = ioaddr;
dev->irq = IRQ_AMIGA_PORTS;
diff --git a/drivers/net/ethernet/8390/lib8390.c b/drivers/net/ethernet/8390/lib8390.c
index 60f8e2c8e726..5d9bbde9fe68 100644
--- a/drivers/net/ethernet/8390/lib8390.c
+++ b/drivers/net/ethernet/8390/lib8390.c
@@ -975,6 +975,8 @@ static void ethdev_setup(struct net_device *dev)
ether_setup(dev);
spin_lock_init(&ei_local->page_lock);
+
+ ei_local->msg_enable = msg_enable;
}
/**
diff --git a/drivers/net/ethernet/8390/mac8390.c b/drivers/net/ethernet/8390/mac8390.c
index 2f91ce8dc614..b6d735bf8011 100644
--- a/drivers/net/ethernet/8390/mac8390.c
+++ b/drivers/net/ethernet/8390/mac8390.c
@@ -123,8 +123,7 @@ enum mac8390_access {
};
extern int mac8390_memtest(struct net_device *dev);
-static int mac8390_initdev(struct net_device *dev,
- struct nubus_rsrc *ndev,
+static int mac8390_initdev(struct net_device *dev, struct nubus_board *board,
enum mac8390_type type);
static int mac8390_open(struct net_device *dev);
@@ -168,9 +167,8 @@ static void slow_sane_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
static void word_memcpy_tocard(unsigned long tp, const void *fp, int count);
static void word_memcpy_fromcard(void *tp, unsigned long fp, int count);
-static u32 mac8390_msg_enable;
-static enum mac8390_type __init mac8390_ident(struct nubus_rsrc *fres)
+static enum mac8390_type mac8390_ident(struct nubus_rsrc *fres)
{
switch (fres->dr_sw) {
case NUBUS_DRSW_3COM:
@@ -236,7 +234,7 @@ static enum mac8390_type __init mac8390_ident(struct nubus_rsrc *fres)
return MAC8390_NONE;
}
-static enum mac8390_access __init mac8390_testio(volatile unsigned long membase)
+static enum mac8390_access mac8390_testio(unsigned long membase)
{
unsigned long outdata = 0xA5A0B5B0;
unsigned long indata = 0x00000000;
@@ -254,7 +252,7 @@ static enum mac8390_access __init mac8390_testio(volatile unsigned long membase)
return ACCESS_UNKNOWN;
}
-static int __init mac8390_memsize(unsigned long membase)
+static int mac8390_memsize(unsigned long membase)
{
unsigned long flags;
int i, j;
@@ -290,36 +288,34 @@ static int __init mac8390_memsize(unsigned long membase)
return i * 0x1000;
}
-static bool __init mac8390_init(struct net_device *dev,
- struct nubus_rsrc *ndev,
- enum mac8390_type cardtype)
+static bool mac8390_rsrc_init(struct net_device *dev,
+ struct nubus_rsrc *fres,
+ enum mac8390_type cardtype)
{
+ struct nubus_board *board = fres->board;
struct nubus_dir dir;
struct nubus_dirent ent;
int offset;
volatile unsigned short *i;
- printk_once(KERN_INFO pr_fmt("%s"), version);
-
- dev->irq = SLOT2IRQ(ndev->board->slot);
+ dev->irq = SLOT2IRQ(board->slot);
/* This is getting to be a habit */
- dev->base_addr = (ndev->board->slot_addr |
- ((ndev->board->slot & 0xf) << 20));
+ dev->base_addr = board->slot_addr | ((board->slot & 0xf) << 20);
/*
* Get some Nubus info - we will trust the card's idea
* of where its memory and registers are.
*/
- if (nubus_get_func_dir(ndev, &dir) == -1) {
- pr_err("%s: Unable to get Nubus functional directory for slot %X!\n",
- dev->name, ndev->board->slot);
+ if (nubus_get_func_dir(fres, &dir) == -1) {
+ dev_err(&board->dev,
+ "Unable to get Nubus functional directory\n");
return false;
}
/* Get the MAC address */
if (nubus_find_rsrc(&dir, NUBUS_RESID_MAC_ADDRESS, &ent) == -1) {
- pr_info("%s: Couldn't get MAC address!\n", dev->name);
+ dev_info(&board->dev, "MAC address resource not found\n");
return false;
}
@@ -329,8 +325,8 @@ static bool __init mac8390_init(struct net_device *dev,
nubus_rewinddir(&dir);
if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_BASEOS,
&ent) == -1) {
- pr_err("%s: Memory offset resource for slot %X not found!\n",
- dev->name, ndev->board->slot);
+ dev_err(&board->dev,
+ "Memory offset resource not found\n");
return false;
}
nubus_get_rsrc_mem(&offset, &ent, 4);
@@ -340,8 +336,8 @@ static bool __init mac8390_init(struct net_device *dev,
nubus_rewinddir(&dir);
if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_LENGTH,
&ent) == -1) {
- pr_info("%s: Memory length resource for slot %X not found, probing\n",
- dev->name, ndev->board->slot);
+ dev_info(&board->dev,
+ "Memory length resource not found, probing\n");
offset = mac8390_memsize(dev->mem_start);
} else {
nubus_get_rsrc_mem(&offset, &ent, 4);
@@ -351,25 +347,25 @@ static bool __init mac8390_init(struct net_device *dev,
switch (cardtype) {
case MAC8390_KINETICS:
case MAC8390_DAYNA: /* it's the same */
- dev->base_addr = (int)(ndev->board->slot_addr +
+ dev->base_addr = (int)(board->slot_addr +
DAYNA_8390_BASE);
- dev->mem_start = (int)(ndev->board->slot_addr +
+ dev->mem_start = (int)(board->slot_addr +
DAYNA_8390_MEM);
dev->mem_end = dev->mem_start +
mac8390_memsize(dev->mem_start);
break;
case MAC8390_INTERLAN:
- dev->base_addr = (int)(ndev->board->slot_addr +
+ dev->base_addr = (int)(board->slot_addr +
INTERLAN_8390_BASE);
- dev->mem_start = (int)(ndev->board->slot_addr +
+ dev->mem_start = (int)(board->slot_addr +
INTERLAN_8390_MEM);
dev->mem_end = dev->mem_start +
mac8390_memsize(dev->mem_start);
break;
case MAC8390_CABLETRON:
- dev->base_addr = (int)(ndev->board->slot_addr +
+ dev->base_addr = (int)(board->slot_addr +
CABLETRON_8390_BASE);
- dev->mem_start = (int)(ndev->board->slot_addr +
+ dev->mem_start = (int)(board->slot_addr +
CABLETRON_8390_MEM);
/* The base address is unreadable if 0x00
* has been written to the command register
@@ -384,8 +380,8 @@ static bool __init mac8390_init(struct net_device *dev,
break;
default:
- pr_err("Card type %s is unsupported, sorry\n",
- ndev->board->name);
+ dev_err(&board->dev,
+ "No known base address for card type\n");
return false;
}
}
@@ -393,91 +389,83 @@ static bool __init mac8390_init(struct net_device *dev,
return true;
}
-struct net_device * __init mac8390_probe(int unit)
+static int mac8390_device_probe(struct nubus_board *board)
{
struct net_device *dev;
- struct nubus_rsrc *ndev = NULL;
int err = -ENODEV;
- struct ei_device *ei_local;
-
- static unsigned int slots;
-
- enum mac8390_type cardtype;
-
- /* probably should check for Nubus instead */
-
- if (!MACH_IS_MAC)
- return ERR_PTR(-ENODEV);
+ struct nubus_rsrc *fres;
+ enum mac8390_type cardtype = MAC8390_NONE;
dev = ____alloc_ei_netdev(0);
if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (unit >= 0)
- sprintf(dev->name, "eth%d", unit);
+ return -ENOMEM;
- for_each_func_rsrc(ndev) {
- if (ndev->category != NUBUS_CAT_NETWORK ||
- ndev->type != NUBUS_TYPE_ETHERNET)
- continue;
+ SET_NETDEV_DEV(dev, &board->dev);
- /* Have we seen it already? */
- if (slots & (1 << ndev->board->slot))
+ for_each_board_func_rsrc(board, fres) {
+ if (fres->category != NUBUS_CAT_NETWORK ||
+ fres->type != NUBUS_TYPE_ETHERNET)
continue;
- slots |= 1 << ndev->board->slot;
- cardtype = mac8390_ident(ndev);
+ cardtype = mac8390_ident(fres);
if (cardtype == MAC8390_NONE)
continue;
- if (!mac8390_init(dev, ndev, cardtype))
- continue;
-
- /* Do the nasty 8390 stuff */
- if (!mac8390_initdev(dev, ndev, cardtype))
+ if (mac8390_rsrc_init(dev, fres, cardtype))
break;
}
-
- if (!ndev)
+ if (!fres)
goto out;
- ei_local = netdev_priv(dev);
- ei_local->msg_enable = mac8390_msg_enable;
+ err = mac8390_initdev(dev, board, cardtype);
+ if (err)
+ goto out;
err = register_netdev(dev);
if (err)
goto out;
- return dev;
+
+ nubus_set_drvdata(board, dev);
+ return 0;
out:
free_netdev(dev);
- return ERR_PTR(err);
+ return err;
+}
+
+static int mac8390_device_remove(struct nubus_board *board)
+{
+ struct net_device *dev = nubus_get_drvdata(board);
+
+ unregister_netdev(dev);
+ free_netdev(dev);
+ return 0;
}
-#ifdef MODULE
+static struct nubus_driver mac8390_driver = {
+ .probe = mac8390_device_probe,
+ .remove = mac8390_device_remove,
+ .driver = {
+ .name = KBUILD_MODNAME,
+ .owner = THIS_MODULE,
+ }
+};
+
MODULE_AUTHOR("David Huggins-Daines <dhd@debian.org> and others");
MODULE_DESCRIPTION("Macintosh NS8390-based Nubus Ethernet driver");
MODULE_LICENSE("GPL");
-static struct net_device *dev_mac8390;
-
-int __init init_module(void)
+static int __init mac8390_init(void)
{
- dev_mac8390 = mac8390_probe(-1);
- if (IS_ERR(dev_mac8390)) {
- pr_warn("mac8390: No card found\n");
- return PTR_ERR(dev_mac8390);
- }
- return 0;
+ return nubus_driver_register(&mac8390_driver);
}
+module_init(mac8390_init);
-void __exit cleanup_module(void)
+static void __exit mac8390_exit(void)
{
- unregister_netdev(dev_mac8390);
- free_netdev(dev_mac8390);
+ nubus_driver_unregister(&mac8390_driver);
}
-
-#endif /* MODULE */
+module_exit(mac8390_exit);
static const struct net_device_ops mac8390_netdev_ops = {
.ndo_open = mac8390_open,
@@ -493,9 +481,8 @@ static const struct net_device_ops mac8390_netdev_ops = {
#endif
};
-static int __init mac8390_initdev(struct net_device *dev,
- struct nubus_rsrc *ndev,
- enum mac8390_type type)
+static int mac8390_initdev(struct net_device *dev, struct nubus_board *board,
+ enum mac8390_type type)
{
static u32 fwrd4_offsets[16] = {
0, 4, 8, 12,
@@ -546,7 +533,8 @@ static int __init mac8390_initdev(struct net_device *dev,
case MAC8390_APPLE:
switch (mac8390_testio(dev->mem_start)) {
case ACCESS_UNKNOWN:
- pr_err("Don't know how to access card memory!\n");
+ dev_err(&board->dev,
+ "Don't know how to access card memory\n");
return -ENODEV;
case ACCESS_16:
@@ -612,21 +600,18 @@ static int __init mac8390_initdev(struct net_device *dev,
break;
default:
- pr_err("Card type %s is unsupported, sorry\n",
- ndev->board->name);
+ dev_err(&board->dev, "Unsupported card type\n");
return -ENODEV;
}
__NS8390_init(dev, 0);
/* Good, done, now spit out some messages */
- pr_info("%s: %s in slot %X (type %s)\n",
- dev->name, ndev->board->name, ndev->board->slot,
- cardname[type]);
- pr_info("MAC %pM IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n",
- dev->dev_addr, dev->irq,
- (unsigned int)(dev->mem_end - dev->mem_start) >> 10,
- dev->mem_start, access_bitmode ? 32 : 16);
+ dev_info(&board->dev, "%s (type %s)\n", board->name, cardname[type]);
+ dev_info(&board->dev, "MAC %pM, IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n",
+ dev->dev_addr, dev->irq,
+ (unsigned int)(dev->mem_end - dev->mem_start) >> 10,
+ dev->mem_start, access_bitmode ? 32 : 16);
return 0;
}
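
The mac8390 conversion above follows the usual bus-driver shape: probe() allocates and registers the netdev, stores it with nubus_set_drvdata(), and remove() retrieves and tears it down in reverse order. A minimal sketch of that pattern, with hypothetical foo_* names standing in for a real driver (the nubus_* and netdev calls are the ones used in the patch):

	static int foo_probe(struct nubus_board *board)
	{
		struct net_device *dev = alloc_etherdev(0);
		int err;

		if (!dev)
			return -ENOMEM;
		SET_NETDEV_DEV(dev, &board->dev);
		err = register_netdev(dev);
		if (err) {
			free_netdev(dev);
			return err;
		}
		nubus_set_drvdata(board, dev);	/* pairs with remove() below */
		return 0;
	}

	static int foo_remove(struct nubus_board *board)
	{
		struct net_device *dev = nubus_get_drvdata(board);

		unregister_netdev(dev);		/* reverse order of probe() */
		free_netdev(dev);
		return 0;
	}

	static struct nubus_driver foo_driver = {
		.probe	= foo_probe,
		.remove	= foo_remove,
		.driver	= {
			.name	= KBUILD_MODNAME,
			.owner	= THIS_MODULE,
		},
	};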
diff --git a/drivers/net/ethernet/8390/mcf8390.c b/drivers/net/ethernet/8390/mcf8390.c
index 4bb967bc879e..4ad8031ab669 100644
--- a/drivers/net/ethernet/8390/mcf8390.c
+++ b/drivers/net/ethernet/8390/mcf8390.c
@@ -38,7 +38,6 @@ static const char version[] =
#define NESM_START_PG 0x40 /* First page of TX buffer */
#define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */
-static u32 mcf8390_msg_enable;
#ifdef NE2000_ODDOFFSET
/*
@@ -407,7 +406,6 @@ static int mcf8390_init(struct net_device *dev)
static int mcf8390_probe(struct platform_device *pdev)
{
struct net_device *dev;
- struct ei_device *ei_local;
struct resource *mem, *irq;
resource_size_t msize;
int ret;
@@ -435,8 +433,6 @@ static int mcf8390_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
platform_set_drvdata(pdev, dev);
- ei_local = netdev_priv(dev);
- ei_local->msg_enable = mcf8390_msg_enable;
dev->irq = irq->start;
dev->base_addr = mem->start;
diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c
index 66f47987e2a2..4cdff6e6af89 100644
--- a/drivers/net/ethernet/8390/ne.c
+++ b/drivers/net/ethernet/8390/ne.c
@@ -485,7 +485,7 @@ static int __init ne_probe1(struct net_device *dev, unsigned long ioaddr)
mdelay(10); /* wait 10ms for interrupt to propagate */
outb_p(0x00, ioaddr + EN0_IMR); /* Mask it again. */
dev->irq = probe_irq_off(cookie);
- if (netif_msg_probe(ei_local))
+ if (ne_msg_enable & NETIF_MSG_PROBE)
pr_cont(" autoirq is %d", dev->irq);
} else if (dev->irq == 2)
/* Fixup for users that don't know that IRQ 2 is really IRQ 9,
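
The ne.c hunk above (and the wd.c hunk further down) replaces netif_msg_probe(ei_local) with a direct test of the module-wide message mask, since the per-device msg_enable word is no longer seeded during probe. A sketch of how such a mask is typically derived and tested (names illustrative; netif_msg_init() is the standard helper):

	static int debug = -1;		/* module parameter; -1 = default bits */
	module_param(debug, int, 0);

	static u32 example_msg_enable;	/* module-wide NETIF_MSG_* mask */

	static void example_init(void)
	{
		/* turn the numeric level into NETIF_MSG_* bits */
		example_msg_enable = netif_msg_init(debug,
						    NETIF_MSG_DRV | NETIF_MSG_PROBE);
	}

	static void example_report_irq(int irq)
	{
		/* usable before any netdev_priv() data exists */
		if (example_msg_enable & NETIF_MSG_PROBE)
			pr_cont(" autoirq is %d", irq);
	}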
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index bcad4a7fac9f..61e43802b9a5 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -66,7 +66,6 @@
#define PCNET_RDC_TIMEOUT (2*HZ/100) /* Max wait in jiffies for Tx RDC */
static const char *if_names[] = { "auto", "10baseT", "10base2"};
-static u32 pcnet_msg_enable;
/*====================================================================*/
@@ -556,7 +555,6 @@ static int pcnet_config(struct pcmcia_device *link)
int start_pg, stop_pg, cm_offset;
int has_shmem = 0;
struct hw_info *local_hw_info;
- struct ei_device *ei_local;
dev_dbg(&link->dev, "pcnet_config\n");
@@ -606,8 +604,6 @@ static int pcnet_config(struct pcmcia_device *link)
mii_phy_probe(dev);
SET_NETDEV_DEV(dev, &link->dev);
- ei_local = netdev_priv(dev);
- ei_local->msg_enable = pcnet_msg_enable;
if (register_netdev(dev) != 0) {
pr_notice("register_netdev() failed\n");
diff --git a/drivers/net/ethernet/8390/wd.c b/drivers/net/ethernet/8390/wd.c
index 6efa2722f850..fb17c2c7e1dd 100644
--- a/drivers/net/ethernet/8390/wd.c
+++ b/drivers/net/ethernet/8390/wd.c
@@ -299,7 +299,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
outb_p(0x00, nic_addr+EN0_IMR); /* Mask all intrs. again. */
- if (netif_msg_drv(ei_local))
+ if (wd_msg_enable & NETIF_MSG_PROBE)
pr_cont(" autoirq is %d", dev->irq);
if (dev->irq < 2)
dev->irq = word16 ? 10 : 5;
diff --git a/drivers/net/ethernet/8390/zorro8390.c b/drivers/net/ethernet/8390/zorro8390.c
index 6d93956b293b..35a500a21521 100644
--- a/drivers/net/ethernet/8390/zorro8390.c
+++ b/drivers/net/ethernet/8390/zorro8390.c
@@ -44,8 +44,6 @@
static const char version[] =
"8390.c:v1.10cvs 9/23/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
-static u32 zorro8390_msg_enable;
-
#include "lib8390.c"
#define DRV_NAME "zorro8390"
@@ -296,7 +294,6 @@ static int zorro8390_init(struct net_device *dev, unsigned long board,
int err;
unsigned char SA_prom[32];
int start_page, stop_page;
- struct ei_device *ei_local = netdev_priv(dev);
static u32 zorro8390_offsets[16] = {
0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e,
0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
@@ -388,8 +385,6 @@ static int zorro8390_init(struct net_device *dev, unsigned long board,
dev->netdev_ops = &zorro8390_netdev_ops;
__NS8390_init(dev, 0);
- ei_local->msg_enable = zorro8390_msg_enable;
-
err = register_netdev(dev);
if (err) {
free_irq(IRQ_AMIGA_PORTS, dev);
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 358f7ab77c70..c99e3e845ac0 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -649,7 +649,7 @@ static void amd8111e_free_ring(struct amd8111e_priv *lp)
static int amd8111e_tx(struct net_device *dev)
{
struct amd8111e_priv *lp = netdev_priv(dev);
- int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
+ int tx_index;
int status;
/* Complete all the transmit packet */
while (lp->tx_complete_idx != lp->tx_idx){
diff --git a/drivers/net/ethernet/apple/macmace.c b/drivers/net/ethernet/apple/macmace.c
index f17a160dbff2..137cbb470af2 100644
--- a/drivers/net/ethernet/apple/macmace.c
+++ b/drivers/net/ethernet/apple/macmace.c
@@ -247,8 +247,8 @@ static int mace_probe(struct platform_device *pdev)
dev->netdev_ops = &mace_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- printk(KERN_INFO "%s: 68K MACE, hardware address %pM\n",
- dev->name, dev->dev_addr);
+ pr_info("Onboard MACE, hardware address %pM, chip revision 0x%04X\n",
+ dev->dev_addr, mp->chipid);
err = register_netdev(dev);
if (!err)
@@ -589,7 +589,6 @@ static irqreturn_t mace_interrupt(int irq, void *dev_id)
else if (fs & (UFLO|LCOL|RTRY)) {
++dev->stats.tx_aborted_errors;
if (mb->xmtfs & UFLO) {
- printk(KERN_ERR "%s: DMA underrun.\n", dev->name);
dev->stats.tx_fifo_errors++;
mace_txdma_reset(dev);
}
@@ -644,10 +643,8 @@ static void mace_dma_rx_frame(struct net_device *dev, struct mace_frame *mf)
if (frame_status & (RS_OFLO | RS_CLSN | RS_FRAMERR | RS_FCSERR)) {
dev->stats.rx_errors++;
- if (frame_status & RS_OFLO) {
- printk(KERN_DEBUG "%s: fifo overflow.\n", dev->name);
+ if (frame_status & RS_OFLO)
dev->stats.rx_fifo_errors++;
- }
if (frame_status & RS_CLSN)
dev->stats.collisions++;
if (frame_status & RS_FRAMERR)
@@ -770,18 +767,4 @@ static struct platform_driver mac_mace_driver = {
},
};
-static int __init mac_mace_init_module(void)
-{
- if (!MACH_IS_MAC)
- return -ENODEV;
-
- return platform_driver_register(&mac_mace_driver);
-}
-
-static void __exit mac_mace_cleanup_module(void)
-{
- platform_driver_unregister(&mac_mace_driver);
-}
-
-module_init(mac_mace_init_module);
-module_exit(mac_mace_cleanup_module);
+module_platform_driver(mac_mace_driver);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/Makefile b/drivers/net/ethernet/aquantia/atlantic/hw_atl/Makefile
new file mode 100644
index 000000000000..805fa28f391a
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+# kbuild requires Makefile in a directory to build individual objects
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_core.c b/drivers/net/ethernet/cavium/liquidio/lio_core.c
index 32ae63b6f20e..8b1ee83134e3 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_core.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_core.c
@@ -571,7 +571,8 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
napi_gro_receive(napi, skb);
- droq->stats.rx_bytes_received += len;
+ droq->stats.rx_bytes_received += len -
+ rh->r_dh.len * BYTES_PER_DHLEN_UNIT;
droq->stats.rx_pkts_received++;
} else {
recv_buffer_free(skb);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
index 00a1d2d13169..9da6f57901a9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cudbg_lib.c
@@ -878,6 +878,86 @@ static int cudbg_get_payload_range(struct adapter *padap, u8 mem_type,
&payload->start, &payload->end);
}
+static int cudbg_memory_read(struct cudbg_init *pdbg_init, int win,
+ int mtype, u32 addr, u32 len, void *hbuf)
+{
+ u32 win_pf, memoffset, mem_aperture, mem_base;
+ struct adapter *adap = pdbg_init->adap;
+ u32 pos, offset, resid;
+ u32 *res_buf;
+ u64 *buf;
+ int ret;
+
+ /* Argument sanity checks ...
+ */
+ if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
+ return -EINVAL;
+
+ buf = (u64 *)hbuf;
+
+ /* Try to do 64-bit reads. Residual will be handled later. */
+ resid = len & 0x7;
+ len -= resid;
+
+ ret = t4_memory_rw_init(adap, win, mtype, &memoffset, &mem_base,
+ &mem_aperture);
+ if (ret)
+ return ret;
+
+ addr = addr + memoffset;
+ win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
+
+ pos = addr & ~(mem_aperture - 1);
+ offset = addr - pos;
+
+ /* Set up initial PCI-E Memory Window to cover the start of our
+ * transfer.
+ */
+ t4_memory_update_win(adap, win, pos | win_pf);
+
+ /* Transfer data from the adapter */
+ while (len > 0) {
+ *buf++ = le64_to_cpu((__force __le64)
+ t4_read_reg64(adap, mem_base + offset));
+ offset += sizeof(u64);
+ len -= sizeof(u64);
+
+ /* If we've reached the end of our current window aperture,
+ * move the PCI-E Memory Window on to the next.
+ */
+ if (offset == mem_aperture) {
+ pos += mem_aperture;
+ offset = 0;
+ t4_memory_update_win(adap, win, pos | win_pf);
+ }
+ }
+
+ res_buf = (u32 *)buf;
+ /* Read residual in 32-bit multiples */
+ while (resid > sizeof(u32)) {
+ *res_buf++ = le32_to_cpu((__force __le32)
+ t4_read_reg(adap, mem_base + offset));
+ offset += sizeof(u32);
+ resid -= sizeof(u32);
+
+ /* If we've reached the end of our current window aperture,
+ * move the PCI-E Memory Window on to the next.
+ */
+ if (offset == mem_aperture) {
+ pos += mem_aperture;
+ offset = 0;
+ t4_memory_update_win(adap, win, pos | win_pf);
+ }
+ }
+
+ /* Transfer residual < 32 bits */
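# placeholder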
+ if (resid)
+ t4_memory_rw_residual(adap, resid, mem_base + offset,
+ (u8 *)res_buf, T4_MEMORY_READ);
+
+ return 0;
+}
+
#define CUDBG_YIELD_ITERATION 256
static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
@@ -937,10 +1017,8 @@ static int cudbg_read_fw_mem(struct cudbg_init *pdbg_init,
goto skip_read;
spin_lock(&padap->win0_lock);
- rc = t4_memory_rw(padap, MEMWIN_NIC, mem_type,
- bytes_read, bytes,
- (__be32 *)temp_buff.data,
- 1);
+ rc = cudbg_memory_read(pdbg_init, MEMWIN_NIC, mem_type,
+ bytes_read, bytes, temp_buff.data);
spin_unlock(&padap->win0_lock);
if (rc) {
cudbg_err->sys_err = rc;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 9040e13ce4b7..d3fa53db61ee 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1488,6 +1488,11 @@ u32 t4_read_pcie_cfg4(struct adapter *adap, int reg);
u32 t4_get_util_window(struct adapter *adap);
void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window);
+int t4_memory_rw_init(struct adapter *adap, int win, int mtype, u32 *mem_off,
+ u32 *mem_base, u32 *mem_aperture);
+void t4_memory_update_win(struct adapter *adap, int win, u32 addr);
+void t4_memory_rw_residual(struct adapter *adap, u32 off, u32 addr, u8 *buf,
+ int dir);
#define T4_MEMORY_WRITE 0
#define T4_MEMORY_READ 1
int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index 3177b0c9bd2d..db92f1858060 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
@@ -1335,12 +1335,6 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
return ret;
}
- /* Clear out any old resources being used by the filter before
- * we start constructing the new filter.
- */
- if (f->valid)
- clear_filter(adapter, f);
-
if (is_t6(adapter->params.chip) && fs->type &&
ipv6_addr_type((const struct in6_addr *)fs->val.lip) !=
IPV6_ADDR_ANY) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 920bccd6bc40..2c889efc78ea 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -484,6 +484,117 @@ static int t4_edc_err_read(struct adapter *adap, int idx)
}
/**
+ * t4_memory_rw_init - Get memory window relative offset, base, and size.
+ * @adap: the adapter
+ * @win: PCI-E Memory Window to use
+ * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
+ * @mem_off: memory relative offset with respect to @mtype.
+ * @mem_base: configured memory base address.
+ * @mem_aperture: configured memory window aperture.
+ *
+ * Get the configured memory window's relative offset, base, and size.
+ */
+int t4_memory_rw_init(struct adapter *adap, int win, int mtype, u32 *mem_off,
+ u32 *mem_base, u32 *mem_aperture)
+{
+ u32 edc_size, mc_size, mem_reg;
+
+ /* Offset into the region of memory which is being accessed
+ * MEM_EDC0 = 0
+ * MEM_EDC1 = 1
+ * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
+ * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
+ * MEM_HMA = 4
+ */
+ edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
+ if (mtype == MEM_HMA) {
+ *mem_off = 2 * (edc_size * 1024 * 1024);
+ } else if (mtype != MEM_MC1) {
+ *mem_off = (mtype * (edc_size * 1024 * 1024));
+ } else {
+ mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
+ MA_EXT_MEMORY0_BAR_A));
+ *mem_off = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
+ }
+
+ /* Each PCI-E Memory Window is programmed with a window size -- or
+ * "aperture" -- which controls the granularity of its mapping onto
+ * adapter memory. We need to grab that aperture in order to know
+ * how to use the specified window. The window is also programmed
+ * with the base address of the Memory Window in BAR0's address
+ * space. For T4 this is an absolute PCI-E Bus Address. For T5
+ * the address is relative to BAR0.
+ */
+ mem_reg = t4_read_reg(adap,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
+ win));
+ /* a dead adapter will return 0xffffffff for PIO reads */
+ if (mem_reg == 0xffffffff)
+ return -ENXIO;
+
+ *mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
+ *mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
+ if (is_t4(adap->params.chip))
+ *mem_base -= adap->t4_bar0;
+
+ return 0;
+}
+
+/**
+ * t4_memory_update_win - Move memory window to specified address.
+ * @adap: the adapter
+ * @win: PCI-E Memory Window to use
+ * @addr: location to move.
+ *
+ * Move memory window to specified address.
+ */
+void t4_memory_update_win(struct adapter *adap, int win, u32 addr)
+{
+ t4_write_reg(adap,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
+ addr);
+ /* Read it back to ensure that changes propagate before we
+ * attempt to use the new value.
+ */
+ t4_read_reg(adap,
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
+}
+
+/**
+ * t4_memory_rw_residual - Read/Write residual data.
+ * @adap: the adapter
+ * @off: relative offset within residual to start read/write.
+ * @addr: address within indicated memory type.
+ * @buf: host memory buffer
+ * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
+ *
+ * Read/Write residual data less than 32 bits.
+ */
+void t4_memory_rw_residual(struct adapter *adap, u32 off, u32 addr, u8 *buf,
+ int dir)
+{
+ union {
+ u32 word;
+ char byte[4];
+ } last;
+ unsigned char *bp;
+ int i;
+
+ if (dir == T4_MEMORY_READ) {
+ last.word = le32_to_cpu((__force __le32)
+ t4_read_reg(adap, addr));
+ for (bp = (unsigned char *)buf, i = off; i < 4; i++)
+ bp[i] = last.byte[i];
+ } else {
+ last.word = *buf;
+ for (i = off; i < 4; i++)
+ last.byte[i] = 0;
+ t4_write_reg(adap, addr,
+ (__force u32)cpu_to_le32(last.word));
+ }
+}
+
+/**
* t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
* @adap: the adapter
* @win: PCI-E Memory Window to use
@@ -504,8 +615,9 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
u32 len, void *hbuf, int dir)
{
u32 pos, offset, resid, memoffset;
- u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
+ u32 win_pf, mem_aperture, mem_base;
u32 *buf;
+ int ret;
/* Argument sanity checks ...
*/
@@ -521,59 +633,26 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
resid = len & 0x3;
len -= resid;
- /* Offset into the region of memory which is being accessed
- * MEM_EDC0 = 0
- * MEM_EDC1 = 1
- * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
- * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
- * MEM_HMA = 4
- */
- edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
- if (mtype == MEM_HMA) {
- memoffset = 2 * (edc_size * 1024 * 1024);
- } else if (mtype != MEM_MC1) {
- memoffset = (mtype * (edc_size * 1024 * 1024));
- } else {
- mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
- MA_EXT_MEMORY0_BAR_A));
- memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
- }
+ ret = t4_memory_rw_init(adap, win, mtype, &memoffset, &mem_base,
+ &mem_aperture);
+ if (ret)
+ return ret;
/* Determine the PCIE_MEM_ACCESS_OFFSET */
addr = addr + memoffset;
- /* Each PCI-E Memory Window is programmed with a window size -- or
- * "aperture" -- which controls the granularity of its mapping onto
- * adapter memory. We need to grab that aperture in order to know
- * how to use the specified window. The window is also programmed
- * with the base address of the Memory Window in BAR0's address
- * space. For T4 this is an absolute PCI-E Bus Address. For T5
- * the address is relative to BAR0.
- */
- mem_reg = t4_read_reg(adap,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
- win));
- mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
- mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
- if (is_t4(adap->params.chip))
- mem_base -= adap->t4_bar0;
win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
/* Calculate our initial PCI-E Memory Window Position and Offset into
* that Window.
*/
- pos = addr & ~(mem_aperture-1);
+ pos = addr & ~(mem_aperture - 1);
offset = addr - pos;
/* Set up initial PCI-E Memory Window to cover the start of our
- * transfer. (Read it back to ensure that changes propagate before we
- * attempt to use the new value.)
+ * transfer.
*/
- t4_write_reg(adap,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
- pos | win_pf);
- t4_read_reg(adap,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
+ t4_memory_update_win(adap, win, pos | win_pf);
/* Transfer data to/from the adapter as long as there's an integral
* number of 32-bit transfers to complete.
@@ -628,12 +707,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
if (offset == mem_aperture) {
pos += mem_aperture;
offset = 0;
- t4_write_reg(adap,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
- win), pos | win_pf);
- t4_read_reg(adap,
- PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A,
- win));
+ t4_memory_update_win(adap, win, pos | win_pf);
}
}
@@ -642,28 +716,9 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
* residual amount. The PCI-E Memory Window has already been moved
* above (if necessary) to cover this final transfer.
*/
- if (resid) {
- union {
- u32 word;
- char byte[4];
- } last;
- unsigned char *bp;
- int i;
-
- if (dir == T4_MEMORY_READ) {
- last.word = le32_to_cpu(
- (__force __le32)t4_read_reg(adap,
- mem_base + offset));
- for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
- bp[i] = last.byte[i];
- } else {
- last.word = *buf;
- for (i = resid; i < 4; i++)
- last.byte[i] = 0;
- t4_write_reg(adap, mem_base + offset,
- (__force u32)cpu_to_le32(last.word));
- }
- }
+ if (resid)
+ t4_memory_rw_residual(adap, resid, mem_base + offset,
+ (u8 *)buf, dir);
return 0;
}
@@ -6036,6 +6091,7 @@ unsigned int t4_get_tp_ch_map(struct adapter *adap, int pidx)
case CHELSIO_T6:
switch (nports) {
+ case 1:
case 2: return 1 << pidx;
}
break;
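
t4_memory_rw_residual() above handles a tail shorter than one 32-bit word by overlaying a u32 with a 4-byte array: a read copies byte lanes [off..3] of the register word into the caller's buffer, while a write zeroes those lanes before the word goes back out. A standalone illustration of the same union trick (userspace; not the kernel code itself, and it assumes a word-sized source buffer on the write side):

	#include <string.h>

	union last_word {
		unsigned int word;
		unsigned char byte[4];
	};

	/* READ: merge byte lanes [off..3] of the register into the buffer */
	static void residual_read(unsigned char *buf, unsigned int reg, int off)
	{
		union last_word last = { .word = reg };
		int i;

		for (i = off; i < 4; i++)
			buf[i] = last.byte[i];
	}

	/* WRITE: keep the caller's word, zero lanes [off..3] before writing */
	static unsigned int residual_write(const unsigned char *buf, int off)
	{
		union last_word last;
		int i;

		memcpy(&last.word, buf, sizeof(last.word));
		for (i = off; i < 4; i++)
			last.byte[i] = 0;
		return last.word;	/* would be written as one register */
	}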
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index b7e79e64d2ed..361de86b65b9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -155,8 +155,6 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
const char *fc;
const struct port_info *pi = netdev_priv(dev);
- netif_carrier_on(dev);
-
switch (pi->link_cfg.speed) {
case 100:
s = "100Mbps";
@@ -202,7 +200,6 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc);
} else {
- netif_carrier_off(dev);
netdev_info(dev, "link down\n");
}
}
@@ -278,6 +275,17 @@ static int link_start(struct net_device *dev)
*/
if (ret == 0)
ret = t4vf_enable_vi(pi->adapter, pi->viid, true, true);
+
+ /* The Virtual Interfaces are connected to an internal switch on the
+ * chip which allows VIs attached to the same port to talk to each
+ * other even when the port link is down. As a result, we generally
+ * want to always report a VI's link as being "up", provided there are
+	 * no errors in enabling the VI.
+ */
+
+ if (ret == 0)
+ netif_carrier_on(dev);
+
return ret;
}
diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c
index 977d4c2c759d..3f8fe8fd79cc 100644
--- a/drivers/net/ethernet/cirrus/mac89x0.c
+++ b/drivers/net/ethernet/cirrus/mac89x0.c
@@ -56,21 +56,11 @@
local_irq_{dis,en}able()
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
static const char version[] =
"cs89x0.c:v1.02 11/26/96 Russell Nelson <nelson@crynwr.com>\n";
-/* ======================= configure the driver here ======================= */
-
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef NET_DEBUG
-#define NET_DEBUG 0
-#endif
-
-/* ======================= end of configuration ======================= */
-
-
-/* Always include 'config.h' first in case the user wants to turn on
- or override something. */
#include <linux/module.h>
/*
@@ -93,6 +83,7 @@ static const char version[] =
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/netdevice.h>
+#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
@@ -105,24 +96,22 @@ static const char version[] =
#include "cs89x0.h"
-static unsigned int net_debug = NET_DEBUG;
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "debug message level");
/* Information that need to be kept for each board. */
struct net_local {
+ int msg_enable;
int chip_type; /* one of: CS8900, CS8920, CS8920M */
char chip_revision; /* revision letter of the chip ('A'...) */
	int send_cmd; /* the proper command used to send a packet. */
int rx_mode;
int curr_rx_cfg;
int send_underrun; /* keep track of how many underruns in a row we get */
- struct sk_buff *skb;
};
/* Index to functions, as function prototypes. */
-
-#if 0
-extern void reset_chip(struct net_device *dev);
-#endif
static int net_open(struct net_device *dev);
static int net_send_packet(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t net_interrupt(int irq, void *dev_id);
@@ -132,10 +121,6 @@ static int net_close(struct net_device *dev);
static struct net_device_stats *net_get_stats(struct net_device *dev);
static int set_mac_address(struct net_device *dev, void *addr);
-
-/* Example routines you must write ;->. */
-#define tx_done(dev) 1
-
/* For reading/writing registers ISA-style */
static inline int
readreg_io(struct net_device *dev, int portno)
@@ -176,12 +161,10 @@ static const struct net_device_ops mac89x0_netdev_ops = {
/* Probe for the CS8900 card in slot E. We won't bother looking
anywhere else until we have a really good reason to do so. */
-struct net_device * __init mac89x0_probe(int unit)
+static int mac89x0_device_probe(struct platform_device *pdev)
{
struct net_device *dev;
- static int once_is_enough;
struct net_local *lp;
- static unsigned version_printed;
int i, slot;
unsigned rev_type = 0;
unsigned long ioaddr;
@@ -189,21 +172,9 @@ struct net_device * __init mac89x0_probe(int unit)
int err = -ENODEV;
struct nubus_rsrc *fres;
- if (!MACH_IS_MAC)
- return ERR_PTR(-ENODEV);
-
dev = alloc_etherdev(sizeof(struct net_local));
if (!dev)
- return ERR_PTR(-ENOMEM);
-
- if (unit >= 0) {
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
- }
-
- if (once_is_enough)
- goto out;
- once_is_enough = 1;
+ return -ENOMEM;
/* We might have to parameterize this later */
slot = 0xE;
@@ -230,9 +201,13 @@ struct net_device * __init mac89x0_probe(int unit)
if (sig != swab16(CHIP_EISA_ID_SIG))
goto out;
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
/* Initialize the net_device structure. */
lp = netdev_priv(dev);
+ lp->msg_enable = netif_msg_init(debug, 0);
+
/* Fill in the 'dev' fields. */
dev->base_addr = ioaddr;
dev->mem_start = (unsigned long)
@@ -255,19 +230,16 @@ struct net_device * __init mac89x0_probe(int unit)
if (lp->chip_type != CS8900 && lp->chip_revision >= 'C')
lp->send_cmd = TX_NOW;
- if (net_debug && version_printed++ == 0)
- printk(version);
+ netif_dbg(lp, drv, dev, "%s", version);
- printk(KERN_INFO "%s: cs89%c0%s rev %c found at %#8lx",
- dev->name,
- lp->chip_type==CS8900?'0':'2',
- lp->chip_type==CS8920M?"M":"",
- lp->chip_revision,
- dev->base_addr);
+ pr_info("cs89%c0%s rev %c found at %#8lx\n",
+ lp->chip_type == CS8900 ? '0' : '2',
+ lp->chip_type == CS8920M ? "M" : "",
+ lp->chip_revision, dev->base_addr);
/* Try to read the MAC address */
if ((readreg(dev, PP_SelfST) & (EEPROM_PRESENT | EEPROM_OK)) == 0) {
- printk("\nmac89x0: No EEPROM, giving up now.\n");
+ pr_info("No EEPROM, giving up now.\n");
goto out1;
} else {
for (i = 0; i < ETH_ALEN; i += 2) {
@@ -282,39 +254,23 @@ struct net_device * __init mac89x0_probe(int unit)
/* print the IRQ and ethernet address. */
- printk(" IRQ %d ADDR %pM\n", dev->irq, dev->dev_addr);
+ pr_info("MAC %pM, IRQ %d\n", dev->dev_addr, dev->irq);
dev->netdev_ops = &mac89x0_netdev_ops;
err = register_netdev(dev);
if (err)
goto out1;
- return NULL;
+
+ platform_set_drvdata(pdev, dev);
+ return 0;
out1:
nubus_writew(0, dev->base_addr + ADD_PORT);
out:
free_netdev(dev);
- return ERR_PTR(err);
+ return err;
}
-#if 0
-/* This is useful for something, but I don't know what yet. */
-void __init reset_chip(struct net_device *dev)
-{
- int reset_start_time;
-
- writereg(dev, PP_SelfCTL, readreg(dev, PP_SelfCTL) | POWER_ON_RESET);
-
- /* wait 30 ms */
- msleep_interruptible(30);
-
- /* Wait until the chip is reset */
- reset_start_time = jiffies;
- while( (readreg(dev, PP_SelfST) & INIT_DONE) == 0 && jiffies - reset_start_time < 2)
- ;
-}
-#endif
-
/* Open/initialize the board. This is called (in the current kernel)
sometime after booting when the 'ifconfig' program is run.
@@ -374,11 +330,9 @@ net_send_packet(struct sk_buff *skb, struct net_device *dev)
struct net_local *lp = netdev_priv(dev);
unsigned long flags;
- if (net_debug > 3)
- printk("%s: sent %d byte packet of type %x\n",
- dev->name, skb->len,
- (skb->data[ETH_ALEN+ETH_ALEN] << 8)
- | skb->data[ETH_ALEN+ETH_ALEN+1]);
+ netif_dbg(lp, tx_queued, dev, "sent %d byte packet of type %x\n",
+ skb->len, skb->data[ETH_ALEN + ETH_ALEN] << 8 |
+ skb->data[ETH_ALEN + ETH_ALEN + 1]);
/* keep the upload from being interrupted, since we
ask the chip to start transmitting before the
@@ -416,11 +370,6 @@ static irqreturn_t net_interrupt(int irq, void *dev_id)
struct net_local *lp;
int ioaddr, status;
- if (dev == NULL) {
- printk ("net_interrupt(): irq %d for unknown device.\n", irq);
- return IRQ_NONE;
- }
-
ioaddr = dev->base_addr;
lp = netdev_priv(dev);
@@ -432,7 +381,7 @@ static irqreturn_t net_interrupt(int irq, void *dev_id)
faster than you can read them off, you're screwed. Hasta la
vista, baby! */
while ((status = swab16(nubus_readw(dev->base_addr + ISQ_PORT)))) {
- if (net_debug > 4)printk("%s: event=%04x\n", dev->name, status);
+ netif_dbg(lp, intr, dev, "status=%04x\n", status);
switch(status & ISQ_EVENT_MASK) {
case ISQ_RECEIVER_EVENT:
/* Got a packet(s). */
@@ -462,7 +411,7 @@ static irqreturn_t net_interrupt(int irq, void *dev_id)
netif_wake_queue(dev);
}
if (status & TX_UNDERRUN) {
- if (net_debug > 0) printk("%s: transmit underrun\n", dev->name);
+ netif_dbg(lp, tx_err, dev, "transmit underrun\n");
lp->send_underrun++;
if (lp->send_underrun == 3) lp->send_cmd = TX_AFTER_381;
else if (lp->send_underrun == 6) lp->send_cmd = TX_AFTER_ALL;
@@ -483,6 +432,7 @@ static irqreturn_t net_interrupt(int irq, void *dev_id)
static void
net_rx(struct net_device *dev)
{
+ struct net_local *lp = netdev_priv(dev);
struct sk_buff *skb;
int status, length;
@@ -506,7 +456,6 @@ net_rx(struct net_device *dev)
/* Malloc up new buffer. */
skb = alloc_skb(length, GFP_ATOMIC);
if (skb == NULL) {
- printk("%s: Memory squeeze, dropping packet.\n", dev->name);
dev->stats.rx_dropped++;
return;
}
@@ -515,10 +464,9 @@ net_rx(struct net_device *dev)
skb_copy_to_linear_data(skb, (void *)(dev->mem_start + PP_RxFrame),
length);
- if (net_debug > 3)printk("%s: received %d byte packet of type %x\n",
- dev->name, length,
- (skb->data[ETH_ALEN+ETH_ALEN] << 8)
- | skb->data[ETH_ALEN+ETH_ALEN+1]);
+ netif_dbg(lp, rx_status, dev, "received %d byte packet of type %x\n",
+ length, skb->data[ETH_ALEN + ETH_ALEN] << 8 |
+ skb->data[ETH_ALEN + ETH_ALEN + 1]);
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb);
@@ -594,7 +542,7 @@ static int set_mac_address(struct net_device *dev, void *addr)
return -EADDRNOTAVAIL;
memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
- printk("%s: Setting MAC address to %pM\n", dev->name, dev->dev_addr);
+ netdev_info(dev, "Setting MAC address to %pM\n", dev->dev_addr);
/* set the Ethernet address */
for (i=0; i < ETH_ALEN/2; i++)
@@ -603,32 +551,24 @@ static int set_mac_address(struct net_device *dev, void *addr)
return 0;
}
-#ifdef MODULE
-
-static struct net_device *dev_cs89x0;
-static int debug;
-
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "CS89[02]0 debug level (0-5)");
MODULE_LICENSE("GPL");
-int __init
-init_module(void)
+static int mac89x0_device_remove(struct platform_device *pdev)
{
- net_debug = debug;
- dev_cs89x0 = mac89x0_probe(-1);
- if (IS_ERR(dev_cs89x0)) {
- printk(KERN_WARNING "mac89x0.c: No card found\n");
- return PTR_ERR(dev_cs89x0);
- }
+ struct net_device *dev = platform_get_drvdata(pdev);
+
+ unregister_netdev(dev);
+ nubus_writew(0, dev->base_addr + ADD_PORT);
+ free_netdev(dev);
return 0;
}
-void
-cleanup_module(void)
-{
- unregister_netdev(dev_cs89x0);
- nubus_writew(0, dev_cs89x0->base_addr + ADD_PORT);
- free_netdev(dev_cs89x0);
-}
-#endif /* MODULE */
+static struct platform_driver mac89x0_platform_driver = {
+ .probe = mac89x0_device_probe,
+ .remove = mac89x0_device_remove,
+ .driver = {
+ .name = "mac89x0",
+ },
+};
+
+module_platform_driver(mac89x0_platform_driver);
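
The mac89x0 rework swaps numeric net_debug levels for the standard msg_enable scheme: netif_msg_init() seeds lp->msg_enable from the debug parameter, and netif_dbg(lp, <type>, dev, ...) prints only when the matching NETIF_MSG_* bit is set. A compact sketch of that pattern (kernel context; my_priv is hypothetical, the helpers are the standard ones):

	struct my_priv {
		int msg_enable;		/* NETIF_MSG_* bits */
	};

	static int debug = -1;		/* module parameter; -1 = defaults */

	static void my_probe_init(struct net_device *dev)
	{
		struct my_priv *lp = netdev_priv(dev);

		/* translate the level into bits; 0 = quiet default set */
		lp->msg_enable = netif_msg_init(debug, 0);
	}

	static void my_rx_trace(struct net_device *dev, int len)
	{
		struct my_priv *lp = netdev_priv(dev);

		/* expands to a test of lp->msg_enable & NETIF_MSG_RX_STATUS */
		netif_dbg(lp, rx_status, dev, "received %d byte packet\n", len);
	}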
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 9b218f0e5a4c..0dd64acd2a3f 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -33,7 +33,7 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.3.0.45"
+#define DRV_VERSION "2.3.0.53"
#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
@@ -140,6 +140,7 @@ struct enic_rfs_flw_tbl {
struct vxlan_offload {
u16 vxlan_udp_port_number;
u8 patch_level;
+ u8 flags;
};
/* Per-instance private data structure */
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index efb9333c7cf8..869006c2002d 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -474,6 +474,39 @@ static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
return 0;
}
+static int enic_get_rx_flow_hash(struct enic *enic, struct ethtool_rxnfc *cmd)
+{
+ cmd->data = 0;
+
+ switch (cmd->flow_type) {
+ case TCP_V6_FLOW:
+ case TCP_V4_FLOW:
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
+ case UDP_V6_FLOW:
+ case UDP_V4_FLOW:
+ if (vnic_dev_capable_udp_rss(enic->vdev))
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ cmd->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
u32 *rule_locs)
{
@@ -500,6 +533,9 @@ static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
ret = enic_grxclsrule(enic, cmd);
spin_unlock_bh(&enic->rfs_h.lock);
break;
+ case ETHTOOL_GRXFH:
+ ret = enic_get_rx_flow_hash(enic, cmd);
+ break;
default:
ret = -EOPNOTSUPP;
break;
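
The new ETHTOOL_GRXFH handler reports which header fields feed the RSS hash, accumulating RXH_* bits through deliberate switch fall-throughs. For reference, the bits for a full TCP/IPv4 4-tuple decompose like this (illustrative; the RXH_* definitions come from linux/ethtool.h):

	#include <linux/ethtool.h>

	static u32 example_tcp4_hash_fields(void)
	{
		return RXH_IP_SRC | RXH_IP_DST |	/* IP source/destination */
		       RXH_L4_B_0_1 |			/* L4 bytes 0-1: source port */
		       RXH_L4_B_2_3;			/* L4 bytes 2-3: destination port */
	}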
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index f202ba72a811..a25fb95492a0 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -191,8 +191,16 @@ static void enic_udp_tunnel_add(struct net_device *netdev,
goto error;
}
- if (ti->sa_family != AF_INET) {
- netdev_info(netdev, "vxlan: only IPv4 offload supported");
+ switch (ti->sa_family) {
+ case AF_INET6:
+ if (!(enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6)) {
+ netdev_info(netdev, "vxlan: only IPv4 offload supported");
+ goto error;
+ }
+ /* Fall through */
+ case AF_INET:
+ break;
+ default:
goto error;
}
@@ -204,6 +212,11 @@ static void enic_udp_tunnel_add(struct net_device *netdev,
goto error;
}
+ if ((vnic_dev_get_res_count(enic->vdev, RES_TYPE_WQ) != 1) &&
+ !(enic->vxlan.flags & ENIC_VXLAN_MULTI_WQ)) {
+ netdev_info(netdev, "vxlan: vxlan offload with multi wq not supported on this adapter");
+ goto error;
+ }
err = vnic_dev_overlay_offload_cfg(enic->vdev,
OVERLAY_CFG_VXLAN_PORT_UPDATE,
@@ -271,22 +284,37 @@ static netdev_features_t enic_features_check(struct sk_buff *skb,
struct enic *enic = netdev_priv(dev);
struct udphdr *udph;
u16 port = 0;
- u16 proto;
+ u8 proto;
if (!skb->encapsulation)
return features;
features = vxlan_features_check(skb, features);
- /* hardware only supports IPv4 vxlan tunnel */
- if (vlan_get_protocol(skb) != htons(ETH_P_IP))
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IPV6):
+ if (!(enic->vxlan.flags & ENIC_VXLAN_OUTER_IPV6))
+ goto out;
+ proto = ipv6_hdr(skb)->nexthdr;
+ break;
+ case htons(ETH_P_IP):
+ proto = ip_hdr(skb)->protocol;
+ break;
+ default:
goto out;
+ }
- /* hardware does not support offload of ipv6 inner pkt */
- if (eth->h_proto != ntohs(ETH_P_IP))
+ switch (eth->h_proto) {
+ case ntohs(ETH_P_IPV6):
+ if (!(enic->vxlan.flags & ENIC_VXLAN_INNER_IPV6))
+ goto out;
+ /* Fall through */
+ case ntohs(ETH_P_IP):
+ break;
+ default:
goto out;
+ }
- proto = ip_hdr(skb)->protocol;
if (proto == IPPROTO_UDP) {
udph = udp_hdr(skb);
@@ -635,12 +663,25 @@ static int enic_queue_wq_skb_csum_l4(struct enic *enic, struct vnic_wq *wq,
static void enic_preload_tcp_csum_encap(struct sk_buff *skb)
{
- if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
+ const struct ethhdr *eth = (struct ethhdr *)skb_inner_mac_header(skb);
+
+ switch (eth->h_proto) {
+ case ntohs(ETH_P_IP):
inner_ip_hdr(skb)->check = 0;
inner_tcp_hdr(skb)->check =
~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
inner_ip_hdr(skb)->daddr, 0,
IPPROTO_TCP, 0);
+ break;
+ case ntohs(ETH_P_IPV6):
+ inner_tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
+ &inner_ipv6_hdr(skb)->daddr, 0,
+ IPPROTO_TCP, 0);
+ break;
+ default:
+ WARN_ONCE(1, "Non ipv4/ipv6 inner pkt for encap offload");
+ break;
}
}
@@ -1898,6 +1939,8 @@ static int enic_open(struct net_device *netdev)
}
for (i = 0; i < enic->rq_count; i++) {
+ /* enable rq before updating rq desc */
+ vnic_rq_enable(&enic->rq[i]);
vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
/* Need at least one buffer on ring to get going */
if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
@@ -1909,8 +1952,6 @@ static int enic_open(struct net_device *netdev)
for (i = 0; i < enic->wq_count; i++)
vnic_wq_enable(&enic->wq[i]);
- for (i = 0; i < enic->rq_count; i++)
- vnic_rq_enable(&enic->rq[i]);
if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
enic_dev_add_station_addr(enic);
@@ -1936,8 +1977,12 @@ static int enic_open(struct net_device *netdev)
return 0;
err_out_free_rq:
- for (i = 0; i < enic->rq_count; i++)
+ for (i = 0; i < enic->rq_count; i++) {
+ err = vnic_rq_disable(&enic->rq[i]);
+ if (err)
+ return err;
vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+ }
enic_dev_notify_unset(enic);
err_out_free_intr:
enic_unset_affinity_hint(enic);
@@ -2151,9 +2196,10 @@ static int enic_dev_wait(struct vnic_dev *vdev,
static int enic_dev_open(struct enic *enic)
{
int err;
+ u32 flags = CMD_OPENF_IG_DESCCACHE;
err = enic_dev_wait(enic->vdev, vnic_dev_open,
- vnic_dev_open_done, 0);
+ vnic_dev_open_done, flags);
if (err)
dev_err(enic_get_dev(enic), "vNIC device open failed, err %d\n",
err);
@@ -2275,7 +2321,7 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
{
struct device *dev = enic_get_dev(enic);
const u8 rss_default_cpu = 0;
- const u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
+ u8 rss_hash_type = NIC_CFG_RSS_HASH_TYPE_IPV4 |
NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 |
NIC_CFG_RSS_HASH_TYPE_IPV6 |
NIC_CFG_RSS_HASH_TYPE_TCP_IPV6;
@@ -2283,6 +2329,8 @@ static int enic_set_rss_nic_cfg(struct enic *enic)
const u8 rss_base_cpu = 0;
u8 rss_enable = ENIC_SETTING(enic, RSS) && (enic->rq_count > 1);
+ if (vnic_dev_capable_udp_rss(enic->vdev))
+ rss_hash_type |= NIC_CFG_RSS_HASH_TYPE_UDP;
if (rss_enable) {
if (!enic_set_rsskey(enic)) {
if (enic_set_rsscpu(enic, rss_hash_bits)) {
@@ -2901,9 +2949,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_RXCSUM;
if (ENIC_SETTING(enic, VXLAN)) {
u64 patch_level;
+ u64 a1 = 0;
netdev->hw_enc_features |= NETIF_F_RXCSUM |
NETIF_F_TSO |
+ NETIF_F_TSO6 |
NETIF_F_TSO_ECN |
NETIF_F_GSO_UDP_TUNNEL |
NETIF_F_HW_CSUM |
@@ -2922,9 +2972,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
err = vnic_dev_get_supported_feature_ver(enic->vdev,
VIC_FEATURE_VXLAN,
- &patch_level);
+ &patch_level, &a1);
if (err)
patch_level = 0;
+ enic->vxlan.flags = (u8)a1;
/* mask bits that are supported by driver
*/
patch_level &= BIT_ULL(0) | BIT_ULL(2);
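
enic_preload_tcp_csum_encap() above seeds the inner TCP checksum with the one's-complement pseudo-header sum, so the offload engine only needs to add the segment bytes and fold. A sketch of the IPv4 case in isolation (kernel context; the helpers are the standard ones, the wrapper function is illustrative):

	static void preload_inner_tcp_csum_v4(struct sk_buff *skb)
	{
		struct iphdr *iph = inner_ip_hdr(skb);
		struct tcphdr *th = inner_tcp_hdr(skb);

		iph->check = 0;		/* hardware recomputes the IP checksum */
		/* seed = ~csum(pseudo-header) with zero length and payload;
		 * the hardware adds the TCP segment and folds the result.
		 */
		th->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
					       IPPROTO_TCP, 0);
	}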
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 39bad67422dd..a2b376055b2d 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -1269,16 +1269,32 @@ int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
}
int vnic_dev_get_supported_feature_ver(struct vnic_dev *vdev, u8 feature,
- u64 *supported_versions)
+ u64 *supported_versions, u64 *a1)
{
u64 a0 = feature;
int wait = 1000;
- u64 a1 = 0;
int ret;
- ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, &a1, wait);
+ ret = vnic_dev_cmd(vdev, CMD_GET_SUPP_FEATURE_VER, &a0, a1, wait);
if (!ret)
*supported_versions = a0;
return ret;
}
+
+bool vnic_dev_capable_udp_rss(struct vnic_dev *vdev)
+{
+ u64 a0 = CMD_NIC_CFG, a1 = 0;
+ u64 rss_hash_type;
+ int wait = 1000;
+ int err;
+
+ err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
+ if (err || !a0)
+ return 0;
+
+ rss_hash_type = (a1 >> NIC_CFG_RSS_HASH_TYPE_SHIFT) &
+ NIC_CFG_RSS_HASH_TYPE_MASK_FIELD;
+
+ return (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP);
+}
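
vnic_dev_capable_udp_rss() probes firmware by issuing CMD_CAPABILITY with the sub-command in a0 and decoding the supported hash types from a1. A standalone sketch of that decode (the shift and mask values here are invented stand-ins for the vnic_nic.h definitions):

	#define HASH_TYPE_SHIFT	8		/* assumed for illustration */
	#define HASH_TYPE_MASK	0xffULL		/* assumed field width */
	#define HASH_TYPE_UDP	(1 << 7)	/* matches NIC_CFG_RSS_HASH_TYPE_UDP */

	static int udp_rss_capable(unsigned long long a1)
	{
		unsigned long long hash_type;

		hash_type = (a1 >> HASH_TYPE_SHIFT) & HASH_TYPE_MASK;
		return !!(hash_type & HASH_TYPE_UDP);
	}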
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index 9d43d6bb9907..59d4cc8fbb85 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -183,6 +183,7 @@ int vnic_dev_overlay_offload_ctrl(struct vnic_dev *vdev, u8 overlay, u8 config);
int vnic_dev_overlay_offload_cfg(struct vnic_dev *vdev, u8 overlay,
u16 vxlan_udp_port_number);
int vnic_dev_get_supported_feature_ver(struct vnic_dev *vdev, u8 feature,
- u64 *supported_versions);
+ u64 *supported_versions, u64 *a1);
+bool vnic_dev_capable_udp_rss(struct vnic_dev *vdev);
#endif /* _VNIC_DEV_H_ */
diff --git a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
index d83880b0d468..41de4ba622a1 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_devcmd.h
@@ -439,6 +439,7 @@ enum vnic_devcmd_cmd {
/* flags for CMD_OPEN */
#define CMD_OPENF_OPROM 0x1 /* open coming from option rom */
+#define CMD_OPENF_IG_DESCCACHE 0x2 /* Do not flush IG DESC cache */
/* flags for CMD_INIT */
#define CMD_INITF_DEFAULT_MAC 0x1 /* init with default mac addr */
@@ -697,6 +698,10 @@ enum overlay_ofld_cmd {
#define OVERLAY_CFG_VXLAN_PORT_UPDATE 0
+#define ENIC_VXLAN_INNER_IPV6 BIT(0)
+#define ENIC_VXLAN_OUTER_IPV6 BIT(1)
+#define ENIC_VXLAN_MULTI_WQ BIT(2)
+
/* Use this enum to get the supported versions for each of these features
* If you need to use the devcmd_get_supported_feature_version(), add
* the new feature into this enum and install function handler in devcmd.c
diff --git a/drivers/net/ethernet/cisco/enic/vnic_nic.h b/drivers/net/ethernet/cisco/enic/vnic_nic.h
index 995a50dd4c99..5a93db0d7afc 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_nic.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_nic.h
@@ -47,6 +47,7 @@
#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 4)
#define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 5)
#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 6)
+#define NIC_CFG_RSS_HASH_TYPE_UDP (1 << 7)
static inline void vnic_set_nic_cfg(u32 *nic_cfg,
u8 rss_default_cpu, u8 rss_hash_type,
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 1a49297224ed..ff92ab1daeb8 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -19,7 +19,7 @@
#include "be.h"
#include "be_cmds.h"
-char *be_misconfig_evt_port_state[] = {
+const char * const be_misconfig_evt_port_state[] = {
"Physical Link is functional",
"Optics faulted/incorrectly installed/not installed - Reseat optics. If issue not resolved, replace.",
"Optics of two types installed – Remove one optic or install matching pair of optics.",
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 09da2d82c2f0..e8b43cf44b6f 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -201,7 +201,7 @@ enum {
phy_state == BE_PHY_UNQUALIFIED || \
phy_state == BE_PHY_UNCERTIFIED)
-extern char *be_misconfig_evt_port_state[];
+extern const char * const be_misconfig_evt_port_state[];
/* async event indicating misconfigured port */
struct be_async_event_misconfig_port {
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 7caa8da48421..159dc2df878d 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -454,6 +454,16 @@ static void dpaa_set_rx_mode(struct net_device *net_dev)
err);
}
+ if (!!(net_dev->flags & IFF_ALLMULTI) != priv->mac_dev->allmulti) {
+ priv->mac_dev->allmulti = !priv->mac_dev->allmulti;
+ err = priv->mac_dev->set_allmulti(priv->mac_dev->fman_mac,
+ priv->mac_dev->allmulti);
+ if (err < 0)
+ netif_err(priv, drv, net_dev,
+ "mac_dev->set_allmulti() = %d\n",
+ err);
+ }
+
err = priv->mac_dev->set_multi(net_dev, priv->mac_dev);
if (err < 0)
netif_err(priv, drv, net_dev, "mac_dev->set_multi() = %d\n",
@@ -1916,8 +1926,10 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
goto csum_failed;
}
+ /* SGT[0] is used by the linear part */
sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
- qm_sg_entry_set_len(&sgt[0], skb_headlen(skb));
+ frag_len = skb_headlen(skb);
+ qm_sg_entry_set_len(&sgt[0], frag_len);
sgt[0].bpid = FSL_DPAA_BPID_INV;
sgt[0].offset = 0;
addr = dma_map_single(dev, skb->data,
@@ -1930,9 +1942,9 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
qm_sg_entry_set64(&sgt[0], addr);
/* populate the rest of SGT entries */
- frag = &skb_shinfo(skb)->frags[0];
- frag_len = frag->size;
- for (i = 1; i <= nr_frags; i++, frag++) {
+ for (i = 0; i < nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ frag_len = frag->size;
WARN_ON(!skb_frag_page(frag));
addr = skb_frag_dma_map(dev, frag, 0,
frag_len, dma_dir);
@@ -1942,15 +1954,16 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
goto sg_map_failed;
}
- qm_sg_entry_set_len(&sgt[i], frag_len);
- sgt[i].bpid = FSL_DPAA_BPID_INV;
- sgt[i].offset = 0;
+ qm_sg_entry_set_len(&sgt[i + 1], frag_len);
+ sgt[i + 1].bpid = FSL_DPAA_BPID_INV;
+ sgt[i + 1].offset = 0;
/* keep the offset in the address */
- qm_sg_entry_set64(&sgt[i], addr);
- frag_len = frag->size;
+ qm_sg_entry_set64(&sgt[i + 1], addr);
}
- qm_sg_entry_set_f(&sgt[i - 1], frag_len);
+
+ /* Set the final bit in the last used entry of the SGT */
+ qm_sg_entry_set_f(&sgt[nr_frags], frag_len);
qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
@@ -2052,19 +2065,23 @@ static int dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
	/* MAX_SKB_FRAGS is equal to or larger than our DPAA_SGT_MAX_ENTRIES;
* make sure we don't feed FMan with more fragments than it supports.
*/
- if (nonlinear &&
- likely(skb_shinfo(skb)->nr_frags < DPAA_SGT_MAX_ENTRIES)) {
- /* Just create a S/G fd based on the skb */
- err = skb_to_sg_fd(priv, skb, &fd);
- percpu_priv->tx_frag_skbuffs++;
- } else {
+ if (unlikely(nonlinear &&
+ (skb_shinfo(skb)->nr_frags >= DPAA_SGT_MAX_ENTRIES))) {
/* If the egress skb contains more fragments than we support
* we have no choice but to linearize it ourselves.
*/
- if (unlikely(nonlinear) && __skb_linearize(skb))
+ if (__skb_linearize(skb))
goto enomem;
- /* Finally, create a contig FD from this skb */
+ nonlinear = skb_is_nonlinear(skb);
+ }
+
+ if (nonlinear) {
+ /* Just create a S/G fd based on the skb */
+ err = skb_to_sg_fd(priv, skb, &fd);
+ percpu_priv->tx_frag_skbuffs++;
+ } else {
+ /* Create a contig FD from this skb */
err = skb_to_contig_fd(priv, skb, &fd, &offset);
}
if (unlikely(err < 0))
@@ -2201,14 +2218,8 @@ static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
if (dpaa_eth_napi_schedule(percpu_priv, portal))
return qman_cb_dqrr_stop;
- if (dpaa_eth_refill_bpools(priv))
- /* Unable to refill the buffer pool due to insufficient
- * system memory. Just release the frame back into the pool,
- * otherwise we'll soon end up with an empty buffer pool.
- */
- dpaa_fd_release(net_dev, &dq->fd);
- else
- dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
+ dpaa_eth_refill_bpools(priv);
+ dpaa_rx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
return qman_cb_dqrr_consume;
}
@@ -2766,7 +2777,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
priv->channel = (u16)channel;
- /* Start a thread that will walk the CPUs with affine portals
+ /* Walk the CPUs with affine portals
* and add this pool channel to each's dequeue mask.
*/
dpaa_eth_add_channel(priv->channel);
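
The skb_to_sg_fd() rework makes the S/G table indexing explicit: entry 0 always maps the skb's linear part, fragment i lands in entry i+1, and the final ("F") bit is set on entry nr_frags, the last one used. A shape-only sketch of that layout (plain C; types simplified, no DMA mapping):

	struct sg_entry {
		unsigned long addr;
		unsigned int len;
		int final;
	};

	static void fill_sgt(struct sg_entry *sgt,
			     unsigned long lin_addr, unsigned int lin_len,
			     const unsigned long *frag_addr,
			     const unsigned int *frag_len, int nr_frags)
	{
		int i;

		sgt[0].addr = lin_addr;			/* SGT[0] = linear part */
		sgt[0].len = lin_len;

		for (i = 0; i < nr_frags; i++) {	/* frag i -> SGT[i + 1] */
			sgt[i + 1].addr = frag_addr[i];
			sgt[i + 1].len = frag_len[i];
		}
		sgt[nr_frags].final = 1;	/* F bit on the last used entry */
	}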
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index faea674094b9..85306d1b2acf 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -211,7 +211,7 @@ static int dpaa_set_pauseparam(struct net_device *net_dev,
if (epause->rx_pause)
newadv = ADVERTISED_Pause | ADVERTISED_Asym_Pause;
if (epause->tx_pause)
- newadv |= ADVERTISED_Asym_Pause;
+ newadv ^= ADVERTISED_Asym_Pause;
oldadv = phydev->advertising &
(ADVERTISED_Pause | ADVERTISED_Asym_Pause);
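
The `|=` to `^=` change above makes the advertised bits follow the 802.3 pause truth table: rx+tx advertises Pause alone, rx-only advertises Pause|Asym_Pause, tx-only advertises Asym_Pause. A quick check of all three cases (userspace; the two defines stand in for the ADVERTISED_* flags):

	#include <assert.h>

	#define PAUSE	0x1	/* stand-in for ADVERTISED_Pause */
	#define ASYM	0x2	/* stand-in for ADVERTISED_Asym_Pause */

	static unsigned int advert(int rx, int tx)
	{
		unsigned int adv = 0;

		if (rx)
			adv = PAUSE | ASYM;
		if (tx)
			adv ^= ASYM;	/* XOR: clears ASYM when rx is also set */
		return adv;
	}

	int main(void)
	{
		assert(advert(1, 1) == PAUSE);		/* symmetric pause */
		assert(advert(1, 0) == (PAUSE | ASYM));	/* rx only */
		assert(advert(0, 1) == ASYM);		/* tx only */
		return 0;
	}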
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.c b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
index ea43b4974149..9a581faaa742 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.c
@@ -1117,6 +1117,25 @@ int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
return 0;
}
+int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable)
+{
+ u32 tmp;
+ struct dtsec_regs __iomem *regs = dtsec->regs;
+
+ if (!is_init_done(dtsec->dtsec_drv_param))
+ return -EINVAL;
+
+ tmp = ioread32be(&regs->rctrl);
+ if (enable)
+ tmp |= RCTRL_MPROM;
+ else
+ tmp &= ~RCTRL_MPROM;
+
+ iowrite32be(tmp, &regs->rctrl);
+
+ return 0;
+}
+
int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
{
struct dtsec_regs __iomem *regs = dtsec->regs;
diff --git a/drivers/net/ethernet/freescale/fman/fman_dtsec.h b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
index c4467c072058..1a689adf5a22 100644
--- a/drivers/net/ethernet/freescale/fman/fman_dtsec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_dtsec.h
@@ -55,5 +55,6 @@ int dtsec_set_exception(struct fman_mac *dtsec,
int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
int dtsec_del_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr);
int dtsec_get_version(struct fman_mac *dtsec, u32 *mac_version);
+int dtsec_set_allmulti(struct fman_mac *dtsec, bool enable);
#endif /* __DTSEC_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c
index c0296880feba..446a97b792e3 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.c
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.c
@@ -350,6 +350,7 @@ struct fman_mac {
struct fman_rev_info fm_rev_info;
bool basex_if;
struct phy_device *pcsphy;
+ bool allmulti_enabled;
};
static void add_addr_in_paddr(struct memac_regs __iomem *regs, u8 *adr,
@@ -940,6 +941,29 @@ int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
return 0;
}
+int memac_set_allmulti(struct fman_mac *memac, bool enable)
+{
+ u32 entry;
+ struct memac_regs __iomem *regs = memac->regs;
+
+ if (!is_init_done(memac->memac_drv_param))
+ return -EINVAL;
+
+ if (enable) {
+ for (entry = 0; entry < HASH_TABLE_SIZE; entry++)
+ iowrite32be(entry | HASH_CTRL_MCAST_EN,
+ &regs->hashtable_ctrl);
+ } else {
+ for (entry = 0; entry < HASH_TABLE_SIZE; entry++)
+ iowrite32be(entry & ~HASH_CTRL_MCAST_EN,
+ &regs->hashtable_ctrl);
+ }
+
+ memac->allmulti_enabled = enable;
+
+ return 0;
+}
+
int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
{
struct memac_regs __iomem *regs = memac->regs;
@@ -963,8 +987,12 @@ int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr)
break;
}
}
- if (list_empty(&memac->multicast_addr_hash->lsts[hash]))
- iowrite32be(hash & ~HASH_CTRL_MCAST_EN, &regs->hashtable_ctrl);
+
+ if (!memac->allmulti_enabled) {
+ if (list_empty(&memac->multicast_addr_hash->lsts[hash]))
+ iowrite32be(hash & ~HASH_CTRL_MCAST_EN,
+ &regs->hashtable_ctrl);
+ }
return 0;
}
diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.h b/drivers/net/ethernet/freescale/fman/fman_memac.h
index c4a66469a907..b5a50338ed9a 100644
--- a/drivers/net/ethernet/freescale/fman/fman_memac.h
+++ b/drivers/net/ethernet/freescale/fman/fman_memac.h
@@ -57,5 +57,6 @@ int memac_set_exception(struct fman_mac *memac,
enum fman_mac_exceptions exception, bool enable);
int memac_add_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
int memac_del_hash_mac_address(struct fman_mac *memac, enet_addr_t *eth_addr);
+int memac_set_allmulti(struct fman_mac *memac, bool enable);
#endif /* __MEMAC_H */
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.c b/drivers/net/ethernet/freescale/fman/fman_tgec.c
index 4b0f3a50b293..284735d4ebe9 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.c
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.c
@@ -217,6 +217,7 @@ struct fman_mac {
struct tgec_cfg *cfg;
void *fm;
struct fman_rev_info fm_rev_info;
+ bool allmulti_enabled;
};
static void set_mac_address(struct tgec_regs __iomem *regs, u8 *adr)
@@ -564,6 +565,29 @@ int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
return 0;
}
+int tgec_set_allmulti(struct fman_mac *tgec, bool enable)
+{
+ u32 entry;
+ struct tgec_regs __iomem *regs = tgec->regs;
+
+ if (!is_init_done(tgec->cfg))
+ return -EINVAL;
+
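+ /* Walk the whole hash table, toggling the mcast-enable bit in every bucket */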
+ if (enable) {
+ for (entry = 0; entry < TGEC_HASH_TABLE_SIZE; entry++)
+ iowrite32be(entry | TGEC_HASH_MCAST_EN,
+ &regs->hashtable_ctrl);
+ } else {
+ for (entry = 0; entry < TGEC_HASH_TABLE_SIZE; entry++)
+ iowrite32be(entry & ~TGEC_HASH_MCAST_EN,
+ &regs->hashtable_ctrl);
+ }
+
+ tgec->allmulti_enabled = enable;
+
+ return 0;
+}
+
int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
{
struct tgec_regs __iomem *regs = tgec->regs;
@@ -591,9 +615,12 @@ int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr)
break;
}
}
- if (list_empty(&tgec->multicast_addr_hash->lsts[hash]))
- iowrite32be((hash & ~TGEC_HASH_MCAST_EN),
- &regs->hashtable_ctrl);
+
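+ /* Skip clearing the bucket while allmulti keeps the whole table enabled */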
+ if (!tgec->allmulti_enabled) {
+ if (list_empty(&tgec->multicast_addr_hash->lsts[hash]))
+ iowrite32be((hash & ~TGEC_HASH_MCAST_EN),
+ &regs->hashtable_ctrl);
+ }
return 0;
}
diff --git a/drivers/net/ethernet/freescale/fman/fman_tgec.h b/drivers/net/ethernet/freescale/fman/fman_tgec.h
index 514bba9f47ce..cbbd3b422a98 100644
--- a/drivers/net/ethernet/freescale/fman/fman_tgec.h
+++ b/drivers/net/ethernet/freescale/fman/fman_tgec.h
@@ -51,5 +51,6 @@ int tgec_set_exception(struct fman_mac *tgec,
int tgec_add_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
int tgec_del_hash_mac_address(struct fman_mac *tgec, enet_addr_t *eth_addr);
int tgec_get_version(struct fman_mac *tgec, u32 *mac_version);
+int tgec_set_allmulti(struct fman_mac *tgec, bool enable);
#endif /* __TGEC_H */
diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c
index 88c0a0636b44..4829dcd9e077 100644
--- a/drivers/net/ethernet/freescale/fman/mac.c
+++ b/drivers/net/ethernet/freescale/fman/mac.c
@@ -470,6 +470,7 @@ static void setup_dtsec(struct mac_device *mac_dev)
mac_dev->set_tx_pause = dtsec_set_tx_pause_frames;
mac_dev->set_rx_pause = dtsec_accept_rx_pause_frames;
mac_dev->set_exception = dtsec_set_exception;
+ mac_dev->set_allmulti = dtsec_set_allmulti;
mac_dev->set_multi = set_multi;
mac_dev->start = start;
mac_dev->stop = stop;
@@ -488,6 +489,7 @@ static void setup_tgec(struct mac_device *mac_dev)
mac_dev->set_tx_pause = tgec_set_tx_pause_frames;
mac_dev->set_rx_pause = tgec_accept_rx_pause_frames;
mac_dev->set_exception = tgec_set_exception;
+ mac_dev->set_allmulti = tgec_set_allmulti;
mac_dev->set_multi = set_multi;
mac_dev->start = start;
mac_dev->stop = stop;
@@ -506,6 +508,7 @@ static void setup_memac(struct mac_device *mac_dev)
mac_dev->set_tx_pause = memac_set_tx_pause_frames;
mac_dev->set_rx_pause = memac_accept_rx_pause_frames;
mac_dev->set_exception = memac_set_exception;
+ mac_dev->set_allmulti = memac_set_allmulti;
mac_dev->set_multi = set_multi;
mac_dev->start = start;
mac_dev->stop = stop;
diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h
index eefb3357e304..b520cec120ee 100644
--- a/drivers/net/ethernet/freescale/fman/mac.h
+++ b/drivers/net/ethernet/freescale/fman/mac.h
@@ -59,6 +59,7 @@ struct mac_device {
bool rx_pause_active;
bool tx_pause_active;
bool promisc;
+ bool allmulti;
int (*init)(struct mac_device *mac_dev);
int (*start)(struct mac_device *mac_dev);
@@ -66,6 +67,7 @@ struct mac_device {
void (*adjust_link)(struct mac_device *mac_dev);
int (*set_promisc)(struct fman_mac *mac_dev, bool enable);
int (*change_addr)(struct fman_mac *mac_dev, enet_addr_t *enet_addr);
+ int (*set_allmulti)(struct fman_mac *mac_dev, bool enable);
int (*set_multi)(struct net_device *net_dev,
struct mac_device *mac_dev);
int (*set_rx_pause)(struct fman_mac *mac_dev, bool en);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 1b3cc8bb0705..765407179fdd 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -90,7 +90,7 @@ MODULE_VERSION(IBMVNIC_DRIVER_VERSION);
static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;
static int ibmvnic_remove(struct vio_dev *);
-static void release_sub_crqs(struct ibmvnic_adapter *);
+static void release_sub_crqs(struct ibmvnic_adapter *, bool);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
@@ -111,7 +111,7 @@ static int ibmvnic_poll(struct napi_struct *napi, int data);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);
-static void send_login(struct ibmvnic_adapter *adapter);
+static int send_login(struct ibmvnic_adapter *adapter);
static void send_cap_queries(struct ibmvnic_adapter *adapter);
static int init_sub_crqs(struct ibmvnic_adapter *);
static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
@@ -361,14 +361,14 @@ static void release_stats_buffers(struct ibmvnic_adapter *adapter)
static int init_stats_buffers(struct ibmvnic_adapter *adapter)
{
adapter->tx_stats_buffers =
- kcalloc(adapter->req_tx_queues,
+ kcalloc(IBMVNIC_MAX_QUEUES,
sizeof(struct ibmvnic_tx_queue_stats),
GFP_KERNEL);
if (!adapter->tx_stats_buffers)
return -ENOMEM;
adapter->rx_stats_buffers =
- kcalloc(adapter->req_rx_queues,
+ kcalloc(IBMVNIC_MAX_QUEUES,
sizeof(struct ibmvnic_rx_queue_stats),
GFP_KERNEL);
if (!adapter->rx_stats_buffers)
@@ -509,7 +509,7 @@ static int init_rx_pools(struct net_device *netdev)
return -1;
}
- adapter->num_active_rx_pools = 0;
+ adapter->num_active_rx_pools = rxadd_subcrqs;
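+ /* Record the pool count before the loop so a mid-loop failure
+ * still releases every pool allocated so far.
+ */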
for (i = 0; i < rxadd_subcrqs; i++) {
rx_pool = &adapter->rx_pool[i];
@@ -554,8 +554,6 @@ static int init_rx_pools(struct net_device *netdev)
rx_pool->next_free = 0;
}
- adapter->num_active_rx_pools = rxadd_subcrqs;
-
return 0;
}
@@ -641,7 +639,7 @@ static int init_tx_pools(struct net_device *netdev)
if (!adapter->tx_pool)
return -1;
- adapter->num_active_tx_pools = 0;
+ adapter->num_active_tx_pools = tx_subcrqs;
for (i = 0; i < tx_subcrqs; i++) {
tx_pool = &adapter->tx_pool[i];
@@ -690,8 +688,6 @@ static int init_tx_pools(struct net_device *netdev)
tx_pool->producer_index = 0;
}
- adapter->num_active_tx_pools = tx_subcrqs;
-
return 0;
}
@@ -740,6 +736,45 @@ static void ibmvnic_napi_disable(struct ibmvnic_adapter *adapter)
adapter->napi_enabled = false;
}
+static int init_napi(struct ibmvnic_adapter *adapter)
+{
+ int i;
+
+ adapter->napi = kcalloc(adapter->req_rx_queues,
+ sizeof(struct napi_struct), GFP_KERNEL);
+ if (!adapter->napi)
+ return -ENOMEM;
+
+ for (i = 0; i < adapter->req_rx_queues; i++) {
+ netdev_dbg(adapter->netdev, "Adding napi[%d]\n", i);
+ netif_napi_add(adapter->netdev, &adapter->napi[i],
+ ibmvnic_poll, NAPI_POLL_WEIGHT);
+ }
+
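+ /* Remember how many NAPI instances were added so release_napi() can undo exactly these */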
+ adapter->num_active_rx_napi = adapter->req_rx_queues;
+ return 0;
+}
+
+static void release_napi(struct ibmvnic_adapter *adapter)
+{
+ int i;
+
+ if (!adapter->napi)
+ return;
+
+ for (i = 0; i < adapter->num_active_rx_napi; i++) {
+ netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i);
+ netif_napi_del(&adapter->napi[i]);
+ }
+
+ kfree(adapter->napi);
+ adapter->napi = NULL;
+ adapter->num_active_rx_napi = 0;
+}
+
static int ibmvnic_login(struct net_device *netdev)
{
struct ibmvnic_adapter *adapter = netdev_priv(netdev);
@@ -750,7 +785,7 @@ static int ibmvnic_login(struct net_device *netdev)
do {
if (adapter->renegotiate) {
adapter->renegotiate = false;
- release_sub_crqs(adapter);
+ release_sub_crqs(adapter, 1);
reinit_completion(&adapter->init_done);
send_cap_queries(adapter);
@@ -774,8 +809,11 @@ static int ibmvnic_login(struct net_device *netdev)
}
reinit_completion(&adapter->init_done);
- send_login(adapter);
- if (!wait_for_completion_timeout(&adapter->init_done,
+ rc = send_login(adapter);
+ if (rc) {
+ dev_err(dev, "Unable to attempt device login\n");
+ return rc;
+ } else if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) {
dev_err(dev, "Login timeout\n");
return -1;
@@ -805,29 +843,13 @@ static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter)
static void release_resources(struct ibmvnic_adapter *adapter)
{
- int i;
-
release_vpd_data(adapter);
release_tx_pools(adapter);
release_rx_pools(adapter);
- release_stats_token(adapter);
- release_stats_buffers(adapter);
release_error_buffers(adapter);
-
- if (adapter->napi) {
- for (i = 0; i < adapter->req_rx_queues; i++) {
- if (&adapter->napi[i]) {
- netdev_dbg(adapter->netdev,
- "Releasing napi[%d]\n", i);
- netif_napi_del(&adapter->napi[i]);
- }
- }
- }
- kfree(adapter->napi);
- adapter->napi = NULL;
-
+ release_napi(adapter);
release_login_rsp_buffer(adapter);
}
@@ -947,20 +969,12 @@ static int ibmvnic_get_vpd(struct ibmvnic_adapter *adapter)
static int init_resources(struct ibmvnic_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- int i, rc;
+ int rc;
rc = set_real_num_queues(netdev);
if (rc)
return rc;
- rc = init_stats_buffers(adapter);
- if (rc)
- return rc;
-
- rc = init_stats_token(adapter);
- if (rc)
- return rc;
-
adapter->vpd = kzalloc(sizeof(*adapter->vpd), GFP_KERNEL);
if (!adapter->vpd)
return -ENOMEM;
@@ -973,16 +987,10 @@ static int init_resources(struct ibmvnic_adapter *adapter)
}
adapter->map_id = 1;
- adapter->napi = kcalloc(adapter->req_rx_queues,
- sizeof(struct napi_struct), GFP_KERNEL);
- if (!adapter->napi)
- return -ENOMEM;
- for (i = 0; i < adapter->req_rx_queues; i++) {
- netdev_dbg(netdev, "Adding napi[%d]\n", i);
- netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
- NAPI_POLL_WEIGHT);
- }
+ rc = init_napi(adapter);
+ if (rc)
+ return rc;
send_map_query(adapter);
@@ -1076,6 +1084,7 @@ static int ibmvnic_open(struct net_device *netdev)
static void clean_rx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_rx_pool *rx_pool;
+ struct ibmvnic_rx_buff *rx_buff;
u64 rx_entries;
int rx_scrqs;
int i, j;
@@ -1089,14 +1098,15 @@ static void clean_rx_pools(struct ibmvnic_adapter *adapter)
/* Free any remaining skbs in the rx buffer pools */
for (i = 0; i < rx_scrqs; i++) {
rx_pool = &adapter->rx_pool[i];
- if (!rx_pool)
+ if (!rx_pool || !rx_pool->rx_buff)
continue;
netdev_dbg(adapter->netdev, "Cleaning rx_pool[%d]\n", i);
for (j = 0; j < rx_entries; j++) {
- if (rx_pool->rx_buff[j].skb) {
- dev_kfree_skb_any(rx_pool->rx_buff[j].skb);
- rx_pool->rx_buff[j].skb = NULL;
+ rx_buff = &rx_pool->rx_buff[j];
+ if (rx_buff && rx_buff->skb) {
+ dev_kfree_skb_any(rx_buff->skb);
+ rx_buff->skb = NULL;
}
}
}
@@ -1105,6 +1115,7 @@ static void clean_rx_pools(struct ibmvnic_adapter *adapter)
static void clean_tx_pools(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_tx_pool *tx_pool;
+ struct ibmvnic_tx_buff *tx_buff;
u64 tx_entries;
int tx_scrqs;
int i, j;
@@ -1118,14 +1129,15 @@ static void clean_tx_pools(struct ibmvnic_adapter *adapter)
/* Free any remaining skbs in the tx buffer pools */
for (i = 0; i < tx_scrqs; i++) {
tx_pool = &adapter->tx_pool[i];
- if (!tx_pool)
+ if (!tx_pool || !tx_pool->tx_buff)
continue;
netdev_dbg(adapter->netdev, "Cleaning tx_pool[%d]\n", i);
for (j = 0; j < tx_entries; j++) {
- if (tx_pool->tx_buff[j].skb) {
- dev_kfree_skb_any(tx_pool->tx_buff[j].skb);
- tx_pool->tx_buff[j].skb = NULL;
+ tx_buff = &tx_pool->tx_buff[j];
+ if (tx_buff && tx_buff->skb) {
+ dev_kfree_skb_any(tx_buff->skb);
+ tx_buff->skb = NULL;
}
}
}
@@ -1467,6 +1479,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
if ((*hdrs >> 7) & 1) {
build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
tx_crq.v1.n_crq_elem = num_entries;
+ tx_buff->num_entries = num_entries;
tx_buff->indir_arr[0] = tx_crq;
tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
sizeof(tx_buff->indir_arr),
@@ -1485,6 +1498,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
(u64)tx_buff->indir_dma,
(u64)num_entries);
} else {
+ tx_buff->num_entries = num_entries;
lpar_rc = send_subcrq(adapter, handle_array[queue_num],
&tx_crq);
}
@@ -1515,9 +1529,9 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
goto out;
}
- if (atomic_inc_return(&tx_scrq->used)
+ if (atomic_add_return(num_entries, &tx_scrq->used)
>= adapter->req_tx_entries_per_subcrq) {
- netdev_info(netdev, "Stopping queue %d\n", queue_num);
+ netdev_dbg(netdev, "Stopping queue %d\n", queue_num);
netif_stop_subqueue(netdev, queue_num);
}
@@ -1653,7 +1667,7 @@ static int do_reset(struct ibmvnic_adapter *adapter,
if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
adapter->wait_for_reset) {
release_resources(adapter);
- release_sub_crqs(adapter);
+ release_sub_crqs(adapter, 1);
release_crq_queue(adapter);
}
@@ -1691,6 +1705,9 @@ static int do_reset(struct ibmvnic_adapter *adapter,
release_tx_pools(adapter);
init_rx_pools(netdev);
init_tx_pools(netdev);
+
+ release_napi(adapter);
+ init_napi(adapter);
} else {
rc = reset_tx_pools(adapter);
if (rc)
@@ -2295,24 +2312,27 @@ static int reset_sub_crq_queues(struct ibmvnic_adapter *adapter)
}
static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
- struct ibmvnic_sub_crq_queue *scrq)
+ struct ibmvnic_sub_crq_queue *scrq,
+ bool do_h_free)
{
struct device *dev = &adapter->vdev->dev;
long rc;
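+ /* do_h_free is false when the sub-CRQs are already gone on the
+ * firmware side, e.g. across a reset; only local resources remain.
+ */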
netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");
- /* Close the sub-crqs */
- do {
- rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
- adapter->vdev->unit_address,
- scrq->crq_num);
- } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+ if (do_h_free) {
+ /* Close the sub-crqs */
+ do {
+ rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
+ adapter->vdev->unit_address,
+ scrq->crq_num);
+ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
- if (rc) {
- netdev_err(adapter->netdev,
- "Failed to release sub-CRQ %16lx, rc = %ld\n",
- scrq->crq_num, rc);
+ if (rc) {
+ netdev_err(adapter->netdev,
+ "Failed to release sub-CRQ %16lx, rc = %ld\n",
+ scrq->crq_num, rc);
+ }
}
dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
@@ -2380,12 +2400,12 @@ zero_page_failed:
return NULL;
}
-static void release_sub_crqs(struct ibmvnic_adapter *adapter)
+static void release_sub_crqs(struct ibmvnic_adapter *adapter, bool do_h_free)
{
int i;
if (adapter->tx_scrq) {
- for (i = 0; i < adapter->req_tx_queues; i++) {
+ for (i = 0; i < adapter->num_active_tx_scrqs; i++) {
if (!adapter->tx_scrq[i])
continue;
@@ -2398,15 +2418,17 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
adapter->tx_scrq[i]->irq = 0;
}
- release_sub_crq_queue(adapter, adapter->tx_scrq[i]);
+ release_sub_crq_queue(adapter, adapter->tx_scrq[i],
+ do_h_free);
}
kfree(adapter->tx_scrq);
adapter->tx_scrq = NULL;
+ adapter->num_active_tx_scrqs = 0;
}
if (adapter->rx_scrq) {
- for (i = 0; i < adapter->req_rx_queues; i++) {
+ for (i = 0; i < adapter->num_active_rx_scrqs; i++) {
if (!adapter->rx_scrq[i])
continue;
@@ -2419,11 +2441,13 @@ static void release_sub_crqs(struct ibmvnic_adapter *adapter)
adapter->rx_scrq[i]->irq = 0;
}
- release_sub_crq_queue(adapter, adapter->rx_scrq[i]);
+ release_sub_crq_queue(adapter, adapter->rx_scrq[i],
+ do_h_free);
}
kfree(adapter->rx_scrq);
adapter->rx_scrq = NULL;
+ adapter->num_active_rx_scrqs = 0;
}
}
@@ -2473,6 +2497,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
restart_loop:
while (pending_scrq(adapter, scrq)) {
unsigned int pool = scrq->pool_index;
+ int num_entries = 0;
next = ibmvnic_next_scrq(adapter, scrq);
for (i = 0; i < next->tx_comp.num_comps; i++) {
@@ -2503,6 +2528,8 @@ restart_loop:
txbuff->skb = NULL;
}
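+ /* Credit back the entries this buffer consumed so scrq->used mirrors what xmit added */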
+ num_entries += txbuff->num_entries;
+
adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
producer_index] = index;
adapter->tx_pool[pool].producer_index =
@@ -2512,13 +2539,13 @@ restart_loop:
/* remove tx_comp scrq*/
next->tx_comp.first = 0;
- if (atomic_sub_return(next->tx_comp.num_comps, &scrq->used) <=
+ if (atomic_sub_return(num_entries, &scrq->used) <=
(adapter->req_tx_entries_per_subcrq / 2) &&
__netif_subqueue_stopped(adapter->netdev,
scrq->pool_index)) {
netif_wake_subqueue(adapter->netdev, scrq->pool_index);
- netdev_info(adapter->netdev, "Started queue %d\n",
- scrq->pool_index);
+ netdev_dbg(adapter->netdev, "Started queue %d\n",
+ scrq->pool_index);
}
}
@@ -2590,7 +2617,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
scrq->irq, rc);
irq_dispose_mapping(scrq->irq);
- goto req_rx_irq_failed;
+ goto req_tx_irq_failed;
}
}
@@ -2626,7 +2653,7 @@ req_tx_irq_failed:
free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
irq_dispose_mapping(adapter->rx_scrq[j]->irq);
}
- release_sub_crqs(adapter);
+ release_sub_crqs(adapter, 1);
return rc;
}
@@ -2688,6 +2715,7 @@ static int init_sub_crqs(struct ibmvnic_adapter *adapter)
for (i = 0; i < adapter->req_tx_queues; i++) {
adapter->tx_scrq[i] = allqueues[i];
adapter->tx_scrq[i]->pool_index = i;
+ adapter->num_active_tx_scrqs++;
}
adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
@@ -2698,6 +2726,7 @@ static int init_sub_crqs(struct ibmvnic_adapter *adapter)
for (i = 0; i < adapter->req_rx_queues; i++) {
adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
adapter->rx_scrq[i]->scrq_num = i;
+ adapter->num_active_rx_scrqs++;
}
kfree(allqueues);
@@ -2708,7 +2737,7 @@ rx_failed:
adapter->tx_scrq = NULL;
tx_failed:
for (i = 0; i < registered_queues; i++)
- release_sub_crq_queue(adapter, allqueues[i]);
+ release_sub_crq_queue(adapter, allqueues[i], 1);
kfree(allqueues);
return -1;
}
@@ -3048,7 +3077,7 @@ static void vnic_add_client_data(struct ibmvnic_adapter *adapter,
strncpy(&vlcd->name, adapter->netdev->name, len);
}
-static void send_login(struct ibmvnic_adapter *adapter)
+static int send_login(struct ibmvnic_adapter *adapter)
{
struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
struct ibmvnic_login_buffer *login_buffer;
@@ -3064,6 +3093,12 @@ static void send_login(struct ibmvnic_adapter *adapter)
struct vnic_login_client_data *vlcd;
int i;
+ if (!adapter->tx_scrq || !adapter->rx_scrq) {
+ netdev_err(adapter->netdev,
+ "RX or TX queues are not allocated, device login failed\n");
+ return -1;
+ }
+
release_login_rsp_buffer(adapter);
client_data_len = vnic_client_data_len(adapter);
@@ -3161,7 +3196,7 @@ static void send_login(struct ibmvnic_adapter *adapter)
crq.login.len = cpu_to_be32(buffer_size);
ibmvnic_send_crq(adapter, &crq);
- return;
+ return 0;
buf_rsp_map_failed:
kfree(login_rsp_buffer);
@@ -3170,7 +3205,7 @@ buf_rsp_alloc_failed:
buf_map_failed:
kfree(login_buffer);
buf_alloc_failed:
- return;
+ return -1;
}
static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
@@ -4335,6 +4370,7 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
{
struct device *dev = &adapter->vdev->dev;
unsigned long timeout = msecs_to_jiffies(30000);
+ u64 old_num_rx_queues, old_num_tx_queues;
int rc;
if (adapter->resetting && !adapter->wait_for_reset) {
@@ -4352,6 +4388,9 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
adapter->from_passive_init = false;
+ old_num_rx_queues = adapter->req_rx_queues;
+ old_num_tx_queues = adapter->req_tx_queues;
+
init_completion(&adapter->init_done);
adapter->init_done_rc = 0;
ibmvnic_send_crq_init(adapter);
@@ -4371,10 +4410,18 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
return -1;
}
- if (adapter->resetting && !adapter->wait_for_reset)
- rc = reset_sub_crq_queues(adapter);
- else
+ if (adapter->resetting && !adapter->wait_for_reset) {
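+ /* The queue counts changed during renegotiation, so the old
+ * sub-CRQs cannot simply be reset; free them (locally only)
+ * and allocate a fresh set.
+ */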
+ if (adapter->req_rx_queues != old_num_rx_queues ||
+ adapter->req_tx_queues != old_num_tx_queues) {
+ release_sub_crqs(adapter, 0);
+ rc = init_sub_crqs(adapter);
+ } else {
+ rc = reset_sub_crq_queues(adapter);
+ }
+ } else {
rc = init_sub_crqs(adapter);
+ }
+
if (rc) {
dev_err(dev, "Initialization of sub crqs failed\n");
release_crq_queue(adapter);
@@ -4387,6 +4434,14 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
release_crq_queue(adapter);
}
+ rc = init_stats_buffers(adapter);
+ if (rc)
+ return rc;
+
+ rc = init_stats_token(adapter);
+ if (rc)
+ return rc;
+
return rc;
}
@@ -4474,7 +4529,7 @@ ibmvnic_register_fail:
device_remove_file(&dev->dev, &dev_attr_failover);
ibmvnic_init_fail:
- release_sub_crqs(adapter);
+ release_sub_crqs(adapter, 1);
release_crq_queue(adapter);
free_netdev(netdev);
@@ -4491,9 +4546,12 @@ static int ibmvnic_remove(struct vio_dev *dev)
mutex_lock(&adapter->reset_lock);
release_resources(adapter);
- release_sub_crqs(adapter);
+ release_sub_crqs(adapter, 1);
release_crq_queue(adapter);
+ release_stats_token(adapter);
+ release_stats_buffers(adapter);
+
adapter->state = VNIC_REMOVED;
mutex_unlock(&adapter->reset_lock);
diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
index fe21a6e2ddae..099c89d49945 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.h
+++ b/drivers/net/ethernet/ibm/ibmvnic.h
@@ -909,6 +909,7 @@ struct ibmvnic_tx_buff {
union sub_crq indir_arr[6];
u8 hdr_data[140];
dma_addr_t indir_dma;
+ int num_entries;
};
struct ibmvnic_tx_pool {
@@ -1091,8 +1092,11 @@ struct ibmvnic_adapter {
u64 opt_rxba_entries_per_subcrq;
__be64 tx_rx_desc_req;
u8 map_id;
- u64 num_active_rx_pools;
- u64 num_active_tx_pools;
+ u32 num_active_rx_scrqs;
+ u32 num_active_rx_pools;
+ u32 num_active_rx_napi;
+ u32 num_active_tx_scrqs;
+ u32 num_active_tx_pools;
struct tasklet_struct tasklet;
enum vnic_state state;
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_common.c b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
index 736a9f087bc9..c58a5377a287 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_common.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_common.c
@@ -1,5 +1,5 @@
/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
+ * Copyright(c) 2013 - 2018 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -262,6 +262,7 @@ s32 fm10k_stop_hw_generic(struct fm10k_hw *hw)
* fm10k_read_hw_stats_32b - Reads value of 32-bit registers
* @hw: pointer to the hardware structure
* @addr: address of register containing a 32-bit value
+ * @stat: pointer to structure holding hw stat information
*
* Function reads the content of the register and returns the delta
* between the base and the current value.
@@ -281,6 +282,7 @@ u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr,
* fm10k_read_hw_stats_48b - Reads value of 48-bit registers
* @hw: pointer to the hardware structure
* @addr: address of register containing the lower 32-bit value
+ * @stat: pointer to structure holding hw stat information
*
* Function reads the content of 2 registers, combined to represent a 48-bit
* statistical value. Extra processing is required to handle overflowing.
@@ -461,7 +463,6 @@ void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
/**
* fm10k_unbind_hw_stats_q - Unbind the queue counters from their queues
- * @hw: pointer to the hardware structure
* @q: pointer to the ring of hardware statistics queue
* @idx: index pointing to the start of the ring iteration
* @count: number of queues to iterate over
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
index 8e12aae065d8..2c93d719438f 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -28,13 +28,13 @@
#include "fm10k.h"
-#define DRV_VERSION "0.22.1-k"
+#define DRV_VERSION "0.23.4-k"
#define DRV_SUMMARY "Intel(R) Ethernet Switch Host Interface Driver"
const char fm10k_driver_version[] = DRV_VERSION;
char fm10k_driver_name[] = "fm10k";
static const char fm10k_driver_string[] = DRV_SUMMARY;
static const char fm10k_copyright[] =
- "Copyright(c) 2013 - 2017 Intel Corporation.";
+ "Copyright(c) 2013 - 2018 Intel Corporation.";
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
index a38ae5c54da3..75c99aed3c41 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1,5 +1,5 @@
/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
+ * Copyright(c) 2013 - 2018 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -486,7 +486,7 @@ static void fm10k_insert_tunnel_port(struct list_head *ports,
/**
* fm10k_udp_tunnel_add
- * @netdev: network interface device structure
+ * @dev: network interface device structure
* @ti: Tunnel endpoint information
*
* This function is called when a new UDP tunnel port has been added.
@@ -518,8 +518,8 @@ static void fm10k_udp_tunnel_add(struct net_device *dev,
/**
* fm10k_udp_tunnel_del
- * @netdev: network interface device structure
- * @ti: Tunnel endpoint information
+ * @dev: network interface device structure
+ * @ti: Tunnel end point information
*
* This function is called when a new UDP tunnel port is deleted. The freed
* port will be removed from the list, then we reprogram the offloaded port
@@ -803,7 +803,7 @@ int fm10k_queue_vlan_request(struct fm10k_intfc *interface,
* @glort: the target glort for this update
* @addr: the address to update
* @vid: the vid to update
- * @sync: whether to add or remove
+ * @set: whether to add or remove
*
* This function queues up a MAC request for sending to the switch manager.
* A separate thread monitors the queue and sends updates to the switch
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
index a434fecfdfeb..50f53e403ef5 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pci.c
@@ -1,5 +1,5 @@
/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
+ * Copyright(c) 2013 - 2018 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -29,7 +29,7 @@ static const struct fm10k_info *fm10k_info_tbl[] = {
[fm10k_device_vf] = &fm10k_vf_info,
};
-/**
+/*
* fm10k_pci_tbl - PCI Device ID Table
*
* Wildcard entries (PCI_ANY_ID) should come last
@@ -211,7 +211,7 @@ static void fm10k_start_service_event(struct fm10k_intfc *interface)
/**
* fm10k_service_timer - Timer Call-back
- * @data: pointer to interface cast into an unsigned long
+ * @t: pointer to timer data
**/
static void fm10k_service_timer(struct timer_list *t)
{
@@ -649,7 +649,7 @@ void fm10k_update_stats(struct fm10k_intfc *interface)
/**
* fm10k_watchdog_flush_tx - flush queues on host not ready
- * @interface - pointer to the device interface structure
+ * @interface: pointer to the device interface structure
**/
static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface)
{
@@ -679,7 +679,7 @@ static void fm10k_watchdog_flush_tx(struct fm10k_intfc *interface)
/**
* fm10k_watchdog_subtask - check and bring link up
- * @interface - pointer to the device interface structure
+ * @interface: pointer to the device interface structure
**/
static void fm10k_watchdog_subtask(struct fm10k_intfc *interface)
{
@@ -703,7 +703,7 @@ static void fm10k_watchdog_subtask(struct fm10k_intfc *interface)
/**
* fm10k_check_hang_subtask - check for hung queues and dropped interrupts
- * @interface - pointer to the device interface structure
+ * @interface: pointer to the device interface structure
*
* This function serves two purposes. First it strobes the interrupt lines
* in order to make certain interrupts are occurring. Secondly it sets the
@@ -1995,6 +1995,7 @@ skip_tx_dma_drain:
/**
* fm10k_sw_init - Initialize general software structures
* @interface: host interface private structure to initialize
+ * @ent: PCI device ID entry
*
* fm10k_sw_init initializes the interface private data structure.
* Fields are initialized based on PCI device information and
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
index d6406fc31ffb..bee192fe2ffb 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_pf.c
@@ -1,5 +1,5 @@
/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2017 Intel Corporation.
+ * Copyright(c) 2013 - 2018 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -1180,7 +1180,7 @@ s32 fm10k_iov_msg_msix_pf(struct fm10k_hw *hw, u32 **results,
/**
* fm10k_iov_select_vid - Select correct default VLAN ID
- * @hw: Pointer to hardware structure
+ * @vf_info: pointer to VF information structure
* @vid: VLAN ID to correct
*
* Will report an error if the VLAN ID is out of range. For VID = 0, it will
diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
index f8e87bf086b9..9d0d31da426b 100644
--- a/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
+++ b/drivers/net/ethernet/intel/fm10k/fm10k_tlv.c
@@ -1,5 +1,5 @@
/* Intel(R) Ethernet Switch Host Interface Driver
- * Copyright(c) 2013 - 2016 Intel Corporation.
+ * Copyright(c) 2013 - 2018 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -120,6 +120,7 @@ static s32 fm10k_tlv_attr_get_null_string(u32 *attr, unsigned char *string)
* @msg: Pointer to message block
* @attr_id: Attribute ID
* @mac_addr: MAC address to be stored
+ * @vlan: VLAN to be stored
*
* This function will reorder a MAC address to be CPU endian and store it
* in the attribute buffer. It will return success if provided with a
@@ -155,8 +156,8 @@ s32 fm10k_tlv_attr_put_mac_vlan(u32 *msg, u16 attr_id,
/**
* fm10k_tlv_attr_get_mac_vlan - Get MAC/VLAN stored in attribute
* @attr: Pointer to attribute
- * @attr_id: Attribute ID
* @mac_addr: location of buffer to store MAC address
+ * @vlan: location of buffer to store VLAN
*
* This function pulls the MAC address back out of the attribute and will
* place it in the array pointed to by mac_addr. It will return success
@@ -549,7 +550,7 @@ static s32 fm10k_tlv_attr_parse(u32 *attr, u32 **results,
* @hw: Pointer to hardware structure
* @msg: Pointer to message
* @mbx: Pointer to mailbox information structure
- * @func: Function array containing list of message handling functions
+ * @data: Pointer to message handler data structure
*
* This function should be the first function called upon receiving a
* message. The handler will identify the message type and call the correct
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 46e9f4e0a02c..36d9401a6258 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -507,6 +507,7 @@ struct i40e_pf {
#define I40E_HW_STOP_FW_LLDP BIT(16)
#define I40E_HW_PORT_ID_VALID BIT(17)
#define I40E_HW_RESTART_AUTONEG BIT(18)
+#define I40E_HW_STOPPABLE_FW_LLDP BIT(19)
u64 flags;
#define I40E_FLAG_RX_CSUM_ENABLED BIT_ULL(0)
@@ -824,6 +825,7 @@ struct i40e_q_vector {
struct i40e_ring_container rx;
struct i40e_ring_container tx;
+ u8 itr_countdown; /* when 0 should adjust adaptive ITR */
u8 num_ringpairs; /* total number of ring pairs in vector */
cpumask_t affinity_mask;
@@ -832,8 +834,6 @@ struct i40e_q_vector {
struct rcu_head rcu; /* to avoid race with update stats on free */
char name[I40E_INT_NAME_STR_LEN];
bool arm_wb_state;
-#define ITR_COUNTDOWN_START 100
- u8 itr_countdown; /* when 0 should adjust ITR */
} ____cacheline_internodealigned_in_smp;
/* lan device */
@@ -1109,4 +1109,10 @@ static inline bool i40e_enabled_xdp_vsi(struct i40e_vsi *vsi)
int i40e_create_queue_channel(struct i40e_vsi *vsi, struct i40e_channel *ch);
int i40e_set_bw_limit(struct i40e_vsi *vsi, u16 seid, u64 max_tx_rate);
+int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
+ struct i40e_cloud_filter *filter,
+ bool add);
+int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
+ struct i40e_cloud_filter *filter,
+ bool add);
#endif /* _I40E_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index a852775d3059..0dfc52772c45 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1914,6 +1914,43 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_DEFAULT = 0xFF,
};
+#define I40E_PHY_TYPES_BITMASK (BIT_ULL(I40E_PHY_TYPE_SGMII) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_KX) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_KR) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4) | \
+ BIT_ULL(I40E_PHY_TYPE_XAUI) | \
+ BIT_ULL(I40E_PHY_TYPE_XFI) | \
+ BIT_ULL(I40E_PHY_TYPE_SFI) | \
+ BIT_ULL(I40E_PHY_TYPE_XLAUI) | \
+ BIT_ULL(I40E_PHY_TYPE_XLPPI) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC) | \
+ BIT_ULL(I40E_PHY_TYPE_UNRECOGNIZED) | \
+ BIT_ULL(I40E_PHY_TYPE_UNSUPPORTED) | \
+ BIT_ULL(I40E_PHY_TYPE_100BASE_TX) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_T) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_T) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_SR) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_LR) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU) | \
+ BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4) | \
+ BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_SX) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_LX) | \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL) | \
+ BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_KR) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_CR) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_SR) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_LR) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_AOC) | \
+ BIT_ULL(I40E_PHY_TYPE_25GBASE_ACC))
+
#define I40E_LINK_SPEED_100MB_SHIFT 0x1
#define I40E_LINK_SPEED_1000MB_SHIFT 0x2
#define I40E_LINK_SPEED_10GB_SHIFT 0x3
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 4c3b4243cf65..b829fd365693 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -155,8 +155,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev, " vlan_features = 0x%08lx\n",
(unsigned long int)nd->vlan_features);
}
- dev_info(&pf->pdev->dev,
- " vlgrp: & = %p\n", vsi->active_vlans);
+ dev_info(&pf->pdev->dev, " active_vlans is %s\n",
+ vsi->active_vlans ? "<valid>" : "<null>");
dev_info(&pf->pdev->dev,
" flags = 0x%08lx, netdev_registered = %i, current_netdev_flags = 0x%04x\n",
vsi->flags, vsi->netdev_registered, vsi->current_netdev_flags);
@@ -270,14 +270,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
continue;
dev_info(&pf->pdev->dev,
- " rx_rings[%i]: desc = %p\n",
- i, rx_ring->desc);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: dev = %p, netdev = %p, rx_bi = %p\n",
- i, rx_ring->dev,
- rx_ring->netdev,
- rx_ring->rx_bi);
- dev_info(&pf->pdev->dev,
" rx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
i, *rx_ring->state,
rx_ring->queue_index,
@@ -307,17 +299,12 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
rx_ring->rx_stats.realloc_count,
rx_ring->rx_stats.page_reuse_count);
dev_info(&pf->pdev->dev,
- " rx_rings[%i]: size = %i, dma = 0x%08lx\n",
- i, rx_ring->size,
- (unsigned long int)rx_ring->dma);
- dev_info(&pf->pdev->dev,
- " rx_rings[%i]: vsi = %p, q_vector = %p\n",
- i, rx_ring->vsi,
- rx_ring->q_vector);
+ " rx_rings[%i]: size = %i\n",
+ i, rx_ring->size);
dev_info(&pf->pdev->dev,
- " rx_rings[%i]: rx_itr_setting = %d (%s)\n",
- i, rx_ring->rx_itr_setting,
- ITR_IS_DYNAMIC(rx_ring->rx_itr_setting) ? "dynamic" : "fixed");
+ " rx_rings[%i]: itr_setting = %d (%s)\n",
+ i, rx_ring->itr_setting,
+ ITR_IS_DYNAMIC(rx_ring->itr_setting) ? "dynamic" : "fixed");
}
for (i = 0; i < vsi->num_queue_pairs; i++) {
struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]);
@@ -326,14 +313,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
continue;
dev_info(&pf->pdev->dev,
- " tx_rings[%i]: desc = %p\n",
- i, tx_ring->desc);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: dev = %p, netdev = %p, tx_bi = %p\n",
- i, tx_ring->dev,
- tx_ring->netdev,
- tx_ring->tx_bi);
- dev_info(&pf->pdev->dev,
" tx_rings[%i]: state = %lu, queue_index = %d, reg_idx = %d\n",
i, *tx_ring->state,
tx_ring->queue_index,
@@ -355,20 +334,15 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
tx_ring->tx_stats.tx_busy,
tx_ring->tx_stats.tx_done_old);
dev_info(&pf->pdev->dev,
- " tx_rings[%i]: size = %i, dma = 0x%08lx\n",
- i, tx_ring->size,
- (unsigned long int)tx_ring->dma);
- dev_info(&pf->pdev->dev,
- " tx_rings[%i]: vsi = %p, q_vector = %p\n",
- i, tx_ring->vsi,
- tx_ring->q_vector);
+ " tx_rings[%i]: size = %i\n",
+ i, tx_ring->size);
dev_info(&pf->pdev->dev,
" tx_rings[%i]: DCB tc = %d\n",
i, tx_ring->dcb_tc);
dev_info(&pf->pdev->dev,
- " tx_rings[%i]: tx_itr_setting = %d (%s)\n",
- i, tx_ring->tx_itr_setting,
- ITR_IS_DYNAMIC(tx_ring->tx_itr_setting) ? "dynamic" : "fixed");
+ " tx_rings[%i]: itr_setting = %d (%s)\n",
+ i, tx_ring->itr_setting,
+ ITR_IS_DYNAMIC(tx_ring->itr_setting) ? "dynamic" : "fixed");
}
rcu_read_unlock();
dev_info(&pf->pdev->dev,
@@ -466,8 +440,6 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
vsi->info.resp_reserved[6], vsi->info.resp_reserved[7],
vsi->info.resp_reserved[8], vsi->info.resp_reserved[9],
vsi->info.resp_reserved[10], vsi->info.resp_reserved[11]);
- if (vsi->back)
- dev_info(&pf->pdev->dev, " PF = %p\n", vsi->back);
dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx);
dev_info(&pf->pdev->dev,
" tc_config: numtc = %d, enabled_tc = 0x%x\n",
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 2f5bee713fef..89807e32a898 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -230,6 +230,8 @@ static const struct i40e_priv_flags i40e_gstrings_priv_flags[] = {
I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENABLED, 0),
I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENABLED, 0),
I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENABLED, 0),
+ I40E_PRIV_FLAG("link-down-on-close",
+ I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED, 0),
I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX, 0),
I40E_PRIV_FLAG("disable-source-pruning",
I40E_FLAG_SOURCE_PRUNING_DISABLED, 0),
@@ -2244,14 +2246,14 @@ static int __i40e_get_coalesce(struct net_device *netdev,
rx_ring = vsi->rx_rings[queue];
tx_ring = vsi->tx_rings[queue];
- if (ITR_IS_DYNAMIC(rx_ring->rx_itr_setting))
+ if (ITR_IS_DYNAMIC(rx_ring->itr_setting))
ec->use_adaptive_rx_coalesce = 1;
- if (ITR_IS_DYNAMIC(tx_ring->tx_itr_setting))
+ if (ITR_IS_DYNAMIC(tx_ring->itr_setting))
ec->use_adaptive_tx_coalesce = 1;
- ec->rx_coalesce_usecs = rx_ring->rx_itr_setting & ~I40E_ITR_DYNAMIC;
- ec->tx_coalesce_usecs = tx_ring->tx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->rx_coalesce_usecs = rx_ring->itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->tx_coalesce_usecs = tx_ring->itr_setting & ~I40E_ITR_DYNAMIC;
/* we use the _usecs_high to store/set the interrupt rate limit
* that the hardware supports, that almost but not quite
@@ -2311,34 +2313,35 @@ static void i40e_set_itr_per_queue(struct i40e_vsi *vsi,
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
struct i40e_q_vector *q_vector;
- u16 vector, intrl;
+ u16 intrl;
intrl = i40e_intrl_usec_to_reg(vsi->int_rate_limit);
- rx_ring->rx_itr_setting = ec->rx_coalesce_usecs;
- tx_ring->tx_itr_setting = ec->tx_coalesce_usecs;
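+ /* Align the requested usecs to what the ITR register can represent */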
+ rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
+ tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
if (ec->use_adaptive_rx_coalesce)
- rx_ring->rx_itr_setting |= I40E_ITR_DYNAMIC;
+ rx_ring->itr_setting |= I40E_ITR_DYNAMIC;
else
- rx_ring->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+ rx_ring->itr_setting &= ~I40E_ITR_DYNAMIC;
if (ec->use_adaptive_tx_coalesce)
- tx_ring->tx_itr_setting |= I40E_ITR_DYNAMIC;
+ tx_ring->itr_setting |= I40E_ITR_DYNAMIC;
else
- tx_ring->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
+ tx_ring->itr_setting &= ~I40E_ITR_DYNAMIC;
q_vector = rx_ring->q_vector;
- q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting);
- vector = vsi->base_vector + q_vector->v_idx;
- wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1), q_vector->rx.itr);
+ q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
q_vector = tx_ring->q_vector;
- q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting);
- vector = vsi->base_vector + q_vector->v_idx;
- wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1), q_vector->tx.itr);
+ q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
- wr32(hw, I40E_PFINT_RATEN(vector - 1), intrl);
+ /* The interrupt handler itself will take care of programming
+ * the Tx and Rx ITR values based on the values we have entered
+ * into the q_vector; there is no need to write the values now.
+ */
+
+ wr32(hw, I40E_PFINT_RATEN(q_vector->reg_idx), intrl);
i40e_flush(hw);
}
@@ -2364,11 +2367,11 @@ static int __i40e_set_coalesce(struct net_device *netdev,
vsi->work_limit = ec->tx_max_coalesced_frames_irq;
if (queue < 0) {
- cur_rx_itr = vsi->rx_rings[0]->rx_itr_setting;
- cur_tx_itr = vsi->tx_rings[0]->tx_itr_setting;
+ cur_rx_itr = vsi->rx_rings[0]->itr_setting;
+ cur_tx_itr = vsi->tx_rings[0]->itr_setting;
} else if (queue < vsi->num_queue_pairs) {
- cur_rx_itr = vsi->rx_rings[queue]->rx_itr_setting;
- cur_tx_itr = vsi->tx_rings[queue]->tx_itr_setting;
+ cur_rx_itr = vsi->rx_rings[queue]->itr_setting;
+ cur_tx_itr = vsi->tx_rings[queue]->itr_setting;
} else {
netif_info(pf, drv, netdev, "Invalid queue value, queue range is 0 - %d\n",
vsi->num_queue_pairs - 1);
@@ -2396,7 +2399,7 @@ static int __i40e_set_coalesce(struct net_device *netdev,
return -EINVAL;
}
- if (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)) {
+ if (ec->rx_coalesce_usecs > I40E_MAX_ITR) {
netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
return -EINVAL;
}
@@ -2407,16 +2410,16 @@ static int __i40e_set_coalesce(struct net_device *netdev,
return -EINVAL;
}
- if (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)) {
+ if (ec->tx_coalesce_usecs > I40E_MAX_ITR) {
netif_info(pf, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
return -EINVAL;
}
if (ec->use_adaptive_rx_coalesce && !cur_rx_itr)
- ec->rx_coalesce_usecs = I40E_MIN_ITR << 1;
+ ec->rx_coalesce_usecs = I40E_MIN_ITR;
if (ec->use_adaptive_tx_coalesce && !cur_tx_itr)
- ec->tx_coalesce_usecs = I40E_MIN_ITR << 1;
+ ec->tx_coalesce_usecs = I40E_MIN_ITR;
intrl_reg = i40e_intrl_usec_to_reg(ec->rx_coalesce_usecs_high);
vsi->int_rate_limit = INTRL_REG_TO_USEC(intrl_reg);
@@ -4406,6 +4409,8 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
}
flags_complete:
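+ /* Compute changed_flags before validation; the checks below act only on bits that actually changed */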
+ changed_flags = orig_flags ^ new_flags;
+
/* Before we finalize any flag changes, we need to perform some
* checks to ensure that the changes are supported and safe.
*/
@@ -4415,21 +4420,17 @@ flags_complete:
!(pf->hw_features & I40E_HW_ATR_EVICT_CAPABLE))
return -EOPNOTSUPP;
- /* Disable FW LLDP not supported if NPAR active or if FW
- * API version < 1.7
+ /* If the driver detected FW LLDP was disabled on init, this flag could
+ * be set, however we do not support _changing_ the flag if NPAR is
+ * enabled or FW API version < 1.7. There are situations where older
+ * FW versions/NPAR enabled PFs could disable LLDP, however we _must_
+ * not allow the user to enable/disable LLDP with this flag on
+ * unsupported FW versions.
*/
- if (new_flags & I40E_FLAG_DISABLE_FW_LLDP) {
- if (pf->hw.func_caps.npar_enable) {
- dev_warn(&pf->pdev->dev,
- "Unable to stop FW LLDP if NPAR active\n");
- return -EOPNOTSUPP;
- }
-
- if (pf->hw.aq.api_maj_ver < 1 ||
- (pf->hw.aq.api_maj_ver == 1 &&
- pf->hw.aq.api_min_ver < 7)) {
+ if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
+ if (!(pf->hw_features & I40E_HW_STOPPABLE_FW_LLDP)) {
dev_warn(&pf->pdev->dev,
- "FW ver does not support stopping FW LLDP\n");
+ "Device does not support changing FW LLDP\n");
return -EOPNOTSUPP;
}
}
@@ -4439,6 +4440,10 @@ flags_complete:
* something else has modified the flags variable since we copied it
* originally. We'll just punt with an error and log something in the
* message buffer.
+ *
+ * This is the point of no return for this function. We need to have
+ * checked any discrepancies or misconfigurations and returned
+ * EOPNOTSUPP before updating pf->flags here.
*/
if (cmpxchg64(&pf->flags, orig_flags, new_flags) != orig_flags) {
dev_warn(&pf->pdev->dev,
@@ -4446,8 +4451,6 @@ flags_complete:
return -EAGAIN;
}
- changed_flags = orig_flags ^ new_flags;
-
/* Process any additional changes needed as a result of flag changes.
* The changed_flags value reflects the list of bits that were
* changed in the code above.
@@ -4479,6 +4482,12 @@ flags_complete:
}
}
+ if ((changed_flags & pf->flags &
+ I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED) &&
+ (pf->flags & I40E_FLAG_MFP_ENABLED))
+ dev_warn(&pf->pdev->dev,
+ "Turning on link-down-on-close flag may affect other partitions\n");
+
if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) {
if (pf->flags & I40E_FLAG_DISABLE_FW_LLDP) {
struct i40e_dcbx_config *dcbcfg;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index e31adbc75f9c..be9a1467a1a1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -69,12 +69,6 @@ static int i40e_reset(struct i40e_pf *pf);
static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
-static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
- struct i40e_cloud_filter *filter,
- bool add);
-static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
- struct i40e_cloud_filter *filter,
- bool add);
static int i40e_get_capabilities(struct i40e_pf *pf,
enum i40e_admin_queue_opc list_type);
@@ -215,8 +209,8 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
dev_info(&pf->pdev->dev,
- "param err: pile=%p needed=%d id=0x%04x\n",
- pile, needed, id);
+ "param err: pile=%s needed=%d id=0x%04x\n",
+ pile ? "<valid>" : "<null>", needed, id);
return -EINVAL;
}
@@ -1380,14 +1374,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
ether_addr_copy(f->macaddr, macaddr);
f->vlan = vlan;
- /* If we're in overflow promisc mode, set the state directly
- * to failed, so we don't bother to try sending the filter
- * to the hardware.
- */
- if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state))
- f->state = I40E_FILTER_FAILED;
- else
- f->state = I40E_FILTER_NEW;
+ f->state = I40E_FILTER_NEW;
INIT_HLIST_NODE(&f->hlist);
key = i40e_addr_to_hkey(macaddr);
@@ -2116,17 +2103,16 @@ void i40e_aqc_del_filters(struct i40e_vsi *vsi, const char *vsi_name,
* @list: the list of filters to send to firmware
* @add_head: Position in the add hlist
* @num_add: the number of filters to add
- * @promisc_change: set to true on exit if promiscuous mode was forced on
*
* Send a request to firmware via AdminQ to add a chunk of filters. Will set
- * promisc_changed to true if the firmware has run out of space for more
- * filters.
+ * __I40E_VSI_OVERFLOW_PROMISC bit in vsi->state if the firmware has run out of
+ * space for more filters.
*/
static
void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
struct i40e_aqc_add_macvlan_element_data *list,
struct i40e_new_mac_filter *add_head,
- int num_add, bool *promisc_changed)
+ int num_add)
{
struct i40e_hw *hw = &vsi->back->hw;
int aq_err, fcnt;
@@ -2136,7 +2122,6 @@ void i40e_aqc_add_filters(struct i40e_vsi *vsi, const char *vsi_name,
fcnt = i40e_update_filter_state(num_add, list, add_head);
if (fcnt != num_add) {
- *promisc_changed = true;
set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
dev_warn(&vsi->back->pdev->dev,
"Error %s adding RX filters on %s, promiscuous mode forced on\n",
@@ -2177,11 +2162,13 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name,
NULL);
}
- if (aq_ret)
+ if (aq_ret) {
+ set_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
dev_warn(&vsi->back->pdev->dev,
- "Error %s setting broadcast promiscuous mode on %s\n",
+ "Error %s, forcing overflow promiscuous on %s\n",
i40e_aq_str(hw, hw->aq.asq_last_status),
vsi_name);
+ }
return aq_ret;
}
@@ -2267,9 +2254,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
struct i40e_mac_filter *f;
struct i40e_new_mac_filter *new, *add_head = NULL;
struct i40e_hw *hw = &vsi->back->hw;
+ bool old_overflow, new_overflow;
unsigned int failed_filters = 0;
unsigned int vlan_filters = 0;
- bool promisc_changed = false;
char vsi_name[16] = "PF";
int filter_list_len = 0;
i40e_status aq_ret = 0;
@@ -2291,6 +2278,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
usleep_range(1000, 2000);
pf = vsi->back;
+ old_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
+
if (vsi->netdev) {
changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
vsi->current_netdev_flags = vsi->netdev->flags;
@@ -2423,12 +2412,6 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
num_add = 0;
hlist_for_each_entry_safe(new, h, &tmp_add_list, hlist) {
- if (test_bit(__I40E_VSI_OVERFLOW_PROMISC,
- vsi->state)) {
- new->state = I40E_FILTER_FAILED;
- continue;
- }
-
/* handle broadcast filters by updating the broadcast
* promiscuous flag instead of adding a MAC filter.
*/
@@ -2464,15 +2447,14 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
/* flush a full buffer */
if (num_add == filter_list_len) {
i40e_aqc_add_filters(vsi, vsi_name, add_list,
- add_head, num_add,
- &promisc_changed);
+ add_head, num_add);
memset(add_list, 0, list_size);
num_add = 0;
}
}
if (num_add) {
i40e_aqc_add_filters(vsi, vsi_name, add_list, add_head,
- num_add, &promisc_changed);
+ num_add);
}
/* Now move all of the filters from the temp add list back to
* the VSI's list.
@@ -2501,24 +2483,16 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
- /* If promiscuous mode has changed, we need to calculate a new
- * threshold for when we are safe to exit
- */
- if (promisc_changed)
- vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
-
/* Check if we are able to exit overflow promiscuous mode. We can
* safely exit if we didn't just enter, we no longer have any failed
* filters, and we have reduced filters below the threshold value.
*/
- if (test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state) &&
- !promisc_changed && !failed_filters &&
- (vsi->active_filters < vsi->promisc_threshold)) {
+ if (old_overflow && !failed_filters &&
+ vsi->active_filters < vsi->promisc_threshold) {
dev_info(&pf->pdev->dev,
"filter logjam cleared on %s, leaving overflow promiscuous mode\n",
vsi_name);
clear_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
- promisc_changed = true;
vsi->promisc_threshold = 0;
}
@@ -2528,6 +2502,14 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
goto out;
}
+ new_overflow = test_bit(__I40E_VSI_OVERFLOW_PROMISC, vsi->state);
+
+ /* If we are entering overflow promiscuous, we need to calculate a new
+ * threshold for when we are safe to exit
+ */
+ if (!old_overflow && new_overflow)
+ vsi->promisc_threshold = (vsi->active_filters * 3) / 4;
+
/* check for changes in promiscuous modes */
if (changed_flags & IFF_ALLMULTI) {
bool cur_multipromisc;
@@ -2548,12 +2530,11 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
}
}
- if ((changed_flags & IFF_PROMISC) || promisc_changed) {
+ if ((changed_flags & IFF_PROMISC) || old_overflow != new_overflow) {
bool cur_promisc;
cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
- test_bit(__I40E_VSI_OVERFLOW_PROMISC,
- vsi->state));
+ new_overflow);
aq_ret = i40e_set_promiscuous(pf, cur_promisc);
if (aq_ret) {
retval = i40e_aq_rc_to_posix(aq_ret,
@@ -3449,15 +3430,20 @@ static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
struct i40e_q_vector *q_vector = vsi->q_vectors[i];
- q_vector->itr_countdown = ITR_COUNTDOWN_START;
- q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[i]->rx_itr_setting);
- q_vector->rx.latency_range = I40E_LOW_LATENCY;
+ q_vector->rx.next_update = jiffies + 1;
+ q_vector->rx.target_itr =
+ ITR_TO_REG(vsi->rx_rings[i]->itr_setting);
wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
- q_vector->rx.itr);
- q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[i]->tx_itr_setting);
- q_vector->tx.latency_range = I40E_LOW_LATENCY;
+ q_vector->rx.target_itr);
+ q_vector->rx.current_itr = q_vector->rx.target_itr;
+
+ q_vector->tx.next_update = jiffies + 1;
+ q_vector->tx.target_itr =
+ ITR_TO_REG(vsi->tx_rings[i]->itr_setting);
wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
- q_vector->tx.itr);
+ q_vector->tx.target_itr);
+ q_vector->tx.current_itr = q_vector->tx.target_itr;
+
wr32(hw, I40E_PFINT_RATEN(vector - 1),
i40e_intrl_usec_to_reg(vsi->int_rate_limit));
@@ -3558,13 +3544,14 @@ static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
u32 val;
/* set the ITR configuration */
- q_vector->itr_countdown = ITR_COUNTDOWN_START;
- q_vector->rx.itr = ITR_TO_REG(vsi->rx_rings[0]->rx_itr_setting);
- q_vector->rx.latency_range = I40E_LOW_LATENCY;
- wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
- q_vector->tx.itr = ITR_TO_REG(vsi->tx_rings[0]->tx_itr_setting);
- q_vector->tx.latency_range = I40E_LOW_LATENCY;
- wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);
+ q_vector->rx.next_update = jiffies + 1;
+ q_vector->rx.target_itr = ITR_TO_REG(vsi->rx_rings[0]->itr_setting);
+ wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.target_itr);
+ q_vector->rx.current_itr = q_vector->rx.target_itr;
+ q_vector->tx.next_update = jiffies + 1;
+ q_vector->tx.target_itr = ITR_TO_REG(vsi->tx_rings[0]->itr_setting);
+ wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.target_itr);
+ q_vector->tx.current_itr = q_vector->tx.target_itr;
i40e_enable_misc_int_causes(pf);
@@ -5375,7 +5362,7 @@ out:
* @vsi: VSI to be configured
*
**/
-int i40e_get_link_speed(struct i40e_vsi *vsi)
+static int i40e_get_link_speed(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
@@ -6560,6 +6547,75 @@ int i40e_up(struct i40e_vsi *vsi)
}
/**
+ * i40e_force_link_state - Force the link status
+ * @pf: board private structure
+ * @is_up: whether the link state should be forced up or down
+ **/
+static i40e_status i40e_force_link_state(struct i40e_pf *pf, bool is_up)
+{
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ struct i40e_aq_set_phy_config config = {0};
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status err;
+ u64 mask;
+
+ /* Get the current phy config */
+ err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+ NULL);
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "failed to get phy cap., ret = %s last_status = %s\n",
+ i40e_stat_str(hw, err),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ return err;
+ }
+
+ /* If link needs to go up, but was not forced to go down,
+ * no need for a flap
+ */
+ if (is_up && abilities.phy_type != 0)
+ return I40E_SUCCESS;
+
+ /* To force link we need to set bits for all supported PHY types,
+ * but there are now more than 32, so we need to split the bitmap
+ * across two fields.
+ */
+ mask = I40E_PHY_TYPES_BITMASK;
+ config.phy_type = is_up ? cpu_to_le32((u32)(mask & 0xffffffff)) : 0;
+ config.phy_type_ext = is_up ? (u8)((mask >> 32) & 0xff) : 0;
+ /* Copy the old settings, except of phy_type */
+ config.abilities = abilities.abilities;
+ config.link_speed = abilities.link_speed;
+ config.eee_capability = abilities.eee_capability;
+ config.eeer = abilities.eeer_val;
+ config.low_power_ctrl = abilities.d3_lpan;
+ err = i40e_aq_set_phy_config(hw, &config, NULL);
+
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "set phy config ret = %s last_status = %s\n",
+ i40e_stat_str(&pf->hw, err),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ return err;
+ }
+
+ /* Update the link info */
+ err = i40e_update_link_info(hw);
+ if (err) {
+ /* Wait a little bit (on 40G cards it sometimes takes a really
+ * long time for link to come back from the atomic reset)
+ * and try once more
+ */
+ msleep(1000);
+ i40e_update_link_info(hw);
+ }
+
+ i40e_aq_set_link_restart_an(hw, true, NULL);
+
+ return I40E_SUCCESS;
+}
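The 64-bit PHY type bitmap split above is worth sanity-checking in isolation. A minimal standalone sketch of the same masking arithmetic, with a made-up 38-bit mask standing in for I40E_PHY_TYPES_BITMASK and a simplified struct in place of the real admin-queue layout (both are illustrative assumptions; endianness conversion is omitted here):

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-ins for the driver's definitions */
#define PHY_TYPES_BITMASK 0x0000003fffffffffULL /* example: 38 PHY types */

struct phy_config {
	uint32_t phy_type;     /* PHY type bits 0..31 */
	uint8_t  phy_type_ext; /* PHY type bits 32..39 */
};

int main(void)
{
	uint64_t mask = PHY_TYPES_BITMASK;
	struct phy_config cfg;

	/* same split as i40e_force_link_state(): low word plus high byte */
	cfg.phy_type = (uint32_t)(mask & 0xffffffff);
	cfg.phy_type_ext = (uint8_t)((mask >> 32) & 0xff);

	printf("phy_type=0x%08x phy_type_ext=0x%02x\n",
	       cfg.phy_type, cfg.phy_type_ext);
	return 0;
}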
+
+/**
* i40e_down - Shutdown the connection processing
* @vsi: the VSI being stopped
**/
@@ -6576,6 +6632,9 @@ void i40e_down(struct i40e_vsi *vsi)
}
i40e_vsi_disable_irq(vsi);
i40e_vsi_stop_rings(vsi);
+ if (vsi->type == I40E_VSI_MAIN &&
+ vsi->back->flags & I40E_FLAG_LINK_DOWN_ON_CLOSE_ENABLED)
+ i40e_force_link_state(vsi->back, false);
i40e_napi_disable_all(vsi);
for (i = 0; i < vsi->num_queue_pairs; i++) {
@@ -6848,8 +6907,8 @@ i40e_set_cld_element(struct i40e_cloud_filter *filter,
* Add or delete a cloud filter for a specific flow spec.
* Returns 0 if the filter was successfully added.
**/
-static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
- struct i40e_cloud_filter *filter, bool add)
+int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
+ struct i40e_cloud_filter *filter, bool add)
{
struct i40e_aqc_cloud_filters_element_data cld_filter;
struct i40e_pf *pf = vsi->back;
@@ -6915,9 +6974,9 @@ static int i40e_add_del_cloud_filter(struct i40e_vsi *vsi,
* Add or delete a cloud filter for a specific flow spec using big buffer.
* Returns 0 if the filter was successfully added.
**/
-static int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
- struct i40e_cloud_filter *filter,
- bool add)
+int i40e_add_del_cloud_filter_big_buf(struct i40e_vsi *vsi,
+ struct i40e_cloud_filter *filter,
+ bool add)
{
struct i40e_aqc_cloud_filters_element_bb cld_filter;
struct i40e_pf *pf = vsi->back;
@@ -7537,6 +7596,9 @@ int i40e_open(struct net_device *netdev)
netif_carrier_off(netdev);
+ if (i40e_force_link_state(pf, true))
+ return -EAGAIN;
+
err = i40e_vsi_open(vsi);
if (err)
return err;
@@ -9215,6 +9277,17 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired)
}
i40e_get_oem_version(&pf->hw);
+ if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) &&
+ ((hw->aq.fw_maj_ver == 4 && hw->aq.fw_min_ver <= 33) ||
+ hw->aq.fw_maj_ver < 4) && hw->mac.type == I40E_MAC_XL710) {
+ /* The following delay is necessary for firmware 4.33 and older
+ * to recover after an EMP reset. 200 ms should suffice, but we
+ * use 300 ms to be sure the FW is ready to operate after the
+ * reset.
+ */
+ mdelay(300);
+ }
+
/* re-verify the eeprom if we just had an EMP reset */
if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state))
i40e_verify_eeprom(pf);
@@ -9937,18 +10010,17 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
mutex_lock(&pf->switch_mutex);
if (!pf->vsi[vsi->idx]) {
- dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
- vsi->idx, vsi->idx, vsi, vsi->type);
+ dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](type %d)\n",
+ vsi->idx, vsi->idx, vsi->type);
goto unlock_vsi;
}
if (pf->vsi[vsi->idx] != vsi) {
dev_err(&pf->pdev->dev,
- "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
+ "pf->vsi[%d](type %d) != vsi[%d](type %d): no free!\n",
pf->vsi[vsi->idx]->idx,
- pf->vsi[vsi->idx],
pf->vsi[vsi->idx]->type,
- vsi->idx, vsi, vsi->type);
+ vsi->idx, vsi->type);
goto unlock_vsi;
}
@@ -10018,7 +10090,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
ring->dcb_tc = 0;
if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
- ring->tx_itr_setting = pf->tx_itr_default;
+ ring->itr_setting = pf->tx_itr_default;
vsi->tx_rings[i] = ring++;
if (!i40e_enabled_xdp_vsi(vsi))
@@ -10036,7 +10108,7 @@ static int i40e_alloc_rings(struct i40e_vsi *vsi)
if (vsi->back->hw_features & I40E_HW_WB_ON_ITR_CAPABLE)
ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
set_ring_xdp(ring);
- ring->tx_itr_setting = pf->tx_itr_default;
+ ring->itr_setting = pf->tx_itr_default;
vsi->xdp_rings[i] = ring++;
setup_rx:
@@ -10049,7 +10121,7 @@ setup_rx:
ring->count = vsi->num_desc;
ring->size = 0;
ring->dcb_tc = 0;
- ring->rx_itr_setting = pf->rx_itr_default;
+ ring->itr_setting = pf->rx_itr_default;
vsi->rx_rings[i] = ring;
}
@@ -10328,9 +10400,6 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx, int cpu)
netif_napi_add(vsi->netdev, &q_vector->napi,
i40e_napi_poll, NAPI_POLL_WEIGHT);
- q_vector->rx.latency_range = I40E_LOW_LATENCY;
- q_vector->tx.latency_range = I40E_LOW_LATENCY;
-
/* tie q_vector and vsi together */
vsi->q_vectors[v_idx] = q_vector;
@@ -11089,6 +11158,16 @@ static int i40e_sw_init(struct i40e_pf *pf)
/* IWARP needs one extra vector for CQP just like MISC.*/
pf->num_iwarp_msix = (int)num_online_cpus() + 1;
}
+ /* Stopping the FW LLDP engine is only supported on the
+ * XL710 with a FW API version >= 1.7. Also, stopping the
+ * FW LLDP engine is not supported if NPAR is functioning
+ * on this part
+ */
+ if (pf->hw.mac.type == I40E_MAC_XL710 &&
+ !pf->hw.func_caps.npar_enable &&
+ (pf->hw.aq.api_maj_ver > 1 ||
+ (pf->hw.aq.api_maj_ver == 1 && pf->hw.aq.api_min_ver > 6)))
+ pf->hw_features |= I40E_HW_STOPPABLE_FW_LLDP;
#ifdef CONFIG_PCI_IOV
if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index e554aa6cf070..97cfe944b568 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -708,16 +708,22 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring)
/**
* i40e_get_tx_pending - how many tx descriptors not processed
* @tx_ring: the ring of descriptors
* @in_sw: use the SW head/tail values instead of reading the HW copies
*
* Since there is no access to the ring head register
* in XL710, we need to use our local copies
**/
-u32 i40e_get_tx_pending(struct i40e_ring *ring)
+u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw)
{
u32 head, tail;
- head = i40e_get_head(ring);
- tail = readl(ring->tail);
+ if (!in_sw) {
+ head = i40e_get_head(ring);
+ tail = readl(ring->tail);
+ } else {
+ head = ring->next_to_clean;
+ tail = ring->next_to_use;
+ }
if (head != tail)
return (head < tail) ?
@@ -774,7 +780,7 @@ void i40e_detect_recover_hung(struct i40e_vsi *vsi)
*/
smp_rmb();
tx_ring->tx_stats.prev_pkt_ctr =
- i40e_get_tx_pending(tx_ring) ? packets : -1;
+ i40e_get_tx_pending(tx_ring, true) ? packets : -1;
}
}
}
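For reference, the head/tail arithmetic behind both the in_sw and the register paths of i40e_get_tx_pending() is ordinary ring math with wraparound. A self-contained sketch; the helper below is a plausible reconstruction of that math, not code copied from the driver:

#include <stdint.h>
#include <stdio.h>

/* pending descriptors between head and tail on a ring of `count`
 * entries, accounting for wraparound
 */
static uint32_t ring_pending(uint32_t head, uint32_t tail, uint32_t count)
{
	if (head == tail)
		return 0;
	return (head < tail) ? tail - head : tail + count - head;
}

int main(void)
{
	printf("%u\n", ring_pending(10, 14, 512)); /* 4 */
	printf("%u\n", ring_pending(510, 2, 512)); /* 4, wrapped */
	return 0;
}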
@@ -898,7 +904,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
* them to be written back in case we stay in NAPI.
* In this mode on X722 we do not enable Interrupt.
*/
- unsigned int j = i40e_get_tx_pending(tx_ring);
+ unsigned int j = i40e_get_tx_pending(tx_ring, false);
if (budget &&
((j / WB_STRIDE) == 0) && (j > 0) &&
@@ -995,99 +1001,241 @@ void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
}
}
+static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
+ struct i40e_ring_container *rc)
+{
+ return &q_vector->rx == rc;
+}
+
+static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
+{
+ unsigned int divisor;
+
+ switch (q_vector->vsi->back->hw.phy.link_info.link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
+ break;
+ case I40E_LINK_SPEED_25GB:
+ case I40E_LINK_SPEED_20GB:
+ divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
+ break;
+ default:
+ case I40E_LINK_SPEED_10GB:
+ divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ case I40E_LINK_SPEED_100MB:
+ divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
+ break;
+ }
+
+ return divisor;
+}
+
/**
- * i40e_set_new_dynamic_itr - Find new ITR level
+ * i40e_update_itr - update the dynamic ITR value based on statistics
+ * @q_vector: structure containing interrupt and ring information
* @rc: structure containing ring performance data
*
- * Returns true if ITR changed, false if not
- *
- * Stores a new ITR value based on packets and byte counts during
- * the last interrupt. The advantage of per interrupt computation
- * is faster updates and more accurate ITR for the current traffic
- * pattern. Constants in this function were computed based on
- * theoretical maximum wire speed and thresholds were set based on
- * testing data as well as attempting to minimize response time
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
* while increasing bulk throughput.
**/
-static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
+static void i40e_update_itr(struct i40e_q_vector *q_vector,
+ struct i40e_ring_container *rc)
{
- enum i40e_latency_range new_latency_range = rc->latency_range;
- u32 new_itr = rc->itr;
- int bytes_per_usec;
- unsigned int usecs, estimated_usecs;
+ unsigned int avg_wire_size, packets, bytes, itr;
+ unsigned long next_update = jiffies;
- if (rc->total_packets == 0 || !rc->itr)
- return false;
+ /* If we don't have any rings, or dynamic ITR is disabled, just
+ * leave ourselves set for maximum possible latency so we take
+ * ourselves out of the equation.
+ */
+ if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
+ return;
+
+ /* For Rx we want to push the delay up and default to low latency.
+ * for Tx we want to pull the delay down and default to high latency.
+ */
+ itr = i40e_container_is_rx(q_vector, rc) ?
+ I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
+ I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;
+
+ /* If we haven't updated within the last 1-2 jiffies we can assume
+ * that either packets are coming in so slow there hasn't been
+ * any work, or that there is so much work that NAPI is dealing
+ * with interrupt moderation and we don't need to do anything.
+ */
+ if (time_after(next_update, rc->next_update))
+ goto clear_counts;
+
+ /* If itr_countdown is set it means we programmed an ITR within
+ * the last 4 interrupt cycles. This has a side effect of us
+ * potentially firing an early interrupt. In order to work around
+ * this we need to throw out any data received for a few
+ * interrupts following the update.
+ */
+ if (q_vector->itr_countdown) {
+ itr = rc->target_itr;
+ goto clear_counts;
+ }
+
+ packets = rc->total_packets;
+ bytes = rc->total_bytes;
- usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
- bytes_per_usec = rc->total_bytes / usecs;
+ if (i40e_container_is_rx(q_vector, rc)) {
+ /* On Rx, if there are 1 to 4 packets and fewer than 9000 bytes,
+ * assume insufficient data to use the bulk rate limiting
+ * approach unless Tx is already in bulk rate limiting. We are
+ * likely latency driven.
+ */
+ if (packets && packets < 4 && bytes < 9000 &&
+ (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
+ itr = I40E_ITR_ADAPTIVE_LATENCY;
+ goto adjust_by_size;
+ }
+ } else if (packets < 4) {
+ /* If we have Tx and Rx ITR maxed and Tx ITR is running in
+ * bulk mode and we are receiving 4 or fewer packets just
+ * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
+ * that the Rx can relax.
+ */
+ if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
+ (q_vector->rx.target_itr & I40E_ITR_MASK) ==
+ I40E_ITR_ADAPTIVE_MAX_USECS)
+ goto clear_counts;
+ } else if (packets > 32) {
+ /* If we have processed over 32 packets in a single interrupt
+ * for Tx assume we need to switch over to "bulk" mode.
+ */
+ rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
+ }
- /* The calculations in this algorithm depend on interrupts actually
- * firing at the ITR rate. This may not happen if the packet rate is
- * really low, or if we've been napi polling. Check to make sure
- * that's not the case before we continue.
+ /* We have no packets to actually measure against. This means
+ * either one of the other queues on this vector is active or
+ * we are a Tx queue doing TSO with too high of an interrupt rate.
+ *
+ * Between 4 and 56 we can assume that our current interrupt delay
+ * is only slightly too low. As such we should increase it by a small
+ * fixed amount.
*/
- estimated_usecs = jiffies_to_usecs(jiffies - rc->last_itr_update);
- if (estimated_usecs > usecs) {
- new_latency_range = I40E_LOW_LATENCY;
- goto reset_latency;
+ if (packets < 56) {
+ itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
+ if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
+ itr &= I40E_ITR_ADAPTIVE_LATENCY;
+ itr += I40E_ITR_ADAPTIVE_MAX_USECS;
+ }
+ goto clear_counts;
+ }
+
+ if (packets <= 256) {
+ itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
+ itr &= I40E_ITR_MASK;
+
+ /* Between 56 and 112 is our "goldilocks" zone where we are
+ * working out "just right". Just report that our current
+ * ITR is good for us.
+ */
+ if (packets <= 112)
+ goto clear_counts;
+
+ /* If packet count is 128 or greater we are likely looking
+ * at a slight overrun of the delay we want. Try halving
+ * our delay to see if that will cut the number of packets
+ * in half per interrupt.
+ */
+ itr /= 2;
+ itr &= I40E_ITR_MASK;
+ if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
+ itr = I40E_ITR_ADAPTIVE_MIN_USECS;
+
+ goto clear_counts;
}
- /* simple throttlerate management
- * 0-10MB/s lowest (50000 ints/s)
- * 10-20MB/s low (20000 ints/s)
- * 20-1249MB/s bulk (18000 ints/s)
+ /* The paths below assume we are dealing with a bulk ITR since
+ * number of packets is greater than 256. We are just going to have
+ * to compute a value and try to bring the count under control,
+ * though for smaller packet sizes there isn't much we can do as
+ * NAPI polling will likely be kicking in sooner rather than later.
+ */
+ itr = I40E_ITR_ADAPTIVE_BULK;
+
+adjust_by_size:
+ /* If packet counts are 256 or greater we can assume we have a gross
+ * overestimation of what the rate should be. Instead of trying to
+ * fine-tune it, just use the formula below to dial in an exact value
+ * given the current packet size of the frame.
+ */
+ avg_wire_size = bytes / packets;
+
+ /* The following is a crude approximation of:
+ * wmem_default / (size + overhead) = desired_pkts_per_int
+ * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
+ * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
+ *
+ * Assuming wmem_default is 212992 and overhead is 640 bytes per
+ * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
+ * formula down to
*
- * The math works out because the divisor is in 10^(-6) which
- * turns the bytes/us input value into MB/s values, but
- * make sure to use usecs, as the register values written
- * are in 2 usec increments in the ITR registers, and make sure
- * to use the smoothed values that the countdown timer gives us.
+ * (170 * (size + 24)) / (size + 640) = ITR
+ *
+ * We first do some math on the packet size and then finally bitshift
+ * by 8 after rounding up. We also have to account for PCIe link speed
+ * difference as ITR scales based on this.
*/
- switch (new_latency_range) {
- case I40E_LOWEST_LATENCY:
- if (bytes_per_usec > 10)
- new_latency_range = I40E_LOW_LATENCY;
- break;
- case I40E_LOW_LATENCY:
- if (bytes_per_usec > 20)
- new_latency_range = I40E_BULK_LATENCY;
- else if (bytes_per_usec <= 10)
- new_latency_range = I40E_LOWEST_LATENCY;
- break;
- case I40E_BULK_LATENCY:
- default:
- if (bytes_per_usec <= 20)
- new_latency_range = I40E_LOW_LATENCY;
- break;
+ if (avg_wire_size <= 60) {
+ /* Start at 250k ints/sec */
+ avg_wire_size = 4096;
+ } else if (avg_wire_size <= 380) {
+ /* 250K ints/sec to 60K ints/sec */
+ avg_wire_size *= 40;
+ avg_wire_size += 1696;
+ } else if (avg_wire_size <= 1084) {
+ /* 60K ints/sec to 36K ints/sec */
+ avg_wire_size *= 15;
+ avg_wire_size += 11452;
+ } else if (avg_wire_size <= 1980) {
+ /* 36K ints/sec to 30K ints/sec */
+ avg_wire_size *= 5;
+ avg_wire_size += 22420;
+ } else {
+ /* plateau at a limit of 30K ints/sec */
+ avg_wire_size = 32256;
}
-reset_latency:
- rc->latency_range = new_latency_range;
+ /* If we are in low latency mode, halve our delay, which doubles the
+ * rate to somewhere between 16K and 100K ints/sec
+ */
+ if (itr & I40E_ITR_ADAPTIVE_LATENCY)
+ avg_wire_size /= 2;
- switch (new_latency_range) {
- case I40E_LOWEST_LATENCY:
- new_itr = I40E_ITR_50K;
- break;
- case I40E_LOW_LATENCY:
- new_itr = I40E_ITR_20K;
- break;
- case I40E_BULK_LATENCY:
- new_itr = I40E_ITR_18K;
- break;
- default:
- break;
+ /* Resultant value is 256 times larger than it needs to be. This
+ * gives us room to adjust the value as needed to either increase
+ * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
+ *
+ * Use addition as we have already recorded the new latency flag
+ * for the ITR value.
+ */
+ itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
+ I40E_ITR_ADAPTIVE_MIN_INC;
+
+ if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
+ itr &= I40E_ITR_ADAPTIVE_LATENCY;
+ itr += I40E_ITR_ADAPTIVE_MAX_USECS;
}
+clear_counts:
+ /* write back value */
+ rc->target_itr = itr;
+
+ /* next update should occur within next jiffy */
+ rc->next_update = next_update + 1;
+
rc->total_bytes = 0;
rc->total_packets = 0;
- rc->last_itr_update = jiffies;
-
- if (new_itr != rc->itr) {
- rc->itr = new_itr;
- return true;
- }
- return false;
}
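To see how the adjust_by_size ladder behaves end to end, here is a standalone sketch of the same piecewise scaling followed by the divisor step. The divisor arguments mirror i40e_itr_divisor() above (2 * 1024 for 40G, 2 * 256 for 10G) and I40E_ITR_ADAPTIVE_MIN_INC is 2 per the header changes below; the sketch leaves out the latency-halving and final clamping steps:

#include <stdio.h>

#define ITR_ADAPTIVE_MIN_INC 2
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* same size-based ladder as the adjust_by_size path above */
static unsigned int itr_from_wire_size(unsigned int avg_wire_size,
				       unsigned int divisor)
{
	if (avg_wire_size <= 60)
		avg_wire_size = 4096;           /* ~250K ints/sec at 40G */
	else if (avg_wire_size <= 380)
		avg_wire_size = avg_wire_size * 40 + 1696;
	else if (avg_wire_size <= 1084)
		avg_wire_size = avg_wire_size * 15 + 11452;
	else if (avg_wire_size <= 1980)
		avg_wire_size = avg_wire_size * 5 + 22420;
	else
		avg_wire_size = 32256;          /* 30K ints/sec plateau */

	return DIV_ROUND_UP(avg_wire_size, divisor) * ITR_ADAPTIVE_MIN_INC;
}

int main(void)
{
	/* 1500-byte frames at 40G: (1500 * 5 + 22420) / 2048 -> 15, * 2 = 30 */
	printf("%u usecs\n", itr_from_wire_size(1500, 2 * 1024));
	/* tiny frames at 10G: 4096 / 512 -> 8, * 2 = 16 */
	printf("%u usecs\n", itr_from_wire_size(60, 2 * 256));
	return 0;
}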
/**
@@ -1991,7 +2139,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
* @rx_buffer: rx buffer to pull data from
*
* This function will clean up the contents of the rx_buffer. It will
- * either recycle the bufer or unmap it and free the associated resources.
+ * either recycle the buffer or unmap it and free the associated resources.
*/
static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer)
@@ -2274,29 +2422,45 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
return failure ? budget : (int)total_rx_packets;
}
-static u32 i40e_buildreg_itr(const int type, const u16 itr)
+static inline u32 i40e_buildreg_itr(const int type, u16 itr)
{
u32 val;
+ /* We don't bother with setting the CLEARPBA bit as the data sheet
+ * points out doing so is "meaningless since it was already
+ * auto-cleared". The auto-clearing happens when the interrupt is
+ * asserted.
+ *
+ * Hardware errata 28 also indicates that writing to a
+ * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
+ * an event in the PBA anyway, so we need to rely on the automask
+ * to hold pending events for us until the interrupt is re-enabled.
+ *
+ * The itr value is reported in microseconds, and the register
+ * value is recorded in 2 microsecond units. For this reason we
+ * only need to shift by the interval shift - 1 instead of the
+ * full value.
+ */
+ itr &= I40E_ITR_MASK;
+
val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
(type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
- (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
+ (itr << (I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT - 1));
return val;
}
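The shift by (INTERVAL_SHIFT - 1) reads oddly until worked through once: the ITR is kept in microseconds, the register interval field counts 2-usec units starting at bit 5, so a shift of 4 both divides by two and positions the field in one step. A standalone sketch; the bit positions assumed here (INTENA at bit 0, ITR index at bit 3, interval at bit 5) are my reading of the datasheet, not values stated in this patch:

#include <stdint.h>
#include <stdio.h>

#define INTENA_MASK    0x1u
#define ITR_INDX_SHIFT 3
#define INTERVAL_SHIFT 5
#define ITR_MASK       0x1FFE

static uint32_t buildreg_itr(int type, uint16_t itr_usecs)
{
	itr_usecs &= ITR_MASK; /* even values only, max 8160 */
	return INTENA_MASK |
	       ((uint32_t)type << ITR_INDX_SHIFT) |
	       ((uint32_t)itr_usecs << (INTERVAL_SHIFT - 1));
}

int main(void)
{
	/* 50 usecs -> interval field of 25 (2-usec units), INTENA set */
	printf("0x%08x\n", buildreg_itr(0, 50)); /* 0x00000321 */
	return 0;
}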
/* a small macro to shorten up some long lines */
#define INTREG I40E_PFINT_DYN_CTLN
-static inline int get_rx_itr(struct i40e_vsi *vsi, int idx)
-{
- return vsi->rx_rings[idx]->rx_itr_setting;
-}
-static inline int get_tx_itr(struct i40e_vsi *vsi, int idx)
-{
- return vsi->tx_rings[idx]->tx_itr_setting;
-}
+/* The act of updating the ITR will cause it to immediately trigger. In order
+ * to prevent this from throwing off adaptive update statistics we defer the
+ * update so that it can only happen so often. So after either Tx or Rx are
+ * updated we make the adaptive scheme wait until either the ITR completely
+ * expires via the next_update expiration or we have been through at least
+ * 3 interrupts.
+ */
+#define ITR_COUNTDOWN_START 3
/**
* i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
@@ -2308,10 +2472,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
struct i40e_q_vector *q_vector)
{
struct i40e_hw *hw = &vsi->back->hw;
- bool rx = false, tx = false;
- u32 rxval, txval;
- int idx = q_vector->v_idx;
- int rx_itr_setting, tx_itr_setting;
+ u32 intval;
/* If we don't have MSIX, then we only need to re-enable icr0 */
if (!(vsi->back->flags & I40E_FLAG_MSIX_ENABLED)) {
@@ -2319,65 +2480,49 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
return;
}
- /* avoid dynamic calculation if in countdown mode OR if
- * all dynamic is disabled
- */
- rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
-
- rx_itr_setting = get_rx_itr(vsi, idx);
- tx_itr_setting = get_tx_itr(vsi, idx);
-
- if (q_vector->itr_countdown > 0 ||
- (!ITR_IS_DYNAMIC(rx_itr_setting) &&
- !ITR_IS_DYNAMIC(tx_itr_setting))) {
- goto enable_int;
- }
-
- if (ITR_IS_DYNAMIC(rx_itr_setting)) {
- rx = i40e_set_new_dynamic_itr(&q_vector->rx);
- rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
- }
-
- if (ITR_IS_DYNAMIC(tx_itr_setting)) {
- tx = i40e_set_new_dynamic_itr(&q_vector->tx);
- txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
- }
+ /* These will do nothing if dynamic updates are not enabled */
+ i40e_update_itr(q_vector, &q_vector->tx);
+ i40e_update_itr(q_vector, &q_vector->rx);
- if (rx || tx) {
- /* get the higher of the two ITR adjustments and
- * use the same value for both ITR registers
- * when in adaptive mode (Rx and/or Tx)
- */
- u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
-
- q_vector->tx.itr = q_vector->rx.itr = itr;
- txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
- tx = true;
- rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
- rx = true;
- }
-
- /* only need to enable the interrupt once, but need
- * to possibly update both ITR values
+ /* This block of logic allows us to get away with only updating
+ * one ITR value with each interrupt. The idea is to perform a
+ * pseudo-lazy update with the following criteria.
+ *
+ * 1. Rx is given higher priority than Tx if both are in the same state
+ * 2. If we must reduce an ITR, that is given highest priority.
+ * 3. We then give priority to increasing the ITR based on amount.
*/
- if (rx) {
- /* set the INTENA_MSK_MASK so that this first write
- * won't actually enable the interrupt, instead just
- * updating the ITR (it's bit 31 PF and VF)
+ if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
+ /* Rx ITR needs to be reduced, this is highest priority */
+ intval = i40e_buildreg_itr(I40E_RX_ITR,
+ q_vector->rx.target_itr);
+ q_vector->rx.current_itr = q_vector->rx.target_itr;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
+ ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
+ (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
+ /* Tx ITR needs to be reduced, this is second priority
+ * Tx ITR needs to be increased more than Rx, fourth priority
*/
- rxval |= BIT(31);
- /* don't check _DOWN because interrupt isn't being enabled */
- wr32(hw, INTREG(q_vector->reg_idx), rxval);
+ intval = i40e_buildreg_itr(I40E_TX_ITR,
+ q_vector->tx.target_itr);
+ q_vector->tx.current_itr = q_vector->tx.target_itr;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
+ /* Rx ITR needs to be increased, third priority */
+ intval = i40e_buildreg_itr(I40E_RX_ITR,
+ q_vector->rx.target_itr);
+ q_vector->rx.current_itr = q_vector->rx.target_itr;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ } else {
+ /* No ITR update, lowest priority */
+ intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+ if (q_vector->itr_countdown)
+ q_vector->itr_countdown--;
}
-enable_int:
if (!test_bit(__I40E_VSI_DOWN, vsi->state))
- wr32(hw, INTREG(q_vector->reg_idx), txval);
-
- if (q_vector->itr_countdown)
- q_vector->itr_countdown--;
- else
- q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ wr32(hw, INTREG(q_vector->reg_idx), intval);
}
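The four-way priority above is easier to audit as a pure function. A minimal extraction of the same decision order; the enum and function names are illustrative, not from the driver:

#include <stdint.h>
#include <stdio.h>

enum itr_pick { PICK_RX_DOWN, PICK_TX, PICK_RX_UP, PICK_NONE };

/* same order as i40e_update_enable_itr(): reduce Rx first, then
 * reduce Tx or take the larger Tx increase, then increase Rx,
 * otherwise program no ITR update at all
 */
static enum itr_pick pick_itr_update(uint16_t rx_cur, uint16_t rx_tgt,
				     uint16_t tx_cur, uint16_t tx_tgt)
{
	if (rx_tgt < rx_cur)
		return PICK_RX_DOWN;
	if (tx_tgt < tx_cur ||
	    (uint16_t)(rx_tgt - rx_cur) < (uint16_t)(tx_tgt - tx_cur))
		return PICK_TX;
	if (rx_cur != rx_tgt)
		return PICK_RX_UP;
	return PICK_NONE;
}

int main(void)
{
	/* Rx wants to drop from 50 to 20 usecs: Rx reduction wins */
	printf("%d\n", pick_itr_update(50, 20, 50, 50)); /* 0 */
	/* only Tx wants a (larger) increase: Tx wins */
	printf("%d\n", pick_itr_update(50, 50, 20, 60)); /* 1 */
	return 0;
}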
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 701b708628b0..3c80ea784389 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -30,32 +30,37 @@
#include <net/xdp.h>
/* Interrupt Throttling and Rate Limiting Goodies */
-
-#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
-#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */
-#define I40E_ITR_100K 0x0005
-#define I40E_ITR_50K 0x000A
-#define I40E_ITR_20K 0x0019
-#define I40E_ITR_18K 0x001B
-#define I40E_ITR_8K 0x003E
-#define I40E_ITR_4K 0x007A
-#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */
-#define I40E_ITR_RX_DEF (ITR_REG_TO_USEC(I40E_ITR_20K) | \
- I40E_ITR_DYNAMIC)
-#define I40E_ITR_TX_DEF (ITR_REG_TO_USEC(I40E_ITR_20K) | \
- I40E_ITR_DYNAMIC)
-#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
-#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
-#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
#define I40E_DEFAULT_IRQ_WORK 256
-#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
-#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
-#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
+
+/* The datasheets for the X710 and XL710 indicate that the maximum value for
+ * the ITR is 8160 usec, which is called out as 0xFF0 with a 2 usec
+ * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
+ * the register value, which is divided by 2, let's use the actual values and
+ * avoid an excessive amount of translation.
+ */
+#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
+#define I40E_ITR_MASK 0x1FFE /* mask for ITR register value */
+#define I40E_MIN_ITR 2 /* reg uses 2 usec resolution */
+#define I40E_ITR_100K 10 /* all values below must be even */
+#define I40E_ITR_50K 20
+#define I40E_ITR_20K 50
+#define I40E_ITR_18K 60
+#define I40E_ITR_8K 122
+#define I40E_MAX_ITR 8160 /* maximum value as per datasheet */
+#define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC)
+#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK)
+#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC))
+
+#define I40E_ITR_RX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC)
+#define I40E_ITR_TX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC)
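With settings now kept in plain microseconds, the translation macros reduce to masking. A tiny usage sketch of the macros above, mirrored locally so it compiles standalone:

#include <stdint.h>
#include <stdio.h>

#define ITR_DYNAMIC 0x8000
#define ITR_MASK    0x1FFE
#define ITR_TO_REG(s)     ((s) & ~ITR_DYNAMIC)
#define ITR_IS_DYNAMIC(s) (!!((s) & ITR_DYNAMIC))

int main(void)
{
	uint16_t setting = 50 | ITR_DYNAMIC; /* 50 usecs, adaptive */

	/* the value handed to HW stays in usecs; the old >> 1 step now
	 * lives in i40e_buildreg_itr() instead
	 */
	printf("reg=%u dynamic=%d\n",
	       ITR_TO_REG(setting), ITR_IS_DYNAMIC(setting));
	return 0;
}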
+
/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
* the value of the rate limit is non-zero
*/
#define INTRL_ENA BIT(6)
+#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
+
/**
* i40e_intrl_usec_to_reg - convert interrupt rate limit to register
* @intrl: interrupt rate limit to convert
@@ -382,8 +387,7 @@ struct i40e_ring {
* these values always store the USER setting, and must be converted
* before programming to a register.
*/
- u16 rx_itr_setting;
- u16 tx_itr_setting;
+ u16 itr_setting;
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
@@ -459,21 +463,21 @@ static inline void set_ring_xdp(struct i40e_ring *ring)
ring->flags |= I40E_TXR_FLAGS_XDP;
}
-enum i40e_latency_range {
- I40E_LOWEST_LATENCY = 0,
- I40E_LOW_LATENCY = 1,
- I40E_BULK_LATENCY = 2,
-};
+#define I40E_ITR_ADAPTIVE_MIN_INC 0x0002
+#define I40E_ITR_ADAPTIVE_MIN_USECS 0x0002
+#define I40E_ITR_ADAPTIVE_MAX_USECS 0x007e
+#define I40E_ITR_ADAPTIVE_LATENCY 0x8000
+#define I40E_ITR_ADAPTIVE_BULK 0x0000
+#define ITR_IS_BULK(x) (!((x) & I40E_ITR_ADAPTIVE_LATENCY))
struct i40e_ring_container {
- /* array of pointers to rings */
- struct i40e_ring *ring;
+ struct i40e_ring *ring; /* pointer to linked list of ring(s) */
+ unsigned long next_update; /* jiffies value of next update */
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_packets; /* total packets processed this int */
- unsigned long last_itr_update; /* jiffies of last ITR update */
u16 count;
- enum i40e_latency_range latency_range;
- u16 itr;
+ u16 target_itr; /* target ITR setting for ring(s) */
+ u16 current_itr; /* current ITR setting for ring(s) */
};
/* iterator for handling rings in ring container */
@@ -501,7 +505,7 @@ void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
-u32 i40e_get_tx_pending(struct i40e_ring *ring);
+u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
void i40e_detect_recover_hung(struct i40e_vsi *vsi);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index cd294e6a8587..b0eed8c0b2f2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -39,7 +39,7 @@
#define I40E_MASK(mask, shift) ((u32)(mask) << (shift))
#define I40E_MAX_VSI_QP 16
-#define I40E_MAX_VF_VSI 3
+#define I40E_MAX_VF_VSI 4
#define I40E_MAX_CHAINED_RX_BUFFERS 5
#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index e9309fb9084b..e23975c67417 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -258,6 +258,38 @@ static u16 i40e_vc_get_pf_queue_id(struct i40e_vf *vf, u16 vsi_id,
}
/**
+ * i40e_get_real_pf_qid
+ * @vf: pointer to the VF info
+ * @vsi_id: vsi id
+ * @queue_id: queue number
+ *
+ * wrapper function to get pf_queue_id, handling the ADq case as well
+ **/
+static u16 i40e_get_real_pf_qid(struct i40e_vf *vf, u16 vsi_id, u16 queue_id)
+{
+ int i;
+
+ if (vf->adq_enabled) {
+ /* Although the VF considers all of its queues (1 to 16) its
+ * own, they may actually belong to different VSIs (up to 4).
+ * We need to find which queues belong to which VSI.
+ */
+ for (i = 0; i < vf->num_tc; i++) {
+ if (queue_id < vf->ch[i].num_qps) {
+ vsi_id = vf->ch[i].vsi_id;
+ break;
+ }
+ /* find the right queue id, relative to the
+ * given VSI.
+ */
+ queue_id -= vf->ch[i].num_qps;
+ }
+ }
+
+ return i40e_vc_get_pf_queue_id(vf, vsi_id, queue_id);
+}
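A standalone sketch of the same channel walk, using a hypothetical 4x4 ADq layout, makes the absolute-to-relative queue translation easy to check:

#include <stdio.h>

struct channel { unsigned int num_qps; };

/* same loop as i40e_get_real_pf_qid(): subtract each channel's queue
 * count until the absolute queue id falls inside a channel
 */
static void locate_queue(const struct channel *ch, unsigned int num_tc,
			 unsigned int queue_id,
			 unsigned int *tc, unsigned int *rel)
{
	unsigned int i;

	for (i = 0; i < num_tc; i++) {
		if (queue_id < ch[i].num_qps)
			break;
		queue_id -= ch[i].num_qps;
	}
	*tc = i;
	*rel = queue_id;
}

int main(void)
{
	struct channel ch[4] = { {4}, {4}, {4}, {4} };
	unsigned int tc, rel;

	locate_queue(ch, 4, 9, &tc, &rel);
	printf("queue 9 -> TC %u, relative queue %u\n", tc, rel); /* TC 2, q 1 */
	return 0;
}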
+
+/**
* i40e_config_irq_link_list
* @vf: pointer to the VF info
* @vsi_id: id of VSI as given by the FW
@@ -310,7 +342,7 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id, vsi_queue_id);
+ pf_queue_id = i40e_get_real_pf_qid(vf, vsi_id, vsi_queue_id);
reg = ((qtype << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT) | pf_queue_id);
wr32(hw, reg_idx, reg);
@@ -333,8 +365,9 @@ static void i40e_config_irq_link_list(struct i40e_vf *vf, u16 vsi_id,
if (next_q < size) {
vsi_queue_id = next_q / I40E_VIRTCHNL_SUPPORTED_QTYPES;
qtype = next_q % I40E_VIRTCHNL_SUPPORTED_QTYPES;
- pf_queue_id = i40e_vc_get_pf_queue_id(vf, vsi_id,
- vsi_queue_id);
+ pf_queue_id = i40e_get_real_pf_qid(vf,
+ vsi_id,
+ vsi_queue_id);
} else {
pf_queue_id = I40E_QUEUE_END_OF_LIST;
qtype = 0;
@@ -669,18 +702,20 @@ error_param:
/**
* i40e_alloc_vsi_res
* @vf: pointer to the VF info
- * @type: type of VSI to allocate
+ * @idx: VSI index, applies only for ADq mode, zero otherwise
*
* alloc VF vsi context & resources
**/
-static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
+static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx)
{
struct i40e_mac_filter *f = NULL;
struct i40e_pf *pf = vf->pf;
struct i40e_vsi *vsi;
+ u64 max_tx_rate = 0;
int ret = 0;
- vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id);
+ vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid,
+ vf->vf_id);
if (!vsi) {
dev_err(&pf->pdev->dev,
@@ -689,7 +724,8 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
ret = -ENOENT;
goto error_alloc_vsi_res;
}
- if (type == I40E_VSI_SRIOV) {
+
+ if (!idx) {
u64 hena = i40e_pf_get_default_rss_hena(pf);
u8 broadcast[ETH_ALEN];
@@ -721,17 +757,29 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
spin_unlock_bh(&vsi->mac_filter_hash_lock);
wr32(&pf->hw, I40E_VFQF_HENA1(0, vf->vf_id), (u32)hena);
wr32(&pf->hw, I40E_VFQF_HENA1(1, vf->vf_id), (u32)(hena >> 32));
+ /* program the mac filter only on the primary VF VSI */
+ ret = i40e_sync_vsi_filters(vsi);
+ if (ret)
+ dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
}
- /* program mac filter */
- ret = i40e_sync_vsi_filters(vsi);
- if (ret)
- dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
+ /* store the VSI index and id for ADq; mac filters are programmed
+ * only on the primary VF VSI above
+ */
+ if (vf->adq_enabled) {
+ vf->ch[idx].vsi_idx = vsi->idx;
+ vf->ch[idx].vsi_id = vsi->id;
+ }
/* Set VF bandwidth if specified */
if (vf->tx_rate) {
+ max_tx_rate = vf->tx_rate;
+ } else if (vf->ch[idx].max_tx_rate) {
+ max_tx_rate = vf->ch[idx].max_tx_rate;
+ }
+
+ if (max_tx_rate) {
+ max_tx_rate = div_u64(max_tx_rate, I40E_BW_CREDIT_DIVISOR);
ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
- vf->tx_rate / 50, 0, NULL);
+ max_tx_rate, 0, NULL);
if (ret)
dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
vf->vf_id, ret);
@@ -742,6 +790,92 @@ error_alloc_vsi_res:
}
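On the bandwidth limit above: the admin queue takes the rate in 50 Mbps credits, which is what I40E_BW_CREDIT_DIVISOR hides (the old code divided by a bare 50). A quick worked sketch of the conversion:

#include <stdint.h>
#include <stdio.h>

#define BW_CREDIT_DIVISOR 50 /* 50 Mbps per BW credit */

int main(void)
{
	uint64_t max_tx_rate = 1000; /* Mbps requested for the VF */

	/* div_u64() in the kernel; plain division suffices here */
	printf("%llu credits\n",
	       (unsigned long long)(max_tx_rate / BW_CREDIT_DIVISOR)); /* 20 */
	return 0;
}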
/**
+ * i40e_map_pf_queues_to_vsi
+ * @vf: pointer to the VF info
+ *
+ * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
+ * function takes care of first part VSILAN_QTABLE, mapping pf queues to VSI.
+ **/
+static void i40e_map_pf_queues_to_vsi(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg, num_tc = 1; /* VF has at least one traffic class */
+ u16 vsi_id, qps;
+ int i, j;
+
+ if (vf->adq_enabled)
+ num_tc = vf->num_tc;
+
+ for (i = 0; i < num_tc; i++) {
+ if (vf->adq_enabled) {
+ qps = vf->ch[i].num_qps;
+ vsi_id = vf->ch[i].vsi_id;
+ } else {
+ qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
+ vsi_id = vf->lan_vsi_id;
+ }
+
+ for (j = 0; j < 7; j++) {
+ if (j * 2 >= qps) {
+ /* end of list */
+ reg = 0x07FF07FF;
+ } else {
+ u16 qid = i40e_vc_get_pf_queue_id(vf,
+ vsi_id,
+ j * 2);
+ reg = qid;
+ qid = i40e_vc_get_pf_queue_id(vf, vsi_id,
+ (j * 2) + 1);
+ reg |= qid << 16;
+ }
+ i40e_write_rx_ctl(hw,
+ I40E_VSILAN_QTABLE(j, vsi_id),
+ reg);
+ }
+ }
+}
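Each VSILAN_QTABLE register holds two PF queue ids, one per 16-bit half, with 0x7FF marking an unused slot. A small sketch of the packing done in the j-loop above:

#include <stdint.h>
#include <stdio.h>

#define QTABLE_UNUSED 0x07FF07FFu /* both halves: end of list */

/* pack an even/odd PF queue id pair the way the j-loop above does */
static uint32_t pack_qtable(uint16_t qid_even, uint16_t qid_odd)
{
	return (uint32_t)qid_even | ((uint32_t)qid_odd << 16);
}

int main(void)
{
	printf("0x%08x\n", pack_qtable(8, 9)); /* 0x00090008 */
	printf("0x%08x\n", QTABLE_UNUSED);
	return 0;
}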
+
+/**
+ * i40e_map_pf_to_vf_queues
+ * @vf: pointer to the VF info
+ *
+ * PF maps LQPs to a VF by programming VSILAN_QTABLE & VPLAN_QTABLE. This
+ * function takes care of the second part VPLAN_QTABLE & completes VF mappings.
+ **/
+static void i40e_map_pf_to_vf_queues(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg, total_qps = 0;
+ u32 qps, num_tc = 1; /* VF has at least one traffic class */
+ u16 vsi_id, qid;
+ int i, j;
+
+ if (vf->adq_enabled)
+ num_tc = vf->num_tc;
+
+ for (i = 0; i < num_tc; i++) {
+ if (vf->adq_enabled) {
+ qps = vf->ch[i].num_qps;
+ vsi_id = vf->ch[i].vsi_id;
+ } else {
+ qps = pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
+ vsi_id = vf->lan_vsi_id;
+ }
+
+ for (j = 0; j < qps; j++) {
+ qid = i40e_vc_get_pf_queue_id(vf, vsi_id, j);
+
+ reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
+ wr32(hw, I40E_VPLAN_QTABLE(total_qps, vf->vf_id),
+ reg);
+ total_qps++;
+ }
+ }
+}
+
+/**
* i40e_enable_vf_mappings
* @vf: pointer to the VF info
*
@@ -751,8 +885,7 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
{
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
- u32 reg, total_queue_pairs = 0;
- int j;
+ u32 reg;
/* Tell the hardware we're using noncontiguous mapping. HW requires
* that VF queues be mapped using this method, even when they are
@@ -765,30 +898,8 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
reg = I40E_VPLAN_MAPENA_TXRX_ENA_MASK;
wr32(hw, I40E_VPLAN_MAPENA(vf->vf_id), reg);
- /* map PF queues to VF queues */
- for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
- u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);
-
- reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
- wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
- total_queue_pairs++;
- }
-
- /* map PF queues to VSI */
- for (j = 0; j < 7; j++) {
- if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) {
- reg = 0x07FF07FF; /* unused */
- } else {
- u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
- j * 2);
- reg = qid;
- qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id,
- (j * 2) + 1);
- reg |= qid << 16;
- }
- i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id),
- reg);
- }
+ i40e_map_pf_to_vf_queues(vf);
+ i40e_map_pf_queues_to_vsi(vf);
i40e_flush(hw);
}
@@ -824,7 +935,7 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
struct i40e_pf *pf = vf->pf;
struct i40e_hw *hw = &pf->hw;
u32 reg_idx, reg;
- int i, msix_vf;
+ int i, j, msix_vf;
/* Start by disabling VF's configuration API to prevent the OS from
* accessing the VF's VSI after it's freed / invalidated.
@@ -846,6 +957,20 @@ static void i40e_free_vf_res(struct i40e_vf *vf)
vf->lan_vsi_id = 0;
vf->num_mac = 0;
}
+
+ /* do the accounting and remove additional ADq VSIs */
+ if (vf->adq_enabled && vf->ch[0].vsi_idx) {
+ for (j = 0; j < vf->num_tc; j++) {
+ /* At this point VSI0 is already released, so don't
+ * release it again; only clear its values in the
+ * channel structure
+ */
+ if (j)
+ i40e_vsi_release(pf->vsi[vf->ch[j].vsi_idx]);
+ vf->ch[j].vsi_idx = 0;
+ vf->ch[j].vsi_id = 0;
+ }
+ }
msix_vf = pf->hw.func_caps.num_msix_vectors_vf;
/* disable interrupts so the VF starts in a known state */
@@ -891,7 +1016,7 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf)
{
struct i40e_pf *pf = vf->pf;
int total_queue_pairs = 0;
- int ret;
+ int ret, idx;
if (vf->num_req_queues &&
vf->num_req_queues <= pf->queues_left + I40E_DEFAULT_QUEUES_PER_VF)
@@ -900,11 +1025,30 @@ static int i40e_alloc_vf_res(struct i40e_vf *vf)
pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
/* allocate hw vsi context & associated resources */
- ret = i40e_alloc_vsi_res(vf, I40E_VSI_SRIOV);
+ ret = i40e_alloc_vsi_res(vf, 0);
if (ret)
goto error_alloc;
total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
+ /* allocate additional VSIs based on tc information for ADq */
+ if (vf->adq_enabled) {
+ if (pf->queues_left >=
+ (I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF)) {
+ /* TC 0 always belongs to VF VSI */
+ for (idx = 1; idx < vf->num_tc; idx++) {
+ ret = i40e_alloc_vsi_res(vf, idx);
+ if (ret)
+ goto error_alloc;
+ }
+ /* send correct number of queues */
+ total_queue_pairs = I40E_MAX_VF_QUEUES;
+ } else {
+ dev_info(&pf->pdev->dev, "VF %d: Not enough queues to allocate, disabling ADq\n",
+ vf->vf_id);
+ vf->adq_enabled = false;
+ }
+ }
+
/* We account for each VF to get a default number of queue pairs. If
* the VF has now requested more, we need to account for that to make
* certain we never request more queues than we actually have left in
@@ -1537,6 +1681,27 @@ static int i40e_vc_get_version_msg(struct i40e_vf *vf, u8 *msg)
}
/**
+ * i40e_del_qch - delete all the additional VSIs created as a part of ADq
+ * @vf: pointer to VF structure
+ **/
+static void i40e_del_qch(struct i40e_vf *vf)
+{
+ struct i40e_pf *pf = vf->pf;
+ int i;
+
+ /* The first element in the array belongs to the primary VF VSI and
+ * we shouldn't delete it. We should, however, delete the rest of the
+ * VSIs created.
+ */
+ for (i = 1; i < vf->num_tc; i++) {
+ if (vf->ch[i].vsi_idx) {
+ i40e_vsi_release(pf->vsi[vf->ch[i].vsi_idx]);
+ vf->ch[i].vsi_idx = 0;
+ vf->ch[i].vsi_id = 0;
+ }
+ }
+}
+
+/**
* i40e_vc_get_vf_resources_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1631,6 +1796,9 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_REQ_QUEUES)
vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
+ if (vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)
+ vfres->vf_cap_flags |= VIRTCHNL_VF_OFFLOAD_ADQ;
+
vfres->num_vsis = num_vsis;
vfres->num_queue_pairs = vf->num_queue_pairs;
vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
@@ -1855,27 +2023,37 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
(struct virtchnl_vsi_queue_config_info *)msg;
struct virtchnl_queue_pair_info *qpi;
struct i40e_pf *pf = vf->pf;
- u16 vsi_id, vsi_queue_id;
+ u16 vsi_id, vsi_queue_id = 0;
i40e_status aq_ret = 0;
- int i;
+ int i, j = 0, idx = 0;
+
+ vsi_id = qci->vsi_id;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
- vsi_id = qci->vsi_id;
if (!i40e_vc_isvalid_vsi_id(vf, vsi_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
+
for (i = 0; i < qci->num_queue_pairs; i++) {
qpi = &qci->qpair[i];
- vsi_queue_id = qpi->txq.queue_id;
- if ((qpi->txq.vsi_id != vsi_id) ||
- (qpi->rxq.vsi_id != vsi_id) ||
- (qpi->rxq.queue_id != vsi_queue_id) ||
- !i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
+
+ if (!vf->adq_enabled) {
+ vsi_queue_id = qpi->txq.queue_id;
+
+ if (qpi->txq.vsi_id != qci->vsi_id ||
+ qpi->rxq.vsi_id != qci->vsi_id ||
+ qpi->rxq.queue_id != vsi_queue_id) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
+ }
+ }
+
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id, vsi_queue_id)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -1887,9 +2065,33 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
+
+ /* For ADq there can be up to 4 VSIs with max 4 queues each.
+ * The VF does not know about these additional VSIs; all it
+ * cares about is its own queues. The PF configures these queues
+ * to the appropriate VSIs based on the TC mapping.
+ */
+ if (vf->adq_enabled) {
+ if (j == (vf->ch[idx].num_qps - 1)) {
+ idx++;
+ j = 0; /* resetting the queue count */
+ vsi_queue_id = 0;
+ } else {
+ j++;
+ vsi_queue_id++;
+ }
+ vsi_id = vf->ch[idx].vsi_id;
+ }
}
/* set vsi num_queue_pairs in use to num configured by VF */
- pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs;
+ if (!vf->adq_enabled) {
+ pf->vsi[vf->lan_vsi_idx]->num_queue_pairs =
+ qci->num_queue_pairs;
+ } else {
+ for (i = 0; i < vf->num_tc; i++)
+ pf->vsi[vf->ch[i].vsi_idx]->num_queue_pairs =
+ vf->ch[i].num_qps;
+ }
error_param:
/* send the response to the VF */
@@ -1898,6 +2100,33 @@ error_param:
}
/**
+ * i40e_validate_queue_map
+ * @vf: pointer to the VF info
+ * @vsi_id: vsi id
+ * @queuemap: Tx or Rx queue map
+ *
+ * check if Tx or Rx queue map is valid
+ **/
+static int i40e_validate_queue_map(struct i40e_vf *vf, u16 vsi_id,
+ unsigned long queuemap)
+{
+ u16 vsi_queue_id, queue_id;
+
+ for_each_set_bit(vsi_queue_id, &queuemap, I40E_MAX_VSI_QP) {
+ if (vf->adq_enabled) {
+ vsi_id = vf->ch[vsi_queue_id / I40E_MAX_VF_VSI].vsi_id;
+ queue_id = (vsi_queue_id % I40E_DEFAULT_QUEUES_PER_VF);
+ } else {
+ queue_id = vsi_queue_id;
+ }
+
+ if (!i40e_vc_isvalid_queue_id(vf, vsi_id, queue_id))
+ return -EINVAL;
+ }
+
+ return 0;
+}
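Because ADq fixes both the per-VF VSI count and the default queues per VSI at 4, the map validation above reduces to a div/mod by 4 per set bit. A sketch, with the two constants assumed from this patch's I40E_MAX_VF_VSI and I40E_DEFAULT_QUEUES_PER_VF:

#include <stdio.h>

#define MAX_VF_VSI            4
#define DEFAULT_QUEUES_PER_VF 4

int main(void)
{
	unsigned int vsi_queue_id;

	/* mirror i40e_validate_queue_map(): bit n of a queue map is
	 * channel n / 4, relative queue n % 4 when ADq is enabled
	 */
	for (vsi_queue_id = 0; vsi_queue_id < 16; vsi_queue_id++)
		printf("map bit %2u -> channel %u, queue %u\n",
		       vsi_queue_id,
		       vsi_queue_id / MAX_VF_VSI,
		       vsi_queue_id % DEFAULT_QUEUES_PER_VF);
	return 0;
}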
+
+/**
* i40e_vc_config_irq_map_msg
* @vf: pointer to the VF info
* @msg: pointer to the msg buffer
@@ -1911,9 +2140,8 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct virtchnl_irq_map_info *irqmap_info =
(struct virtchnl_irq_map_info *)msg;
struct virtchnl_vector_map *map;
- u16 vsi_id, vsi_queue_id, vector_id;
+ u16 vsi_id, vector_id;
i40e_status aq_ret = 0;
- unsigned long tempmap;
int i;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
@@ -1923,7 +2151,6 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
for (i = 0; i < irqmap_info->num_vectors; i++) {
map = &irqmap_info->vecmap[i];
-
vector_id = map->vector_id;
vsi_id = map->vsi_id;
/* validate msg params */
@@ -1933,23 +2160,14 @@ static int i40e_vc_config_irq_map_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
goto error_param;
}
- /* lookout for the invalid queue index */
- tempmap = map->rxq_map;
- for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
- vsi_queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
+ if (i40e_validate_queue_map(vf, vsi_id, map->rxq_map)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
}
- tempmap = map->txq_map;
- for_each_set_bit(vsi_queue_id, &tempmap, I40E_MAX_VSI_QP) {
- if (!i40e_vc_isvalid_queue_id(vf, vsi_id,
- vsi_queue_id)) {
- aq_ret = I40E_ERR_PARAM;
- goto error_param;
- }
+ if (i40e_validate_queue_map(vf, vsi_id, map->txq_map)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto error_param;
}
i40e_config_irq_link_list(vf, vsi_id, map);
@@ -1975,6 +2193,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
struct i40e_pf *pf = vf->pf;
u16 vsi_id = vqs->vsi_id;
i40e_status aq_ret = 0;
+ int i;
if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
aq_ret = I40E_ERR_PARAM;
@@ -1993,6 +2212,16 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
if (i40e_vsi_start_rings(pf->vsi[vf->lan_vsi_idx]))
aq_ret = I40E_ERR_TIMEOUT;
+
+ /* need to start the rings for additional ADq VSIs as well */
+ if (vf->adq_enabled) {
+ /* zero belongs to LAN VSI */
+ for (i = 1; i < vf->num_tc; i++) {
+ if (i40e_vsi_start_rings(pf->vsi[vf->ch[i].vsi_idx]))
+ aq_ret = I40E_ERR_TIMEOUT;
+ }
+ }
+
error_param:
/* send the response to the VF */
return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES,
@@ -2688,6 +2917,618 @@ err:
}
/**
+ * i40e_validate_cloud_filter
+ * @vf: pointer to the VF info
+ * @tc_filter: pointer to the virtchnl TC filter
+ *
+ * This function validates a cloud filter programmed as a TC filter for ADq
+ **/
+static int i40e_validate_cloud_filter(struct i40e_vf *vf,
+ struct virtchnl_filter *tc_filter)
+{
+ struct virtchnl_l4_spec mask = tc_filter->mask.tcp_spec;
+ struct virtchnl_l4_spec data = tc_filter->data.tcp_spec;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ struct i40e_mac_filter *f;
+ struct hlist_node *h;
+ bool found = false;
+ int bkt;
+
+ if (!tc_filter->action) {
+ dev_info(&pf->pdev->dev,
+ "VF %d: Currently ADq doesn't support Drop Action\n",
+ vf->vf_id);
+ goto err;
+ }
+
+ /* action_meta is TC number here to which the filter is applied */
+ if (!tc_filter->action_meta ||
+ tc_filter->action_meta > I40E_MAX_VF_VSI) {
+ dev_info(&pf->pdev->dev, "VF %d: Invalid TC number %u\n",
+ vf->vf_id, tc_filter->action_meta);
+ goto err;
+ }
+
+ /* Check filter if it's programmed for advanced mode or basic mode.
+ * There are two ADq modes (for VF only),
+ * 1. Basic mode: intended to allow as many filter options as possible
+ * to be added to a VF in Non-trusted mode. Main goal is
+ * to add filters to its own MAC and VLAN id.
+ * 2. Advanced mode: is for allowing filters to be applied other than
+ * its own MAC or VLAN. This mode requires the VF to be
+ * Trusted.
+ */
+ if (mask.dst_mac[0] && !mask.dst_ip[0]) {
+ vsi = pf->vsi[vf->lan_vsi_idx];
+ f = i40e_find_mac(vsi, data.dst_mac);
+
+ if (!f) {
+ dev_info(&pf->pdev->dev,
+ "Destination MAC %pM doesn't belong to VF %d\n",
+ data.dst_mac, vf->vf_id);
+ goto err;
+ }
+
+ if (mask.vlan_id) {
+ hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f,
+ hlist) {
+ if (f->vlan == ntohs(data.vlan_id)) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ dev_info(&pf->pdev->dev,
+ "VF %d doesn't have any VLAN id %u\n",
+ vf->vf_id, ntohs(data.vlan_id));
+ goto err;
+ }
+ }
+ } else {
+ /* Check if VF is trusted */
+ if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
+ dev_err(&pf->pdev->dev,
+ "VF %d not trusted, make VF trusted to add advanced mode ADq cloud filters\n",
+ vf->vf_id);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ if (mask.dst_mac[0] & data.dst_mac[0]) {
+ if (is_broadcast_ether_addr(data.dst_mac) ||
+ is_zero_ether_addr(data.dst_mac)) {
+ dev_info(&pf->pdev->dev, "VF %d: Invalid Dest MAC addr %pM\n",
+ vf->vf_id, data.dst_mac);
+ goto err;
+ }
+ }
+
+ if (mask.src_mac[0] & data.src_mac[0]) {
+ if (is_broadcast_ether_addr(data.src_mac) ||
+ is_zero_ether_addr(data.src_mac)) {
+ dev_info(&pf->pdev->dev, "VF %d: Invalid Source MAC addr %pM\n",
+ vf->vf_id, data.src_mac);
+ goto err;
+ }
+ }
+
+ if (mask.dst_port & data.dst_port) {
+ if (!data.dst_port) {
+ dev_info(&pf->pdev->dev, "VF %d: Invalid Dest port\n",
+ vf->vf_id);
+ goto err;
+ }
+ }
+
+ if (mask.src_port & data.src_port) {
+ if (!data.src_port) {
+ dev_info(&pf->pdev->dev, "VF %d: Invalid Source port\n",
+ vf->vf_id);
+ goto err;
+ }
+ }
+
+ if (tc_filter->flow_type != VIRTCHNL_TCP_V6_FLOW &&
+ tc_filter->flow_type != VIRTCHNL_TCP_V4_FLOW) {
+ dev_info(&pf->pdev->dev, "VF %d: Invalid Flow type\n",
+ vf->vf_id);
+ goto err;
+ }
+
+ if (mask.vlan_id & data.vlan_id) {
+ if (ntohs(data.vlan_id) > I40E_MAX_VLANID) {
+ dev_info(&pf->pdev->dev, "VF %d: invalid VLAN ID\n",
+ vf->vf_id);
+ goto err;
+ }
+ }
+
+ return I40E_SUCCESS;
+err:
+ return I40E_ERR_CONFIG;
+}
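The add/del handlers below both collapse each (mask, data) pair from the virtchnl message with a byte-wise AND, so only the fields the VF actually masked survive into the PF-side filter. A tiny sketch of that convention for a MAC address:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

int main(void)
{
	/* mask selects which bytes of the VF-supplied data are meaningful */
	uint8_t mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	uint8_t data[ETH_ALEN] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
	uint8_t out[ETH_ALEN];
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		out[i] = mask[i] & data[i];

	for (i = 0; i < ETH_ALEN; i++)
		printf("%02x%s", out[i], i == ETH_ALEN - 1 ? "\n" : ":");
	return 0;
}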
+
+/**
+ * i40e_find_vsi_from_seid - searches for the vsi with the given seid
+ * @vf: pointer to the VF info
+ * @seid - seid of the vsi it is searching for
+ **/
+static struct i40e_vsi *i40e_find_vsi_from_seid(struct i40e_vf *vf, u16 seid)
+{
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ int i;
+
+ for (i = 0; i < vf->num_tc ; i++) {
+ vsi = i40e_find_vsi_from_id(pf, vf->ch[i].vsi_id);
+ if (vsi && vsi->seid == seid)
+ return vsi;
+ }
+ return NULL;
+}
+
+/**
+ * i40e_del_all_cloud_filters
+ * @vf: pointer to the VF info
+ *
+ * This function deletes all cloud filters
+ **/
+static void i40e_del_all_cloud_filters(struct i40e_vf *vf)
+{
+ struct i40e_cloud_filter *cfilter = NULL;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ struct hlist_node *node;
+ int ret;
+
+ hlist_for_each_entry_safe(cfilter, node,
+ &vf->cloud_filter_list, cloud_node) {
+ vsi = i40e_find_vsi_from_seid(vf, cfilter->seid);
+
+ if (!vsi) {
+ dev_err(&pf->pdev->dev, "VF %d: no VSI found for matching %u seid, can't delete cloud filter\n",
+ vf->vf_id, cfilter->seid);
+ continue;
+ }
+
+ if (cfilter->dst_port)
+ ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter,
+ false);
+ else
+ ret = i40e_add_del_cloud_filter(vsi, cfilter, false);
+ if (ret)
+ dev_err(&pf->pdev->dev,
+ "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
+ vf->vf_id, i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw,
+ pf->hw.aq.asq_last_status));
+
+ hlist_del(&cfilter->cloud_node);
+ kfree(cfilter);
+ vf->num_cloud_filters--;
+ }
+}
+
+/**
+ * i40e_vc_del_cloud_filter
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * This function deletes a cloud filter programmed as TC filter for ADq
+ **/
+static int i40e_vc_del_cloud_filter(struct i40e_vf *vf, u8 *msg)
+{
+ struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
+ struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
+ struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
+ struct i40e_cloud_filter cfilter, *cf = NULL;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ struct hlist_node *node;
+ i40e_status aq_ret = 0;
+ int i, ret;
+
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ if (!vf->adq_enabled) {
+ dev_info(&pf->pdev->dev,
+ "VF %d: ADq not enabled, can't apply cloud filter\n",
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ if (i40e_validate_cloud_filter(vf, vcf)) {
+ dev_info(&pf->pdev->dev,
+ "VF %d: Invalid input, can't apply cloud filter\n",
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ memset(&cfilter, 0, sizeof(cfilter));
+ /* parse destination mac address */
+ for (i = 0; i < ETH_ALEN; i++)
+ cfilter.dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
+
+ /* parse source mac address */
+ for (i = 0; i < ETH_ALEN; i++)
+ cfilter.src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
+
+ cfilter.vlan_id = mask.vlan_id & tcf.vlan_id;
+ cfilter.dst_port = mask.dst_port & tcf.dst_port;
+ cfilter.src_port = mask.src_port & tcf.src_port;
+
+ switch (vcf->flow_type) {
+ case VIRTCHNL_TCP_V4_FLOW:
+ cfilter.n_proto = ETH_P_IP;
+ if (mask.dst_ip[0] & tcf.dst_ip[0])
+ memcpy(&cfilter.ip.v4.dst_ip, tcf.dst_ip,
+ ARRAY_SIZE(tcf.dst_ip));
+ else if (mask.src_ip[0] & tcf.dst_ip[0])
+ memcpy(&cfilter.ip.v4.src_ip, tcf.src_ip,
+ ARRAY_SIZE(tcf.dst_ip));
+ break;
+ case VIRTCHNL_TCP_V6_FLOW:
+ cfilter.n_proto = ETH_P_IPV6;
+ if (mask.dst_ip[3] & tcf.dst_ip[3])
+ memcpy(&cfilter.ip.v6.dst_ip6, tcf.dst_ip,
+ sizeof(cfilter.ip.v6.dst_ip6));
+ if (mask.src_ip[3] & tcf.src_ip[3])
+ memcpy(&cfilter.ip.v6.src_ip6, tcf.src_ip,
+ sizeof(cfilter.ip.v6.src_ip6));
+ break;
+ default:
+ /* TC filter can be configured based on different combinations
+ * and in this case IP is not a part of filter config
+ */
+ dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
+ vf->vf_id);
+ }
+
+ /* get the VSI to which the TC belongs */
+ vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
+ cfilter.seid = vsi->seid;
+ cfilter.flags = vcf->field_flags;
+
+ /* Deleting TC filter */
+ if (tcf.dst_port)
+ ret = i40e_add_del_cloud_filter_big_buf(vsi, &cfilter, false);
+ else
+ ret = i40e_add_del_cloud_filter(vsi, &cfilter, false);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "VF %d: Failed to delete cloud filter, err %s aq_err %s\n",
+ vf->vf_id, i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ goto err;
+ }
+
+ hlist_for_each_entry_safe(cf, node,
+ &vf->cloud_filter_list, cloud_node) {
+ if (cf->seid != cfilter.seid)
+ continue;
+ if (mask.dst_port)
+ if (cfilter.dst_port != cf->dst_port)
+ continue;
+ if (mask.dst_mac[0])
+ if (!ether_addr_equal(cf->dst_mac, cfilter.dst_mac))
+ continue;
+ /* for ipv4 data to be valid, only the first word of the mask is set */
+ if (cfilter.n_proto == ETH_P_IP && mask.dst_ip[0])
+ if (memcmp(&cfilter.ip.v4.dst_ip, &cf->ip.v4.dst_ip,
+ ARRAY_SIZE(tcf.dst_ip)))
+ continue;
+ /* for ipv6, mask is set for all sixteen bytes (4 words) */
+ if (cfilter.n_proto == ETH_P_IPV6 && mask.dst_ip[3])
+ if (memcmp(&cfilter.ip.v6.dst_ip6, &cf->ip.v6.dst_ip6,
+ sizeof(cfilter.ip.v6.dst_ip6)))
+ continue;
+ if (mask.vlan_id)
+ if (cfilter.vlan_id != cf->vlan_id)
+ continue;
+
+ hlist_del(&cf->cloud_node);
+ kfree(cf);
+ vf->num_cloud_filters--;
+ }
+
+err:
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DEL_CLOUD_FILTER,
+ aq_ret);
+}
+
+/**
+ * i40e_vc_add_cloud_filter
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ *
+ * This function adds a cloud filter programmed as TC filter for ADq
+ **/
+static int i40e_vc_add_cloud_filter(struct i40e_vf *vf, u8 *msg)
+{
+ struct virtchnl_filter *vcf = (struct virtchnl_filter *)msg;
+ struct virtchnl_l4_spec mask = vcf->mask.tcp_spec;
+ struct virtchnl_l4_spec tcf = vcf->data.tcp_spec;
+ struct i40e_cloud_filter *cfilter = NULL;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_vsi *vsi = NULL;
+ i40e_status aq_ret = 0;
+ int i, ret;
+
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ if (!vf->adq_enabled) {
+ dev_info(&pf->pdev->dev,
+ "VF %d: ADq is not enabled, can't apply cloud filter\n",
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ if (i40e_validate_cloud_filter(vf, vcf)) {
+ dev_info(&pf->pdev->dev,
+ "VF %d: Invalid input/s, can't apply cloud filter\n",
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ cfilter = kzalloc(sizeof(*cfilter), GFP_KERNEL);
+ if (!cfilter)
+ return -ENOMEM;
+
+ /* parse destination mac address */
+ for (i = 0; i < ETH_ALEN; i++)
+ cfilter->dst_mac[i] = mask.dst_mac[i] & tcf.dst_mac[i];
+
+ /* parse source mac address */
+ for (i = 0; i < ETH_ALEN; i++)
+ cfilter->src_mac[i] = mask.src_mac[i] & tcf.src_mac[i];
+
+ cfilter->vlan_id = mask.vlan_id & tcf.vlan_id;
+ cfilter->dst_port = mask.dst_port & tcf.dst_port;
+ cfilter->src_port = mask.src_port & tcf.src_port;
+
+ switch (vcf->flow_type) {
+ case VIRTCHNL_TCP_V4_FLOW:
+ cfilter->n_proto = ETH_P_IP;
+ if (mask.dst_ip[0] & tcf.dst_ip[0])
+ memcpy(&cfilter->ip.v4.dst_ip, tcf.dst_ip,
+ ARRAY_SIZE(tcf.dst_ip));
+ else if (mask.src_ip[0] & tcf.dst_ip[0])
+ memcpy(&cfilter->ip.v4.src_ip, tcf.src_ip,
+ ARRAY_SIZE(tcf.dst_ip));
+ break;
+ case VIRTCHNL_TCP_V6_FLOW:
+ cfilter->n_proto = ETH_P_IPV6;
+ if (mask.dst_ip[3] & tcf.dst_ip[3])
+ memcpy(&cfilter->ip.v6.dst_ip6, tcf.dst_ip,
+ sizeof(cfilter->ip.v6.dst_ip6));
+ if (mask.src_ip[3] & tcf.src_ip[3])
+ memcpy(&cfilter->ip.v6.src_ip6, tcf.src_ip,
+ sizeof(cfilter->ip.v6.src_ip6));
+ break;
+ default:
+ /* TC filter can be configured based on different combinations
+ * and in this case IP is not a part of filter config
+ */
+ dev_info(&pf->pdev->dev, "VF %d: Flow type not configured\n",
+ vf->vf_id);
+ }
+
+ /* get the VSI to which the TC belongs */
+ vsi = pf->vsi[vf->ch[vcf->action_meta].vsi_idx];
+ cfilter->seid = vsi->seid;
+ cfilter->flags = vcf->field_flags;
+
+ /* Adding cloud filter programmed as TC filter */
+ if (tcf.dst_port)
+ ret = i40e_add_del_cloud_filter_big_buf(vsi, cfilter, true);
+ else
+ ret = i40e_add_del_cloud_filter(vsi, cfilter, true);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "VF %d: Failed to add cloud filter, err %s aq_err %s\n",
+ vf->vf_id, i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+ kfree(cfilter);
+ goto err;
+ }
+
+ INIT_HLIST_NODE(&cfilter->cloud_node);
+ hlist_add_head(&cfilter->cloud_node, &vf->cloud_filter_list);
+ vf->num_cloud_filters++;
+err:
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ADD_CLOUD_FILTER,
+ aq_ret);
+}
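Each field stored in the new filter above is the bitwise AND of the supplied value and its mask (dst_mac, src_mac, vlan_id, the ports). A short sketch of the idea, with a hypothetical helper name (ex_mask_bytes):

    /* AND-ing with the mask zeroes the "don't care" bits, so the stored
     * filter holds canonical values the delete path can compare directly
     * (ether_addr_equal(), plain == on vlan_id, and so on).
     */
    static void ex_mask_bytes(u8 *dst, const u8 *val, const u8 *mask, int len)
    {
    	int i;

    	for (i = 0; i < len; i++)
    		dst[i] = val[i] & mask[i];
    }

Called as ex_mask_bytes(cfilter->dst_mac, tcf.dst_mac, mask.dst_mac, ETH_ALEN), this would be equivalent to the per-byte loops above.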
+
+/**
+ * i40e_vc_add_qch_msg - Add queue channel and enable ADq
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ **/
+static int i40e_vc_add_qch_msg(struct i40e_vf *vf, u8 *msg)
+{
+ struct virtchnl_tc_info *tci =
+ (struct virtchnl_tc_info *)msg;
+ struct i40e_pf *pf = vf->pf;
+ struct i40e_link_status *ls = &pf->hw.phy.link_info;
+ int i, adq_request_qps = 0, speed = 0;
+ i40e_status aq_ret = 0;
+
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ /* ADq cannot be applied if spoof check is ON */
+ if (vf->spoofchk) {
+ dev_err(&pf->pdev->dev,
+ "Spoof check is ON, turn it OFF to enable ADq\n");
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ if (!(vf->driver_caps & VIRTCHNL_VF_OFFLOAD_ADQ)) {
+ dev_err(&pf->pdev->dev,
+ "VF %d attempting to enable ADq, but hasn't properly negotiated that capability\n",
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ /* max number of traffic classes for VF currently capped at 4 */
+ if (!tci->num_tc || tci->num_tc > I40E_MAX_VF_VSI) {
+ dev_err(&pf->pdev->dev,
+ "VF %d trying to set %u TCs, valid range 1-4 TCs per VF\n",
+ vf->vf_id, tci->num_tc);
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ /* validate queues for each TC */
+ for (i = 0; i < tci->num_tc; i++)
+ if (!tci->list[i].count ||
+ tci->list[i].count > I40E_DEFAULT_QUEUES_PER_VF) {
+ dev_err(&pf->pdev->dev,
+ "VF %d: TC %d trying to set %u queues, valid range 1-4 queues per TC\n",
+ vf->vf_id, i, tci->list[i].count);
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ /* need Max VF queues but already have default number of queues */
+ adq_request_qps = I40E_MAX_VF_QUEUES - I40E_DEFAULT_QUEUES_PER_VF;
+
+ if (pf->queues_left < adq_request_qps) {
+ dev_err(&pf->pdev->dev,
+ "No queues left to allocate to VF %d\n",
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ } else {
+ /* we need to allocate max VF queues to enable ADq so as to
+ * make sure ADq enabled VF always gets back queues when it
+ * goes through a reset.
+ */
+ vf->num_queue_pairs = I40E_MAX_VF_QUEUES;
+ }
+
+ /* get link speed in Mbps to validate rate limit */
+ switch (ls->link_speed) {
+ case VIRTCHNL_LINK_SPEED_100MB:
+ speed = SPEED_100;
+ break;
+ case VIRTCHNL_LINK_SPEED_1GB:
+ speed = SPEED_1000;
+ break;
+ case VIRTCHNL_LINK_SPEED_10GB:
+ speed = SPEED_10000;
+ break;
+ case VIRTCHNL_LINK_SPEED_20GB:
+ speed = SPEED_20000;
+ break;
+ case VIRTCHNL_LINK_SPEED_25GB:
+ speed = SPEED_25000;
+ break;
+ case VIRTCHNL_LINK_SPEED_40GB:
+ speed = SPEED_40000;
+ break;
+ default:
+ dev_err(&pf->pdev->dev,
+ "Cannot detect link speed\n");
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ /* parse data from the queue channel info */
+ vf->num_tc = tci->num_tc;
+ for (i = 0; i < vf->num_tc; i++) {
+ if (tci->list[i].max_tx_rate) {
+ if (tci->list[i].max_tx_rate > speed) {
+ dev_err(&pf->pdev->dev,
+ "Invalid max tx rate %llu specified for VF %d.",
+ tci->list[i].max_tx_rate,
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ } else {
+ vf->ch[i].max_tx_rate =
+ tci->list[i].max_tx_rate;
+ }
+ }
+ vf->ch[i].num_qps = tci->list[i].count;
+ }
+
+ /* set this flag only after making sure all inputs are sane */
+ vf->adq_enabled = true;
+ /* num_req_queues is set when the user changes the number of queues via
+ * ethtool, and this causes an issue for the default VSI (which depends
+ * on this variable) when ADq is enabled, hence reset it.
+ */
+ vf->num_req_queues = 0;
+
+ /* reset the VF in order to allocate resources */
+ i40e_vc_notify_vf_reset(vf);
+ i40e_reset_vf(vf, false);
+
+ return I40E_SUCCESS;
+
+ /* send the response to the VF */
+err:
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_CHANNELS,
+ aq_ret);
+}
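To put numbers on the queue accounting above: with the driver's constants (16 queues max per VF and 4 by default, as the "1-4 queues per TC" and "1-4 TCs" messages imply), adq_request_qps = 16 - 4 = 12, so enabling ADq succeeds only while the PF still holds at least 12 free queue pairs, and an ADq-enabled VF is then pinned at the full 16 so a reset can always re-acquire them. The per-TC rate check is equally direct; a minimal sketch, assuming both values are in Mbps as in the SPEED_* mapping above (ex_tx_rate_valid is a hypothetical name):

    /* A zero max_tx_rate means no limit was requested; otherwise the cap
     * must not exceed the negotiated link speed.
     */
    static bool ex_tx_rate_valid(u64 max_tx_rate_mbps, int link_speed_mbps)
    {
    	return !max_tx_rate_mbps || max_tx_rate_mbps <= link_speed_mbps;
    }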
+
+/**
+ * i40e_vc_del_qch_msg
+ * @vf: pointer to the VF info
+ * @msg: pointer to the msg buffer
+ **/
+static int i40e_vc_del_qch_msg(struct i40e_vf *vf, u8 *msg)
+{
+ struct i40e_pf *pf = vf->pf;
+ i40e_status aq_ret = 0;
+
+ if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) {
+ aq_ret = I40E_ERR_PARAM;
+ goto err;
+ }
+
+ if (vf->adq_enabled) {
+ i40e_del_all_cloud_filters(vf);
+ i40e_del_qch(vf);
+ vf->adq_enabled = false;
+ vf->num_tc = 0;
+ dev_info(&pf->pdev->dev,
+ "Deleting Queue Channels and cloud filters for ADq on VF %d\n",
+ vf->vf_id);
+ } else {
+ dev_info(&pf->pdev->dev, "VF %d trying to delete queue channels but ADq isn't enabled\n",
+ vf->vf_id);
+ aq_ret = I40E_ERR_PARAM;
+ }
+
+ /* reset the VF in order to allocate resources */
+ i40e_vc_notify_vf_reset(vf);
+ i40e_reset_vf(vf, false);
+
+ return I40E_SUCCESS;
+
+err:
+ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_DISABLE_CHANNELS,
+ aq_ret);
+}
+
+/**
* i40e_vc_process_vf_msg
* @pf: pointer to the PF structure
* @vf_id: source VF id
@@ -2816,7 +3657,18 @@ int i40e_vc_process_vf_msg(struct i40e_pf *pf, s16 vf_id, u32 v_opcode,
case VIRTCHNL_OP_REQUEST_QUEUES:
ret = i40e_vc_request_queues_msg(vf, msg, msglen);
break;
-
+ case VIRTCHNL_OP_ENABLE_CHANNELS:
+ ret = i40e_vc_add_qch_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_DISABLE_CHANNELS:
+ ret = i40e_vc_del_qch_msg(vf, msg);
+ break;
+ case VIRTCHNL_OP_ADD_CLOUD_FILTER:
+ ret = i40e_vc_add_cloud_filter(vf, msg);
+ break;
+ case VIRTCHNL_OP_DEL_CLOUD_FILTER:
+ ret = i40e_vc_del_cloud_filter(vf, msg);
+ break;
case VIRTCHNL_OP_UNKNOWN:
default:
dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n",
@@ -3382,6 +4234,16 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting)
i40e_vc_disable_vf(vf);
dev_info(&pf->pdev->dev, "VF %u is now %strusted\n",
vf_id, setting ? "" : "un");
+
+ if (vf->adq_enabled) {
+ if (!vf->trusted) {
+ dev_info(&pf->pdev->dev,
+ "VF %u no longer Trusted, deleting all cloud filters\n",
+ vf_id);
+ i40e_del_all_cloud_filters(vf);
+ }
+ }
+
out:
return ret;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 5efc4f92bb37..6852599b2379 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -69,6 +69,19 @@ enum i40e_vf_capabilities {
I40E_VIRTCHNL_VF_CAP_IWARP,
};
+/* In ADq, max 4 VSIs can be allocated per VF including the primary VF VSI.
+ * These variables are used to store indices, IDs and the number of queues
+ * for each VSI including that of the primary VF VSI. Each traffic class is
+ * termed a channel, and each channel can in turn have 4 queues, which
+ * means max 16 queues overall per VF.
+ */
+struct i40evf_channel {
+ u16 vsi_idx; /* index in PF struct for all channel VSIs */
+ u16 vsi_id; /* VSI ID used by firmware */
+ u16 num_qps; /* number of queue pairs requested by user */
+ u64 max_tx_rate; /* bandwidth rate allocation for VSIs */
+};
+
/* VF information structure */
struct i40e_vf {
struct i40e_pf *pf;
@@ -111,6 +124,13 @@ struct i40e_vf {
u16 num_mac;
u16 num_vlan;
+ /* ADq related variables */
+ bool adq_enabled; /* flag to enable ADq */
+ u8 num_tc;
+ struct i40evf_channel ch[I40E_MAX_VF_VSI];
+ struct hlist_head cloud_filter_list;
+ u16 num_cloud_filters;
+
/* RDMA Client */
struct virtchnl_iwarp_qvlist_info *qvlist_info;
};
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 357d6051281f..e088d23eb083 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -196,7 +196,7 @@ void i40evf_detect_recover_hung(struct i40e_vsi *vsi)
*/
smp_rmb();
tx_ring->tx_stats.prev_pkt_ctr =
- i40evf_get_tx_pending(tx_ring, false) ? packets : -1;
+ i40evf_get_tx_pending(tx_ring, true) ? packets : -1;
}
}
}
@@ -392,99 +392,241 @@ void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
val);
}
+static inline bool i40e_container_is_rx(struct i40e_q_vector *q_vector,
+ struct i40e_ring_container *rc)
+{
+ return &q_vector->rx == rc;
+}
+
+static inline unsigned int i40e_itr_divisor(struct i40e_q_vector *q_vector)
+{
+ unsigned int divisor;
+
+ switch (q_vector->adapter->link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ divisor = I40E_ITR_ADAPTIVE_MIN_INC * 1024;
+ break;
+ case I40E_LINK_SPEED_25GB:
+ case I40E_LINK_SPEED_20GB:
+ divisor = I40E_ITR_ADAPTIVE_MIN_INC * 512;
+ break;
+ default:
+ case I40E_LINK_SPEED_10GB:
+ divisor = I40E_ITR_ADAPTIVE_MIN_INC * 256;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ case I40E_LINK_SPEED_100MB:
+ divisor = I40E_ITR_ADAPTIVE_MIN_INC * 32;
+ break;
+ }
+
+ return divisor;
+}
+
/**
- * i40e_set_new_dynamic_itr - Find new ITR level
+ * i40e_update_itr - update the dynamic ITR value based on statistics
+ * @q_vector: structure containing interrupt and ring information
* @rc: structure containing ring performance data
*
- * Returns true if ITR changed, false if not
- *
- * Stores a new ITR value based on packets and byte counts during
- * the last interrupt. The advantage of per interrupt computation
- * is faster updates and more accurate ITR for the current traffic
- * pattern. Constants in this function were computed based on
- * theoretical maximum wire speed and thresholds were set based on
- * testing data as well as attempting to minimize response time
+ * Stores a new ITR value based on packets and byte
+ * counts during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
* while increasing bulk throughput.
**/
-static bool i40e_set_new_dynamic_itr(struct i40e_ring_container *rc)
+static void i40e_update_itr(struct i40e_q_vector *q_vector,
+ struct i40e_ring_container *rc)
{
- enum i40e_latency_range new_latency_range = rc->latency_range;
- u32 new_itr = rc->itr;
- int bytes_per_usec;
- unsigned int usecs, estimated_usecs;
+ unsigned int avg_wire_size, packets, bytes, itr;
+ unsigned long next_update = jiffies;
- if (rc->total_packets == 0 || !rc->itr)
- return false;
+ /* If we don't have any rings just leave ourselves set for maximum
+ * possible latency so we take ourselves out of the equation.
+ */
+ if (!rc->ring || !ITR_IS_DYNAMIC(rc->ring->itr_setting))
+ return;
+
+ /* For Rx we want to push the delay up and default to low latency.
+ * for Tx we want to pull the delay down and default to high latency.
+ */
+ itr = i40e_container_is_rx(q_vector, rc) ?
+ I40E_ITR_ADAPTIVE_MIN_USECS | I40E_ITR_ADAPTIVE_LATENCY :
+ I40E_ITR_ADAPTIVE_MAX_USECS | I40E_ITR_ADAPTIVE_LATENCY;
+
+ /* If we haven't updated within the last 1 - 2 jiffies we can assume
+ * that either packets are coming in so slow there hasn't been
+ * any work, or that there is so much work that NAPI is dealing
+ * with interrupt moderation and we don't need to do anything.
+ */
+ if (time_after(next_update, rc->next_update))
+ goto clear_counts;
+
+ /* If itr_countdown is set it means we programmed an ITR within
+ * the last 4 interrupt cycles. This has a side effect of us
+ * potentially firing an early interrupt. In order to work around
+ * this we need to throw out any data received for a few
+ * interrupts following the update.
+ */
+ if (q_vector->itr_countdown) {
+ itr = rc->target_itr;
+ goto clear_counts;
+ }
+
+ packets = rc->total_packets;
+ bytes = rc->total_bytes;
- usecs = (rc->itr << 1) * ITR_COUNTDOWN_START;
- bytes_per_usec = rc->total_bytes / usecs;
+ if (i40e_container_is_rx(q_vector, rc)) {
+ /* If Rx and there are 1 to 4 packets and bytes are less than
+ * 9000, assume insufficient data to use the bulk rate limiting
+ * approach unless Tx is already in bulk rate limiting. We
+ * are likely latency driven.
+ */
+ if (packets && packets < 4 && bytes < 9000 &&
+ (q_vector->tx.target_itr & I40E_ITR_ADAPTIVE_LATENCY)) {
+ itr = I40E_ITR_ADAPTIVE_LATENCY;
+ goto adjust_by_size;
+ }
+ } else if (packets < 4) {
+ /* If we have Tx and Rx ITR maxed and Tx ITR is running in
+ * bulk mode and we are receiving 4 or fewer packets just
+ * reset the ITR_ADAPTIVE_LATENCY bit for latency mode so
+ * that the Rx can relax.
+ */
+ if (rc->target_itr == I40E_ITR_ADAPTIVE_MAX_USECS &&
+ (q_vector->rx.target_itr & I40E_ITR_MASK) ==
+ I40E_ITR_ADAPTIVE_MAX_USECS)
+ goto clear_counts;
+ } else if (packets > 32) {
+ /* If we have processed over 32 packets in a single interrupt
+ * for Tx assume we need to switch over to "bulk" mode.
+ */
+ rc->target_itr &= ~I40E_ITR_ADAPTIVE_LATENCY;
+ }
- /* The calculations in this algorithm depend on interrupts actually
- * firing at the ITR rate. This may not happen if the packet rate is
- * really low, or if we've been napi polling. Check to make sure
- * that's not the case before we continue.
+ /* We have no packets to actually measure against. This means
+ * either one of the other queues on this vector is active or
+ * we are a Tx queue doing TSO with too high of an interrupt rate.
+ *
+ * Between 4 and 56 we can assume that our current interrupt delay
+ * is only slightly too low. As such we should increase it by a small
+ * fixed amount.
*/
- estimated_usecs = jiffies_to_usecs(jiffies - rc->last_itr_update);
- if (estimated_usecs > usecs) {
- new_latency_range = I40E_LOW_LATENCY;
- goto reset_latency;
+ if (packets < 56) {
+ itr = rc->target_itr + I40E_ITR_ADAPTIVE_MIN_INC;
+ if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
+ itr &= I40E_ITR_ADAPTIVE_LATENCY;
+ itr += I40E_ITR_ADAPTIVE_MAX_USECS;
+ }
+ goto clear_counts;
+ }
+
+ if (packets <= 256) {
+ itr = min(q_vector->tx.current_itr, q_vector->rx.current_itr);
+ itr &= I40E_ITR_MASK;
+
+ /* Between 56 and 112 is our "goldilocks" zone where we are
+ * working out "just right". Just report that our current
+ * ITR is good for us.
+ */
+ if (packets <= 112)
+ goto clear_counts;
+
+ /* If packet count is 128 or greater we are likely looking
+ * at a slight overrun of the delay we want. Try halving
+ * our delay to see if that will cut the number of packets
+ * in half per interrupt.
+ */
+ itr /= 2;
+ itr &= I40E_ITR_MASK;
+ if (itr < I40E_ITR_ADAPTIVE_MIN_USECS)
+ itr = I40E_ITR_ADAPTIVE_MIN_USECS;
+
+ goto clear_counts;
}
- /* simple throttlerate management
- * 0-10MB/s lowest (50000 ints/s)
- * 10-20MB/s low (20000 ints/s)
- * 20-1249MB/s bulk (18000 ints/s)
+ /* The paths below assume we are dealing with a bulk ITR since
+ * number of packets is greater than 256. We are just going to have
+ * to compute a value and try to bring the count under control,
+ * though for smaller packet sizes there isn't much we can do as
+ * NAPI polling will likely be kicking in sooner rather than later.
+ */
+ itr = I40E_ITR_ADAPTIVE_BULK;
+
+adjust_by_size:
+ /* If packet counts are 256 or greater we can assume we have a gross
+ * overestimation of what the rate should be. Instead of trying to
+ * fine-tune it, just use the formula below to try and dial in an exact
+ * value given the current packet size of the frame.
+ */
+ avg_wire_size = bytes / packets;
+
+ /* The following is a crude approximation of:
+ * wmem_default / (size + overhead) = desired_pkts_per_int
+ * rate / bits_per_byte / (size + ethernet overhead) = pkt_rate
+ * (desired_pkt_rate / pkt_rate) * usecs_per_sec = ITR value
*
- * The math works out because the divisor is in 10^(-6) which
- * turns the bytes/us input value into MB/s values, but
- * make sure to use usecs, as the register values written
- * are in 2 usec increments in the ITR registers, and make sure
- * to use the smoothed values that the countdown timer gives us.
+ * Assuming wmem_default is 212992 and overhead is 640 bytes per
+ * packet, (256 skb, 64 headroom, 320 shared info), we can reduce the
+ * formula down to
+ *
+ * (170 * (size + 24)) / (size + 640) = ITR
+ *
+ * We first do some math on the packet size and then finally bitshift
+ * by 8 after rounding up. We also have to account for PCIe link speed
+ * difference as ITR scales based on this.
*/
- switch (new_latency_range) {
- case I40E_LOWEST_LATENCY:
- if (bytes_per_usec > 10)
- new_latency_range = I40E_LOW_LATENCY;
- break;
- case I40E_LOW_LATENCY:
- if (bytes_per_usec > 20)
- new_latency_range = I40E_BULK_LATENCY;
- else if (bytes_per_usec <= 10)
- new_latency_range = I40E_LOWEST_LATENCY;
- break;
- case I40E_BULK_LATENCY:
- default:
- if (bytes_per_usec <= 20)
- new_latency_range = I40E_LOW_LATENCY;
- break;
+ if (avg_wire_size <= 60) {
+ /* Start at 250k ints/sec */
+ avg_wire_size = 4096;
+ } else if (avg_wire_size <= 380) {
+ /* 250K ints/sec to 60K ints/sec */
+ avg_wire_size *= 40;
+ avg_wire_size += 1696;
+ } else if (avg_wire_size <= 1084) {
+ /* 60K ints/sec to 36K ints/sec */
+ avg_wire_size *= 15;
+ avg_wire_size += 11452;
+ } else if (avg_wire_size <= 1980) {
+ /* 36K ints/sec to 30K ints/sec */
+ avg_wire_size *= 5;
+ avg_wire_size += 22420;
+ } else {
+ /* plateau at a limit of 30K ints/sec */
+ avg_wire_size = 32256;
}
-reset_latency:
- rc->latency_range = new_latency_range;
+ /* If we are in low latency mode halve our delay which doubles the
+ * rate to somewhere between 100K to 16K ints/sec
+ */
+ if (itr & I40E_ITR_ADAPTIVE_LATENCY)
+ avg_wire_size /= 2;
- switch (new_latency_range) {
- case I40E_LOWEST_LATENCY:
- new_itr = I40E_ITR_50K;
- break;
- case I40E_LOW_LATENCY:
- new_itr = I40E_ITR_20K;
- break;
- case I40E_BULK_LATENCY:
- new_itr = I40E_ITR_18K;
- break;
- default:
- break;
+ /* Resultant value is 256 times larger than it needs to be. This
+ * gives us room to adjust the value as needed to either increase
+ * or decrease the value based on link speeds of 10G, 2.5G, 1G, etc.
+ *
+ * Use addition as we have already recorded the new latency flag
+ * for the ITR value.
+ */
+ itr += DIV_ROUND_UP(avg_wire_size, i40e_itr_divisor(q_vector)) *
+ I40E_ITR_ADAPTIVE_MIN_INC;
+
+ if ((itr & I40E_ITR_MASK) > I40E_ITR_ADAPTIVE_MAX_USECS) {
+ itr &= I40E_ITR_ADAPTIVE_LATENCY;
+ itr += I40E_ITR_ADAPTIVE_MAX_USECS;
}
+clear_counts:
+ /* write back value */
+ rc->target_itr = itr;
+
+ /* next update should occur within next jiffy */
+ rc->next_update = next_update + 1;
+
rc->total_bytes = 0;
rc->total_packets = 0;
- rc->last_itr_update = jiffies;
-
- if (new_itr != rc->itr) {
- rc->itr = new_itr;
- return true;
- }
- return false;
}
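A worked example, using only the constants above, makes the scaling concrete: with minimum-size frames the adjust_by_size path sets avg_wire_size = 4096; on a 40GB link i40e_itr_divisor() returns 2 * 1024 = 2048, so the bulk increment is DIV_ROUND_UP(4096, 2048) * 2 = 4 usec, roughly 250K ints/sec, matching the "Start at 250k ints/sec" note. At the 32256 plateau the same link yields DIV_ROUND_UP(32256, 2048) * 2 = 32 usec, about 31K ints/sec, the quoted ~30K ceiling. Slower links get smaller divisors, hence proportionally larger ITR values and lower interrupt rates.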
/**
@@ -1273,7 +1415,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
* @rx_buffer: rx buffer to pull data from
*
* This function will clean up the contents of the rx_buffer. It will
- * either recycle the bufer or unmap it and free the associated resources.
+ * either recycle the buffer or unmap it and free the associated resources.
*/
static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
struct i40e_rx_buffer *rx_buffer)
@@ -1457,33 +1599,45 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
return failure ? budget : (int)total_rx_packets;
}
-static u32 i40e_buildreg_itr(const int type, const u16 itr)
+static inline u32 i40e_buildreg_itr(const int type, u16 itr)
{
u32 val;
+ /* We don't bother with setting the CLEARPBA bit as the data sheet
+ * points out doing so is "meaningless since it was already
+ * auto-cleared". The auto-clearing happens when the interrupt is
+ * asserted.
+ *
+ * Hardware errata 28 also indicates that writing to a
+ * xxINT_DYN_CTLx CSR with INTENA_MSK (bit 31) set to 0 will clear
+ * an event in the PBA anyway so we need to rely on the automask
+ * to hold pending events for us until the interrupt is re-enabled
+ *
+ * The itr value is reported in microseconds, and the register
+ * value is recorded in 2 microsecond units. For this reason we
+ * only need to shift by the interval shift - 1 instead of the
+ * full value.
+ */
+ itr &= I40E_ITR_MASK;
+
val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
(type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
- (itr << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
+ (itr << (I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT - 1));
return val;
}
/* a small macro to shorten up some long lines */
#define INTREG I40E_VFINT_DYN_CTLN1
-static inline int get_rx_itr(struct i40e_vsi *vsi, int idx)
-{
- struct i40evf_adapter *adapter = vsi->back;
- return adapter->rx_rings[idx].rx_itr_setting;
-}
-
-static inline int get_tx_itr(struct i40e_vsi *vsi, int idx)
-{
- struct i40evf_adapter *adapter = vsi->back;
-
- return adapter->tx_rings[idx].tx_itr_setting;
-}
+/* The act of updating the ITR will cause it to immediately trigger. In order
+ * to prevent this from throwing off adaptive update statistics we defer the
+ * update so that it can only happen so often. So after either Tx or Rx are
+ * updated we make the adaptive scheme wait until either the ITR completely
+ * expires via the next_update expiration or we have been through at least
+ * 3 interrupts.
+ */
+#define ITR_COUNTDOWN_START 3
/**
* i40e_update_enable_itr - Update itr and re-enable MSIX interrupt
@@ -1495,70 +1649,51 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
struct i40e_q_vector *q_vector)
{
struct i40e_hw *hw = &vsi->back->hw;
- bool rx = false, tx = false;
- u32 rxval, txval;
- int idx = q_vector->v_idx;
- int rx_itr_setting, tx_itr_setting;
-
- /* avoid dynamic calculation if in countdown mode OR if
- * all dynamic is disabled
- */
- rxval = txval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
-
- rx_itr_setting = get_rx_itr(vsi, idx);
- tx_itr_setting = get_tx_itr(vsi, idx);
+ u32 intval;
- if (q_vector->itr_countdown > 0 ||
- (!ITR_IS_DYNAMIC(rx_itr_setting) &&
- !ITR_IS_DYNAMIC(tx_itr_setting))) {
- goto enable_int;
- }
-
- if (ITR_IS_DYNAMIC(rx_itr_setting)) {
- rx = i40e_set_new_dynamic_itr(&q_vector->rx);
- rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
- }
+ /* These will do nothing if dynamic updates are not enabled */
+ i40e_update_itr(q_vector, &q_vector->tx);
+ i40e_update_itr(q_vector, &q_vector->rx);
- if (ITR_IS_DYNAMIC(tx_itr_setting)) {
- tx = i40e_set_new_dynamic_itr(&q_vector->tx);
- txval = i40e_buildreg_itr(I40E_TX_ITR, q_vector->tx.itr);
- }
-
- if (rx || tx) {
- /* get the higher of the two ITR adjustments and
- * use the same value for both ITR registers
- * when in adaptive mode (Rx and/or Tx)
- */
- u16 itr = max(q_vector->tx.itr, q_vector->rx.itr);
-
- q_vector->tx.itr = q_vector->rx.itr = itr;
- txval = i40e_buildreg_itr(I40E_TX_ITR, itr);
- tx = true;
- rxval = i40e_buildreg_itr(I40E_RX_ITR, itr);
- rx = true;
- }
-
- /* only need to enable the interrupt once, but need
- * to possibly update both ITR values
+ /* This block of logic allows us to get away with only updating
+ * one ITR value with each interrupt. The idea is to perform a
+ * pseudo-lazy update with the following criteria.
+ *
+ * 1. Rx is given higher priority than Tx if both are in same state
+ * 2. If we must reduce an ITR, that reduction is given highest priority.
+ * 3. We then give priority to increasing the ITR based on amount.
*/
- if (rx) {
- /* set the INTENA_MSK_MASK so that this first write
- * won't actually enable the interrupt, instead just
- * updating the ITR (it's bit 31 PF and VF)
+ if (q_vector->rx.target_itr < q_vector->rx.current_itr) {
+ /* Rx ITR needs to be reduced, this is highest priority */
+ intval = i40e_buildreg_itr(I40E_RX_ITR,
+ q_vector->rx.target_itr);
+ q_vector->rx.current_itr = q_vector->rx.target_itr;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ } else if ((q_vector->tx.target_itr < q_vector->tx.current_itr) ||
+ ((q_vector->rx.target_itr - q_vector->rx.current_itr) <
+ (q_vector->tx.target_itr - q_vector->tx.current_itr))) {
+ /* Tx ITR needs to be reduced, this is second priority
+ * Tx ITR needs to be increased more than Rx, fourth priority
*/
- rxval |= BIT(31);
- /* don't check _DOWN because interrupt isn't being enabled */
- wr32(hw, INTREG(q_vector->reg_idx), rxval);
+ intval = i40e_buildreg_itr(I40E_TX_ITR,
+ q_vector->tx.target_itr);
+ q_vector->tx.current_itr = q_vector->tx.target_itr;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ } else if (q_vector->rx.current_itr != q_vector->rx.target_itr) {
+ /* Rx ITR needs to be increased, third priority */
+ intval = i40e_buildreg_itr(I40E_RX_ITR,
+ q_vector->rx.target_itr);
+ q_vector->rx.current_itr = q_vector->rx.target_itr;
+ q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ } else {
+ /* No ITR update, lowest priority */
+ intval = i40e_buildreg_itr(I40E_ITR_NONE, 0);
+ if (q_vector->itr_countdown)
+ q_vector->itr_countdown--;
}
-enable_int:
if (!test_bit(__I40E_VSI_DOWN, vsi->state))
- wr32(hw, INTREG(q_vector->reg_idx), txval);
-
- if (q_vector->itr_countdown)
- q_vector->itr_countdown--;
- else
- q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ wr32(hw, INTREG(q_vector->reg_idx), intval);
}
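To walk through the priority scheme once: suppose i40e_update_itr() just lowered the Rx target from 50 usec to 8 usec while Tx is unchanged. The first branch programs the Rx ITR with 8, records current_itr = target_itr and arms itr_countdown = ITR_COUNTDOWN_START; the next three interrupts then fall through to the final branch, re-enabling the interrupt with I40E_ITR_NONE while the countdown drains, which is exactly the deferral described at the ITR_COUNTDOWN_START definition above.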
/**
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 7798a6645c3f..9129447d079b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -28,31 +28,35 @@
#define _I40E_TXRX_H_
/* Interrupt Throttling and Rate Limiting Goodies */
-
-#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
-#define I40E_MIN_ITR 0x0001 /* reg uses 2 usec resolution */
-#define I40E_ITR_100K 0x0005
-#define I40E_ITR_50K 0x000A
-#define I40E_ITR_20K 0x0019
-#define I40E_ITR_18K 0x001B
-#define I40E_ITR_8K 0x003E
-#define I40E_ITR_4K 0x007A
-#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */
-#define I40E_ITR_RX_DEF (ITR_REG_TO_USEC(I40E_ITR_20K) | \
- I40E_ITR_DYNAMIC)
-#define I40E_ITR_TX_DEF (ITR_REG_TO_USEC(I40E_ITR_20K) | \
- I40E_ITR_DYNAMIC)
-#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
-#define I40E_MIN_INT_RATE 250 /* ~= 1000000 / (I40E_MAX_ITR * 2) */
-#define I40E_MAX_INT_RATE 500000 /* == 1000000 / (I40E_MIN_ITR * 2) */
#define I40E_DEFAULT_IRQ_WORK 256
-#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
-#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
-#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
+
+/* The datasheets for the X710 and XL710 indicate that the maximum value for
+ * the ITR is 8160 usec, which is called out as 0xFF0 with a 2 usec
+ * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
+ * the register value, which is divided by 2, let's use the actual values and
+ * avoid an excessive amount of translation.
+ */
+#define I40E_ITR_DYNAMIC 0x8000 /* use top bit as a flag */
+#define I40E_ITR_MASK 0x1FFE /* mask for ITR register value */
+#define I40E_MIN_ITR 2 /* reg uses 2 usec resolution */
+#define I40E_ITR_100K 10 /* all values below must be even */
+#define I40E_ITR_50K 20
+#define I40E_ITR_20K 50
+#define I40E_ITR_18K 60
+#define I40E_ITR_8K 122
+#define I40E_MAX_ITR 8160 /* maximum value as per datasheet */
+#define ITR_TO_REG(setting) ((setting) & ~I40E_ITR_DYNAMIC)
+#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~I40E_ITR_MASK)
+#define ITR_IS_DYNAMIC(setting) (!!((setting) & I40E_ITR_DYNAMIC))
+
+#define I40E_ITR_RX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC)
+#define I40E_ITR_TX_DEF (I40E_ITR_20K | I40E_ITR_DYNAMIC)
+
/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
* the value of the rate limit is non-zero
*/
#define INTRL_ENA BIT(6)
+#define I40E_MAX_INTRL 0x3B /* reg uses 4 usec resolution */
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
#define I40E_INTRL_8K 125 /* 8000 ints/sec */
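Worked conversions for the macros above, derived purely from the definitions: INTRL_USEC_TO_REG(125) = (125 >> 2) | INTRL_ENA = 31 | 0x40 = 0x5F, and INTRL_REG_TO_USEC(0x5F) = (0x5F & ~0x40) << 2 = 124, so the 8000 ints/sec setting of 125 usec is actually programmed as 124 usec because of the 4 usec register resolution. Likewise ITR_REG_ALIGN() above, expanded as __ALIGN_MASK(setting, ~I40E_ITR_MASK), rounds an odd microsecond request up to the next even value the 2 usec ITR registers can represent, e.g. 5 becomes 6.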
@@ -362,8 +366,7 @@ struct i40e_ring {
* these values always store the USER setting, and must be converted
* before programming to a register.
*/
- u16 rx_itr_setting;
- u16 tx_itr_setting;
+ u16 itr_setting;
u16 count; /* Number of descriptors */
u16 reg_idx; /* HW register index of the ring */
@@ -425,21 +428,21 @@ static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}
-enum i40e_latency_range {
- I40E_LOWEST_LATENCY = 0,
- I40E_LOW_LATENCY = 1,
- I40E_BULK_LATENCY = 2,
-};
+#define I40E_ITR_ADAPTIVE_MIN_INC 0x0002
+#define I40E_ITR_ADAPTIVE_MIN_USECS 0x0002
+#define I40E_ITR_ADAPTIVE_MAX_USECS 0x007e
+#define I40E_ITR_ADAPTIVE_LATENCY 0x8000
+#define I40E_ITR_ADAPTIVE_BULK 0x0000
+#define ITR_IS_BULK(x) (!((x) & I40E_ITR_ADAPTIVE_LATENCY))
struct i40e_ring_container {
- /* array of pointers to rings */
- struct i40e_ring *ring;
+ struct i40e_ring *ring; /* pointer to linked list of ring(s) */
+ unsigned long next_update; /* jiffies value of next update */
unsigned int total_bytes; /* total bytes processed this int */
unsigned int total_packets; /* total packets processed this int */
- unsigned long last_itr_update; /* jiffies of last ITR update */
u16 count;
- enum i40e_latency_range latency_range;
- u16 itr;
+ u16 target_itr; /* target ITR setting for ring(s) */
+ u16 current_itr; /* current ITR setting for ring(s) */
};
/* iterator for handling rings in ring container */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 9690c1ea019e..e46555ad7122 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -52,7 +52,10 @@
#include <linux/socket.h>
#include <linux/jiffies.h>
#include <net/ip6_checksum.h>
+#include <net/pkt_cls.h>
#include <net/udp.h>
+#include <net/tc_act/tc_gact.h>
+#include <net/tc_act/tc_mirred.h>
#include "i40e_type.h"
#include <linux/avf/virtchnl.h>
@@ -106,6 +109,7 @@ struct i40e_vsi {
#define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4)
#define I40EVF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4)
+#define I40EVF_MBPS_DIVISOR 125000 /* divisor to convert to Mbps */
/* MAX_MSIX_Q_VECTORS of these are allocated,
* but we only use one per queue-specific vector.
@@ -117,9 +121,8 @@ struct i40e_q_vector {
struct i40e_ring_container rx;
struct i40e_ring_container tx;
u32 ring_mask;
+ u8 itr_countdown; /* when 0 should adjust adaptive ITR */
u8 num_ringpairs; /* total number of ring pairs in vector */
-#define ITR_COUNTDOWN_START 100
- u8 itr_countdown; /* when 0 or 1 update ITR */
u16 v_idx; /* index in the vsi->q_vector array. */
u16 reg_idx; /* register index of the interrupt */
char name[IFNAMSIZ + 15];
@@ -169,6 +172,28 @@ struct i40evf_vlan_filter {
bool add; /* filter needs to be added */
};
+#define I40EVF_MAX_TRAFFIC_CLASS 4
+/* State of traffic class creation */
+enum i40evf_tc_state_t {
+ __I40EVF_TC_INVALID, /* no traffic class, default state */
+ __I40EVF_TC_RUNNING, /* traffic classes have been created */
+};
+
+/* channel info */
+struct i40evf_channel_config {
+ struct virtchnl_channel_info ch_info[I40EVF_MAX_TRAFFIC_CLASS];
+ enum i40evf_tc_state_t state;
+ u8 total_qps;
+};
+
+/* State of cloud filter */
+enum i40evf_cloud_filter_state_t {
+ __I40EVF_CF_INVALID, /* cloud filter not added */
+ __I40EVF_CF_ADD_PENDING, /* cloud filter pending add by the PF */
+ __I40EVF_CF_DEL_PENDING, /* cloud filter pending del by the PF */
+ __I40EVF_CF_ACTIVE, /* cloud filter is active */
+};
+
/* Driver state. The order of these is important! */
enum i40evf_state_t {
__I40EVF_STARTUP, /* driver loaded, probe complete */
@@ -190,6 +215,36 @@ enum i40evf_critical_section_t {
__I40EVF_IN_REMOVE_TASK, /* device being removed */
};
+#define I40EVF_CLOUD_FIELD_OMAC 0x01
+#define I40EVF_CLOUD_FIELD_IMAC 0x02
+#define I40EVF_CLOUD_FIELD_IVLAN 0x04
+#define I40EVF_CLOUD_FIELD_TEN_ID 0x08
+#define I40EVF_CLOUD_FIELD_IIP 0x10
+
+#define I40EVF_CF_FLAGS_OMAC I40EVF_CLOUD_FIELD_OMAC
+#define I40EVF_CF_FLAGS_IMAC I40EVF_CLOUD_FIELD_IMAC
+#define I40EVF_CF_FLAGS_IMAC_IVLAN (I40EVF_CLOUD_FIELD_IMAC |\
+ I40EVF_CLOUD_FIELD_IVLAN)
+#define I40EVF_CF_FLAGS_IMAC_TEN_ID (I40EVF_CLOUD_FIELD_IMAC |\
+ I40EVF_CLOUD_FIELD_TEN_ID)
+#define I40EVF_CF_FLAGS_OMAC_TEN_ID_IMAC (I40EVF_CLOUD_FIELD_OMAC |\
+ I40EVF_CLOUD_FIELD_IMAC |\
+ I40EVF_CLOUD_FIELD_TEN_ID)
+#define I40EVF_CF_FLAGS_IMAC_IVLAN_TEN_ID (I40EVF_CLOUD_FIELD_IMAC |\
+ I40EVF_CLOUD_FIELD_IVLAN |\
+ I40EVF_CLOUD_FIELD_TEN_ID)
+#define I40EVF_CF_FLAGS_IIP I40EVF_CLOUD_FIELD_IIP
+
+/* bookkeeping of cloud filters */
+struct i40evf_cloud_filter {
+ enum i40evf_cloud_filter_state_t state;
+ struct list_head list;
+ struct virtchnl_filter f;
+ unsigned long cookie;
+ bool del; /* filter needs to be deleted */
+ bool add; /* filter needs to be added */
+};
+
/* board specific private data structure */
struct i40evf_adapter {
struct timer_list watchdog_timer;
@@ -241,6 +296,7 @@ struct i40evf_adapter {
#define I40EVF_FLAG_ALLMULTI_ON BIT(14)
#define I40EVF_FLAG_LEGACY_RX BIT(15)
#define I40EVF_FLAG_REINIT_ITR_NEEDED BIT(16)
+#define I40EVF_FLAG_QUEUES_DISABLED BIT(17)
/* duplicates for common code */
#define I40E_FLAG_DCB_ENABLED 0
#define I40E_FLAG_RX_CSUM_ENABLED I40EVF_FLAG_RX_CSUM_ENABLED
@@ -269,6 +325,10 @@ struct i40evf_adapter {
#define I40EVF_FLAG_AQ_RELEASE_ALLMULTI BIT(18)
#define I40EVF_FLAG_AQ_ENABLE_VLAN_STRIPPING BIT(19)
#define I40EVF_FLAG_AQ_DISABLE_VLAN_STRIPPING BIT(20)
+#define I40EVF_FLAG_AQ_ENABLE_CHANNELS BIT(21)
+#define I40EVF_FLAG_AQ_DISABLE_CHANNELS BIT(22)
+#define I40EVF_FLAG_AQ_ADD_CLOUD_FILTER BIT(23)
+#define I40EVF_FLAG_AQ_DEL_CLOUD_FILTER BIT(24)
/* OS defined structs */
struct net_device *netdev;
@@ -314,6 +374,13 @@ struct i40evf_adapter {
u16 rss_lut_size;
u8 *rss_key;
u8 *rss_lut;
+ /* ADQ related members */
+ struct i40evf_channel_config ch_config;
+ u8 num_tc;
+ struct list_head cloud_filter_list;
+ /* lock to protect access to the cloud filter list */
+ spinlock_t cloud_filter_list_lock;
+ u16 num_cloud_filters;
};
@@ -380,4 +447,8 @@ void i40evf_notify_client_message(struct i40e_vsi *vsi, u8 *msg, u16 len);
void i40evf_notify_client_l2_params(struct i40e_vsi *vsi);
void i40evf_notify_client_open(struct i40e_vsi *vsi);
void i40evf_notify_client_close(struct i40e_vsi *vsi, bool reset);
+void i40evf_enable_channels(struct i40evf_adapter *adapter);
+void i40evf_disable_channels(struct i40evf_adapter *adapter);
+void i40evf_add_cloud_filter(struct i40evf_adapter *adapter);
+void i40evf_del_cloud_filter(struct i40evf_adapter *adapter);
#endif /* _I40EVF_H_ */
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index e2d8aa19d205..e6793255de0b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -457,14 +457,14 @@ static int __i40evf_get_coalesce(struct net_device *netdev,
rx_ring = &adapter->rx_rings[queue];
tx_ring = &adapter->tx_rings[queue];
- if (ITR_IS_DYNAMIC(rx_ring->rx_itr_setting))
+ if (ITR_IS_DYNAMIC(rx_ring->itr_setting))
ec->use_adaptive_rx_coalesce = 1;
- if (ITR_IS_DYNAMIC(tx_ring->tx_itr_setting))
+ if (ITR_IS_DYNAMIC(tx_ring->itr_setting))
ec->use_adaptive_tx_coalesce = 1;
- ec->rx_coalesce_usecs = rx_ring->rx_itr_setting & ~I40E_ITR_DYNAMIC;
- ec->tx_coalesce_usecs = tx_ring->tx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->rx_coalesce_usecs = rx_ring->itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->tx_coalesce_usecs = tx_ring->itr_setting & ~I40E_ITR_DYNAMIC;
return 0;
}
@@ -502,7 +502,7 @@ static int i40evf_get_per_queue_coalesce(struct net_device *netdev,
/**
* i40evf_set_itr_per_queue - set ITR values for specific queue
- * @vsi: the VSI to set values for
+ * @adapter: the VF adapter struct to set values for
* @ec: coalesce settings from ethtool
* @queue: the queue to modify
*
@@ -514,33 +514,29 @@ static void i40evf_set_itr_per_queue(struct i40evf_adapter *adapter,
{
struct i40e_ring *rx_ring = &adapter->rx_rings[queue];
struct i40e_ring *tx_ring = &adapter->tx_rings[queue];
- struct i40e_vsi *vsi = &adapter->vsi;
- struct i40e_hw *hw = &adapter->hw;
struct i40e_q_vector *q_vector;
- u16 vector;
- rx_ring->rx_itr_setting = ec->rx_coalesce_usecs;
- tx_ring->tx_itr_setting = ec->tx_coalesce_usecs;
+ rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs);
+ tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs);
- rx_ring->rx_itr_setting |= I40E_ITR_DYNAMIC;
+ rx_ring->itr_setting |= I40E_ITR_DYNAMIC;
if (!ec->use_adaptive_rx_coalesce)
- rx_ring->rx_itr_setting ^= I40E_ITR_DYNAMIC;
+ rx_ring->itr_setting ^= I40E_ITR_DYNAMIC;
- tx_ring->tx_itr_setting |= I40E_ITR_DYNAMIC;
+ tx_ring->itr_setting |= I40E_ITR_DYNAMIC;
if (!ec->use_adaptive_tx_coalesce)
- tx_ring->tx_itr_setting ^= I40E_ITR_DYNAMIC;
+ tx_ring->itr_setting ^= I40E_ITR_DYNAMIC;
q_vector = rx_ring->q_vector;
- q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting);
- vector = vsi->base_vector + q_vector->v_idx;
- wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, vector - 1), q_vector->rx.itr);
+ q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
q_vector = tx_ring->q_vector;
- q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting);
- vector = vsi->base_vector + q_vector->v_idx;
- wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, vector - 1), q_vector->tx.itr);
+ q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
- i40e_flush(hw);
+ /* The interrupt handler itself will take care of programming
+ * the Tx and Rx ITR values based on the values we have entered
+ * into the q_vector, no need to write the values now.
+ */
}
/**
@@ -565,8 +561,8 @@ static int __i40evf_set_coalesce(struct net_device *netdev,
if (ec->rx_coalesce_usecs == 0) {
if (ec->use_adaptive_rx_coalesce)
netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n");
- } else if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
- (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1))) {
+ } else if ((ec->rx_coalesce_usecs < I40E_MIN_ITR) ||
+ (ec->rx_coalesce_usecs > I40E_MAX_ITR)) {
netif_info(adapter, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n");
return -EINVAL;
}
@@ -575,8 +571,8 @@ static int __i40evf_set_coalesce(struct net_device *netdev,
if (ec->tx_coalesce_usecs == 0) {
if (ec->use_adaptive_tx_coalesce)
netif_info(adapter, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n");
- } else if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
- (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1))) {
+ } else if ((ec->tx_coalesce_usecs < I40E_MIN_ITR) ||
+ (ec->tx_coalesce_usecs > I40E_MAX_ITR)) {
netif_info(adapter, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n");
return -EINVAL;
}
@@ -699,6 +695,12 @@ static int i40evf_set_channels(struct net_device *netdev,
return -EINVAL;
}
+ if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
+ adapter->num_tc) {
+ dev_info(&adapter->pdev->dev, "Cannot set channels since ADq is enabled.\n");
+ return -EINVAL;
+ }
+
/* All of these should have already been checked by ethtool before this
* even gets to us, but just to be sure.
*/
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 16989ad2ca90..dae121877935 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -353,11 +353,12 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
rx_ring->vsi = &adapter->vsi;
q_vector->rx.ring = rx_ring;
q_vector->rx.count++;
- q_vector->rx.latency_range = I40E_LOW_LATENCY;
- q_vector->rx.itr = ITR_TO_REG(rx_ring->rx_itr_setting);
+ q_vector->rx.next_update = jiffies + 1;
+ q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
q_vector->ring_mask |= BIT(r_idx);
- q_vector->itr_countdown = ITR_COUNTDOWN_START;
- wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, v_idx - 1), q_vector->rx.itr);
+ wr32(hw, I40E_VFINT_ITRN1(I40E_RX_ITR, q_vector->reg_idx),
+ q_vector->rx.current_itr);
+ q_vector->rx.current_itr = q_vector->rx.target_itr;
}
/**
@@ -378,11 +379,12 @@ i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
tx_ring->vsi = &adapter->vsi;
q_vector->tx.ring = tx_ring;
q_vector->tx.count++;
- q_vector->tx.latency_range = I40E_LOW_LATENCY;
- q_vector->tx.itr = ITR_TO_REG(tx_ring->tx_itr_setting);
- q_vector->itr_countdown = ITR_COUNTDOWN_START;
+ q_vector->tx.next_update = jiffies + 1;
+ q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
q_vector->num_ringpairs++;
- wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, v_idx - 1), q_vector->tx.itr);
+ wr32(hw, I40E_VFINT_ITRN1(I40E_TX_ITR, q_vector->reg_idx),
+ q_vector->tx.target_itr);
+ q_vector->tx.current_itr = q_vector->tx.target_itr;
}
/**
@@ -783,7 +785,7 @@ static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
**/
static struct
i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
- u8 *macaddr)
+ const u8 *macaddr)
{
struct i40evf_mac_filter *f;
@@ -806,20 +808,18 @@ i40evf_mac_filter *i40evf_find_filter(struct i40evf_adapter *adapter,
**/
static struct
i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
- u8 *macaddr)
+ const u8 *macaddr)
{
struct i40evf_mac_filter *f;
if (!macaddr)
return NULL;
- spin_lock_bh(&adapter->mac_vlan_list_lock);
-
f = i40evf_find_filter(adapter, macaddr);
if (!f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
if (!f)
- goto clearout;
+ return f;
ether_addr_copy(f->macaddr, macaddr);
@@ -830,8 +830,6 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
f->remove = false;
}
-clearout:
- spin_unlock_bh(&adapter->mac_vlan_list_lock);
return f;
}
@@ -866,9 +864,10 @@ static int i40evf_set_mac(struct net_device *netdev, void *p)
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
}
+ f = i40evf_add_filter(adapter, addr->sa_data);
+
spin_unlock_bh(&adapter->mac_vlan_list_lock);
- f = i40evf_add_filter(adapter, addr->sa_data);
if (f) {
ether_addr_copy(hw->mac.addr, addr->sa_data);
ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
@@ -878,50 +877,64 @@ static int i40evf_set_mac(struct net_device *netdev, void *p)
}
/**
- * i40evf_set_rx_mode - NDO callback to set the netdev filters
- * @netdev: network interface device structure
- **/
-static void i40evf_set_rx_mode(struct net_device *netdev)
+ * i40evf_addr_sync - Callback for dev_(mc|uc)_sync to add address
+ * @netdev: the netdevice
+ * @addr: address to add
+ *
+ * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
+ * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
+ */
+static int i40evf_addr_sync(struct net_device *netdev, const u8 *addr)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- struct i40evf_mac_filter *f, *ftmp;
- struct netdev_hw_addr *uca;
- struct netdev_hw_addr *mca;
- struct netdev_hw_addr *ha;
-
- /* add addr if not already in the filter list */
- netdev_for_each_uc_addr(uca, netdev) {
- i40evf_add_filter(adapter, uca->addr);
- }
- netdev_for_each_mc_addr(mca, netdev) {
- i40evf_add_filter(adapter, mca->addr);
- }
- spin_lock_bh(&adapter->mac_vlan_list_lock);
-
- list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
- netdev_for_each_mc_addr(mca, netdev)
- if (ether_addr_equal(mca->addr, f->macaddr))
- goto bottom_of_search_loop;
-
- netdev_for_each_uc_addr(uca, netdev)
- if (ether_addr_equal(uca->addr, f->macaddr))
- goto bottom_of_search_loop;
+ if (i40evf_add_filter(adapter, addr))
+ return 0;
+ else
+ return -ENOMEM;
+}
- for_each_dev_addr(netdev, ha)
- if (ether_addr_equal(ha->addr, f->macaddr))
- goto bottom_of_search_loop;
+/**
+ * i40evf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
+ * @netdev: the netdevice
+ * @addr: address to add
+ *
+ * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
+ * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
+ */
+static int i40evf_addr_unsync(struct net_device *netdev, const u8 *addr)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40evf_mac_filter *f;
- if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr))
- goto bottom_of_search_loop;
+ /* Under some circumstances, we might receive a request to delete
+ * our own device address from our uc list. Because we store the
+ * device address in the VSI's MAC/VLAN filter list, we need to ignore
+ * such requests and not delete our device address from this list.
+ */
+ if (ether_addr_equal(addr, netdev->dev_addr))
+ return 0;
- /* f->macaddr wasn't found in uc, mc, or ha list so delete it */
+ f = i40evf_find_filter(adapter, addr);
+ if (f) {
f->remove = true;
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
-
-bottom_of_search_loop:
- continue;
}
+ return 0;
+}
+
+/**
+ * i40evf_set_rx_mode - NDO callback to set the netdev filters
+ * @netdev: network interface device structure
+ **/
+static void i40evf_set_rx_mode(struct net_device *netdev)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+ __dev_uc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync);
+ __dev_mc_sync(netdev, i40evf_addr_sync, i40evf_addr_unsync);
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
if (netdev->flags & IFF_PROMISC &&
!(adapter->flags & I40EVF_FLAG_PROMISC_ON))
@@ -936,8 +949,6 @@ bottom_of_search_loop:
else if (!(netdev->flags & IFF_ALLMULTI) &&
adapter->flags & I40EVF_FLAG_ALLMULTI_ON)
adapter->aq_required |= I40EVF_FLAG_AQ_RELEASE_ALLMULTI;
-
- spin_unlock_bh(&adapter->mac_vlan_list_lock);
}
/**
@@ -1025,7 +1036,9 @@ static void i40evf_up_complete(struct i40evf_adapter *adapter)
void i40evf_down(struct i40evf_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ struct i40evf_vlan_filter *vlf;
struct i40evf_mac_filter *f;
+ struct i40evf_cloud_filter *cf;
if (adapter->state <= __I40EVF_DOWN_PENDING)
return;
@@ -1038,17 +1051,29 @@ void i40evf_down(struct i40evf_adapter *adapter)
spin_lock_bh(&adapter->mac_vlan_list_lock);
+ /* clear the sync flag on all filters */
+ __dev_uc_unsync(adapter->netdev, NULL);
+ __dev_mc_unsync(adapter->netdev, NULL);
+
/* remove all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->remove = true;
}
+
/* remove all VLAN filters */
- list_for_each_entry(f, &adapter->vlan_filter_list, list) {
- f->remove = true;
+ list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
+ vlf->remove = true;
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ /* remove all cloud filters */
+ spin_lock_bh(&adapter->cloud_filter_list_lock);
+ list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
+ cf->del = true;
+ }
+ spin_unlock_bh(&adapter->cloud_filter_list_lock);
+
if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
adapter->state != __I40EVF_RESETTING) {
/* cancel any current operation */
@@ -1059,6 +1084,7 @@ void i40evf_down(struct i40evf_adapter *adapter)
*/
adapter->aq_required = I40EVF_FLAG_AQ_DEL_MAC_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
}
@@ -1144,6 +1170,9 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
*/
if (adapter->num_req_queues)
num_active_queues = adapter->num_req_queues;
+ else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
+ adapter->num_tc)
+ num_active_queues = adapter->ch_config.total_qps;
else
num_active_queues = min_t(int,
adapter->vsi_res->num_queue_pairs,
@@ -1169,7 +1198,7 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
tx_ring->netdev = adapter->netdev;
tx_ring->dev = &adapter->pdev->dev;
tx_ring->count = adapter->tx_desc_count;
- tx_ring->tx_itr_setting = I40E_ITR_TX_DEF;
+ tx_ring->itr_setting = I40E_ITR_TX_DEF;
if (adapter->flags & I40EVF_FLAG_WB_ON_ITR_CAPABLE)
tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;
@@ -1178,7 +1207,7 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
rx_ring->netdev = adapter->netdev;
rx_ring->dev = &adapter->pdev->dev;
rx_ring->count = adapter->rx_desc_count;
- rx_ring->rx_itr_setting = I40E_ITR_RX_DEF;
+ rx_ring->itr_setting = I40E_ITR_RX_DEF;
}
adapter->num_active_queues = num_active_queues;
@@ -1471,6 +1500,16 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
goto err_alloc_q_vectors;
}
+ /* If we've made it this far with the ADq flag ON, then we haven't
+ * bailed out anywhere in the middle. ADq isn't just enabled; the actual
+ * resources have been allocated in the reset path.
+ * Now we can truly claim that ADq is enabled.
+ */
+ if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
+ adapter->num_tc)
+ dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
+ adapter->num_tc);
+
dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
(adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
adapter->num_active_queues);
@@ -1712,6 +1751,27 @@ static void i40evf_watchdog_task(struct work_struct *work)
i40evf_set_promiscuous(adapter, 0);
goto watchdog_done;
}
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_ENABLE_CHANNELS) {
+ i40evf_enable_channels(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_DISABLE_CHANNELS) {
+ i40evf_disable_channels(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_ADD_CLOUD_FILTER) {
+ i40evf_add_cloud_filter(adapter);
+ goto watchdog_done;
+ }
+
+ if (adapter->aq_required & I40EVF_FLAG_AQ_DEL_CLOUD_FILTER) {
+ i40evf_del_cloud_filter(adapter);
+ goto watchdog_done;
+ }
+
schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
if (adapter->state == __I40EVF_RUNNING)
@@ -1735,6 +1795,7 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
{
struct i40evf_mac_filter *f, *ftmp;
struct i40evf_vlan_filter *fv, *fvtmp;
+ struct i40evf_cloud_filter *cf, *cftmp;
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
@@ -1756,7 +1817,7 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
spin_lock_bh(&adapter->mac_vlan_list_lock);
- /* Delete all of the filters, both MAC and VLAN. */
+ /* Delete all of the filters */
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
list_del(&f->list);
kfree(f);
@@ -1769,6 +1830,14 @@ static void i40evf_disable_vf(struct i40evf_adapter *adapter)
spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ spin_lock_bh(&adapter->cloud_filter_list_lock);
+ list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
+ list_del(&cf->list);
+ kfree(cf);
+ adapter->num_cloud_filters--;
+ }
+ spin_unlock_bh(&adapter->cloud_filter_list_lock);
+
i40evf_free_misc_irq(adapter);
i40evf_reset_interrupt_capability(adapter);
i40evf_free_queues(adapter);
@@ -1798,9 +1867,11 @@ static void i40evf_reset_task(struct work_struct *work)
struct i40evf_adapter *adapter = container_of(work,
struct i40evf_adapter,
reset_task);
+ struct virtchnl_vf_resource *vfres = adapter->vf_res;
struct net_device *netdev = adapter->netdev;
struct i40e_hw *hw = &adapter->hw;
struct i40evf_vlan_filter *vlf;
+ struct i40evf_cloud_filter *cf;
struct i40evf_mac_filter *f;
u32 reg_val;
int i = 0, err;
@@ -1893,6 +1964,7 @@ continue_reset:
i40evf_free_all_rx_resources(adapter);
i40evf_free_all_tx_resources(adapter);
+ adapter->flags |= I40EVF_FLAG_QUEUES_DISABLED;
/* kill and reinit the admin queue */
i40evf_shutdown_adminq(hw);
adapter->current_op = VIRTCHNL_OP_UNKNOWN;
@@ -1924,8 +1996,19 @@ continue_reset:
spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ /* check if TCs are running and re-add all cloud filters */
+ spin_lock_bh(&adapter->cloud_filter_list_lock);
+ if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
+ adapter->num_tc) {
+ list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
+ cf->add = true;
+ }
+ }
+ spin_unlock_bh(&adapter->cloud_filter_list_lock);
+
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+ adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
i40evf_misc_irq_enable(adapter);
mod_timer(&adapter->watchdog_timer, jiffies + 2);
@@ -2191,6 +2274,713 @@ void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
}
/**
+ * i40evf_validate_tx_bandwidth - validate the max Tx bandwidth
+ * @adapter: board private structure
+ * @max_tx_rate: max Tx bw for a tc
+ **/
+static int i40evf_validate_tx_bandwidth(struct i40evf_adapter *adapter,
+ u64 max_tx_rate)
+{
+ int speed = 0, ret = 0;
+
+ switch (adapter->link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ speed = 40000;
+ break;
+ case I40E_LINK_SPEED_25GB:
+ speed = 25000;
+ break;
+ case I40E_LINK_SPEED_20GB:
+ speed = 20000;
+ break;
+ case I40E_LINK_SPEED_10GB:
+ speed = 10000;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ speed = 1000;
+ break;
+ case I40E_LINK_SPEED_100MB:
+ speed = 100;
+ break;
+ default:
+ break;
+ }
+
+ if (max_tx_rate > speed) {
+ dev_err(&adapter->pdev->dev,
+ "Invalid tx rate specified\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
+ * i40evf_validate_ch_config - validate queue mapping info
+ * @adapter: board private structure
+ * @mqprio_qopt: queue parameters
+ *
+ * This function validates if the config provided by the user to
+ * configure queue channels is valid or not. Returns 0 on a valid
+ * config.
+ **/
+static int i40evf_validate_ch_config(struct i40evf_adapter *adapter,
+ struct tc_mqprio_qopt_offload *mqprio_qopt)
+{
+ u64 total_max_rate = 0;
+ int i, num_qps = 0;
+ u64 tx_rate = 0;
+ int ret = 0;
+
+ if (mqprio_qopt->qopt.num_tc > I40EVF_MAX_TRAFFIC_CLASS ||
+ mqprio_qopt->qopt.num_tc < 1)
+ return -EINVAL;
+
+ for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
+ if (!mqprio_qopt->qopt.count[i] ||
+ mqprio_qopt->qopt.offset[i] != num_qps)
+ return -EINVAL;
+ if (mqprio_qopt->min_rate[i]) {
+ dev_err(&adapter->pdev->dev,
+ "Invalid min tx rate (greater than 0) specified\n");
+ return -EINVAL;
+ }
+ /* convert to Mbps */
+ tx_rate = div_u64(mqprio_qopt->max_rate[i],
+ I40EVF_MBPS_DIVISOR);
+ total_max_rate += tx_rate;
+ num_qps += mqprio_qopt->qopt.count[i];
+ }
+ if (num_qps > MAX_QUEUES)
+ return -EINVAL;
+
+ ret = i40evf_validate_tx_bandwidth(adapter, total_max_rate);
+ return ret;
+}
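The divisor encodes the unit change between tc and virtchnl: mqprio hands rates down in bytes per second while the channel info carries Mbps, and 1 Mbps = 125,000 bytes/sec, hence I40EVF_MBPS_DIVISOR = 125000. A minimal sketch of the conversion (ex_bytes_per_sec_to_mbps is a hypothetical name):

    /* e.g. 625000000 bytes/sec / 125000 = 5000 Mbps */
    static u64 ex_bytes_per_sec_to_mbps(u64 rate)
    {
    	return div_u64(rate, 125000 /* I40EVF_MBPS_DIVISOR */);
    }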
+
+/**
+ * i40evf_del_all_cloud_filters - delete all cloud filters on the
+ * traffic classes
+ * @adapter: board private structure
+ **/
+static void i40evf_del_all_cloud_filters(struct i40evf_adapter *adapter)
+{
+ struct i40evf_cloud_filter *cf, *cftmp;
+
+ spin_lock_bh(&adapter->cloud_filter_list_lock);
+ list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
+ list) {
+ list_del(&cf->list);
+ kfree(cf);
+ adapter->num_cloud_filters--;
+ }
+ spin_unlock_bh(&adapter->cloud_filter_list_lock);
+}
+
+/**
+ * __i40evf_setup_tc - configure multiple traffic classes
+ * @netdev: network interface device structure
+ * @type_data: tc offload data
+ *
+ * This function processes the config information provided by the
+ * user to configure traffic classes/queue channels and packages the
+ * information to request the PF to setup traffic classes.
+ *
+ * Returns 0 on success.
+ **/
+static int __i40evf_setup_tc(struct net_device *netdev, void *type_data)
+{
+ struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct virtchnl_vf_resource *vfres = adapter->vf_res;
+ u8 num_tc = 0, total_qps = 0;
+ int ret = 0, netdev_tc = 0;
+ u64 max_tx_rate;
+ u16 mode;
+ int i;
+
+ num_tc = mqprio_qopt->qopt.num_tc;
+ mode = mqprio_qopt->mode;
+
+ /* delete queue_channel */
+ if (!mqprio_qopt->qopt.hw) {
+ if (adapter->ch_config.state == __I40EVF_TC_RUNNING) {
+ /* reset the tc configuration */
+ netdev_reset_tc(netdev);
+ adapter->num_tc = 0;
+ netif_tx_stop_all_queues(netdev);
+ netif_tx_disable(netdev);
+ i40evf_del_all_cloud_filters(adapter);
+ adapter->aq_required = I40EVF_FLAG_AQ_DISABLE_CHANNELS;
+ goto exit;
+ } else {
+ return -EINVAL;
+ }
+ }
+
+ /* add queue channel */
+ if (mode == TC_MQPRIO_MODE_CHANNEL) {
+ if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
+ dev_err(&adapter->pdev->dev, "ADq not supported\n");
+ return -EOPNOTSUPP;
+ }
+ if (adapter->ch_config.state != __I40EVF_TC_INVALID) {
+ dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
+ return -EINVAL;
+ }
+
+ ret = i40evf_validate_ch_config(adapter, mqprio_qopt);
+ if (ret)
+ return ret;
+ /* Return if same TC config is requested */
+ if (adapter->num_tc == num_tc)
+ return 0;
+ adapter->num_tc = num_tc;
+
+ for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) {
+ if (i < num_tc) {
+ adapter->ch_config.ch_info[i].count =
+ mqprio_qopt->qopt.count[i];
+ adapter->ch_config.ch_info[i].offset =
+ mqprio_qopt->qopt.offset[i];
+ total_qps += mqprio_qopt->qopt.count[i];
+ max_tx_rate = mqprio_qopt->max_rate[i];
+ /* convert to Mbps */
+ max_tx_rate = div_u64(max_tx_rate,
+ I40EVF_MBPS_DIVISOR);
+ adapter->ch_config.ch_info[i].max_tx_rate =
+ max_tx_rate;
+ } else {
+ adapter->ch_config.ch_info[i].count = 1;
+ adapter->ch_config.ch_info[i].offset = 0;
+ }
+ }
+ adapter->ch_config.total_qps = total_qps;
+ netif_tx_stop_all_queues(netdev);
+ netif_tx_disable(netdev);
+ adapter->aq_required |= I40EVF_FLAG_AQ_ENABLE_CHANNELS;
+ netdev_reset_tc(netdev);
+ /* Report the tc mapping up the stack */
+ netdev_set_num_tc(adapter->netdev, num_tc);
+ for (i = 0; i < I40EVF_MAX_TRAFFIC_CLASS; i++) {
+ u16 qcount = mqprio_qopt->qopt.count[i];
+ u16 qoffset = mqprio_qopt->qopt.offset[i];
+
+ if (i < num_tc)
+ netdev_set_tc_queue(netdev, netdev_tc++, qcount,
+ qoffset);
+ }
+ }
+exit:
+ return ret;
+}
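
The final loop above reports which contiguous queue range backs each TC via netdev_set_tc_queue(). A compact sketch of the same mapping with a hypothetical two-TC layout:

#include <stdio.h>

int main(void)
{
	/* hypothetical layout: TC0 -> queues 0-3, TC1 -> queues 4-7 */
	unsigned int count[]  = { 4, 4 };
	unsigned int offset[] = { 0, 4 };
	int num_tc = 2, tc;

	for (tc = 0; tc < num_tc; tc++)
		printf("tc %d: queues %u-%u\n", tc, offset[tc],
		       offset[tc] + count[tc] - 1);
	return 0;
}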
+
+/**
+ * i40evf_parse_cls_flower - Parse tc flower filters provided by kernel
+ * @adapter: board private structure
+ * @f: pointer to struct tc_cls_flower_offload
+ * @filter: pointer to cloud filter structure
+ */
+static int i40evf_parse_cls_flower(struct i40evf_adapter *adapter,
+ struct tc_cls_flower_offload *f,
+ struct i40evf_cloud_filter *filter)
+{
+ u16 n_proto_mask = 0;
+ u16 n_proto_key = 0;
+ u8 field_flags = 0;
+ u16 addr_type = 0;
+ u16 n_proto = 0;
+ int i = 0;
+ struct virtchnl_filter *vf = &filter->f;
+
+ if (f->dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_VLAN) |
+ BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
+ BIT(FLOW_DISSECTOR_KEY_PORTS) |
+ BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
+ dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
+ f->dissector->used_keys);
+ return -EOPNOTSUPP;
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+ struct flow_dissector_key_keyid *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_KEYID,
+ f->mask);
+
+ if (mask->keyid != 0)
+ field_flags |= I40EVF_CLOUD_FIELD_TEN_ID;
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_dissector_key_basic *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->key);
+
+ struct flow_dissector_key_basic *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ f->mask);
+ n_proto_key = ntohs(key->n_proto);
+ n_proto_mask = ntohs(mask->n_proto);
+
+ if (n_proto_key == ETH_P_ALL) {
+ n_proto_key = 0;
+ n_proto_mask = 0;
+ }
+ n_proto = n_proto_key & n_proto_mask;
+ if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
+ return -EINVAL;
+ if (n_proto == ETH_P_IPV6) {
+ /* specify flow type as TCP IPv6 */
+ vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
+ }
+
+ if (key->ip_proto != IPPROTO_TCP) {
+ dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
+ return -EINVAL;
+ }
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_dissector_key_eth_addrs *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS,
+ f->key);
+
+ struct flow_dissector_key_eth_addrs *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ETH_ADDRS,
+ f->mask);
+ /* use is_broadcast and is_zero to check for all 0xff or 0 */
+ if (!is_zero_ether_addr(mask->dst)) {
+ if (is_broadcast_ether_addr(mask->dst)) {
+ field_flags |= I40EVF_CLOUD_FIELD_OMAC;
+ } else {
+ dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
+ mask->dst);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ if (!is_zero_ether_addr(mask->src)) {
+ if (is_broadcast_ether_addr(mask->src)) {
+ field_flags |= I40EVF_CLOUD_FIELD_IMAC;
+ } else {
+ dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
+ mask->src);
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ if (!is_zero_ether_addr(key->dst))
+ if (is_valid_ether_addr(key->dst) ||
+ is_multicast_ether_addr(key->dst)) {
+ /* set the mask if a valid dst_mac address */
+ for (i = 0; i < ETH_ALEN; i++)
+ vf->mask.tcp_spec.dst_mac[i] |= 0xff;
+ ether_addr_copy(vf->data.tcp_spec.dst_mac,
+ key->dst);
+ }
+
+ if (!is_zero_ether_addr(key->src))
+ if (is_valid_ether_addr(key->src) ||
+ is_multicast_ether_addr(key->src)) {
+ /* set the mask if a valid src_mac address */
+ for (i = 0; i < ETH_ALEN; i++)
+ vf->mask.tcp_spec.src_mac[i] |= 0xff;
+ ether_addr_copy(vf->data.tcp_spec.src_mac,
+ key->src);
+ }
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_dissector_key_vlan *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ f->key);
+ struct flow_dissector_key_vlan *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_VLAN,
+ f->mask);
+
+ if (mask->vlan_id) {
+ if (mask->vlan_id == VLAN_VID_MASK) {
+ field_flags |= I40EVF_CLOUD_FIELD_IVLAN;
+ } else {
+ dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
+ mask->vlan_id);
+ return I40E_ERR_CONFIG;
+ }
+ }
+ vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
+ vf->data.tcp_spec.vlan_id = cpu_to_be16(key->vlan_id);
+ }
+
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_dissector_key_control *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_CONTROL,
+ f->key);
+
+ addr_type = key->addr_type;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_dissector_key_ipv4_addrs *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ f->key);
+ struct flow_dissector_key_ipv4_addrs *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV4_ADDRS,
+ f->mask);
+
+ if (mask->dst) {
+ if (mask->dst == cpu_to_be32(0xffffffff)) {
+ field_flags |= I40EVF_CLOUD_FIELD_IIP;
+ } else {
+ dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
+ be32_to_cpu(mask->dst));
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ if (mask->src) {
+ if (mask->src == cpu_to_be32(0xffffffff)) {
+ field_flags |= I40EVF_CLOUD_FIELD_IIP;
+ } else {
+ dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
+ be32_to_cpu(mask->src));
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ if (field_flags & I40EVF_CLOUD_FIELD_TEN_ID) {
+ dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
+ return I40E_ERR_CONFIG;
+ }
+ if (key->dst) {
+ vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
+ vf->data.tcp_spec.dst_ip[0] = key->dst;
+ }
+ if (key->src) {
+ vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
+ vf->data.tcp_spec.src_ip[0] = key->src;
+ }
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_dissector_key_ipv6_addrs *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ f->key);
+ struct flow_dissector_key_ipv6_addrs *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_IPV6_ADDRS,
+ f->mask);
+
+ /* validate mask, make sure it is not IPV6_ADDR_ANY */
+ if (ipv6_addr_any(&mask->dst)) {
+ dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
+ IPV6_ADDR_ANY);
+ return I40E_ERR_CONFIG;
+ }
+
+ /* src and dest IPv6 address should not be LOOPBACK
+ * (0:0:0:0:0:0:0:1) which can be represented as ::1
+ */
+ if (ipv6_addr_loopback(&key->dst) ||
+ ipv6_addr_loopback(&key->src)) {
+ dev_err(&adapter->pdev->dev,
+ "ipv6 addr should not be loopback\n");
+ return I40E_ERR_CONFIG;
+ }
+ if (!ipv6_addr_any(&mask->dst) || !ipv6_addr_any(&mask->src))
+ field_flags |= I40EVF_CLOUD_FIELD_IIP;
+
+ for (i = 0; i < 4; i++)
+ vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
+ memcpy(&vf->data.tcp_spec.dst_ip, &key->dst.s6_addr32,
+ sizeof(vf->data.tcp_spec.dst_ip));
+ for (i = 0; i < 4; i++)
+ vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
+ memcpy(&vf->data.tcp_spec.src_ip, &key->src.s6_addr32,
+ sizeof(vf->data.tcp_spec.src_ip));
+ }
+ if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_dissector_key_ports *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ f->key);
+ struct flow_dissector_key_ports *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_PORTS,
+ f->mask);
+
+ if (mask->src) {
+ if (mask->src == cpu_to_be16(0xffff)) {
+ field_flags |= I40EVF_CLOUD_FIELD_IIP;
+ } else {
+ dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
+ be16_to_cpu(mask->src));
+ return I40E_ERR_CONFIG;
+ }
+ }
+
+ if (mask->dst) {
+ if (mask->dst == cpu_to_be16(0xffff)) {
+ field_flags |= I40EVF_CLOUD_FIELD_IIP;
+ } else {
+ dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
+ be16_to_cpu(mask->dst));
+ return I40E_ERR_CONFIG;
+ }
+ }
+ if (key->dst) {
+ vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
+ vf->data.tcp_spec.dst_port = key->dst;
+ }
+
+ if (key->src) {
+ vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
+ vf->data.tcp_spec.src_port = key->src;
+ }
+ }
+ vf->field_flags = field_flags;
+
+ return 0;
+}
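
The parser above accepts only exact matches: each mask must be all-zero (field ignored) or all-ones (field matched in full), and anything in between is rejected as I40E_ERR_CONFIG. A small standalone sketch of that rule for one big-endian IPv4 field, using htonl() in place of the kernel's cpu_to_be32():

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* 0 = field ignored, 1 = exact match, -1 = unsupported partial mask */
static int classify_mask(uint32_t mask_be)
{
	if (!mask_be)
		return 0;
	if (mask_be == htonl(0xffffffff))
		return 1;
	return -1;
}

int main(void)
{
	printf("%d\n", classify_mask(0));                  /* 0: ignored */
	printf("%d\n", classify_mask(htonl(0xffffffff)));  /* 1: exact */
	printf("%d\n", classify_mask(htonl(0xffffff00)));  /* -1: /24 rejected */
	return 0;
}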
+
+/**
+ * i40evf_handle_tclass - Forward to a traffic class on the device
+ * @adapter: board private structure
+ * @tc: traffic class index on the device
+ * @filter: pointer to cloud filter structure
+ */
+static int i40evf_handle_tclass(struct i40evf_adapter *adapter, u32 tc,
+ struct i40evf_cloud_filter *filter)
+{
+ if (tc == 0)
+ return 0;
+ if (tc < adapter->num_tc) {
+ if (!filter->f.data.tcp_spec.dst_port) {
+ dev_err(&adapter->pdev->dev,
+ "Specify destination port to redirect to traffic class other than TC0\n");
+ return -EINVAL;
+ }
+ }
+ /* redirect to a traffic class on the same device */
+ filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
+ filter->f.action_meta = tc;
+ return 0;
+}
+
+/**
+ * i40evf_configure_clsflower - Add tc flower filters
+ * @adapter: board private structure
+ * @cls_flower: Pointer to struct tc_cls_flower_offload
+ */
+static int i40evf_configure_clsflower(struct i40evf_adapter *adapter,
+ struct tc_cls_flower_offload *cls_flower)
+{
+ int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
+ struct i40evf_cloud_filter *filter = NULL;
+ int err = 0, count = 50;
+
+ while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section)) {
+ udelay(1);
+ if (--count == 0)
+ return -EINVAL;
+ }
+
+ if (tc < 0) {
+ dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
+ err = -EINVAL;
+ goto clearout;
+ }
+
+ filter = kzalloc(sizeof(*filter), GFP_KERNEL);
+ if (!filter) {
+ err = -ENOMEM;
+ goto clearout;
+ }
+ filter->cookie = cls_flower->cookie;
+
+ /* set the mask to all zeroes to begin with */
+ memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
+ /* start out with flow type and eth type IPv4 */
+ filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
+ err = i40evf_parse_cls_flower(adapter, cls_flower, filter);
+ if (err < 0)
+ goto err;
+
+ err = i40evf_handle_tclass(adapter, tc, filter);
+ if (err < 0)
+ goto err;
+
+ /* add filter to the list */
+ spin_lock_bh(&adapter->cloud_filter_list_lock);
+ list_add_tail(&filter->list, &adapter->cloud_filter_list);
+ adapter->num_cloud_filters++;
+ filter->add = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
+ spin_unlock_bh(&adapter->cloud_filter_list_lock);
+err:
+ if (err)
+ kfree(filter);
+clearout:
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ return err;
+}
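
The bounded busy-wait at the top of i40evf_configure_clsflower() is a bit-lock with a retry budget: spin on test_and_set_bit() and give up after 50 tries. A userspace sketch of the same pattern, with C11 atomics standing in for the kernel bitops:

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag crit = ATOMIC_FLAG_INIT;

/* Try to take the bit-lock, giving up after 'budget' spins. */
static int try_critical(int budget)
{
	while (atomic_flag_test_and_set(&crit)) {
		if (--budget == 0)
			return -1;	/* caller bails out, as with -EINVAL above */
	}
	return 0;
}

int main(void)
{
	if (try_critical(50) == 0) {
		/* ... critical work ... */
		atomic_flag_clear(&crit);	/* mirrors clear_bit() */
		puts("acquired and released");
	}
	return 0;
}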
+
+/**
+ * i40evf_find_cf - Find the cloud filter in the list
+ * @adapter: Board private structure
+ * @cookie: filter specific cookie
+ *
+ * Returns ptr to the filter object or NULL. Must be called while holding the
+ * cloud_filter_list_lock.
+ */
+static struct i40evf_cloud_filter *i40evf_find_cf(struct i40evf_adapter *adapter,
+ unsigned long *cookie)
+{
+ struct i40evf_cloud_filter *filter = NULL;
+
+ if (!cookie)
+ return NULL;
+
+ list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
+ if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
+ return filter;
+ }
+ return NULL;
+}
+
+/**
+ * i40evf_delete_clsflower - Remove tc flower filters
+ * @adapter: board private structure
+ * @cls_flower: Pointer to struct tc_cls_flower_offload
+ */
+static int i40evf_delete_clsflower(struct i40evf_adapter *adapter,
+ struct tc_cls_flower_offload *cls_flower)
+{
+ struct i40evf_cloud_filter *filter = NULL;
+ int err = 0;
+
+ spin_lock_bh(&adapter->cloud_filter_list_lock);
+ filter = i40evf_find_cf(adapter, &cls_flower->cookie);
+ if (filter) {
+ filter->del = true;
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
+ } else {
+ err = -EINVAL;
+ }
+ spin_unlock_bh(&adapter->cloud_filter_list_lock);
+
+ return err;
+}
+
+/**
+ * i40evf_setup_tc_cls_flower - flower classifier offloads
+ * @adapter: board private structure
+ * @cls_flower: pointer to struct tc_cls_flower_offload
+ */
+static int i40evf_setup_tc_cls_flower(struct i40evf_adapter *adapter,
+ struct tc_cls_flower_offload *cls_flower)
+{
+ if (cls_flower->common.chain_index)
+ return -EOPNOTSUPP;
+
+ switch (cls_flower->command) {
+ case TC_CLSFLOWER_REPLACE:
+ return i40evf_configure_clsflower(adapter, cls_flower);
+ case TC_CLSFLOWER_DESTROY:
+ return i40evf_delete_clsflower(adapter, cls_flower);
+ case TC_CLSFLOWER_STATS:
+ return -EOPNOTSUPP;
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * i40evf_setup_tc_block_cb - block callback for tc
+ * @type: type of offload
+ * @type_data: offload data
+ * @cb_priv: board private structure passed at block registration
+ *
+ * This function is the block callback for traffic classes
+ **/
+static int i40evf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return i40evf_setup_tc_cls_flower(cb_priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * i40evf_setup_tc_block - register callbacks for tc
+ * @dev: network interface device structure
+ * @f: tc offload data
+ *
+ * This function registers block callbacks for tc
+ * offloads
+ **/
+static int i40evf_setup_tc_block(struct net_device *dev,
+ struct tc_block_offload *f)
+{
+ struct i40evf_adapter *adapter = netdev_priv(dev);
+
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block, i40evf_setup_tc_block_cb,
+ adapter, adapter);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, i40evf_setup_tc_block_cb,
+ adapter);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * i40evf_setup_tc - configure multiple traffic classes
+ * @netdev: network interface device structure
+ * @type: type of offload
+ * @type_data: tc offload data
+ *
+ * This function is the callback to ndo_setup_tc in the
+ * netdev_ops.
+ *
+ * Returns 0 on success
+ **/
+static int i40evf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_MQPRIO:
+ return __i40evf_setup_tc(netdev, type_data);
+ case TC_SETUP_BLOCK:
+ return i40evf_setup_tc_block(netdev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
* i40evf_open - Called when a network interface is made active
* @netdev: network interface device structure
*
@@ -2236,7 +3026,12 @@ static int i40evf_open(struct net_device *netdev)
if (err)
goto err_req_irq;
+ spin_lock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_add_filter(adapter, adapter->hw.mac.addr);
+
+ spin_unlock_bh(&adapter->mac_vlan_list_lock);
+
i40evf_configure(adapter);
i40evf_up_complete(adapter);
@@ -2457,6 +3252,7 @@ static const struct net_device_ops i40evf_netdev_ops = {
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = i40evf_netpoll,
#endif
+ .ndo_setup_tc = i40evf_setup_tc,
};
/**
@@ -2571,6 +3367,9 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX);
+ /* Enable cloud filter if ADQ is supported */
+ if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
+ hw_features |= NETIF_F_HW_TC;
netdev->hw_features |= hw_features;
@@ -2938,9 +3737,11 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mutex_init(&hw->aq.arq_mutex);
spin_lock_init(&adapter->mac_vlan_list_lock);
+ spin_lock_init(&adapter->cloud_filter_list_lock);
INIT_LIST_HEAD(&adapter->mac_filter_list);
INIT_LIST_HEAD(&adapter->vlan_filter_list);
+ INIT_LIST_HEAD(&adapter->cloud_filter_list);
INIT_WORK(&adapter->reset_task, i40evf_reset_task);
INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
@@ -3065,7 +3866,9 @@ static void i40evf_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40evf_vlan_filter *vlf, *vlftmp;
struct i40evf_mac_filter *f, *ftmp;
+ struct i40evf_cloud_filter *cf, *cftmp;
struct i40e_hw *hw = &adapter->hw;
int err;
/* Indicate we are in remove and not to run reset_task */
@@ -3087,6 +3890,7 @@ static void i40evf_remove(struct pci_dev *pdev)
/* Shut down all the garbage mashers on the detention level */
adapter->state = __I40EVF_REMOVE;
adapter->aq_required = 0;
+ adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
i40evf_request_reset(adapter);
msleep(50);
/* If the FW isn't responding, kick it once, but only once. */
@@ -3127,13 +3931,21 @@ static void i40evf_remove(struct pci_dev *pdev)
list_del(&f->list);
kfree(f);
}
- list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
- list_del(&f->list);
- kfree(f);
+ list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
+ list) {
+ list_del(&vlf->list);
+ kfree(vlf);
}
spin_unlock_bh(&adapter->mac_vlan_list_lock);
+ spin_lock_bh(&adapter->cloud_filter_list_lock);
+ list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
+ list_del(&cf->list);
+ kfree(cf);
+ }
+ spin_unlock_bh(&adapter->cloud_filter_list_lock);
+
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index 50ce0d6c09ef..3c76c817ca1a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -161,7 +161,8 @@ int i40evf_send_vf_config_msg(struct i40evf_adapter *adapter)
VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 |
VIRTCHNL_VF_OFFLOAD_ENCAP |
VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM |
- VIRTCHNL_VF_OFFLOAD_REQ_QUEUES;
+ VIRTCHNL_VF_OFFLOAD_REQ_QUEUES |
+ VIRTCHNL_VF_OFFLOAD_ADQ;
adapter->current_op = VIRTCHNL_OP_GET_VF_RESOURCES;
adapter->aq_required &= ~I40EVF_FLAG_AQ_GET_CONFIG;
@@ -344,6 +345,7 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter)
void i40evf_map_queues(struct i40evf_adapter *adapter)
{
struct virtchnl_irq_map_info *vimi;
+ struct virtchnl_vector_map *vecmap;
int v_idx, q_vectors, len;
struct i40e_q_vector *q_vector;
@@ -367,17 +369,22 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
vimi->num_vectors = adapter->num_msix_vectors;
/* Queue vectors first */
for (v_idx = 0; v_idx < q_vectors; v_idx++) {
- q_vector = adapter->q_vectors + v_idx;
- vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
- vimi->vecmap[v_idx].vector_id = v_idx + NONQ_VECS;
- vimi->vecmap[v_idx].txq_map = q_vector->ring_mask;
- vimi->vecmap[v_idx].rxq_map = q_vector->ring_mask;
+ q_vector = &adapter->q_vectors[v_idx];
+ vecmap = &vimi->vecmap[v_idx];
+
+ vecmap->vsi_id = adapter->vsi_res->vsi_id;
+ vecmap->vector_id = v_idx + NONQ_VECS;
+ vecmap->txq_map = q_vector->ring_mask;
+ vecmap->rxq_map = q_vector->ring_mask;
+ vecmap->rxitr_idx = I40E_RX_ITR;
+ vecmap->txitr_idx = I40E_TX_ITR;
}
/* Misc vector last - this is only for AdminQ messages */
- vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
- vimi->vecmap[v_idx].vector_id = 0;
- vimi->vecmap[v_idx].txq_map = 0;
- vimi->vecmap[v_idx].rxq_map = 0;
+ vecmap = &vimi->vecmap[v_idx];
+ vecmap->vsi_id = adapter->vsi_res->vsi_id;
+ vecmap->vector_id = 0;
+ vecmap->txq_map = 0;
+ vecmap->rxq_map = 0;
adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
i40evf_send_pf_msg(adapter, VIRTCHNL_OP_CONFIG_IRQ_MAP,
@@ -459,7 +466,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
more = true;
}
- veal = kzalloc(len, GFP_KERNEL);
+ veal = kzalloc(len, GFP_ATOMIC);
if (!veal) {
spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
@@ -532,7 +539,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
(count * sizeof(struct virtchnl_ether_addr));
more = true;
}
- veal = kzalloc(len, GFP_KERNEL);
+ veal = kzalloc(len, GFP_ATOMIC);
if (!veal) {
spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
@@ -606,7 +613,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
(count * sizeof(u16));
more = true;
}
- vvfl = kzalloc(len, GFP_KERNEL);
+ vvfl = kzalloc(len, GFP_ATOMIC);
if (!vvfl) {
spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
@@ -678,7 +685,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
(count * sizeof(u16));
more = true;
}
- vvfl = kzalloc(len, GFP_KERNEL);
+ vvfl = kzalloc(len, GFP_ATOMIC);
if (!vvfl) {
spin_unlock_bh(&adapter->mac_vlan_list_lock);
return;
@@ -967,6 +974,205 @@ static void i40evf_print_link_message(struct i40evf_adapter *adapter)
}
/**
+ * i40evf_enable_channels
+ * @adapter: adapter structure
+ *
+ * Request that the PF enable channels as specified by
+ * the user via the tc tool.
+ **/
+void i40evf_enable_channels(struct i40evf_adapter *adapter)
+{
+ struct virtchnl_tc_info *vti = NULL;
+ u16 len;
+ int i;
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+
+ len = (adapter->num_tc * sizeof(struct virtchnl_channel_info)) +
+ sizeof(struct virtchnl_tc_info);
+
+ vti = kzalloc(len, GFP_KERNEL);
+ if (!vti)
+ return;
+ vti->num_tc = adapter->num_tc;
+ for (i = 0; i < vti->num_tc; i++) {
+ vti->list[i].count = adapter->ch_config.ch_info[i].count;
+ vti->list[i].offset = adapter->ch_config.ch_info[i].offset;
+ vti->list[i].pad = 0;
+ vti->list[i].max_tx_rate =
+ adapter->ch_config.ch_info[i].max_tx_rate;
+ }
+
+ adapter->ch_config.state = __I40EVF_TC_RUNNING;
+ adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->current_op = VIRTCHNL_OP_ENABLE_CHANNELS;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_CHANNELS;
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_ENABLE_CHANNELS,
+ (u8 *)vti, len);
+ kfree(vti);
+}
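
The length computation in i40evf_enable_channels() is the usual idiom for a variable-sized message: header size plus num_tc trailing channel entries. A hedged sketch with stand-in types (the real layouts are defined in virtchnl.h):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct chan_info { uint16_t count, offset; uint32_t max_tx_rate; };

/* Stand-in for virtchnl_tc_info: a count followed by a flexible array */
struct tc_msg {
	uint32_t num_tc;
	struct chan_info list[];
};

int main(void)
{
	uint32_t num_tc = 2;
	size_t len = sizeof(struct tc_msg) + num_tc * sizeof(struct chan_info);
	struct tc_msg *msg = calloc(1, len);

	if (!msg)
		return 1;
	msg->num_tc = num_tc;
	msg->list[0] = (struct chan_info){ .count = 4, .offset = 0 };
	msg->list[1] = (struct chan_info){ .count = 4, .offset = 4 };
	printf("message length: %zu bytes\n", len);
	free(msg);
	return 0;
}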
+
+/**
+ * i40evf_disable_channels
+ * @adapter: adapter structure
+ *
+ * Request that the PF disable channels that are configured
+ **/
+void i40evf_disable_channels(struct i40evf_adapter *adapter)
+{
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot configure mqprio, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+
+ adapter->ch_config.state = __I40EVF_TC_INVALID;
+ adapter->flags |= I40EVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->current_op = VIRTCHNL_OP_DISABLE_CHANNELS;
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_CHANNELS;
+ i40evf_send_pf_msg(adapter, VIRTCHNL_OP_DISABLE_CHANNELS,
+ NULL, 0);
+}
+
+/**
+ * i40evf_print_cloud_filter
+ * @adapter: adapter structure
+ * @f: cloud filter to print
+ *
+ * Print the cloud filter
+ **/
+static void i40evf_print_cloud_filter(struct i40evf_adapter *adapter,
+ struct virtchnl_filter *f)
+{
+ switch (f->flow_type) {
+ case VIRTCHNL_TCP_V4_FLOW:
+ dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI4 src_ip %pI4 dst_port %hu src_port %hu\n",
+ &f->data.tcp_spec.dst_mac,
+ &f->data.tcp_spec.src_mac,
+ ntohs(f->data.tcp_spec.vlan_id),
+ &f->data.tcp_spec.dst_ip[0],
+ &f->data.tcp_spec.src_ip[0],
+ ntohs(f->data.tcp_spec.dst_port),
+ ntohs(f->data.tcp_spec.src_port));
+ break;
+ case VIRTCHNL_TCP_V6_FLOW:
+ dev_info(&adapter->pdev->dev, "dst_mac: %pM src_mac: %pM vlan_id: %hu dst_ip: %pI6 src_ip %pI6 dst_port %hu src_port %hu\n",
+ &f->data.tcp_spec.dst_mac,
+ &f->data.tcp_spec.src_mac,
+ ntohs(f->data.tcp_spec.vlan_id),
+ &f->data.tcp_spec.dst_ip,
+ &f->data.tcp_spec.src_ip,
+ ntohs(f->data.tcp_spec.dst_port),
+ ntohs(f->data.tcp_spec.src_port));
+ break;
+ }
+}
+
+/**
+ * i40evf_add_cloud_filter
+ * @adapter: adapter structure
+ *
+ * Request that the PF add cloud filters as specified
+ * by the user via the tc tool.
+ **/
+void i40evf_add_cloud_filter(struct i40evf_adapter *adapter)
+{
+ struct i40evf_cloud_filter *cf;
+ struct virtchnl_filter *f;
+ int len = 0, count = 0;
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot add cloud filter, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+ list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
+ if (cf->add) {
+ count++;
+ break;
+ }
+ }
+ if (!count) {
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_CLOUD_FILTER;
+ return;
+ }
+ adapter->current_op = VIRTCHNL_OP_ADD_CLOUD_FILTER;
+
+ len = sizeof(struct virtchnl_filter);
+ f = kzalloc(len, GFP_KERNEL);
+ if (!f)
+ return;
+
+ list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
+ if (cf->add) {
+ memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
+ cf->add = false;
+ cf->state = __I40EVF_CF_ADD_PENDING;
+ i40evf_send_pf_msg(adapter,
+ VIRTCHNL_OP_ADD_CLOUD_FILTER,
+ (u8 *)f, len);
+ }
+ }
+ kfree(f);
+}
+
+/**
+ * i40evf_del_cloud_filter
+ * @adapter: adapter structure
+ *
+ * Request that the PF delete cloud filters as specified
+ * by the user via the tc tool.
+ **/
+void i40evf_del_cloud_filter(struct i40evf_adapter *adapter)
+{
+ struct i40evf_cloud_filter *cf, *cftmp;
+ struct virtchnl_filter *f;
+ int len = 0, count = 0;
+
+ if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
+ /* bail because we already have a command pending */
+ dev_err(&adapter->pdev->dev, "Cannot remove cloud filter, command %d pending\n",
+ adapter->current_op);
+ return;
+ }
+ list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
+ if (cf->del) {
+ count++;
+ break;
+ }
+ }
+ if (!count) {
+ adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_CLOUD_FILTER;
+ return;
+ }
+ adapter->current_op = VIRTCHNL_OP_DEL_CLOUD_FILTER;
+
+ len = sizeof(struct virtchnl_filter);
+ f = kzalloc(len, GFP_KERNEL);
+ if (!f)
+ return;
+
+ list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
+ if (cf->del) {
+ memcpy(f, &cf->f, sizeof(struct virtchnl_filter));
+ cf->del = false;
+ cf->state = __I40EVF_CF_DEL_PENDING;
+ i40evf_send_pf_msg(adapter,
+ VIRTCHNL_OP_DEL_CLOUD_FILTER,
+ (u8 *)f, len);
+ }
+ }
+ kfree(f);
+}
+
+/**
* i40evf_request_reset
* @adapter: adapter structure
*
@@ -1011,14 +1217,25 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
if (adapter->link_up == link_up)
break;
- /* If we get link up message and start queues before
- * our queues are configured it will trigger a TX hang.
- * In that case, just ignore the link status message,
- * we'll get another one after we enable queues and
- * actually prepared to send traffic.
- */
- if (link_up && adapter->state != __I40EVF_RUNNING)
- break;
+ if (link_up) {
+ /* If we get a link up message and start queues
+ * before our queues are configured it will
+ * trigger a TX hang. In that case, just ignore
+ * the link status message; we'll get another one
+ * after we enable queues and are actually
+ * prepared to send traffic.
+ */
+ if (adapter->state != __I40EVF_RUNNING)
+ break;
+
+ /* For ADq enabled VF, we reconfigure VSIs and
+ * re-allocate queues. Hence wait till all
+ * queues are enabled.
+ */
+ if (adapter->flags &
+ I40EVF_FLAG_QUEUES_DISABLED)
+ break;
+ }
adapter->link_up = link_up;
if (link_up) {
@@ -1031,7 +1248,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
i40evf_print_link_message(adapter);
break;
case VIRTCHNL_EVENT_RESET_IMPENDING:
- dev_info(&adapter->pdev->dev, "PF reset warning received\n");
+ dev_info(&adapter->pdev->dev, "Reset warning received from the PF\n");
if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
adapter->flags |= I40EVF_FLAG_RESET_PENDING;
dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
@@ -1063,6 +1280,57 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
dev_err(&adapter->pdev->dev, "Failed to delete MAC filter, error %s\n",
i40evf_stat_str(&adapter->hw, v_retval));
break;
+ case VIRTCHNL_OP_ENABLE_CHANNELS:
+ dev_err(&adapter->pdev->dev, "Failed to configure queue channels, error %s\n",
+ i40evf_stat_str(&adapter->hw, v_retval));
+ adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->ch_config.state = __I40EVF_TC_INVALID;
+ netdev_reset_tc(netdev);
+ netif_tx_start_all_queues(netdev);
+ break;
+ case VIRTCHNL_OP_DISABLE_CHANNELS:
+ dev_err(&adapter->pdev->dev, "Failed to disable queue channels, error %s\n",
+ i40evf_stat_str(&adapter->hw, v_retval));
+ adapter->flags &= ~I40EVF_FLAG_REINIT_ITR_NEEDED;
+ adapter->ch_config.state = __I40EVF_TC_RUNNING;
+ netif_tx_start_all_queues(netdev);
+ break;
+ case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
+ struct i40evf_cloud_filter *cf, *cftmp;
+
+ list_for_each_entry_safe(cf, cftmp,
+ &adapter->cloud_filter_list,
+ list) {
+ if (cf->state == __I40EVF_CF_ADD_PENDING) {
+ cf->state = __I40EVF_CF_INVALID;
+ dev_info(&adapter->pdev->dev, "Failed to add cloud filter, error %s\n",
+ i40evf_stat_str(&adapter->hw,
+ v_retval));
+ i40evf_print_cloud_filter(adapter,
+ &cf->f);
+ list_del(&cf->list);
+ kfree(cf);
+ adapter->num_cloud_filters--;
+ }
+ }
+ }
+ break;
+ case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
+ struct i40evf_cloud_filter *cf;
+
+ list_for_each_entry(cf, &adapter->cloud_filter_list,
+ list) {
+ if (cf->state == __I40EVF_CF_DEL_PENDING) {
+ cf->state = __I40EVF_CF_ACTIVE;
+ dev_info(&adapter->pdev->dev, "Failed to del cloud filter, error %s\n",
+ i40evf_stat_str(&adapter->hw,
+ v_retval));
+ i40evf_print_cloud_filter(adapter,
+ &cf->f);
+ }
+ }
+ }
+ break;
default:
dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n",
v_retval,
@@ -1102,6 +1370,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
case VIRTCHNL_OP_ENABLE_QUEUES:
/* enable transmits */
i40evf_irq_enable(adapter, true);
+ adapter->flags &= ~I40EVF_FLAG_QUEUES_DISABLED;
break;
case VIRTCHNL_OP_DISABLE_QUEUES:
i40evf_free_all_tx_resources(adapter);
@@ -1156,6 +1425,29 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
}
}
break;
+ case VIRTCHNL_OP_ADD_CLOUD_FILTER: {
+ struct i40evf_cloud_filter *cf;
+
+ list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
+ if (cf->state == __I40EVF_CF_ADD_PENDING)
+ cf->state = __I40EVF_CF_ACTIVE;
+ }
+ }
+ break;
+ case VIRTCHNL_OP_DEL_CLOUD_FILTER: {
+ struct i40evf_cloud_filter *cf, *cftmp;
+
+ list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
+ list) {
+ if (cf->state == __I40EVF_CF_DEL_PENDING) {
+ cf->state = __I40EVF_CF_INVALID;
+ list_del(&cf->list);
+ kfree(cf);
+ adapter->num_cloud_filters--;
+ }
+ }
+ }
+ break;
default:
if (adapter->current_op && (v_opcode != adapter->current_op))
dev_warn(&adapter->pdev->dev, "Expected response %d from PF, received %d\n",
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 221f15803480..89edb9fae6f0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -3059,6 +3059,8 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
for (i = 0; i < reta_entries; i++)
adapter->rss_indir_tbl[i] = indir[i];
+
+ ixgbe_store_reta(adapter);
}
/* Fill out the rss hash key */
@@ -3067,8 +3069,6 @@ static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
ixgbe_store_key(adapter);
}
- ixgbe_store_reta(adapter);
-
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 4242f0213e46..ed4cbe94c355 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -58,7 +58,6 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter)
return false;
/* start at VMDq register offset for SR-IOV enabled setups */
- pool = 0;
reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask);
for (i = 0, pool = 0; i < adapter->num_rx_queues; i++, reg_idx++) {
/* If we are greater than indices move to next pool */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 9fc063af233c..85369423452d 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7711,7 +7711,8 @@ static void ixgbe_service_task(struct work_struct *work)
if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) {
ixgbe_ptp_overflow_check(adapter);
- ixgbe_ptp_rx_hang(adapter);
+ if (adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)
+ ixgbe_ptp_rx_hang(adapter);
ixgbe_ptp_tx_hang(adapter);
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 4400e49090b4..e7623fed42da 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -94,6 +94,13 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
+static const char ixgbevf_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define IXGBEVF_PRIV_FLAGS_LEGACY_RX BIT(0)
+ "legacy-rx",
+};
+
+#define IXGBEVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbevf_priv_flags_strings)
+
static int ixgbevf_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *cmd)
{
@@ -241,6 +248,8 @@ static void ixgbevf_get_drvinfo(struct net_device *netdev,
sizeof(drvinfo->version));
strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
+
+ drvinfo->n_priv_flags = IXGBEVF_PRIV_FLAGS_STR_LEN;
}
static void ixgbevf_get_ringparam(struct net_device *netdev,
@@ -392,6 +401,8 @@ static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset)
return IXGBEVF_TEST_LEN;
case ETH_SS_STATS:
return IXGBEVF_STATS_LEN;
+ case ETH_SS_PRIV_FLAGS:
+ return IXGBEVF_PRIV_FLAGS_STR_LEN;
default:
return -EINVAL;
}
@@ -496,6 +507,10 @@ static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
p += ETH_GSTRING_LEN;
}
break;
+ case ETH_SS_PRIV_FLAGS:
+ memcpy(data, ixgbevf_priv_flags_strings,
+ IXGBEVF_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
+ break;
}
}
@@ -888,6 +903,37 @@ static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
return err;
}
+static u32 ixgbevf_get_priv_flags(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ u32 priv_flags = 0;
+
+ if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
+ priv_flags |= IXGBEVF_PRIV_FLAGS_LEGACY_RX;
+
+ return priv_flags;
+}
+
+static int ixgbevf_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ unsigned int flags = adapter->flags;
+
+ flags &= ~IXGBEVF_FLAGS_LEGACY_RX;
+ if (priv_flags & IXGBEVF_PRIV_FLAGS_LEGACY_RX)
+ flags |= IXGBEVF_FLAGS_LEGACY_RX;
+
+ if (flags != adapter->flags) {
+ adapter->flags = flags;
+
+ /* reset interface to repopulate queues */
+ if (netif_running(netdev))
+ ixgbevf_reinit_locked(adapter);
+ }
+
+ return 0;
+}
+
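Ethtool private flags pair a string table with bit positions: the string at index n names BIT(n) in the get/set value, and set_priv_flags applies a read-modify-write so unrelated flags survive. A short sketch of that mapping, assuming only the single legacy-rx flag added here:

#include <stdio.h>

#define BIT(n) (1U << (n))
#define FLAG_LEGACY_RX BIT(0)	/* string index 0 <-> BIT(0) */

static const char *priv_flags_strings[] = { "legacy-rx" };

/* read-modify-write: clear the managed bit, then set it if requested */
static unsigned int apply_priv_flags(unsigned int flags,
				     unsigned int requested)
{
	flags &= ~FLAG_LEGACY_RX;
	if (requested & FLAG_LEGACY_RX)
		flags |= FLAG_LEGACY_RX;
	return flags;
}

int main(void)
{
	unsigned int flags = 0;

	flags = apply_priv_flags(flags, FLAG_LEGACY_RX);
	printf("%s: %s\n", priv_flags_strings[0],
	       (flags & BIT(0)) ? "on" : "off");
	return 0;
}
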
static const struct ethtool_ops ixgbevf_ethtool_ops = {
.get_drvinfo = ixgbevf_get_drvinfo,
.get_regs_len = ixgbevf_get_regs_len,
@@ -909,6 +955,8 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = {
.get_rxfh_key_size = ixgbevf_get_rxfh_key_size,
.get_rxfh = ixgbevf_get_rxfh,
.get_link_ksettings = ixgbevf_get_link_ksettings,
+ .get_priv_flags = ixgbevf_get_priv_flags,
+ .set_priv_flags = ixgbevf_set_priv_flags,
};
void ixgbevf_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index f6952425c87d..f65ca156af2d 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
@@ -89,19 +89,15 @@ struct ixgbevf_rx_queue_stats {
};
enum ixgbevf_ring_state_t {
+ __IXGBEVF_RX_3K_BUFFER,
+ __IXGBEVF_RX_BUILD_SKB_ENABLED,
__IXGBEVF_TX_DETECT_HANG,
__IXGBEVF_HANG_CHECK_ARMED,
};
-#define check_for_tx_hang(ring) \
- test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
-#define set_check_for_tx_hang(ring) \
- set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
-#define clear_check_for_tx_hang(ring) \
- clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
-
struct ixgbevf_ring {
struct ixgbevf_ring *next;
+ struct ixgbevf_q_vector *q_vector; /* backpointer to q_vector */
struct net_device *netdev;
struct device *dev;
void *desc; /* descriptor ring memory */
@@ -133,7 +129,7 @@ struct ixgbevf_ring {
*/
u16 reg_idx;
int queue_index; /* needed for multiqueue queue management */
-};
+} ____cacheline_internodealigned_in_smp;
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
@@ -156,12 +152,20 @@ struct ixgbevf_ring {
/* Supported Rx Buffer Sizes */
#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
#define IXGBEVF_RXBUFFER_2048 2048
+#define IXGBEVF_RXBUFFER_3072 3072
#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
-#define IXGBEVF_RX_BUFSZ IXGBEVF_RXBUFFER_2048
#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+#define IXGBEVF_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
+#if (PAGE_SIZE < 8192)
+#define IXGBEVF_MAX_FRAME_BUILD_SKB \
+ (SKB_WITH_OVERHEAD(IXGBEVF_RXBUFFER_2048) - IXGBEVF_SKB_PAD)
+#else
+#define IXGBEVF_MAX_FRAME_BUILD_SKB IXGBEVF_RXBUFFER_2048
+#endif
+
#define IXGBE_TX_FLAGS_CSUM BIT(0)
#define IXGBE_TX_FLAGS_VLAN BIT(1)
#define IXGBE_TX_FLAGS_TSO BIT(2)
@@ -170,6 +174,50 @@ struct ixgbevf_ring {
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
+#define ring_uses_large_buffer(ring) \
+ test_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
+#define set_ring_uses_large_buffer(ring) \
+ set_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
+#define clear_ring_uses_large_buffer(ring) \
+ clear_bit(__IXGBEVF_RX_3K_BUFFER, &(ring)->state)
+
+#define ring_uses_build_skb(ring) \
+ test_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state)
+#define set_ring_build_skb_enabled(ring) \
+ set_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state)
+#define clear_ring_build_skb_enabled(ring) \
+ clear_bit(__IXGBEVF_RX_BUILD_SKB_ENABLED, &(ring)->state)
+
+static inline unsigned int ixgbevf_rx_bufsz(struct ixgbevf_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring_uses_large_buffer(ring))
+ return IXGBEVF_RXBUFFER_3072;
+
+ if (ring_uses_build_skb(ring))
+ return IXGBEVF_MAX_FRAME_BUILD_SKB;
+#endif
+ return IXGBEVF_RXBUFFER_2048;
+}
+
+static inline unsigned int ixgbevf_rx_pg_order(struct ixgbevf_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring_uses_large_buffer(ring))
+ return 1;
+#endif
+ return 0;
+}
+
+#define ixgbevf_rx_pg_size(_ring) (PAGE_SIZE << ixgbevf_rx_pg_order(_ring))
+
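On 4 KiB pages the scheme above splits one page into two 2048-byte buffers, and switches to order-1 (8 KiB) pages when 3072-byte buffers are needed. A quick sketch of that sizing decision outside the kernel, with assumed constants matching the defines above:

#include <stdio.h>

#define PAGE_SZ    4096u	/* assumed small-page system */
#define RXBUF_2048 2048u
#define RXBUF_3072 3072u

struct ring { int large_buffer; };

static unsigned int rx_bufsz(const struct ring *r)
{
	return r->large_buffer ? RXBUF_3072 : RXBUF_2048;
}

static unsigned int rx_pg_order(const struct ring *r)
{
	return r->large_buffer ? 1 : 0;	/* order-1 page = 8 KiB */
}

int main(void)
{
	struct ring small = { 0 }, large = { 1 };

	printf("small: bufsz=%u page=%u\n",
	       rx_bufsz(&small), PAGE_SZ << rx_pg_order(&small));
	printf("large: bufsz=%u page=%u\n",
	       rx_bufsz(&large), PAGE_SZ << rx_pg_order(&large));
	return 0;
}
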
+#define check_for_tx_hang(ring) \
+ test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+ set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+ clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
+
struct ixgbevf_ring_container {
struct ixgbevf_ring *ring; /* pointer to linked list of rings */
unsigned int total_bytes; /* total bytes processed this int */
@@ -194,7 +242,11 @@ struct ixgbevf_q_vector {
u16 itr; /* Interrupt throttle rate written to EITR */
struct napi_struct napi;
struct ixgbevf_ring_container rx, tx;
+ struct rcu_head rcu; /* to avoid race with update stats on free */
char name[IFNAMSIZ + 9];
+
+ /* for dynamic allocation of rings associated with this q_vector */
+ struct ixgbevf_ring ring[0] ____cacheline_internodealigned_in_smp;
#ifdef CONFIG_NET_RX_BUSY_POLL
unsigned int state;
#define IXGBEVF_QV_STATE_IDLE 0
@@ -331,6 +383,8 @@ struct ixgbevf_adapter {
u32 *rss_key;
u8 rss_indir_tbl[IXGBEVF_X550_VFRETA_SIZE];
+ u32 flags;
+#define IXGBEVF_FLAGS_LEGACY_RX BIT(1)
};
enum ixbgevf_state_t {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 9b3d43d28106..f37307131eb6 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -130,6 +130,9 @@ static void ixgbevf_service_event_complete(struct ixgbevf_adapter *adapter)
static void ixgbevf_queue_reset_subtask(struct ixgbevf_adapter *adapter);
static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter);
+static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer);
+static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
+ struct ixgbevf_rx_buffer *old_buff);
static void ixgbevf_remove_adapter(struct ixgbe_hw *hw)
{
@@ -527,6 +530,49 @@ static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
skb->protocol = eth_type_trans(skb, rx_ring->netdev);
}
+static
+struct ixgbevf_rx_buffer *ixgbevf_get_rx_buffer(struct ixgbevf_ring *rx_ring,
+ const unsigned int size)
+{
+ struct ixgbevf_rx_buffer *rx_buffer;
+
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ prefetchw(rx_buffer->page);
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ size,
+ DMA_FROM_DEVICE);
+
+ rx_buffer->pagecnt_bias--;
+
+ return rx_buffer;
+}
+
+static void ixgbevf_put_rx_buffer(struct ixgbevf_ring *rx_ring,
+ struct ixgbevf_rx_buffer *rx_buffer)
+{
+ if (ixgbevf_can_reuse_rx_page(rx_buffer)) {
+ /* hand second half of page back to the ring */
+ ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ /* We are not reusing the buffer so unmap it and free
+ * any references we are holding to it
+ */
+ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
+ ixgbevf_rx_pg_size(rx_ring),
+ DMA_FROM_DEVICE,
+ IXGBEVF_RX_DMA_ATTR);
+ __page_frag_cache_drain(rx_buffer->page,
+ rx_buffer->pagecnt_bias);
+ }
+
+ /* clear contents of rx_buffer */
+ rx_buffer->page = NULL;
+}
+
/**
* ixgbevf_is_non_eop - process handling of non-EOP buffers
* @rx_ring: Rx ring being processed
@@ -554,32 +600,38 @@ static bool ixgbevf_is_non_eop(struct ixgbevf_ring *rx_ring,
return true;
}
+static inline unsigned int ixgbevf_rx_offset(struct ixgbevf_ring *rx_ring)
+{
+ return ring_uses_build_skb(rx_ring) ? IXGBEVF_SKB_PAD : 0;
+}
+
static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
struct ixgbevf_rx_buffer *bi)
{
struct page *page = bi->page;
- dma_addr_t dma = bi->dma;
+ dma_addr_t dma;
/* since we are recycling buffers we should seldom need to alloc */
if (likely(page))
return true;
/* alloc new page for storage */
- page = dev_alloc_page();
+ page = dev_alloc_pages(ixgbevf_rx_pg_order(rx_ring));
if (unlikely(!page)) {
rx_ring->rx_stats.alloc_rx_page_failed++;
return false;
}
/* map page for use */
- dma = dma_map_page_attrs(rx_ring->dev, page, 0, PAGE_SIZE,
+ dma = dma_map_page_attrs(rx_ring->dev, page, 0,
+ ixgbevf_rx_pg_size(rx_ring),
DMA_FROM_DEVICE, IXGBEVF_RX_DMA_ATTR);
/* if mapping failed free memory back to system since
* there isn't much point in holding memory we can't use
*/
if (dma_mapping_error(rx_ring->dev, dma)) {
- __free_page(page);
+ __free_pages(page, ixgbevf_rx_pg_order(rx_ring));
rx_ring->rx_stats.alloc_rx_page_failed++;
return false;
@@ -587,7 +639,7 @@ static bool ixgbevf_alloc_mapped_page(struct ixgbevf_ring *rx_ring,
bi->dma = dma;
bi->page = page;
- bi->page_offset = 0;
+ bi->page_offset = ixgbevf_rx_offset(rx_ring);
bi->pagecnt_bias = 1;
rx_ring->rx_stats.alloc_rx_page++;
@@ -621,7 +673,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, bi->dma,
bi->page_offset,
- IXGBEVF_RX_BUFSZ,
+ ixgbevf_rx_bufsz(rx_ring),
DMA_FROM_DEVICE);
/* Refresh the desc even if pkt_addr didn't change
@@ -734,11 +786,10 @@ static inline bool ixgbevf_page_is_reserved(struct page *page)
return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page);
}
-static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
- struct page *page,
- const unsigned int truesize)
+static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer)
{
- unsigned int pagecnt_bias = rx_buffer->pagecnt_bias--;
+ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias;
+ struct page *page = rx_buffer->page;
/* avoid re-using remote pages */
if (unlikely(ixgbevf_page_is_reserved(page)))
@@ -746,17 +797,13 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
#if (PAGE_SIZE < 8192)
/* if we are only owner of page we can reuse it */
- if (unlikely(page_ref_count(page) != pagecnt_bias))
+ if (unlikely((page_ref_count(page) - pagecnt_bias) > 1))
return false;
-
- /* flip page offset to other buffer */
- rx_buffer->page_offset ^= IXGBEVF_RX_BUFSZ;
-
#else
- /* move offset up to the next cache line */
- rx_buffer->page_offset += truesize;
+#define IXGBEVF_LAST_OFFSET \
+ (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBEVF_RXBUFFER_2048)
- if (rx_buffer->page_offset > (PAGE_SIZE - IXGBEVF_RX_BUFSZ))
+ if (rx_buffer->page_offset > IXGBEVF_LAST_OFFSET)
return false;
#endif
@@ -765,7 +812,7 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
* the pagecnt_bias and page count so that we fully restock the
* number of references the driver holds.
*/
- if (unlikely(pagecnt_bias == 1)) {
+ if (unlikely(!pagecnt_bias)) {
page_ref_add(page, USHRT_MAX);
rx_buffer->pagecnt_bias = USHRT_MAX;
}
@@ -777,127 +824,81 @@ static bool ixgbevf_can_reuse_rx_page(struct ixgbevf_rx_buffer *rx_buffer,
* ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add
- * @rx_desc: descriptor containing length of buffer written by hardware
* @skb: sk_buff to place the data into
+ * @size: size of buffer to be added
*
* This function will add the data contained in rx_buffer->page to the skb.
- * This is done either through a direct copy if the data in the buffer is
- * less than the skb header size, otherwise it will just attach the page as
- * a frag to the skb.
- *
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
**/
-static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
+static void ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
struct ixgbevf_rx_buffer *rx_buffer,
- u16 size,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ unsigned int size)
{
- struct page *page = rx_buffer->page;
- unsigned char *va = page_address(page) + rx_buffer->page_offset;
#if (PAGE_SIZE < 8192)
- unsigned int truesize = IXGBEVF_RX_BUFSZ;
+ unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
#else
- unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+ unsigned int truesize = ring_uses_build_skb(rx_ring) ?
+ SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size) :
+ SKB_DATA_ALIGN(size);
+#endif
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+ rx_buffer->page_offset, size, truesize);
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
#endif
- unsigned int pull_len;
-
- if (unlikely(skb_is_nonlinear(skb)))
- goto add_tail_frag;
-
- if (likely(size <= IXGBEVF_RX_HDR_SIZE)) {
- memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-
- /* page is not reserved, we can reuse buffer as is */
- if (likely(!ixgbevf_page_is_reserved(page)))
- return true;
-
- /* this page cannot be reused so discard it */
- return false;
- }
-
- /* we need the header to contain the greater of either ETH_HLEN or
- * 60 bytes if the skb->len is less than 60 for skb_pad.
- */
- pull_len = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
-
- /* align pull length to size of long to optimize memcpy performance */
- memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
-
- /* update all of the pointers */
- va += pull_len;
- size -= pull_len;
-
-add_tail_frag:
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- (unsigned long)va & ~PAGE_MASK, size, truesize);
-
- return ixgbevf_can_reuse_rx_page(rx_buffer, page, truesize);
}
-static struct sk_buff *ixgbevf_fetch_rx_buffer(struct ixgbevf_ring *rx_ring,
- union ixgbe_adv_rx_desc *rx_desc,
- struct sk_buff *skb)
+static
+struct sk_buff *ixgbevf_construct_skb(struct ixgbevf_ring *rx_ring,
+ struct ixgbevf_rx_buffer *rx_buffer,
+ union ixgbe_adv_rx_desc *rx_desc,
+ unsigned int size)
{
- struct ixgbevf_rx_buffer *rx_buffer;
- struct page *page;
- u16 size = le16_to_cpu(rx_desc->wb.upper.length);
-
- rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
- page = rx_buffer->page;
- prefetchw(page);
-
- /* we are reusing so sync this buffer for CPU use */
- dma_sync_single_range_for_cpu(rx_ring->dev,
- rx_buffer->dma,
- rx_buffer->page_offset,
- size,
- DMA_FROM_DEVICE);
-
- if (likely(!skb)) {
- void *page_addr = page_address(page) +
- rx_buffer->page_offset;
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(size);
+#endif
+ unsigned int headlen;
+ struct sk_buff *skb;
- /* prefetch first cache line of first page */
- prefetch(page_addr);
+ /* prefetch first cache line of first page */
+ prefetch(va);
#if L1_CACHE_BYTES < 128
- prefetch(page_addr + L1_CACHE_BYTES);
+ prefetch(va + L1_CACHE_BYTES);
#endif
- /* allocate a skb to store the frags */
- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
- IXGBEVF_RX_HDR_SIZE);
- if (unlikely(!skb)) {
- rx_ring->rx_stats.alloc_rx_buff_failed++;
- return NULL;
- }
+ /* allocate a skb to store the frags */
+ skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBEVF_RX_HDR_SIZE);
+ if (unlikely(!skb))
+ return NULL;
- /* we will be copying header into skb->data in
- * pskb_may_pull so it is in our interest to prefetch
- * it now to avoid a possible cache miss
- */
- prefetchw(skb->data);
- }
+ /* Determine available headroom for copy */
+ headlen = size;
+ if (headlen > IXGBEVF_RX_HDR_SIZE)
+ headlen = eth_get_headlen(va, IXGBEVF_RX_HDR_SIZE);
- /* pull page into skb */
- if (ixgbevf_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
- /* hand second half of page back to the ring */
- ixgbevf_reuse_rx_page(rx_ring, rx_buffer);
+ /* align pull length to size of long to optimize memcpy performance */
+ memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+
+ /* update all of the pointers */
+ size -= headlen;
+ if (size) {
+ skb_add_rx_frag(skb, 0, rx_buffer->page,
+ (va + headlen) - page_address(rx_buffer->page),
+ size, truesize);
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
} else {
- /* We are not reusing the buffer so unmap it and free
- * any references we are holding to it
- */
- dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
- PAGE_SIZE, DMA_FROM_DEVICE,
- IXGBEVF_RX_DMA_ATTR);
- __page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
+ rx_buffer->pagecnt_bias++;
}
- /* clear contents of buffer_info */
- rx_buffer->dma = 0;
- rx_buffer->page = NULL;
-
return skb;
}
@@ -909,6 +910,44 @@ static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, qmask);
}
+static struct sk_buff *ixgbevf_build_skb(struct ixgbevf_ring *rx_ring,
+ struct ixgbevf_rx_buffer *rx_buffer,
+ union ixgbe_adv_rx_desc *rx_desc,
+ unsigned int size)
+{
+ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = ixgbevf_rx_pg_size(rx_ring) / 2;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(IXGBEVF_SKB_PAD + size);
+#endif
+ struct sk_buff *skb;
+
+ /* prefetch first cache line of first page */
+ prefetch(va);
+#if L1_CACHE_BYTES < 128
+ prefetch(va + L1_CACHE_BYTES);
+#endif
+
+ /* build an skb around the page buffer */
+ skb = build_skb(va - IXGBEVF_SKB_PAD, truesize);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, IXGBEVF_SKB_PAD);
+ __skb_put(skb, size);
+
+ /* update buffer offset */
+#if (PAGE_SIZE < 8192)
+ rx_buffer->page_offset ^= truesize;
+#else
+ rx_buffer->page_offset += truesize;
+#endif
+
+ return skb;
+}
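
On small-page systems the half-page reuse above relies on page_offset ^= truesize toggling between the two halves of the page, so successive buffers alternate while the other half is still in flight. A tiny demonstration of the flip, assuming a 4 KiB page split into 2 KiB halves:

#include <stdio.h>

int main(void)
{
	unsigned int truesize = 2048;	/* half of an assumed 4 KiB page */
	unsigned int page_offset = 0;
	int i;

	/* Each buffer handed to the stack flips to the other half,
	 * so the offset alternates 0 -> 2048 -> 0 -> ...
	 */
	for (i = 0; i < 4; i++) {
		printf("buffer %d at offset %u\n", i, page_offset);
		page_offset ^= truesize;
	}
	return 0;
}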
static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
struct ixgbevf_ring *rx_ring,
int budget)
@@ -919,6 +958,8 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
while (likely(total_rx_packets < budget)) {
union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbevf_rx_buffer *rx_buffer;
+ unsigned int size;
/* return some buffers to hardware, one at a time is too slow */
if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
@@ -927,8 +968,8 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
}
rx_desc = IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
-
- if (!rx_desc->wb.upper.length)
+ size = le16_to_cpu(rx_desc->wb.upper.length);
+ if (!size)
break;
/* This memory barrier is needed to keep us from reading
@@ -937,15 +978,26 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
*/
rmb();
+ rx_buffer = ixgbevf_get_rx_buffer(rx_ring, size);
+
/* retrieve a buffer from the ring */
- skb = ixgbevf_fetch_rx_buffer(rx_ring, rx_desc, skb);
+ if (skb)
+ ixgbevf_add_rx_frag(rx_ring, rx_buffer, skb, size);
+ else if (ring_uses_build_skb(rx_ring))
+ skb = ixgbevf_build_skb(rx_ring, rx_buffer,
+ rx_desc, size);
+ else
+ skb = ixgbevf_construct_skb(rx_ring, rx_buffer,
+ rx_desc, size);
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_rx_buff_failed++;
+ rx_buffer->pagecnt_bias++;
break;
}
+ ixgbevf_put_rx_buffer(rx_ring, rx_buffer);
cleaned_count++;
/* fetch next buffer in frame if non-eop */
@@ -1260,85 +1312,6 @@ static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
return IRQ_HANDLED;
}
-static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
- int r_idx)
-{
- struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
-
- a->rx_ring[r_idx]->next = q_vector->rx.ring;
- q_vector->rx.ring = a->rx_ring[r_idx];
- q_vector->rx.count++;
-}
-
-static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
- int t_idx)
-{
- struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
-
- a->tx_ring[t_idx]->next = q_vector->tx.ring;
- q_vector->tx.ring = a->tx_ring[t_idx];
- q_vector->tx.count++;
-}
-
-/**
- * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
- * @adapter: board private structure to initialize
- *
- * This function maps descriptor rings to the queue-specific vectors
- * we were allotted through the MSI-X enabling code. Ideally, we'd have
- * one vector per ring/queue, but on a constrained vector budget, we
- * group the rings as "efficiently" as possible. You would add new
- * mapping configurations in here.
- **/
-static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
-{
- int q_vectors;
- int v_start = 0;
- int rxr_idx = 0, txr_idx = 0;
- int rxr_remaining = adapter->num_rx_queues;
- int txr_remaining = adapter->num_tx_queues;
- int i, j;
- int rqpv, tqpv;
-
- q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
- /* The ideal configuration...
- * We have enough vectors to map one per queue.
- */
- if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
- for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
- map_vector_to_rxq(adapter, v_start, rxr_idx);
-
- for (; txr_idx < txr_remaining; v_start++, txr_idx++)
- map_vector_to_txq(adapter, v_start, txr_idx);
- return 0;
- }
-
- /* If we don't have enough vectors for a 1-to-1
- * mapping, we'll have to group them so there are
- * multiple queues per vector.
- */
- /* Re-adjusting *qpv takes care of the remainder. */
- for (i = v_start; i < q_vectors; i++) {
- rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
- for (j = 0; j < rqpv; j++) {
- map_vector_to_rxq(adapter, i, rxr_idx);
- rxr_idx++;
- rxr_remaining--;
- }
- }
- for (i = v_start; i < q_vectors; i++) {
- tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
- for (j = 0; j < tqpv; j++) {
- map_vector_to_txq(adapter, i, txr_idx);
- txr_idx++;
- txr_remaining--;
- }
- }
-
- return 0;
-}
-
/**
* ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
* @adapter: board private structure
@@ -1411,20 +1384,6 @@ free_queue_irqs:
return err;
}
-static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
-{
- int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
- for (i = 0; i < q_vectors; i++) {
- struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
-
- q_vector->rx.ring = NULL;
- q_vector->tx.ring = NULL;
- q_vector->rx.count = 0;
- q_vector->tx.count = 0;
- }
-}
-
/**
* ixgbevf_request_irq - initialize interrupts
* @adapter: board private structure
@@ -1464,8 +1423,6 @@ static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
free_irq(adapter->msix_entries[i].vector,
adapter->q_vector[i]);
}
-
- ixgbevf_reset_q_vectors(adapter);
}
/**
@@ -1587,7 +1544,8 @@ static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
-static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
+static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *ring, int index)
{
struct ixgbe_hw *hw = &adapter->hw;
u32 srrctl;
@@ -1595,7 +1553,10 @@ static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
srrctl = IXGBE_SRRCTL_DROP_EN;
srrctl |= IXGBEVF_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
- srrctl |= IXGBEVF_RX_BUFSZ >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ if (ring_uses_large_buffer(ring))
+ srrctl |= IXGBEVF_RXBUFFER_3072 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ else
+ srrctl |= IXGBEVF_RXBUFFER_2048 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
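The SRRCTL write above encodes the Rx buffer size in 1KB units via IXGBE_SRRCTL_BSIZEPKT_SHIFT. A minimal sketch of the encoding, assuming the customary shift value of 10 from the shared ixgbe register layout (an assumption; the define lives in the ixgbe headers, not in this hunk):

/* Sketch only, not part of the patch: BSIZEPKT expresses buffer size in
 * 1KB granules, so 2048-byte buffers encode as 2 and 3072-byte as 3.
 */
#define EXAMPLE_BSIZEPKT_SHIFT 10	/* assumed IXGBE_SRRCTL_BSIZEPKT_SHIFT */

static unsigned int example_srrctl_bsizepkt(unsigned int buf_bytes)
{
	return buf_bytes >> EXAMPLE_BSIZEPKT_SHIFT;	/* 2048 -> 2, 3072 -> 3 */
}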
@@ -1767,10 +1728,21 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
ring->next_to_use = 0;
ring->next_to_alloc = 0;
- ixgbevf_configure_srrctl(adapter, reg_idx);
+ ixgbevf_configure_srrctl(adapter, ring, reg_idx);
+
- /* allow any size packet since we can handle overflow */
- rxdctl &= ~IXGBE_RXDCTL_RLPML_EN;
+ /* RXDCTL.RLPML does not work on 82599 */
+ if (adapter->hw.mac.type != ixgbe_mac_82599_vf) {
+ rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK |
+ IXGBE_RXDCTL_RLPML_EN);
+#if (PAGE_SIZE < 8192)
+ /* Limit the maximum frame size so we don't overrun the skb */
+ if (ring_uses_build_skb(ring) &&
+ !ring_uses_large_buffer(ring))
+ rxdctl |= IXGBEVF_MAX_FRAME_BUILD_SKB |
+ IXGBE_RXDCTL_RLPML_EN;
+#endif
+ }
rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
@@ -1779,6 +1751,29 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
}
+static void ixgbevf_set_rx_buffer_len(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring)
+{
+ struct net_device *netdev = adapter->netdev;
+ unsigned int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ /* set build_skb and buffer size flags */
+ clear_ring_build_skb_enabled(rx_ring);
+ clear_ring_uses_large_buffer(rx_ring);
+
+ if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
+ return;
+
+ set_ring_build_skb_enabled(rx_ring);
+
+#if (PAGE_SIZE < 8192)
+ if (max_frame <= IXGBEVF_MAX_FRAME_BUILD_SKB)
+ return;
+
+ set_ring_uses_large_buffer(rx_ring);
+#endif
+}
+
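ixgbevf_set_rx_buffer_len() above derives the buffer scheme purely from the frame size. A hedged walk-through, where example_max_frame() mirrors the max_frame computation and the roughly 1536-byte build_skb ceiling is assumed (IXGBEVF_MAX_FRAME_BUILD_SKB is defined in ixgbevf.h, outside this hunk):

/* Sketch, not part of the patch: MTU 1500 gives 1500 + 14 + 4 = 1518,
 * small enough for build_skb with standard 2K buffers; MTU 9000 gives
 * 9018, which flips the ring to order-1 pages (3K buffers) on 4K-page
 * systems, and the legacy-Rx flag bypasses both.
 */
static unsigned int example_max_frame(unsigned int mtu)
{
	return mtu + ETH_HLEN + ETH_FCS_LEN;
}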
/**
* ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
* @adapter: board private structure
@@ -1806,8 +1801,12 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring
*/
- for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbevf_ring *rx_ring = adapter->rx_ring[i];
+
+ ixgbevf_set_rx_buffer_len(adapter, rx_ring);
+ ixgbevf_configure_rx_ring(adapter, rx_ring);
+ }
}
static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev,
@@ -2136,13 +2135,13 @@ static void ixgbevf_clean_rx_ring(struct ixgbevf_ring *rx_ring)
dma_sync_single_range_for_cpu(rx_ring->dev,
rx_buffer->dma,
rx_buffer->page_offset,
- IXGBEVF_RX_BUFSZ,
+ ixgbevf_rx_bufsz(rx_ring),
DMA_FROM_DEVICE);
/* free resources associated with mapping */
dma_unmap_page_attrs(rx_ring->dev,
rx_buffer->dma,
- PAGE_SIZE,
+ ixgbevf_rx_pg_size(rx_ring),
DMA_FROM_DEVICE,
IXGBEVF_RX_DMA_ATTR);
@@ -2405,105 +2404,171 @@ static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
}
/**
- * ixgbevf_alloc_queues - Allocate memory for all rings
+ * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
+{
+ int vector, v_budget;
+
+ /* It's easy to be greedy for MSI-X vectors, but it really
+ * doesn't do us much good if we have a lot more vectors
+ * than CPUs. So let's be conservative and only ask for
+ * (roughly) the same number of vectors as there are CPUs.
+ * The default is to use pairs of vectors.
+ */
+ v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
+ v_budget = min_t(int, v_budget, num_online_cpus());
+ v_budget += NON_Q_VECTORS;
+
+ adapter->msix_entries = kcalloc(v_budget,
+ sizeof(struct msix_entry), GFP_KERNEL);
+ if (!adapter->msix_entries)
+ return -ENOMEM;
+
+ for (vector = 0; vector < v_budget; vector++)
+ adapter->msix_entries[vector].entry = vector;
+
+ /* A failure in MSI-X entry allocation isn't fatal, but the VF driver
+ * does not support any other modes, so we will simply fail here. Note
+ * that we clean up the msix_entries pointer elsewhere.
+ */
+ return ixgbevf_acquire_msix_vectors(adapter, v_budget);
+}
+
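A worked example of the budget math above, assuming NON_Q_VECTORS is 1 (the mailbox/other-cause vector; the real define is in ixgbevf.h):

/* Sketch only: 4 Rx and 4 Tx queues on an 8-CPU machine pair up as
 * max(4, 4) = 4 queue vectors, capped by min(4, 8) = 4, plus 1
 * non-queue vector => 5 MSI-X entries requested.
 */
static int example_msix_budget(int rxq, int txq, int cpus, int non_q_vectors)
{
	int budget = rxq > txq ? rxq : txq;

	if (budget > cpus)
		budget = cpus;
	return budget + non_q_vectors;
}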
+static void ixgbevf_add_ring(struct ixgbevf_ring *ring,
+ struct ixgbevf_ring_container *head)
+{
+ ring->next = head->ring;
+ head->ring = ring;
+ head->count++;
+}
+
+/**
+ * ixgbevf_alloc_q_vector - Allocate memory for a single interrupt vector
* @adapter: board private structure to initialize
+ * @v_idx: index of vector in adapter struct
+ * @txr_count: number of Tx rings for q vector
+ * @txr_idx: index of first Tx ring to assign
+ * @rxr_count: number of Rx rings for q vector
+ * @rxr_idx: index of first Rx ring to assign
*
- * We allocate one ring per queue at run-time since we don't know the
- * number of queues at compile-time. The polling_netdev array is
- * intended for Multiqueue, but should work fine with a single queue.
+ * We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
-static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
+static int ixgbevf_alloc_q_vector(struct ixgbevf_adapter *adapter, int v_idx,
+ int txr_count, int txr_idx,
+ int rxr_count, int rxr_idx)
{
+ struct ixgbevf_q_vector *q_vector;
struct ixgbevf_ring *ring;
- int rx = 0, tx = 0;
+ int ring_count, size;
+
+ ring_count = txr_count + rxr_count;
+ size = sizeof(*q_vector) + (sizeof(*ring) * ring_count);
+
+ /* allocate q_vector and rings */
+ q_vector = kzalloc(size, GFP_KERNEL);
+ if (!q_vector)
+ return -ENOMEM;
+
+ /* initialize NAPI */
+ netif_napi_add(adapter->netdev, &q_vector->napi, ixgbevf_poll, 64);
+
+ /* tie q_vector and adapter together */
+ adapter->q_vector[v_idx] = q_vector;
+ q_vector->adapter = adapter;
+ q_vector->v_idx = v_idx;
- for (; tx < adapter->num_tx_queues; tx++) {
- ring = kzalloc(sizeof(*ring), GFP_KERNEL);
- if (!ring)
- goto err_allocation;
+ /* initialize pointer to rings */
+ ring = q_vector->ring;
+ while (txr_count) {
+ /* assign generic ring traits */
ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
+
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Tx values */
+ ixgbevf_add_ring(ring, &q_vector->tx);
+
+ /* apply Tx specific ring traits */
ring->count = adapter->tx_ring_count;
- ring->queue_index = tx;
- ring->reg_idx = tx;
+ ring->queue_index = txr_idx;
+ ring->reg_idx = txr_idx;
- adapter->tx_ring[tx] = ring;
- }
+ /* assign ring to adapter */
+ adapter->tx_ring[txr_idx] = ring;
+
+ /* update count and index */
+ txr_count--;
+ txr_idx++;
- for (; rx < adapter->num_rx_queues; rx++) {
- ring = kzalloc(sizeof(*ring), GFP_KERNEL);
- if (!ring)
- goto err_allocation;
+ /* push pointer to next ring */
+ ring++;
+ }
+ while (rxr_count) {
+ /* assign generic ring traits */
ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
+ /* configure backlink on ring */
+ ring->q_vector = q_vector;
+
+ /* update q_vector Rx values */
+ ixgbevf_add_ring(ring, &q_vector->rx);
+
+ /* apply Rx specific ring traits */
ring->count = adapter->rx_ring_count;
- ring->queue_index = rx;
- ring->reg_idx = rx;
+ ring->queue_index = rxr_idx;
+ ring->reg_idx = rxr_idx;
- adapter->rx_ring[rx] = ring;
- }
+ /* assign ring to adapter */
+ adapter->rx_ring[rxr_idx] = ring;
- return 0;
+ /* update count and index */
+ rxr_count--;
+ rxr_idx++;
-err_allocation:
- while (tx) {
- kfree(adapter->tx_ring[--tx]);
- adapter->tx_ring[tx] = NULL;
+ /* push pointer to next ring */
+ ring++;
}
- while (rx) {
- kfree(adapter->rx_ring[--rx]);
- adapter->rx_ring[rx] = NULL;
- }
- return -ENOMEM;
+ return 0;
}
/**
- * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
+ * ixgbevf_free_q_vector - Free memory allocated for specific interrupt vector
* @adapter: board private structure to initialize
+ * @v_idx: index of vector in adapter struct
*
- * Attempt to configure the interrupts using the best available
- * capabilities of the hardware and the kernel.
+ * This function frees the memory allocated to the q_vector. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
**/
-static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
+static void ixgbevf_free_q_vector(struct ixgbevf_adapter *adapter, int v_idx)
{
- struct net_device *netdev = adapter->netdev;
- int err;
- int vector, v_budget;
-
- /* It's easy to be greedy for MSI-X vectors, but it really
- * doesn't do us much good if we have a lot more vectors
- * than CPU's. So let's be conservative and only ask for
- * (roughly) the same number of vectors as there are CPU's.
- * The default is to use pairs of vectors.
- */
- v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
- v_budget = min_t(int, v_budget, num_online_cpus());
- v_budget += NON_Q_VECTORS;
-
- /* A failure in MSI-X entry allocation isn't fatal, but it does
- * mean we disable MSI-X capabilities of the adapter.
- */
- adapter->msix_entries = kcalloc(v_budget,
- sizeof(struct msix_entry), GFP_KERNEL);
- if (!adapter->msix_entries)
- return -ENOMEM;
+ struct ixgbevf_q_vector *q_vector = adapter->q_vector[v_idx];
+ struct ixgbevf_ring *ring;
- for (vector = 0; vector < v_budget; vector++)
- adapter->msix_entries[vector].entry = vector;
+ ixgbevf_for_each_ring(ring, q_vector->tx)
+ adapter->tx_ring[ring->queue_index] = NULL;
- err = ixgbevf_acquire_msix_vectors(adapter, v_budget);
- if (err)
- return err;
+ ixgbevf_for_each_ring(ring, q_vector->rx)
+ adapter->rx_ring[ring->queue_index] = NULL;
- err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
- if (err)
- return err;
+ adapter->q_vector[v_idx] = NULL;
+ netif_napi_del(&q_vector->napi);
- return netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
+ /* ixgbevf_get_stats() might access the rings on this vector,
+ * we must wait a grace period before freeing it.
+ */
+ kfree_rcu(q_vector, rcu);
}
/**
@@ -2515,35 +2580,53 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
**/
static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
{
- int q_idx, num_q_vectors;
- struct ixgbevf_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ int rxr_remaining = adapter->num_rx_queues;
+ int txr_remaining = adapter->num_tx_queues;
+ int rxr_idx = 0, txr_idx = 0, v_idx = 0;
+ int err;
+
+ if (q_vectors >= (rxr_remaining + txr_remaining)) {
+ for (; rxr_remaining; v_idx++, q_vectors--) {
+ int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
+
+ err = ixgbevf_alloc_q_vector(adapter, v_idx,
+ 0, 0, rqpv, rxr_idx);
+ if (err)
+ goto err_out;
- num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ /* update counts and index */
+ rxr_remaining -= rqpv;
+ rxr_idx += rqpv;
+ }
+ }
+
+ for (; q_vectors; v_idx++, q_vectors--) {
+ int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
+ int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);
- for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
- q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
- if (!q_vector)
+ err = ixgbevf_alloc_q_vector(adapter, v_idx,
+ tqpv, txr_idx,
+ rqpv, rxr_idx);
+
+ if (err)
goto err_out;
- q_vector->adapter = adapter;
- q_vector->v_idx = q_idx;
- netif_napi_add(adapter->netdev, &q_vector->napi,
- ixgbevf_poll, 64);
- adapter->q_vector[q_idx] = q_vector;
+
+ /* update counts and index */
+ rxr_remaining -= rqpv;
+ rxr_idx += rqpv;
+ txr_remaining -= tqpv;
+ txr_idx += tqpv;
}
return 0;
err_out:
- while (q_idx) {
- q_idx--;
- q_vector = adapter->q_vector[q_idx];
-#ifdef CONFIG_NET_RX_BUSY_POLL
- napi_hash_del(&q_vector->napi);
-#endif
- netif_napi_del(&q_vector->napi);
- kfree(q_vector);
- adapter->q_vector[q_idx] = NULL;
+ while (v_idx) {
+ v_idx--;
+ ixgbevf_free_q_vector(adapter, v_idx);
}
+
return -ENOMEM;
}
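The distribution above leans on DIV_ROUND_UP(remaining, vectors_left) recomputed per vector, which absorbs any remainder on the earliest vectors. A standalone sketch of that property:

/* Sketch only, not part of the patch: splitting 5 rings over 3 vectors
 * yields 2, 2, 1 -- recomputing the ceiling each step needs no separate
 * remainder pass.
 */
#define EX_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static void example_spread(int rings, int vectors)
{
	while (vectors) {
		int per_vec = EX_DIV_ROUND_UP(rings, vectors);

		rings -= per_vec;
		vectors--;
	}
}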
@@ -2557,17 +2640,11 @@ err_out:
**/
static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
{
- int q_idx, num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
- for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
- struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
- adapter->q_vector[q_idx] = NULL;
-#ifdef CONFIG_NET_RX_BUSY_POLL
- napi_hash_del(&q_vector->napi);
-#endif
- netif_napi_del(&q_vector->napi);
- kfree(q_vector);
+ while (q_vectors) {
+ q_vectors--;
+ ixgbevf_free_q_vector(adapter, q_vectors);
}
}
@@ -2611,12 +2688,6 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
goto err_alloc_q_vectors;
}
- err = ixgbevf_alloc_queues(adapter);
- if (err) {
- pr_err("Unable to allocate memory for queues\n");
- goto err_alloc_queues;
- }
-
hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
(adapter->num_rx_queues > 1) ? "Enabled" :
"Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
@@ -2624,8 +2695,6 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
set_bit(__IXGBEVF_DOWN, &adapter->state);
return 0;
-err_alloc_queues:
- ixgbevf_free_q_vectors(adapter);
err_alloc_q_vectors:
ixgbevf_reset_interrupt_capability(adapter);
err_set_interrupt:
@@ -2641,17 +2710,6 @@ err_set_interrupt:
**/
static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
{
- int i;
-
- for (i = 0; i < adapter->num_tx_queues; i++) {
- kfree(adapter->tx_ring[i]);
- adapter->tx_ring[i] = NULL;
- }
- for (i = 0; i < adapter->num_rx_queues; i++) {
- kfree(adapter->rx_ring[i]);
- adapter->rx_ring[i] = NULL;
- }
-
adapter->num_tx_queues = 0;
adapter->num_rx_queues = 0;
@@ -3088,9 +3146,14 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
if (!err)
continue;
hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
- break;
+ goto err_setup_tx;
}
+ return 0;
+err_setup_tx:
+ /* rewind the index freeing the rings as we go */
+ while (i--)
+ ixgbevf_free_tx_resources(adapter->tx_ring[i]);
return err;
}
@@ -3148,8 +3211,14 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
if (!err)
continue;
hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
- break;
+ goto err_setup_rx;
}
+
+ return 0;
+err_setup_rx:
+ /* rewind the index freeing the rings as we go */
+ while (i--)
+ ixgbevf_free_rx_resources(adapter->rx_ring[i]);
return err;
}
@@ -3244,28 +3313,31 @@ int ixgbevf_open(struct net_device *netdev)
ixgbevf_configure(adapter);
- /* Map the Tx/Rx rings to the vectors we were allotted.
- * if request_irq will be called in this function map_rings
- * must be called *before* up_complete
- */
- ixgbevf_map_rings_to_vectors(adapter);
-
err = ixgbevf_request_irq(adapter);
if (err)
goto err_req_irq;
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues);
+ if (err)
+ goto err_set_queues;
+
+ err = netif_set_real_num_rx_queues(netdev, adapter->num_rx_queues);
+ if (err)
+ goto err_set_queues;
+
ixgbevf_up_complete(adapter);
return 0;
+err_set_queues:
+ ixgbevf_free_irq(adapter);
err_req_irq:
- ixgbevf_down(adapter);
-err_setup_rx:
ixgbevf_free_all_rx_resources(adapter);
-err_setup_tx:
+err_setup_rx:
ixgbevf_free_all_tx_resources(adapter);
+err_setup_tx:
ixgbevf_reset(adapter);
-
err_setup_reset:
return err;
@@ -3707,11 +3779,10 @@ static int ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
return __ixgbevf_maybe_stop_tx(tx_ring, size);
}
-static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+static int ixgbevf_xmit_frame_ring(struct sk_buff *skb,
+ struct ixgbevf_ring *tx_ring)
{
- struct ixgbevf_adapter *adapter = netdev_priv(netdev);
struct ixgbevf_tx_buffer *first;
- struct ixgbevf_ring *tx_ring;
int tso;
u32 tx_flags = 0;
u16 count = TXD_USE_COUNT(skb_headlen(skb));
@@ -3726,8 +3797,6 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_OK;
}
- tx_ring = adapter->tx_ring[skb->queue_mapping];
-
/* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
* + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
* + 2 desc gap to keep tail from touching head,
@@ -3780,6 +3849,29 @@ out_drop:
return NETDEV_TX_OK;
}
+static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_ring *tx_ring;
+
+ if (skb->len <= 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* The minimum packet size for olinfo paylen is 17 so pad the skb
+ * in order to meet this minimum size requirement.
+ */
+ if (skb->len < 17) {
+ if (skb_padto(skb, 17))
+ return NETDEV_TX_OK;
+ skb->len = 17;
+ }
+
+ tx_ring = adapter->tx_ring[skb->queue_mapping];
+ return ixgbevf_xmit_frame_ring(skb, tx_ring);
+}
+
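One subtlety worth noting in the padding above: skb_padto() only guarantees zeroed tailroom, it does not grow skb->len, hence the explicit assignment afterwards.

/* Sketch of the invariant, not part of the patch: after the branch,
 * skb->len >= 17 always holds, satisfying the descriptor's minimum
 * olinfo paylen, and any bytes added by the padding are zero.
 */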
/**
* ixgbevf_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
@@ -3839,6 +3931,9 @@ static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
/* must set new MTU before calling down or up */
netdev->mtu = new_mtu;
+ if (netif_running(netdev))
+ ixgbevf_reinit_locked(adapter);
+
return 0;
}
@@ -3917,17 +4012,11 @@ static int ixgbevf_resume(struct pci_dev *pdev)
rtnl_lock();
err = ixgbevf_init_interrupt_scheme(adapter);
+ if (!err && netif_running(netdev))
+ err = ixgbevf_open(netdev);
rtnl_unlock();
- if (err) {
- dev_err(&pdev->dev, "Cannot initialize interrupts\n");
+ if (err)
return err;
- }
-
- if (netif_running(netdev)) {
- err = ixgbevf_open(netdev);
- if (err)
- return err;
- }
netif_device_attach(netdev);
@@ -3953,6 +4042,7 @@ static void ixgbevf_get_stats(struct net_device *netdev,
stats->multicast = adapter->stats.vfmprc - adapter->stats.base_vfmprc;
+ rcu_read_lock();
for (i = 0; i < adapter->num_rx_queues; i++) {
ring = adapter->rx_ring[i];
do {
@@ -3974,6 +4064,7 @@ static void ixgbevf_get_stats(struct net_device *netdev,
stats->tx_bytes += bytes;
stats->tx_packets += packets;
}
+ rcu_read_unlock();
}
#define IXGBEVF_MAX_MAC_HDR_LEN 127
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 5a1668cdb461..ac0a0dc8f157 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -44,6 +44,7 @@
#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port))
#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60
#define MVPP2_RX_FIFO_INIT_REG 0x64
+#define MVPP22_TX_FIFO_THRESH_REG(port) (0x8840 + 4 * (port))
#define MVPP22_TX_FIFO_SIZE_REG(port) (0x8860 + 4 * (port))
/* RX DMA Top Registers */
@@ -65,6 +66,10 @@
#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000
#define MVPP2_RXQ_DISABLE_MASK BIT(31)
+/* Top Registers */
+#define MVPP2_MH_REG(port) (0x5040 + 4 * (port))
+#define MVPP2_DSA_EXTENDED BIT(5)
+
/* Parser Registers */
#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000
#define MVPP2_PRS_PORT_LU_MAX 0xf
@@ -254,6 +259,7 @@
#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4))
#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff
+#define MVPP22_BM_POOL_PTRS_NUM_MASK 0xfff8
#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16)
#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4))
#define MVPP2_BM_START_MASK BIT(0)
@@ -473,6 +479,7 @@
#define MVPP2_ETH_TYPE_LEN 2
#define MVPP2_PPPOE_HDR_SIZE 8
#define MVPP2_VLAN_TAG_LEN 4
+#define MVPP2_VLAN_TAG_EDSA_LEN 8
/* Lbtd 802.3 type */
#define MVPP2_IP_LBDT_TYPE 0xfffa
@@ -536,6 +543,11 @@
/* TX FIFO constants */
#define MVPP22_TX_FIFO_DATA_SIZE_10KB 0xa
#define MVPP22_TX_FIFO_DATA_SIZE_3KB 0x3
+#define MVPP2_TX_FIFO_THRESHOLD_MIN 256
+#define MVPP2_TX_FIFO_THRESHOLD_10KB \
+ (MVPP22_TX_FIFO_DATA_SIZE_10KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
+#define MVPP2_TX_FIFO_THRESHOLD_3KB \
+ (MVPP22_TX_FIFO_DATA_SIZE_3KB * 1024 - MVPP2_TX_FIFO_THRESHOLD_MIN)
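Worked values for the threshold macros above:

/* Sketch only:
 * MVPP2_TX_FIFO_THRESHOLD_10KB = 0xa * 1024 - 256 = 10240 - 256 = 9984
 * MVPP2_TX_FIFO_THRESHOLD_3KB  = 0x3 * 1024 - 256 =  3072 - 256 = 2816
 * i.e. each threshold sits MVPP2_TX_FIFO_THRESHOLD_MIN bytes below the
 * FIFO size it pairs with.
 */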
/* RX buffer constants */
#define MVPP2_SKB_SHINFO_SIZE \
@@ -609,35 +621,64 @@ enum mvpp2_tag_type {
#define MVPP2_PRS_TCAM_LU_BYTE 20
#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2)
#define MVPP2_PRS_TCAM_INV_WORD 5
+
+#define MVPP2_PRS_VID_TCAM_BYTE 2
+
+/* There is a TCAM range reserved for VLAN filtering entries; the range size is 33:
+ * 10 VLAN ID filter entries per port
+ * 1 default VLAN filter entry per port
+ * It is assumed that there are 3 ports for filtering, not including the loopback port
+ */
+#define MVPP2_PRS_VLAN_FILT_MAX 11
+#define MVPP2_PRS_VLAN_FILT_RANGE_SIZE 33
+
+#define MVPP2_PRS_VLAN_FILT_MAX_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 2)
+#define MVPP2_PRS_VLAN_FILT_DFLT_ENTRY (MVPP2_PRS_VLAN_FILT_MAX - 1)
+
/* Tcam entries ID */
#define MVPP2_PE_DROP_ALL 0
#define MVPP2_PE_FIRST_FREE_TID 1
-#define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
+
+/* VLAN filtering range */
+#define MVPP2_PE_VID_FILT_RANGE_END (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
+#define MVPP2_PE_VID_FILT_RANGE_START (MVPP2_PE_VID_FILT_RANGE_END - \
+ MVPP2_PRS_VLAN_FILT_RANGE_SIZE + 1)
+#define MVPP2_PE_LAST_FREE_TID (MVPP2_PE_VID_FILT_RANGE_START - 1)
#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
-#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
-#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
-#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
-#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
-#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
-#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
-#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
-#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
-#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
-#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
-#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
-#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
-#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
-#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
+#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 21)
+#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 20)
+#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
+#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
+#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
+#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
+#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
+#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
+#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
+#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
+#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
+#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
+#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
+#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
+#define MVPP2_PE_VID_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
+#define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+#define MVPP2_PRS_VID_PORT_FIRST(port) (MVPP2_PE_VID_FILT_RANGE_START + \
+ ((port) * MVPP2_PRS_VLAN_FILT_MAX))
+#define MVPP2_PRS_VID_PORT_LAST(port) (MVPP2_PRS_VID_PORT_FIRST(port) \
+ + MVPP2_PRS_VLAN_FILT_MAX_ENTRY)
+/* Index of default vid filter for given port */
+#define MVPP2_PRS_VID_PORT_DFLT(port) (MVPP2_PRS_VID_PORT_FIRST(port) \
+ + MVPP2_PRS_VLAN_FILT_DFLT_ENTRY)
+
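A worked instance of the index macros above, assuming the usual PPv2 TCAM size of 256 entries (MVPP2_PRS_TCAM_SRAM_SIZE; the define sits elsewhere in the file):

/* Sketch only, with MVPP2_PRS_TCAM_SRAM_SIZE == 256:
 * range end   = 256 - 31         = 225
 * range start = 225 - 33 + 1     = 193
 * port 1: FIRST = 193 + 1 * 11   = 204
 *         LAST  = 204 + (11 - 2) = 213
 *         DFLT  = 204 + (11 - 1) = 214 (the guard entry)
 */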
/* Sram structure
* The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
*/
@@ -725,6 +766,7 @@ enum mvpp2_tag_type {
#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4)
#define MVPP2_PRS_SINGLE_VLAN_AI 0
#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7)
+#define MVPP2_PRS_EDSA_VID_AI_BIT BIT(0)
/* DSA/EDSA type */
#define MVPP2_PRS_TAGGED true
@@ -747,6 +789,7 @@ enum mvpp2_prs_lookup {
MVPP2_PRS_LU_MAC,
MVPP2_PRS_LU_DSA,
MVPP2_PRS_LU_VLAN,
+ MVPP2_PRS_LU_VID,
MVPP2_PRS_LU_L2,
MVPP2_PRS_LU_PPPOE,
MVPP2_PRS_LU_IP4,
@@ -772,23 +815,26 @@ enum mvpp2_prs_l3_cast {
#define MVPP22_RSS_TABLE_ENTRIES 32
/* BM constants */
-#define MVPP2_BM_POOLS_NUM 8
+#define MVPP2_BM_JUMBO_BUF_NUM 512
#define MVPP2_BM_LONG_BUF_NUM 1024
#define MVPP2_BM_SHORT_BUF_NUM 2048
#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
#define MVPP2_BM_POOL_PTR_ALIGN 128
-#define MVPP2_BM_SWF_LONG_POOL(port) ((port > 2) ? 2 : port)
-#define MVPP2_BM_SWF_SHORT_POOL 3
/* BM cookie (32 bits) definition */
#define MVPP2_BM_COOKIE_POOL_OFFS 8
#define MVPP2_BM_COOKIE_CPU_OFFS 24
+#define MVPP2_BM_SHORT_FRAME_SIZE 512
+#define MVPP2_BM_LONG_FRAME_SIZE 2048
+#define MVPP2_BM_JUMBO_FRAME_SIZE 10240
/* BM short pool packet size
* These values ensure that for SWF the total number
* of bytes allocated for each buffer will be 512
*/
-#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512)
+#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_SHORT_FRAME_SIZE)
+#define MVPP2_BM_LONG_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_LONG_FRAME_SIZE)
+#define MVPP2_BM_JUMBO_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(MVPP2_BM_JUMBO_FRAME_SIZE)
#define MVPP21_ADDR_SPACE_SZ 0
#define MVPP22_ADDR_SPACE_SZ SZ_64K
@@ -796,12 +842,18 @@ enum mvpp2_prs_l3_cast {
#define MVPP2_MAX_THREADS 8
#define MVPP2_MAX_QVECS MVPP2_MAX_THREADS
-enum mvpp2_bm_type {
- MVPP2_BM_FREE,
- MVPP2_BM_SWF_LONG,
- MVPP2_BM_SWF_SHORT
+enum mvpp2_bm_pool_log_num {
+ MVPP2_BM_SHORT,
+ MVPP2_BM_LONG,
+ MVPP2_BM_JUMBO,
+ MVPP2_BM_POOLS_NUM
};
+static struct {
+ int pkt_size;
+ int buf_num;
+} mvpp2_pools[MVPP2_BM_POOLS_NUM];
+
/* GMAC MIB Counters register definitions */
#define MVPP21_MIB_COUNTERS_OFFSET 0x1000
#define MVPP21_MIB_COUNTERS_PORT_SZ 0x400
@@ -1230,7 +1282,6 @@ struct mvpp2_cls_lookup_entry {
struct mvpp2_bm_pool {
/* Pool number in the range 0-7 */
int id;
- enum mvpp2_bm_type type;
/* Buffer Pointers Pool External (BPPE) size */
int size;
@@ -1662,6 +1713,14 @@ static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
}
+/* Set vid in tcam sw entry */
+static void mvpp2_prs_match_vid(struct mvpp2_prs_entry *pe, int offset,
+ unsigned short vid)
+{
+ mvpp2_prs_tcam_data_byte_set(pe, offset + 0, (vid & 0xf00) >> 8, 0xf);
+ mvpp2_prs_tcam_data_byte_set(pe, offset + 1, vid & 0xff, 0xff);
+}
+
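An example of the byte split performed by mvpp2_prs_match_vid() above:

/* Sketch only, not part of the patch: a 12-bit VID spans two TCAM
 * bytes, high nibble first.
 * vid = 0x123:
 *   byte[offset + 0] = (0x123 & 0xf00) >> 8 = 0x01, enable mask 0x0f
 *   byte[offset + 1] =  0x123 & 0x0ff       = 0x23, enable mask 0xff
 */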
/* Set bits in sram sw entry */
static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
int val)
@@ -2029,24 +2088,30 @@ static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
pe.index = tid;
- /* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag*/
- mvpp2_prs_sram_shift_set(&pe, shift,
- MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
-
/* Update shadow table */
mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
if (tagged) {
/* Set tagged bit in DSA tag */
mvpp2_prs_tcam_data_byte_set(&pe, 0,
- MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
- MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
- /* Clear all ai bits for next iteration */
- mvpp2_prs_sram_ai_update(&pe, 0,
- MVPP2_PRS_SRAM_AI_MASK);
- /* If packet is tagged continue check vlans */
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+ MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
+ MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
+
+ /* Set ai bits for next iteration */
+ if (extend)
+ mvpp2_prs_sram_ai_update(&pe, 1,
+ MVPP2_PRS_SRAM_AI_MASK);
+ else
+ mvpp2_prs_sram_ai_update(&pe, 0,
+ MVPP2_PRS_SRAM_AI_MASK);
+
+ /* If packet is tagged continue check vid filtering */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
} else {
+ /* Shift 4 bytes for DSA tag or 8 bytes for EDSA tag */
+ mvpp2_prs_sram_shift_set(&pe, shift,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
/* Set result info bits to 'no vlans' */
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
MVPP2_PRS_RI_VLAN_MASK);
@@ -2231,10 +2296,9 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
mvpp2_prs_match_etype(pe, 0, tpid);
- mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
- /* Shift 4 bytes - skip 1 vlan tag */
- mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
- MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+ /* VLAN tag detected, proceed with VID filtering */
+ mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VID);
+
/* Clear all ai bits for next iteration */
mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
@@ -2375,8 +2439,8 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
mvpp2_prs_match_etype(pe, 4, tpid2);
mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
- /* Shift 8 bytes - skip 2 vlan tags */
- mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
+ /* Shift 4 bytes - skip outer vlan tag */
+ mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
MVPP2_PRS_RI_VLAN_MASK);
@@ -2755,6 +2819,62 @@ static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
mvpp2_prs_hw_write(priv, &pe);
}
+/* Initialize parser entries for VID filtering */
+static void mvpp2_prs_vid_init(struct mvpp2 *priv)
+{
+ struct mvpp2_prs_entry pe;
+
+ memset(&pe, 0, sizeof(pe));
+
+ /* Set default vid entry */
+ pe.index = MVPP2_PE_VID_FLTR_DEFAULT;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
+
+ mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_EDSA_VID_AI_BIT);
+
+ /* Skip VLAN header - Set offset to 4 bytes */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Clear all ai bits for next iteration */
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ /* Set default vid entry for extended DSA */
+ memset(&pe, 0, sizeof(pe));
+
+ /* Set default vid entry */
+ pe.index = MVPP2_PE_VID_EDSA_FLTR_DEFAULT;
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
+
+ mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_EDSA_VID_AI_BIT,
+ MVPP2_PRS_EDSA_VID_AI_BIT);
+
+ /* Skip VLAN header - Set offset to 8 bytes */
+ mvpp2_prs_sram_shift_set(&pe, MVPP2_VLAN_TAG_EDSA_LEN,
+ MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Clear all ai bits for next iteration */
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+
+ /* Unmask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+ /* Update shadow table and hw entry */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
/* Match basic ethertypes */
static int mvpp2_prs_etype_init(struct mvpp2 *priv)
{
@@ -3023,7 +3143,8 @@ static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
pe.index = MVPP2_PE_VLAN_DBL;
- mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VID);
+
/* Clear ai for next iterations */
mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
@@ -3386,6 +3507,192 @@ static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
return 0;
}
+/* Find tcam entry with matched pair <vid,port> */
+static int mvpp2_prs_vid_range_find(struct mvpp2 *priv, int pmap, u16 vid,
+ u16 mask)
+{
+ unsigned char byte[2], enable[2];
+ struct mvpp2_prs_entry pe;
+ u16 rvid, rmask;
+ int tid;
+
+ /* Go through all the entries with MVPP2_PRS_LU_VID */
+ for (tid = MVPP2_PE_VID_FILT_RANGE_START;
+ tid <= MVPP2_PE_VID_FILT_RANGE_END; tid++) {
+ if (!priv->prs_shadow[tid].valid ||
+ priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VID)
+ continue;
+
+ pe.index = tid;
+
+ mvpp2_prs_hw_read(priv, &pe);
+ mvpp2_prs_tcam_data_byte_get(&pe, 2, &byte[0], &enable[0]);
+ mvpp2_prs_tcam_data_byte_get(&pe, 3, &byte[1], &enable[1]);
+
+ rvid = ((byte[0] & 0xf) << 8) + byte[1];
+ rmask = ((enable[0] & 0xf) << 8) + enable[1];
+
+ if (rvid != vid || rmask != mask)
+ continue;
+
+ return tid;
+ }
+
+ return 0;
+}
+
+/* Write parser entry for VID filtering */
+static int mvpp2_prs_vid_entry_add(struct mvpp2_port *port, u16 vid)
+{
+ unsigned int vid_start = MVPP2_PE_VID_FILT_RANGE_START +
+ port->id * MVPP2_PRS_VLAN_FILT_MAX;
+ unsigned int mask = 0xfff, reg_val, shift;
+ struct mvpp2 *priv = port->priv;
+ struct mvpp2_prs_entry pe;
+ int tid;
+
+ /* Scan TCAM and see if an entry with this <vid,port> already exists */
+ tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, mask);
+
+ reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
+ if (reg_val & MVPP2_DSA_EXTENDED)
+ shift = MVPP2_VLAN_TAG_EDSA_LEN;
+ else
+ shift = MVPP2_VLAN_TAG_LEN;
+
+ /* No such entry */
+ if (!tid) {
+ memset(&pe, 0, sizeof(pe));
+
+ /* Go through all entries from first to last in vlan range */
+ tid = mvpp2_prs_tcam_first_free(priv, vid_start,
+ vid_start +
+ MVPP2_PRS_VLAN_FILT_MAX_ENTRY);
+
+ /* There isn't room for a new VID filter */
+ if (tid < 0)
+ return tid;
+
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
+ pe.index = tid;
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+ } else {
+ mvpp2_prs_hw_read(priv, &pe);
+ }
+
+ /* Enable the current port */
+ mvpp2_prs_tcam_port_set(&pe, port->id, true);
+
+ /* Continue - set next lookup */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+
+ /* Skip VLAN header - Set offset to 4 or 8 bytes */
+ mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Set match on VID */
+ mvpp2_prs_match_vid(&pe, MVPP2_PRS_VID_TCAM_BYTE, vid);
+
+ /* Clear all ai bits for next iteration */
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+ /* Update shadow table */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
+ mvpp2_prs_hw_write(priv, &pe);
+
+ return 0;
+}
+
+/* Remove parser entry for VID filtering */
+static void mvpp2_prs_vid_entry_remove(struct mvpp2_port *port, u16 vid)
+{
+ struct mvpp2 *priv = port->priv;
+ int tid;
+
+ /* Scan TCAM and see if an entry with this <vid,port> already exists */
+ tid = mvpp2_prs_vid_range_find(priv, (1 << port->id), vid, 0xfff);
+
+ /* No such entry */
+ if (!tid)
+ return;
+
+ mvpp2_prs_hw_inv(priv, tid);
+ priv->prs_shadow[tid].valid = false;
+}
+
+/* Remove all existing VID filters on this port */
+static void mvpp2_prs_vid_remove_all(struct mvpp2_port *port)
+{
+ struct mvpp2 *priv = port->priv;
+ int tid;
+
+ for (tid = MVPP2_PRS_VID_PORT_FIRST(port->id);
+ tid <= MVPP2_PRS_VID_PORT_LAST(port->id); tid++) {
+ if (priv->prs_shadow[tid].valid) {
+ /* tid is a TCAM index, not a VID, so invalidate directly */
+ mvpp2_prs_hw_inv(priv, tid);
+ priv->prs_shadow[tid].valid = false;
+ }
+ }
+}
+
+/* Remove the VID filtering guard entry for this port */
+static void mvpp2_prs_vid_disable_filtering(struct mvpp2_port *port)
+{
+ unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
+ struct mvpp2 *priv = port->priv;
+
+ /* Invalidate the guard entry */
+ mvpp2_prs_hw_inv(priv, tid);
+
+ priv->prs_shadow[tid].valid = false;
+}
+
+/* Add guard entry that drops packets when no VID is matched on this port */
+static void mvpp2_prs_vid_enable_filtering(struct mvpp2_port *port)
+{
+ unsigned int tid = MVPP2_PRS_VID_PORT_DFLT(port->id);
+ struct mvpp2 *priv = port->priv;
+ unsigned int reg_val, shift;
+ struct mvpp2_prs_entry pe;
+
+ if (priv->prs_shadow[tid].valid)
+ return;
+
+ memset(&pe, 0, sizeof(pe));
+
+ pe.index = tid;
+
+ reg_val = mvpp2_read(priv, MVPP2_MH_REG(port->id));
+ if (reg_val & MVPP2_DSA_EXTENDED)
+ shift = MVPP2_VLAN_TAG_EDSA_LEN;
+ else
+ shift = MVPP2_VLAN_TAG_LEN;
+
+ mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VID);
+
+ /* Mask all ports */
+ mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+ /* Update port mask */
+ mvpp2_prs_tcam_port_set(&pe, port->id, true);
+
+ /* Continue - set next lookup */
+ mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+
+ /* Skip VLAN header - Set offset to 4 or 8 bytes */
+ mvpp2_prs_sram_shift_set(&pe, shift, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+ /* Drop VLAN packets that don't belong to any VIDs on this port */
+ mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+ MVPP2_PRS_RI_DROP_MASK);
+
+ /* Clear all ai bits for next iteration */
+ mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+ /* Update shadow table */
+ mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VID);
+ mvpp2_prs_hw_write(priv, &pe);
+}
+
/* Parser default initialization */
static int mvpp2_prs_default_init(struct platform_device *pdev,
struct mvpp2 *priv)
@@ -3429,6 +3736,8 @@ static int mvpp2_prs_default_init(struct platform_device *pdev,
mvpp2_prs_dsa_init(priv);
+ mvpp2_prs_vid_init(priv);
+
err = mvpp2_prs_etype_init(priv);
if (err)
return err;
@@ -3901,7 +4210,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
val |= MVPP2_BM_START_MASK;
mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
- bm_pool->type = MVPP2_BM_FREE;
bm_pool->size = size;
bm_pool->pkt_size = 0;
bm_pool->buf_num = 0;
@@ -3954,11 +4262,17 @@ static void mvpp2_bm_bufs_get_addrs(struct device *dev, struct mvpp2 *priv,
/* Free all buffers from the pool */
static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
- struct mvpp2_bm_pool *bm_pool)
+ struct mvpp2_bm_pool *bm_pool, int buf_num)
{
int i;
- for (i = 0; i < bm_pool->buf_num; i++) {
+ if (buf_num > bm_pool->buf_num) {
+ WARN(1, "Pool does not have so many bufs pool(%d) bufs(%d)\n",
+ bm_pool->id, buf_num);
+ buf_num = bm_pool->buf_num;
+ }
+
+ for (i = 0; i < buf_num; i++) {
dma_addr_t buf_dma_addr;
phys_addr_t buf_phys_addr;
void *data;
@@ -3980,16 +4294,39 @@ static void mvpp2_bm_bufs_free(struct device *dev, struct mvpp2 *priv,
bm_pool->buf_num -= i;
}
+/* Check number of buffers in BM pool */
+static int mvpp2_check_hw_buf_num(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
+{
+ int buf_num = 0;
+
+ buf_num += mvpp2_read(priv, MVPP2_BM_POOL_PTRS_NUM_REG(bm_pool->id)) &
+ MVPP22_BM_POOL_PTRS_NUM_MASK;
+ buf_num += mvpp2_read(priv, MVPP2_BM_BPPI_PTRS_NUM_REG(bm_pool->id)) &
+ MVPP2_BM_BPPI_PTR_NUM_MASK;
+
+ /* HW has one buffer ready which is not reflected in the counters */
+ if (buf_num)
+ buf_num += 1;
+
+ return buf_num;
+}
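The unconditional +1 above accounts for the buffer the HW pops out of the pool ahead of time; a hedged illustration:

/* Illustration only (numbers are hypothetical): a full 2048-buffer pool
 * may report 2045 pointers in the BPPE and 2 in the BPPI while one
 * buffer sits ready in the allocator -- 2045 + 2 + 1 = 2048.
 */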
+
/* Cleanup pool */
static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
struct mvpp2 *priv,
struct mvpp2_bm_pool *bm_pool)
{
+ int buf_num;
u32 val;
- mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool);
- if (bm_pool->buf_num) {
- WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
+ buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
+ mvpp2_bm_bufs_free(&pdev->dev, priv, bm_pool, buf_num);
+
+ /* Check buffer counters after free */
+ buf_num = mvpp2_check_hw_buf_num(priv, bm_pool);
+ if (buf_num) {
+ WARN(1, "cannot free all buffers in pool %d, buf_num left %d\n",
+ bm_pool->id, buf_num);
return 0;
}
@@ -4051,6 +4388,21 @@ static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
return 0;
}
+static void mvpp2_setup_bm_pool(void)
+{
+ /* Short pool */
+ mvpp2_pools[MVPP2_BM_SHORT].buf_num = MVPP2_BM_SHORT_BUF_NUM;
+ mvpp2_pools[MVPP2_BM_SHORT].pkt_size = MVPP2_BM_SHORT_PKT_SIZE;
+
+ /* Long pool */
+ mvpp2_pools[MVPP2_BM_LONG].buf_num = MVPP2_BM_LONG_BUF_NUM;
+ mvpp2_pools[MVPP2_BM_LONG].pkt_size = MVPP2_BM_LONG_PKT_SIZE;
+
+ /* Jumbo pool */
+ mvpp2_pools[MVPP2_BM_JUMBO].buf_num = MVPP2_BM_JUMBO_BUF_NUM;
+ mvpp2_pools[MVPP2_BM_JUMBO].pkt_size = MVPP2_BM_JUMBO_PKT_SIZE;
+}
+
/* Attach long pool to rxq */
static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
int lrxq, int long_pool)
@@ -4189,13 +4541,11 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
bm_pool->buf_num += i;
netdev_dbg(port->dev,
- "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
- bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
+ "pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
netdev_dbg(port->dev,
- "%s pool %d: %d of %d buffers added\n",
- bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
+ "pool %d: %d of %d buffers added\n",
bm_pool->id, i, buf_num);
return i;
}
@@ -4204,25 +4554,20 @@ static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
* pool pointer on success
*/
static struct mvpp2_bm_pool *
-mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
- int pkt_size)
+mvpp2_bm_pool_use(struct mvpp2_port *port, unsigned int pool, int pkt_size)
{
struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
int num;
- if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
- netdev_err(port->dev, "mixing pool types is forbidden\n");
+ if (pool >= MVPP2_BM_POOLS_NUM) {
+ netdev_err(port->dev, "Invalid pool %u\n", pool);
return NULL;
}
- if (new_pool->type == MVPP2_BM_FREE)
- new_pool->type = type;
-
/* Allocate buffers in case BM pool is used as long pool, but packet
* size doesn't match MTU or BM pool hasn't being used yet
*/
- if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
- (new_pool->pkt_size == 0)) {
+ if (new_pool->pkt_size == 0) {
int pkts_num;
/* Set default buffer number or free all the buffers in case
@@ -4230,12 +4575,10 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
*/
pkts_num = new_pool->buf_num;
if (pkts_num == 0)
- pkts_num = type == MVPP2_BM_SWF_LONG ?
- MVPP2_BM_LONG_BUF_NUM :
- MVPP2_BM_SHORT_BUF_NUM;
+ pkts_num = mvpp2_pools[pool].buf_num;
else
mvpp2_bm_bufs_free(port->dev->dev.parent,
- port->priv, new_pool);
+ port->priv, new_pool, pkts_num);
new_pool->pkt_size = pkt_size;
new_pool->frag_size =
@@ -4261,16 +4604,28 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
{
int rxq;
+ enum mvpp2_bm_pool_log_num long_log_pool, short_log_pool;
+
+ /* If port pkt_size is higher than 1518B:
+ * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
+ * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
+ */
+ if (port->pkt_size > MVPP2_BM_LONG_PKT_SIZE) {
+ long_log_pool = MVPP2_BM_JUMBO;
+ short_log_pool = MVPP2_BM_LONG;
+ } else {
+ long_log_pool = MVPP2_BM_LONG;
+ short_log_pool = MVPP2_BM_SHORT;
+ }
if (!port->pool_long) {
port->pool_long =
- mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
- MVPP2_BM_SWF_LONG,
- port->pkt_size);
+ mvpp2_bm_pool_use(port, long_log_pool,
+ mvpp2_pools[long_log_pool].pkt_size);
if (!port->pool_long)
return -ENOMEM;
- port->pool_long->port_map |= (1 << port->id);
+ port->pool_long->port_map |= BIT(port->id);
for (rxq = 0; rxq < port->nrxqs; rxq++)
mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
@@ -4278,13 +4633,12 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
if (!port->pool_short) {
port->pool_short =
- mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
- MVPP2_BM_SWF_SHORT,
- MVPP2_BM_SHORT_PKT_SIZE);
+ mvpp2_bm_pool_use(port, short_log_pool,
+ mvpp2_pools[short_log_pool].pkt_size);
if (!port->pool_short)
return -ENOMEM;
- port->pool_short->port_map |= (1 << port->id);
+ port->pool_short->port_map |= BIT(port->id);
for (rxq = 0; rxq < port->nrxqs; rxq++)
mvpp2_rxq_short_pool_set(port, rxq,
@@ -4297,30 +4651,49 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
{
struct mvpp2_port *port = netdev_priv(dev);
- struct mvpp2_bm_pool *port_pool = port->pool_long;
- int num, pkts_num = port_pool->buf_num;
+ enum mvpp2_bm_pool_log_num new_long_pool;
int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
- /* Update BM pool with new buffer size */
- mvpp2_bm_bufs_free(dev->dev.parent, port->priv, port_pool);
- if (port_pool->buf_num) {
- WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
- return -EIO;
- }
-
- port_pool->pkt_size = pkt_size;
- port_pool->frag_size = SKB_DATA_ALIGN(MVPP2_RX_BUF_SIZE(pkt_size)) +
- MVPP2_SKB_SHINFO_SIZE;
- num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
- if (num != pkts_num) {
- WARN(1, "pool %d: %d of %d allocated\n",
- port_pool->id, num, pkts_num);
- return -EIO;
+ /* If port MTU is higher than 1518B:
+ * HW Long pool - SW Jumbo pool, HW Short pool - SW Long pool
+ * else: HW Long pool - SW Long pool, HW Short pool - SW Short pool
+ */
+ if (pkt_size > MVPP2_BM_LONG_PKT_SIZE)
+ new_long_pool = MVPP2_BM_JUMBO;
+ else
+ new_long_pool = MVPP2_BM_LONG;
+
+ if (new_long_pool != port->pool_long->id) {
+ /* Remove port from old short & long pool */
+ port->pool_long = mvpp2_bm_pool_use(port, port->pool_long->id,
+ port->pool_long->pkt_size);
+ port->pool_long->port_map &= ~BIT(port->id);
+ port->pool_long = NULL;
+
+ port->pool_short = mvpp2_bm_pool_use(port, port->pool_short->id,
+ port->pool_short->pkt_size);
+ port->pool_short->port_map &= ~BIT(port->id);
+ port->pool_short = NULL;
+
+ port->pkt_size = pkt_size;
+
+ /* Add port to new short & long pool */
+ mvpp2_swf_bm_pool_init(port);
+
+ /* Update L4 checksum offloads when jumbo is enabled/disabled on port */
+ if (new_long_pool == MVPP2_BM_JUMBO && port->id != 0) {
+ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ dev->hw_features &= ~(NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM);
+ } else {
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ }
}
- mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
- MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
dev->mtu = mtu;
+ dev->wanted_features = dev->features;
+
netdev_update_features(dev);
return 0;
}
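A hedged walk-through of the pool switch above, assuming MVPP2_RX_PKT_SIZE() adds the usual L2 overhead to the MTU:

/* Sketch only:
 * MTU 1500 -> pkt_size <= MVPP2_BM_LONG_PKT_SIZE -> long + short pools
 * MTU 9000 -> pkt_size >  MVPP2_BM_LONG_PKT_SIZE -> jumbo + long pools,
 *             and ports other than 0 additionally lose IP/IPv6 TX
 *             checksum offload while jumbo is in use.
 */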
@@ -7153,6 +7526,12 @@ retry:
}
}
}
+
+ /* Disable VLAN filtering in promiscuous mode */
+ if (dev->flags & IFF_PROMISC)
+ mvpp2_prs_vid_disable_filtering(port);
+ else
+ mvpp2_prs_vid_enable_filtering(port);
}
static int mvpp2_set_mac_address(struct net_device *dev, void *p)
@@ -7292,6 +7671,48 @@ static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return ret;
}
+static int mvpp2_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+ int ret;
+
+ ret = mvpp2_prs_vid_entry_add(port, vid);
+ if (ret)
+ netdev_err(dev, "rx-vlan-filter offloading cannot accept more than %d VIDs per port\n",
+ MVPP2_PRS_VLAN_FILT_MAX - 1);
+ return ret;
+}
+
+static int mvpp2_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ mvpp2_prs_vid_entry_remove(port, vid);
+ return 0;
+}
+
+static int mvpp2_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = dev->features ^ features;
+ struct mvpp2_port *port = netdev_priv(dev);
+
+ if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ mvpp2_prs_vid_enable_filtering(port);
+ } else {
+ /* Invalidate all registered VID filters for this
+ * port
+ */
+ mvpp2_prs_vid_remove_all(port);
+
+ mvpp2_prs_vid_disable_filtering(port);
+ }
+ }
+
+ return 0;
+}
+
/* Ethtool methods */
/* Set interrupt coalescing for ethtools */
@@ -7433,6 +7854,9 @@ static const struct net_device_ops mvpp2_netdev_ops = {
.ndo_change_mtu = mvpp2_change_mtu,
.ndo_get_stats64 = mvpp2_get_stats64,
.ndo_do_ioctl = mvpp2_ioctl,
+ .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid,
+ .ndo_set_features = mvpp2_set_features,
};
static const struct ethtool_ops mvpp2_eth_tool_ops = {
@@ -7943,16 +8367,24 @@ static int mvpp2_port_probe(struct platform_device *pdev,
}
}
- features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+ features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO;
dev->features = features | NETIF_F_RXCSUM;
- dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
+ dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ if (port->pool_long->id == MVPP2_BM_JUMBO && port->id != 0) {
+ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ dev->hw_features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ }
+
dev->vlan_features |= features;
dev->gso_max_segs = MVPP2_MAX_TSO_SEGS;
- /* MTU range: 68 - 9676 */
+ /* MTU range: 68 - 9704 */
dev->min_mtu = ETH_MIN_MTU;
- /* 9676 == 9700 - 20 and rounding to 8 */
- dev->max_mtu = 9676;
+ /* 9704 == 9728 - 20 and rounding to 8 */
+ dev->max_mtu = MVPP2_BM_JUMBO_PKT_SIZE;
err = register_netdev(dev);
if (err < 0) {
@@ -8083,14 +8515,25 @@ static void mvpp22_rx_fifo_init(struct mvpp2 *priv)
mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
}
-/* Initialize Tx FIFO's */
+/* Initialize Tx FIFOs: the total FIFO size is 19kB on PPv2.2 and 10G
+ * interfaces must have a Tx FIFO size of 10kB. As only port 0 can do 10G,
+ * configure its Tx FIFO size to 10kB and the other ports' Tx FIFO size to 3kB.
+ */
static void mvpp22_tx_fifo_init(struct mvpp2 *priv)
{
- int port;
+ int port, size, thrs;
- for (port = 0; port < MVPP2_MAX_PORTS; port++)
- mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port),
- MVPP22_TX_FIFO_DATA_SIZE_3KB);
+ for (port = 0; port < MVPP2_MAX_PORTS; port++) {
+ if (port == 0) {
+ size = MVPP22_TX_FIFO_DATA_SIZE_10KB;
+ thrs = MVPP2_TX_FIFO_THRESHOLD_10KB;
+ } else {
+ size = MVPP22_TX_FIFO_DATA_SIZE_3KB;
+ thrs = MVPP2_TX_FIFO_THRESHOLD_3KB;
+ }
+ mvpp2_write(priv, MVPP22_TX_FIFO_SIZE_REG(port), size);
+ mvpp2_write(priv, MVPP22_TX_FIFO_THRESH_REG(port), thrs);
+ }
}
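Sanity check on the budget described in the comment above, assuming MVPP2_MAX_PORTS is 4 on PPv2.2 (the define sits elsewhere in the file):

/* Sketch only: 10kB for port 0 plus 3kB for each of the three other
 * ports gives 10 + 3 * 3 = 19kB, exactly the total Tx FIFO size.
 */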
static void mvpp2_axi_init(struct mvpp2 *priv)
@@ -8284,6 +8727,8 @@ static int mvpp2_probe(struct platform_device *pdev)
priv->sysctrl_base = NULL;
}
+ mvpp2_setup_bm_pool();
+
for (i = 0; i < MVPP2_MAX_THREADS; i++) {
u32 addr_space_sz;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index ebc1f566a4d9..9a7a2f05ab35 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -199,6 +199,10 @@ static const char main_strings[][ETH_GSTRING_LEN] = {
"rx_xdp_drop",
"rx_xdp_tx",
"rx_xdp_tx_full",
+
+ /* phy statistics */
+ "rx_packets_phy", "rx_bytes_phy",
+ "tx_packets_phy", "tx_bytes_phy",
};
static const char mlx4_en_test_names[][ETH_GSTRING_LEN]= {
@@ -411,6 +415,10 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
if (bitmap_iterator_test(&it))
data[index++] = ((unsigned long *)&priv->xdp_stats)[i];
+ for (i = 0; i < NUM_PHY_STATS; i++, bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
+ data[index++] = ((unsigned long *)&priv->phy_stats)[i];
+
for (i = 0; i < priv->tx_ring_num[TX]; i++) {
data[index++] = priv->tx_ring[TX][i]->packets;
data[index++] = priv->tx_ring[TX][i]->bytes;
@@ -490,6 +498,12 @@ static void mlx4_en_get_strings(struct net_device *dev,
strcpy(data + (index++) * ETH_GSTRING_LEN,
main_strings[strings]);
+ for (i = 0; i < NUM_PHY_STATS; i++, strings++,
+ bitmap_iterator_inc(&it))
+ if (bitmap_iterator_test(&it))
+ strcpy(data + (index++) * ETH_GSTRING_LEN,
+ main_strings[strings]);
+
for (i = 0; i < priv->tx_ring_num[TX]; i++) {
sprintf(data + (index++) * ETH_GSTRING_LEN,
"tx%d_packets", i);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 8fc51bc29003..e0adac4a9a19 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -3256,6 +3256,10 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev,
bitmap_set(stats_bitmap->bitmap, last_i, NUM_XDP_STATS);
last_i += NUM_XDP_STATS;
+
+ if (!mlx4_is_slave(dev))
+ bitmap_set(stats_bitmap->bitmap, last_i, NUM_PHY_STATS);
+ last_i += NUM_PHY_STATS;
}
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -3630,10 +3634,6 @@ int mlx4_en_reset_config(struct net_device *dev,
mlx4_en_stop_port(dev, 1);
}
- en_warn(priv, "Changing device configuration rx filter(%x) rx vlan(%x)\n",
- ts_config.rx_filter,
- !!(features & NETIF_F_HW_VLAN_CTAG_RX));
-
mlx4_en_safe_replace_resources(priv, tmp);
if (DEV_FEATURE_CHANGED(dev, features, NETIF_F_HW_VLAN_CTAG_RX)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index 1fa4849a6f56..0158b88bea5b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -275,19 +275,31 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
priv->port_stats.xmit_more += READ_ONCE(ring->xmit_more);
}
- if (mlx4_is_master(mdev->dev)) {
- stats->rx_packets = en_stats_adder(&mlx4_en_stats->RTOT_prio_0,
- &mlx4_en_stats->RTOT_prio_1,
- NUM_PRIORITIES);
- stats->tx_packets = en_stats_adder(&mlx4_en_stats->TTOT_prio_0,
- &mlx4_en_stats->TTOT_prio_1,
- NUM_PRIORITIES);
- stats->rx_bytes = en_stats_adder(&mlx4_en_stats->ROCT_prio_0,
- &mlx4_en_stats->ROCT_prio_1,
- NUM_PRIORITIES);
- stats->tx_bytes = en_stats_adder(&mlx4_en_stats->TOCT_prio_0,
- &mlx4_en_stats->TOCT_prio_1,
- NUM_PRIORITIES);
+ if (!mlx4_is_slave(mdev->dev)) {
+ struct mlx4_en_phy_stats *p_stats = &priv->phy_stats;
+
+ p_stats->rx_packets_phy =
+ en_stats_adder(&mlx4_en_stats->RTOT_prio_0,
+ &mlx4_en_stats->RTOT_prio_1,
+ NUM_PRIORITIES);
+ p_stats->tx_packets_phy =
+ en_stats_adder(&mlx4_en_stats->TTOT_prio_0,
+ &mlx4_en_stats->TTOT_prio_1,
+ NUM_PRIORITIES);
+ p_stats->rx_bytes_phy =
+ en_stats_adder(&mlx4_en_stats->ROCT_prio_0,
+ &mlx4_en_stats->ROCT_prio_1,
+ NUM_PRIORITIES);
+ p_stats->tx_bytes_phy =
+ en_stats_adder(&mlx4_en_stats->TOCT_prio_0,
+ &mlx4_en_stats->TOCT_prio_1,
+ NUM_PRIORITIES);
+ if (mlx4_is_master(mdev->dev)) {
+ stats->rx_packets = p_stats->rx_packets_phy;
+ stats->tx_packets = p_stats->tx_packets_phy;
+ stats->rx_bytes = p_stats->rx_bytes_phy;
+ stats->tx_bytes = p_stats->tx_bytes_phy;
+ }
}
/* net device stats */
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index b4d144e67514..c2c6bd7578fd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -649,6 +649,12 @@ static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
}
+#if IS_ENABLED(CONFIG_IPV6)
+#define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4 | MLX4_CQE_STATUS_IPV6)
+#else
+#define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4)
+#endif
+
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
@@ -662,12 +668,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
int polled = 0;
int index;
- if (unlikely(!priv->port_up))
+ if (unlikely(!priv->port_up || budget <= 0))
return 0;
- if (unlikely(budget <= 0))
- return polled;
-
ring = priv->rx_ring[cq_ring];
/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
@@ -838,12 +841,7 @@ xdp_drop_no_cnt:
ring->csum_ok++;
} else {
if (!(priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
- (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV4 |
-#if IS_ENABLED(CONFIG_IPV6)
- MLX4_CQE_STATUS_IPV6))))
-#else
- 0))))
-#endif
+ (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IP_ANY))))
goto csum_none;
if (check_csum(cqe, skb, va, dev->features))
goto csum_none;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index f470ae37d937..f7c81133594f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -608,6 +608,7 @@ struct mlx4_en_priv {
struct mlx4_en_flow_stats_tx tx_flowstats;
struct mlx4_en_port_stats port_stats;
struct mlx4_en_xdp_stats xdp_stats;
+ struct mlx4_en_phy_stats phy_stats;
struct mlx4_en_stats_bitmap stats_bitmap;
struct list_head mc_list;
struct list_head curr_list;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
index aab28eb27a30..86b6051da8ec 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_stats.h
@@ -63,6 +63,14 @@ struct mlx4_en_xdp_stats {
#define NUM_XDP_STATS 3
};
+struct mlx4_en_phy_stats {
+ unsigned long rx_packets_phy;
+ unsigned long rx_bytes_phy;
+ unsigned long tx_packets_phy;
+ unsigned long tx_bytes_phy;
+#define NUM_PHY_STATS 4
+};
+
#define NUM_MAIN_STATS 21
#define MLX4_NUM_PRIORITIES 8
@@ -116,7 +124,7 @@ enum {
#define NUM_ALL_STATS (NUM_MAIN_STATS + NUM_PORT_STATS + NUM_PKT_STATS + \
NUM_FLOW_STATS + NUM_PERF_STATS + NUM_PF_STATS + \
- NUM_XDP_STATS)
+ NUM_XDP_STATS + NUM_PHY_STATS)
#define MLX4_FIND_NETDEV_STAT(n) (offsetof(struct net_device_stats, n) / \
sizeof(((struct net_device_stats *)0)->n))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 47239bf7bf43..323ffe8bf7e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -71,19 +71,24 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
}
int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
- struct mlx5_buf *buf, int node)
+ struct mlx5_frag_buf *buf, int node)
{
dma_addr_t t;
buf->size = size;
buf->npages = 1;
buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
- buf->direct.buf = mlx5_dma_zalloc_coherent_node(dev, size,
- &t, node);
- if (!buf->direct.buf)
+
+ buf->frags = kzalloc(sizeof(*buf->frags), GFP_KERNEL);
+ if (!buf->frags)
return -ENOMEM;
- buf->direct.map = t;
+ buf->frags->buf = mlx5_dma_zalloc_coherent_node(dev, size,
+ &t, node);
+ if (!buf->frags->buf)
+ goto err_out;
+
+ buf->frags->map = t;
while (t & ((1 << buf->page_shift) - 1)) {
--buf->page_shift;
@@ -91,18 +96,24 @@ int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
}
return 0;
+err_out:
+ kfree(buf->frags);
+ return -ENOMEM;
}
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
+int mlx5_buf_alloc(struct mlx5_core_dev *dev,
+ int size, struct mlx5_frag_buf *buf)
{
return mlx5_buf_alloc_node(dev, size, buf, dev->priv.numa_node);
}
-EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
+EXPORT_SYMBOL(mlx5_buf_alloc);
-void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
+void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
{
- dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
- buf->direct.map);
+ dma_free_coherent(&dev->pdev->dev, buf->size, buf->frags->buf,
+ buf->frags->map);
+
+ kfree(buf->frags);
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);
@@ -147,6 +158,7 @@ err_free_buf:
err_out:
return -ENOMEM;
}
+EXPORT_SYMBOL_GPL(mlx5_frag_buf_alloc_node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
{
@@ -162,6 +174,7 @@ void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
}
kfree(buf->frags);
}
+EXPORT_SYMBOL_GPL(mlx5_frag_buf_free);
static struct mlx5_db_pgdir *mlx5_alloc_db_pgdir(struct mlx5_core_dev *dev,
int node)
@@ -275,13 +288,13 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
}
EXPORT_SYMBOL_GPL(mlx5_db_free);
-void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
+void mlx5_fill_page_array(struct mlx5_frag_buf *buf, __be64 *pas)
{
u64 addr;
int i;
for (i = 0; i < buf->npages; i++) {
- addr = buf->direct.map + (i << buf->page_shift);
+ addr = buf->frags->map + (i << buf->page_shift);
pas[i] = cpu_to_be64(addr);
}
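The accesses above (buf->frags->buf, buf->frags->map) assume struct mlx5_frag_buf keeps its fragments in an array of CPU-address/DMA-handle pairs, with a single-page buffer occupying frags[0] only. A sketch of the layout relied on here; the authoritative definition lives in include/linux/mlx5/driver.h, outside this diff:

  struct mlx5_buf_list {
          void            *buf;   /* CPU address of the fragment */
          dma_addr_t      map;    /* DMA address of the fragment */
  };

  struct mlx5_frag_buf {
          struct mlx5_buf_list    *frags; /* one entry per fragment */
          int                     npages;
          int                     size;
          u8                      page_shift;
  };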
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 1016e05c7ec7..669ed16938b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -58,8 +58,7 @@ void mlx5_cq_tasklet_cb(unsigned long data)
tasklet_ctx.list) {
list_del_init(&mcq->tasklet_ctx.list);
mcq->tasklet_ctx.comp(mcq);
- if (refcount_dec_and_test(&mcq->refcount))
- complete(&mcq->free);
+ mlx5_cq_put(mcq);
if (time_after(jiffies, end))
break;
}
@@ -80,69 +79,19 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
* still arrive.
*/
if (list_empty_careful(&cq->tasklet_ctx.list)) {
- refcount_inc(&cq->refcount);
+ mlx5_cq_hold(cq);
list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
}
spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
}
-void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
-{
- struct mlx5_core_cq *cq;
- struct mlx5_cq_table *table = &dev->priv.cq_table;
-
- spin_lock(&table->lock);
- cq = radix_tree_lookup(&table->tree, cqn);
- if (likely(cq))
- refcount_inc(&cq->refcount);
- spin_unlock(&table->lock);
-
- if (!cq) {
- mlx5_core_warn(dev, "Completion event for bogus CQ 0x%x\n", cqn);
- return;
- }
-
- ++cq->arm_sn;
-
- cq->comp(cq);
-
- if (refcount_dec_and_test(&cq->refcount))
- complete(&cq->free);
-}
-
-void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
-{
- struct mlx5_cq_table *table = &dev->priv.cq_table;
- struct mlx5_core_cq *cq;
-
- spin_lock(&table->lock);
-
- cq = radix_tree_lookup(&table->tree, cqn);
- if (cq)
- refcount_inc(&cq->refcount);
-
- spin_unlock(&table->lock);
-
- if (!cq) {
- mlx5_core_warn(dev, "Async event for bogus CQ 0x%x\n", cqn);
- return;
- }
-
- cq->event(cq, event_type);
-
- if (refcount_dec_and_test(&cq->refcount))
- complete(&cq->free);
-}
-
int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
u32 *in, int inlen)
{
- struct mlx5_cq_table *table = &dev->priv.cq_table;
+ int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn);
+ u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
u32 out[MLX5_ST_SZ_DW(create_cq_out)];
u32 din[MLX5_ST_SZ_DW(destroy_cq_in)];
- u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)];
- int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context),
- c_eqn);
struct mlx5_eq *eq;
int err;
@@ -159,7 +108,9 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
cq->cqn = MLX5_GET(create_cq_out, out, cqn);
cq->cons_index = 0;
cq->arm_sn = 0;
- refcount_set(&cq->refcount, 1);
+ cq->eq = eq;
+ refcount_set(&cq->refcount, 0);
+ mlx5_cq_hold(cq);
init_completion(&cq->free);
if (!cq->comp)
cq->comp = mlx5_add_cq_to_tasklet;
@@ -167,12 +118,16 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
cq->tasklet_ctx.priv = &eq->tasklet_ctx;
INIT_LIST_HEAD(&cq->tasklet_ctx.list);
- spin_lock_irq(&table->lock);
- err = radix_tree_insert(&table->tree, cq->cqn, cq);
- spin_unlock_irq(&table->lock);
+ /* Add to comp EQ CQ tree to recv comp events */
+ err = mlx5_eq_add_cq(eq, cq);
if (err)
goto err_cmd;
+ /* Add to async EQ CQ tree to recv async events */
+ err = mlx5_eq_add_cq(&dev->priv.eq_table.async_eq, cq);
+ if (err)
+ goto err_cq_add;
+
cq->pid = current->pid;
err = mlx5_debug_cq_add(dev, cq);
if (err)
@@ -183,6 +138,8 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
return 0;
+err_cq_add:
+ mlx5_eq_del_cq(eq, cq);
err_cmd:
memset(din, 0, sizeof(din));
memset(dout, 0, sizeof(dout));
@@ -195,23 +152,17 @@ EXPORT_SYMBOL(mlx5_core_create_cq);
int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
{
- struct mlx5_cq_table *table = &dev->priv.cq_table;
u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};
u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
- struct mlx5_core_cq *tmp;
int err;
- spin_lock_irq(&table->lock);
- tmp = radix_tree_delete(&table->tree, cq->cqn);
- spin_unlock_irq(&table->lock);
- if (!tmp) {
- mlx5_core_warn(dev, "cq 0x%x not found in tree\n", cq->cqn);
- return -EINVAL;
- }
- if (tmp != cq) {
- mlx5_core_warn(dev, "corruption on srqn 0x%x\n", cq->cqn);
- return -EINVAL;
- }
+ err = mlx5_eq_del_cq(&dev->priv.eq_table.async_eq, cq);
+ if (err)
+ return err;
+
+ err = mlx5_eq_del_cq(cq->eq, cq);
+ if (err)
+ return err;
MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);
@@ -222,8 +173,7 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
synchronize_irq(cq->irqn);
mlx5_debug_cq_remove(dev, cq);
- if (refcount_dec_and_test(&cq->refcount))
- complete(&cq->free);
+ mlx5_cq_put(cq);
wait_for_completion(&cq->free);
return 0;
@@ -270,21 +220,3 @@ int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
return mlx5_core_modify_cq(dev, cq, in, sizeof(in));
}
EXPORT_SYMBOL(mlx5_core_modify_cq_moderation);
-
-int mlx5_init_cq_table(struct mlx5_core_dev *dev)
-{
- struct mlx5_cq_table *table = &dev->priv.cq_table;
- int err;
-
- memset(table, 0, sizeof(*table));
- spin_lock_init(&table->lock);
- INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
- err = mlx5_cq_debugfs_init(dev);
-
- return err;
-}
-
-void mlx5_cleanup_cq_table(struct mlx5_core_dev *dev)
-{
- mlx5_cq_debugfs_cleanup(dev);
-}
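mlx5_cq_hold() and mlx5_cq_put() replace the open-coded refcount_inc() and refcount_dec_and_test()/complete() pairs deleted above. Their definitions are not in this excerpt (they belong with the CQ declarations in include/linux/mlx5/cq.h); given the code they replace, they presumably reduce to:

  static inline void mlx5_cq_hold(struct mlx5_core_cq *cq)
  {
          refcount_inc(&cq->refcount);
  }

  static inline void mlx5_cq_put(struct mlx5_core_cq *cq)
  {
          /* dropping the last reference wakes mlx5_core_destroy_cq() */
          if (refcount_dec_and_test(&cq->refcount))
                  complete(&cq->free);
  }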
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index 17b723218b0c..b994b80d5714 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -337,6 +337,14 @@ void mlx5_unregister_interface(struct mlx5_interface *intf)
}
EXPORT_SYMBOL(mlx5_unregister_interface);
+void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol)
+{
+ mutex_lock(&mlx5_intf_mutex);
+ mlx5_remove_dev_by_protocol(mdev, protocol);
+ mlx5_add_dev_by_protocol(mdev, protocol);
+ mutex_unlock(&mlx5_intf_mutex);
+}
+
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
{
struct mlx5_priv *priv = &mdev->priv;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 363d8dcb7f17..ea4b255380a2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1156,6 +1156,15 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
kfree(ppriv); /* mlx5e_rep_priv */
}
+static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
+{
+ struct mlx5e_rep_priv *rpriv;
+
+ rpriv = mlx5e_rep_to_rep_priv(rep);
+
+ return rpriv->netdev;
+}
+
static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
@@ -1168,6 +1177,7 @@ static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
rep_if.load = mlx5e_vport_rep_load;
rep_if.unload = mlx5e_vport_rep_unload;
+ rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;
mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
}
}
@@ -1195,6 +1205,7 @@ void mlx5e_register_vport_reps(struct mlx5e_priv *priv)
rep_if.load = mlx5e_nic_rep_load;
rep_if.unload = mlx5e_nic_rep_unload;
+ rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;
rep_if.priv = rpriv;
INIT_LIST_HEAD(&rpriv->vport_sqs_list);
mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_ETH); /* UPLINK PF vport */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index e5c3ab46a24a..8cce90dc461d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -53,7 +53,7 @@ static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc,
void *data)
{
- u32 ci = cqcc & cq->wq.sz_m1;
+ u32 ci = cqcc & cq->wq.fbc.sz_m1;
memcpy(data, mlx5_cqwq_get_wqe(&cq->wq, ci), sizeof(struct mlx5_cqe64));
}
@@ -75,9 +75,10 @@ static inline void mlx5e_read_mini_arr_slot(struct mlx5e_cq *cq, u32 cqcc)
static inline void mlx5e_cqes_update_owner(struct mlx5e_cq *cq, u32 cqcc, int n)
{
- u8 op_own = (cqcc >> cq->wq.log_sz) & 1;
- u32 wq_sz = 1 << cq->wq.log_sz;
- u32 ci = cqcc & cq->wq.sz_m1;
+ struct mlx5_frag_buf_ctrl *fbc = &cq->wq.fbc;
+ u8 op_own = (cqcc >> fbc->log_sz) & 1;
+ u32 wq_sz = 1 << fbc->log_sz;
+ u32 ci = cqcc & fbc->sz_m1;
u32 ci_top = min_t(u32, wq_sz, ci + n);
for (; ci < ci_top; ci++, n--) {
@@ -102,7 +103,7 @@ static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
cq->title.byte_cnt = cq->mini_arr[cq->mini_arr_idx].byte_cnt;
cq->title.check_sum = cq->mini_arr[cq->mini_arr_idx].checksum;
cq->title.op_own &= 0xf0;
- cq->title.op_own |= 0x01 & (cqcc >> cq->wq.log_sz);
+ cq->title.op_own |= 0x01 & (cqcc >> cq->wq.fbc.log_sz);
cq->title.wqe_counter = cpu_to_be16(cq->decmprs_wqe_counter);
if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 25106e996a96..c1c94974e16b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -393,6 +393,51 @@ static void general_event_handler(struct mlx5_core_dev *dev,
}
}
+/* caller must eventually call mlx5_cq_put on the returned cq */
+static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)
+{
+ struct mlx5_cq_table *table = &eq->cq_table;
+ struct mlx5_core_cq *cq = NULL;
+
+ spin_lock(&table->lock);
+ cq = radix_tree_lookup(&table->tree, cqn);
+ if (likely(cq))
+ mlx5_cq_hold(cq);
+ spin_unlock(&table->lock);
+
+ return cq;
+}
+
+static void mlx5_eq_cq_completion(struct mlx5_eq *eq, u32 cqn)
+{
+ struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);
+
+ if (unlikely(!cq)) {
+ mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn);
+ return;
+ }
+
+ ++cq->arm_sn;
+
+ cq->comp(cq);
+
+ mlx5_cq_put(cq);
+}
+
+static void mlx5_eq_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type)
+{
+ struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn);
+
+ if (unlikely(!cq)) {
+ mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn);
+ return;
+ }
+
+ cq->event(cq, event_type);
+
+ mlx5_cq_put(cq);
+}
+
static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
{
struct mlx5_eq *eq = eq_ptr;
@@ -415,7 +460,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
switch (eqe->type) {
case MLX5_EVENT_TYPE_COMP:
cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
- mlx5_cq_completion(dev, cqn);
+ mlx5_eq_cq_completion(eq, cqn);
break;
case MLX5_EVENT_TYPE_DCT_DRAINED:
rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
@@ -472,7 +517,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n",
cqn, eqe->data.cq_err.syndrome);
- mlx5_cq_event(dev, cqn, eqe->type);
+ mlx5_eq_cq_event(eq, cqn, eqe->type);
break;
case MLX5_EVENT_TYPE_PAGE_REQUEST:
@@ -567,6 +612,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
int nent, u64 mask, const char *name,
enum mlx5_eq_type type)
{
+ struct mlx5_cq_table *cq_table = &eq->cq_table;
u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0};
struct mlx5_priv *priv = &dev->priv;
irq_handler_t handler;
@@ -576,6 +622,11 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
u32 *in;
int err;
+ /* Init CQ table */
+ memset(cq_table, 0, sizeof(*cq_table));
+ spin_lock_init(&cq_table->lock);
+ INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC);
+
eq->type = type;
eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
eq->cons_index = 0;
@@ -669,7 +720,6 @@ err_buf:
mlx5_buf_free(dev, &eq->buf);
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_create_map_eq);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
@@ -696,7 +746,40 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
return err;
}
-EXPORT_SYMBOL_GPL(mlx5_destroy_unmap_eq);
+
+int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
+{
+ struct mlx5_cq_table *table = &eq->cq_table;
+ int err;
+
+ spin_lock_irq(&table->lock);
+ err = radix_tree_insert(&table->tree, cq->cqn, cq);
+ spin_unlock_irq(&table->lock);
+
+ return err;
+}
+
+int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq)
+{
+ struct mlx5_cq_table *table = &eq->cq_table;
+ struct mlx5_core_cq *tmp;
+
+ spin_lock_irq(&table->lock);
+ tmp = radix_tree_delete(&table->tree, cq->cqn);
+ spin_unlock_irq(&table->lock);
+
+ if (!tmp) {
+		mlx5_core_warn(eq->dev, "cq 0x%x not found in eq 0x%x tree\n", cq->cqn, eq->eqn);
+ return -ENOENT;
+ }
+
+ if (tmp != cq) {
+		mlx5_core_warn(eq->dev, "corruption on cqn 0x%x in eq 0x%x\n", cq->cqn, eq->eqn);
+ return -EINVAL;
+ }
+
+ return 0;
+}
int mlx5_eq_init(struct mlx5_core_dev *dev)
{
@@ -840,4 +923,3 @@ int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
MLX5_SET(query_eq_in, in, eq_number, eq->eqn);
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
-EXPORT_SYMBOL_GPL(mlx5_core_eq_query);
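The eq->cq_table that mlx5_eq_add_cq() and mlx5_eq_del_cq() manipulate replaces the old device-global dev->priv.cq_table. Its shape follows directly from the initialization added in mlx5_create_map_eq() above: a spinlock-protected radix tree keyed by CQN, embedded per EQ. A sketch (the exact field placement in struct mlx5_eq is an assumption):

  struct mlx5_cq_table {
          spinlock_t              lock;   /* protects tree */
          struct radix_tree_root  tree;   /* cqn -> struct mlx5_core_cq */
  };

  struct mlx5_eq {
          /* ... existing fields unchanged ... */
          struct mlx5_cq_table    cq_table;
  };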
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index c2b1d7d351fc..77b7272eaaa8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1619,10 +1619,14 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
esw->mode = mode;
- if (mode == SRIOV_LEGACY)
+ if (mode == SRIOV_LEGACY) {
err = esw_create_legacy_fdb_table(esw, nvfs + 1);
- else
+ } else {
+ mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+
err = esw_offloads_init(esw, nvfs + 1);
+ }
+
if (err)
goto abort;
@@ -1644,12 +1648,17 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
abort:
esw->mode = SRIOV_NONE;
+
+ if (mode == SRIOV_OFFLOADS)
+ mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+
return err;
}
void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
{
struct esw_mc_addr *mc_promisc;
+ int old_mode;
int nvports;
int i;
@@ -1675,7 +1684,11 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
else if (esw->mode == SRIOV_OFFLOADS)
esw_offloads_cleanup(esw, nvports);
+ old_mode = esw->mode;
esw->mode = SRIOV_NONE;
+
+ if (old_mode == SRIOV_OFFLOADS)
+ mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
}
int mlx5_eswitch_init(struct mlx5_core_dev *dev)
@@ -2175,3 +2188,9 @@ free_out:
kvfree(out);
return err;
}
+
+u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
+{
+ return esw->mode;
+}
+EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 2fa037066b2f..98d2177d0806 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -37,19 +37,9 @@
#include <linux/if_link.h>
#include <net/devlink.h>
#include <linux/mlx5/device.h>
+#include <linux/mlx5/eswitch.h>
#include "lib/mpfs.h"
-enum {
- SRIOV_NONE,
- SRIOV_LEGACY,
- SRIOV_OFFLOADS
-};
-
-enum {
- REP_ETH,
- NUM_REP_TYPES,
-};
-
#ifdef CONFIG_MLX5_ESWITCH
#define MLX5_MAX_UC_PER_VPORT(dev) \
@@ -139,29 +129,13 @@ struct mlx5_eswitch_fdb {
struct mlx5_flow_table *fdb;
struct mlx5_flow_group *send_to_vport_grp;
struct mlx5_flow_group *miss_grp;
- struct mlx5_flow_handle *miss_rule;
+ struct mlx5_flow_handle *miss_rule_uni;
+ struct mlx5_flow_handle *miss_rule_multi;
int vlan_push_pop_refcount;
} offloads;
};
};
-struct mlx5_eswitch_rep;
-struct mlx5_eswitch_rep_if {
- int (*load)(struct mlx5_core_dev *dev,
- struct mlx5_eswitch_rep *rep);
- void (*unload)(struct mlx5_eswitch_rep *rep);
- void *priv;
- bool valid;
-};
-
-struct mlx5_eswitch_rep {
- struct mlx5_eswitch_rep_if rep_if[NUM_REP_TYPES];
- u16 vport;
- u8 hw_id[ETH_ALEN];
- u16 vlan;
- u32 vlan_refcount;
-};
-
struct mlx5_esw_offload {
struct mlx5_flow_table *ft_offloads;
struct mlx5_flow_group *vport_rx_group;
@@ -231,9 +205,6 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
int vport,
struct ifla_vf_stats *vf_stats);
-struct mlx5_flow_handle *
-mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport,
- u32 sqn);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);
struct mlx5_flow_spec;
@@ -278,13 +249,6 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode);
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode);
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap);
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap);
-void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
- int vport_index,
- struct mlx5_eswitch_rep_if *rep_if,
- u8 rep_type);
-void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
- int vport_index,
- u8 rep_type);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type);
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 99f583a15cc3..0a8303c1b52f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -338,6 +338,7 @@ out:
kvfree(spec);
return flow_rule;
}
+EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
@@ -350,7 +351,11 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle *flow_rule = NULL;
struct mlx5_flow_spec *spec;
+ void *headers_c;
+ void *headers_v;
int err = 0;
+ u8 *dmac_c;
+ u8 *dmac_v;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec) {
@@ -358,6 +363,13 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
goto out;
}
+ spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+ outer_headers);
+ dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
+ outer_headers.dmac_47_16);
+ dmac_c[0] = 0x01;
+
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport_num = 0;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
@@ -366,11 +378,28 @@ static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
&flow_act, &dest, 1);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
- esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
+ esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
goto out;
}
- esw->fdb_table.offloads.miss_rule = flow_rule;
+ esw->fdb_table.offloads.miss_rule_uni = flow_rule;
+
+ headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ outer_headers);
+ dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
+ outer_headers.dmac_47_16);
+ dmac_v[0] = 0x01;
+ flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
+ &flow_act, &dest, 1);
+ if (IS_ERR(flow_rule)) {
+ err = PTR_ERR(flow_rule);
+ esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
+ mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
+ goto out;
+ }
+
+ esw->fdb_table.offloads.miss_rule_multi = flow_rule;
+
out:
kvfree(spec);
return err;
@@ -426,6 +455,7 @@ static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
}
#define MAX_PF_SQ 256
+#define MAX_SQ_NVPORTS 32
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
@@ -438,6 +468,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
struct mlx5_flow_group *g;
void *match_criteria;
u32 *flow_group_in;
+ u8 *dmac;
esw_debug(esw->dev, "Create offloads FDB Tables\n");
flow_group_in = kvzalloc(inlen, GFP_KERNEL);
@@ -455,7 +486,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
if (err)
goto fast_fdb_err;
- table_size = nvports + MAX_PF_SQ + 1;
+ table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2;
ft_attr.max_fte = table_size;
ft_attr.prio = FDB_SLOW_PATH;
@@ -478,7 +509,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
- ix = nvports + MAX_PF_SQ;
+ ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);
@@ -492,10 +523,16 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
/* create miss group */
memset(flow_group_in, 0, inlen);
- MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);
+ MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
+ MLX5_MATCH_OUTER_HEADERS);
+ match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
+ match_criteria);
+ dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers.dmac_47_16);
+ dmac[0] = 0x01;
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
- MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);
+ MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 2);
g = mlx5_create_flow_group(fdb, flow_group_in);
if (IS_ERR(g)) {
@@ -531,7 +568,8 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
return;
esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
- mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
+ mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
+ mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
@@ -789,14 +827,9 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
int err;
- /* disable PF RoCE so missed packets don't go through RoCE steering */
- mlx5_dev_list_lock();
- mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
- mlx5_dev_list_unlock();
-
err = esw_create_offloads_fdb_tables(esw, nvports);
if (err)
- goto create_fdb_err;
+ return err;
err = esw_create_offloads_table(esw);
if (err)
@@ -821,12 +854,6 @@ create_fg_err:
create_ft_err:
esw_destroy_offloads_fdb_tables(esw);
-create_fdb_err:
- /* enable back PF RoCE */
- mlx5_dev_list_lock();
- mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
- mlx5_dev_list_unlock();
-
return err;
}
@@ -844,9 +871,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
}
/* enable back PF RoCE */
- mlx5_dev_list_lock();
- mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
- mlx5_dev_list_unlock();
+ mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
return err;
}
@@ -1160,10 +1185,12 @@ void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
rep_if->load = __rep_if->load;
rep_if->unload = __rep_if->unload;
+ rep_if->get_proto_dev = __rep_if->get_proto_dev;
rep_if->priv = __rep_if->priv;
rep_if->valid = true;
}
+EXPORT_SYMBOL(mlx5_eswitch_register_vport_rep);
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
int vport_index, u8 rep_type)
@@ -1178,6 +1205,7 @@ void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
rep->rep_if[rep_type].valid = false;
}
+EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep);
void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
@@ -1188,3 +1216,35 @@ void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
rep = &offloads->vport_reps[UPLINK_REP_INDEX];
return rep->rep_if[rep_type].priv;
}
+
+void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
+ int vport,
+ u8 rep_type)
+{
+ struct mlx5_esw_offload *offloads = &esw->offloads;
+ struct mlx5_eswitch_rep *rep;
+
+ if (vport == FDB_UPLINK_VPORT)
+ vport = UPLINK_REP_INDEX;
+
+ rep = &offloads->vport_reps[vport];
+
+ if (rep->rep_if[rep_type].valid &&
+ rep->rep_if[rep_type].get_proto_dev)
+ return rep->rep_if[rep_type].get_proto_dev(rep);
+ return NULL;
+}
+EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
+
+void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
+{
+ return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type);
+}
+EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
+
+struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
+ int vport)
+{
+ return &esw->offloads.vport_reps[vport];
+}
+EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
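The exports above let other drivers resolve a representor into its protocol device; mlx5e wires rep_if.get_proto_dev to return the representor netdev, so a hypothetical REP_ETH consumer (the function name here is illustrative) would be:

  /* Resolve the netdev backing a given vport's ethernet representor,
   * or NULL if no rep is registered or it lacks a get_proto_dev hook.
   */
  static struct net_device *rep_netdev(struct mlx5_eswitch *esw, int vport)
  {
          return mlx5_eswitch_get_proto_dev(esw, vport, REP_ETH);
  }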
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index ae391e4b7070..7142c90d4669 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -942,9 +942,9 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
goto out;
}
- err = mlx5_init_cq_table(dev);
+ err = mlx5_cq_debugfs_init(dev);
if (err) {
- dev_err(&pdev->dev, "failed to initialize cq table\n");
+ dev_err(&pdev->dev, "failed to initialize cq debugfs\n");
goto err_eq_cleanup;
}
@@ -1002,7 +1002,7 @@ err_tables_cleanup:
mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
- mlx5_cleanup_cq_table(dev);
+ mlx5_cq_debugfs_cleanup(dev);
err_eq_cleanup:
mlx5_eq_cleanup(dev);
@@ -1023,7 +1023,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
- mlx5_cleanup_cq_table(dev);
+ mlx5_cq_debugfs_cleanup(dev);
mlx5_eq_cleanup(dev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 394552f36fcf..4e25f2b2e0bc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -38,16 +38,11 @@
#include <linux/sched.h>
#include <linux/if_link.h>
#include <linux/firmware.h>
+#include <linux/mlx5/cq.h>
#define DRIVER_NAME "mlx5_core"
#define DRIVER_VERSION "5.0-0"
-#define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs(mdev->pdev))
-#define MLX5_VPORT_MANAGER(mdev) \
- (MLX5_CAP_GEN(mdev, vport_group_manager) && \
- (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \
- mlx5_core_is_pf(mdev))
-
extern uint mlx5_core_debug_mask;
#define mlx5_core_dbg(__dev, format, ...) \
@@ -115,9 +110,29 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
u32 element_id);
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev);
+
+int mlx5_eq_init(struct mlx5_core_dev *dev);
+void mlx5_eq_cleanup(struct mlx5_core_dev *dev);
+int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
+ int nent, u64 mask, const char *name,
+ enum mlx5_eq_type type);
+int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
+int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq);
+int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq,
+ u32 *out, int outlen);
+int mlx5_start_eqs(struct mlx5_core_dev *dev);
+void mlx5_stop_eqs(struct mlx5_core_dev *dev);
struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq);
void mlx5_cq_tasklet_cb(unsigned long data);
+void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
+int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
+int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev);
+int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev);
+void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev);
int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
u8 access_reg_group);
@@ -186,4 +201,5 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev)
int mlx5_lag_allow(struct mlx5_core_dev *dev);
int mlx5_lag_forbid(struct mlx5_core_dev *dev);
+void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol);
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 6bcfc25350f5..ea66448ba365 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -41,7 +41,7 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
{
- return wq->sz_m1 + 1;
+ return wq->fbc.sz_m1 + 1;
}
u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
@@ -62,7 +62,7 @@ static u32 mlx5_wq_qp_get_byte_size(struct mlx5_wq_qp *wq)
static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
{
- return mlx5_cqwq_get_size(wq) << wq->log_stride;
+ return mlx5_cqwq_get_size(wq) << wq->fbc.log_stride;
}
static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
@@ -92,7 +92,7 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
goto err_db_free;
}
- wq->buf = wq_ctrl->buf.direct.buf;
+ wq->buf = wq_ctrl->buf.frags->buf;
wq->db = wq_ctrl->db.db;
wq_ctrl->mdev = mdev;
@@ -130,7 +130,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
goto err_db_free;
}
- wq->rq.buf = wq_ctrl->buf.direct.buf;
+ wq->rq.buf = wq_ctrl->buf.frags->buf;
wq->sq.buf = wq->rq.buf + mlx5_wq_cyc_get_byte_size(&wq->rq);
wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR];
wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR];
@@ -151,11 +151,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
{
int err;
- wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
- wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
- wq->sz_m1 = (1 << wq->log_sz) - 1;
- wq->log_frag_strides = PAGE_SHIFT - wq->log_stride;
- wq->frag_sz_m1 = (1 << wq->log_frag_strides) - 1;
+ mlx5_core_init_cq_frag_buf(&wq->fbc, cqc);
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
@@ -172,7 +168,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
goto err_db_free;
}
- wq->frag_buf = wq_ctrl->frag_buf;
+ wq->fbc.frag_buf = wq_ctrl->frag_buf;
wq->db = wq_ctrl->db.db;
wq_ctrl->mdev = mdev;
@@ -209,7 +205,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
goto err_db_free;
}
- wq->buf = wq_ctrl->buf.direct.buf;
+ wq->buf = wq_ctrl->buf.frags->buf;
wq->db = wq_ctrl->db.db;
for (i = 0; i < wq->sz_m1; i++) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index 718589d0cec2..fca90b94596d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -45,7 +45,7 @@ struct mlx5_wq_param {
struct mlx5_wq_ctrl {
struct mlx5_core_dev *mdev;
- struct mlx5_buf buf;
+ struct mlx5_frag_buf buf;
struct mlx5_db db;
};
@@ -68,14 +68,9 @@ struct mlx5_wq_qp {
};
struct mlx5_cqwq {
- struct mlx5_frag_buf frag_buf;
- __be32 *db;
- u32 sz_m1;
- u32 frag_sz_m1;
- u32 cc; /* consumer counter */
- u8 log_sz;
- u8 log_stride;
- u8 log_frag_strides;
+ struct mlx5_frag_buf_ctrl fbc;
+ __be32 *db;
+ u32 cc; /* consumer counter */
};
struct mlx5_wq_ll {
@@ -131,20 +126,17 @@ static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
{
- return wq->cc & wq->sz_m1;
+ return wq->cc & wq->fbc.sz_m1;
}
static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
{
- unsigned int frag = (ix >> wq->log_frag_strides);
-
- return wq->frag_buf.frags[frag].buf +
- ((wq->frag_sz_m1 & ix) << wq->log_stride);
+ return mlx5_frag_buf_get_wqe(&wq->fbc, ix);
}
static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq)
{
- return wq->cc >> wq->log_sz;
+ return wq->cc >> wq->fbc.log_sz;
}
static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq)
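mlx5_core_init_cq_frag_buf() and mlx5_frag_buf_get_wqe(), which the converted wq code now calls, are defined outside this diff. Judging by the deleted mlx5_cqwq fields and the old mlx5_cqwq_get_wqe() body, they presumably look like:

  struct mlx5_frag_buf_ctrl {
          struct mlx5_frag_buf    frag_buf;
          u32                     sz_m1;
          u32                     frag_sz_m1;
          u8                      log_sz;
          u8                      log_stride;
          u8                      log_frag_strides;
  };

  static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
                                            u32 ix)
  {
          unsigned int frag = ix >> fbc->log_frag_strides;

          /* select the page-sized fragment, then the stride within it */
          return fbc->frag_buf.frags[frag].buf +
                 ((fbc->frag_sz_m1 & ix) << fbc->log_stride);
  }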
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index d56eea310509..93d97b4676eb 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -78,6 +78,10 @@ config MLXSW_SPECTRUM
depends on IPV6 || IPV6=n
select PARMAN
select MLXFW
+ depends on NET_IPGRE
+ depends on !(MLXSW_CORE=y && NET_IPGRE=m)
+ depends on IPV6_GRE
+ depends on !(MLXSW_CORE=y && IPV6_GRE=m)
default m
---help---
This driver supports Mellanox Technologies Spectrum Ethernet
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile
index 9463c3fa254f..0cadcabfe86f 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Makefile
+++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile
@@ -20,7 +20,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \
spectrum_cnt.o spectrum_fid.o \
spectrum_ipip.o spectrum_acl_flex_actions.o \
spectrum_mr.o spectrum_mr_tcam.o \
- spectrum_qdisc.o
+ spectrum_qdisc.o spectrum_span.o
mlxsw_spectrum-$(CONFIG_MLXSW_SPECTRUM_DCB) += spectrum_dcb.o
mlxsw_spectrum-$(CONFIG_NET_DEVLINK) += spectrum_dpipe.o
obj-$(CONFIG_MLXSW_MINIMAL) += mlxsw_minimal.o
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
index b698fb481b2e..ba338428ffd1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
@@ -1,6 +1,6 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017, 2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
@@ -838,7 +838,6 @@ struct mlxsw_afa_mirror {
struct mlxsw_afa_resource resource;
int span_id;
u8 local_in_port;
- u8 local_out_port;
bool ingress;
};
@@ -848,7 +847,7 @@ mlxsw_afa_mirror_destroy(struct mlxsw_afa_block *block,
{
block->afa->ops->mirror_del(block->afa->ops_priv,
mirror->local_in_port,
- mirror->local_out_port,
+ mirror->span_id,
mirror->ingress);
kfree(mirror);
}
@@ -864,9 +863,8 @@ mlxsw_afa_mirror_destructor(struct mlxsw_afa_block *block,
}
static struct mlxsw_afa_mirror *
-mlxsw_afa_mirror_create(struct mlxsw_afa_block *block,
- u8 local_in_port, u8 local_out_port,
- bool ingress)
+mlxsw_afa_mirror_create(struct mlxsw_afa_block *block, u8 local_in_port,
+ const struct net_device *out_dev, bool ingress)
{
struct mlxsw_afa_mirror *mirror;
int err;
@@ -876,13 +874,12 @@ mlxsw_afa_mirror_create(struct mlxsw_afa_block *block,
return ERR_PTR(-ENOMEM);
err = block->afa->ops->mirror_add(block->afa->ops_priv,
- local_in_port, local_out_port,
+ local_in_port, out_dev,
ingress, &mirror->span_id);
if (err)
goto err_mirror_add;
mirror->ingress = ingress;
- mirror->local_out_port = local_out_port;
mirror->local_in_port = local_in_port;
mirror->resource.destructor = mlxsw_afa_mirror_destructor;
mlxsw_afa_resource_add(block, &mirror->resource);
@@ -909,13 +906,13 @@ mlxsw_afa_block_append_allocated_mirror(struct mlxsw_afa_block *block,
}
int
-mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
- u8 local_in_port, u8 local_out_port, bool ingress)
+mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block, u8 local_in_port,
+ const struct net_device *out_dev, bool ingress)
{
struct mlxsw_afa_mirror *mirror;
int err;
- mirror = mlxsw_afa_mirror_create(block, local_in_port, local_out_port,
+ mirror = mlxsw_afa_mirror_create(block, local_in_port, out_dev,
ingress);
if (IS_ERR(mirror))
return PTR_ERR(mirror);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
index 43132293475c..6dd601703c99 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_actions.h
@@ -36,6 +36,7 @@
#define _MLXSW_CORE_ACL_FLEX_ACTIONS_H
#include <linux/types.h>
+#include <linux/netdevice.h>
struct mlxsw_afa;
struct mlxsw_afa_block;
@@ -48,9 +49,10 @@ struct mlxsw_afa_ops {
void (*kvdl_fwd_entry_del)(void *priv, u32 kvdl_index);
int (*counter_index_get)(void *priv, unsigned int *p_counter_index);
void (*counter_index_put)(void *priv, unsigned int counter_index);
- int (*mirror_add)(void *priv, u8 locol_in_port, u8 local_out_port,
+ int (*mirror_add)(void *priv, u8 local_in_port,
+ const struct net_device *out_dev,
bool ingress, int *p_span_id);
- void (*mirror_del)(void *priv, u8 locol_in_port, u8 local_out_port,
+ void (*mirror_del)(void *priv, u8 local_in_port, int span_id,
bool ingress);
};
@@ -70,7 +72,8 @@ int mlxsw_afa_block_append_trap(struct mlxsw_afa_block *block, u16 trap_id);
int mlxsw_afa_block_append_trap_and_forward(struct mlxsw_afa_block *block,
u16 trap_id);
int mlxsw_afa_block_append_mirror(struct mlxsw_afa_block *block,
- u8 local_in_port, u8 local_out_port,
+ u8 local_in_port,
+ const struct net_device *out_dev,
bool ingress);
int mlxsw_afa_block_append_fwd(struct mlxsw_afa_block *block,
u8 local_port, bool in_port);
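On the Spectrum side, the reworked callbacks map naturally onto the SPAN API that spectrum.c now exposes. A plausible implementation sketch (the real one belongs in spectrum_acl_flex_actions.c, not shown here; the mlxsw_sp->ports[] lookup is an assumption):

  static int
  mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port,
                          const struct net_device *out_dev,
                          bool ingress, int *p_span_id)
  {
          struct mlxsw_sp *mlxsw_sp = priv;
          struct mlxsw_sp_port *in_port = mlxsw_sp->ports[local_in_port];
          enum mlxsw_sp_span_type type =
                  ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;

          /* bind=false: the ACL action, not a per-port trigger, drives this */
          return mlxsw_sp_span_mirror_add(in_port, out_dev, type,
                                          false, p_span_id);
  }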
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 0e08be41c8e0..cb5f77f09f8e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -1,11 +1,11 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/reg.h
- * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2015-2016 Ido Schimmel <idosch@mellanox.com>
* Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
* Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
* Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
- * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
+ * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -6772,8 +6772,104 @@ MLXSW_ITEM32(reg, mpat, qos, 0x04, 26, 1);
*/
MLXSW_ITEM32(reg, mpat, be, 0x04, 25, 1);
+enum mlxsw_reg_mpat_span_type {
+ /* Local SPAN Ethernet.
+ * The original packet is not encapsulated.
+ */
+ MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH = 0x0,
+
+ /* Encapsulated Remote SPAN Ethernet L3 GRE.
+ * The packet is encapsulated with GRE header.
+ */
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3 = 0x3,
+};
+
+/* reg_mpat_span_type
+ * SPAN type.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, span_type, 0x04, 0, 4);
+
+/* Remote SPAN - Ethernet VLAN
+ * - - - - - - - - - - - - - -
+ */
+
+/* reg_mpat_eth_rspan_vid
+ * Encapsulation header VLAN ID.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_vid, 0x18, 0, 12);
+
+/* Encapsulated Remote SPAN - Ethernet L2
+ * - - - - - - - - - - - - - - - - - - -
+ */
+
+enum mlxsw_reg_mpat_eth_rspan_version {
+ MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER = 15,
+};
+
+/* reg_mpat_eth_rspan_version
+ * RSPAN mirror header version.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_version, 0x10, 18, 4);
+
+/* reg_mpat_eth_rspan_mac
+ * Destination MAC address.
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, mpat, eth_rspan_mac, 0x12, 6);
+
+/* reg_mpat_eth_rspan_tp
+ * Tag Packet. Indicates whether the mirroring header should be VLAN tagged.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_tp, 0x18, 16, 1);
+
+/* Encapsulated Remote SPAN - Ethernet L3
+ * - - - - - - - - - - - - - - - - - - -
+ */
+
+enum mlxsw_reg_mpat_eth_rspan_protocol {
+ MLXSW_REG_MPAT_ETH_RSPAN_PROTOCOL_IPV4,
+ MLXSW_REG_MPAT_ETH_RSPAN_PROTOCOL_IPV6,
+};
+
+/* reg_mpat_eth_rspan_protocol
+ * SPAN encapsulation protocol.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_protocol, 0x18, 24, 4);
+
+/* reg_mpat_eth_rspan_ttl
+ * Encapsulation header Time-to-Live/HopLimit.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_ttl, 0x1C, 4, 8);
+
+/* reg_mpat_eth_rspan_smac
+ * Source MAC address
+ * Access: RW
+ */
+MLXSW_ITEM_BUF(reg, mpat, eth_rspan_smac, 0x22, 6);
+
+/* reg_mpat_eth_rspan_dip*
+ * Destination IP address. The IP version is configured by protocol.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_dip4, 0x4C, 0, 32);
+MLXSW_ITEM_BUF(reg, mpat, eth_rspan_dip6, 0x40, 16);
+
+/* reg_mpat_eth_rspan_sip*
+ * Source IP address. The IP version is configured by protocol.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpat, eth_rspan_sip4, 0x5C, 0, 32);
+MLXSW_ITEM_BUF(reg, mpat, eth_rspan_sip6, 0x50, 16);
+
static inline void mlxsw_reg_mpat_pack(char *payload, u8 pa_id,
- u16 system_port, bool e)
+ u16 system_port, bool e,
+ enum mlxsw_reg_mpat_span_type span_type)
{
MLXSW_REG_ZERO(mpat, payload);
mlxsw_reg_mpat_pa_id_set(payload, pa_id);
@@ -6781,6 +6877,49 @@ static inline void mlxsw_reg_mpat_pack(char *payload, u8 pa_id,
mlxsw_reg_mpat_e_set(payload, e);
mlxsw_reg_mpat_qos_set(payload, 1);
mlxsw_reg_mpat_be_set(payload, 1);
+ mlxsw_reg_mpat_span_type_set(payload, span_type);
+}
+
+static inline void mlxsw_reg_mpat_eth_rspan_pack(char *payload, u16 vid)
+{
+ mlxsw_reg_mpat_eth_rspan_vid_set(payload, vid);
+}
+
+static inline void
+mlxsw_reg_mpat_eth_rspan_l2_pack(char *payload,
+ enum mlxsw_reg_mpat_eth_rspan_version version,
+ const char *mac,
+ bool tp)
+{
+ mlxsw_reg_mpat_eth_rspan_version_set(payload, version);
+ mlxsw_reg_mpat_eth_rspan_mac_memcpy_to(payload, mac);
+ mlxsw_reg_mpat_eth_rspan_tp_set(payload, tp);
+}
+
+static inline void
+mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(char *payload, u8 ttl,
+ const char *smac,
+ u32 sip, u32 dip)
+{
+ mlxsw_reg_mpat_eth_rspan_ttl_set(payload, ttl);
+ mlxsw_reg_mpat_eth_rspan_smac_memcpy_to(payload, smac);
+ mlxsw_reg_mpat_eth_rspan_protocol_set(payload,
+ MLXSW_REG_MPAT_ETH_RSPAN_PROTOCOL_IPV4);
+ mlxsw_reg_mpat_eth_rspan_sip4_set(payload, sip);
+ mlxsw_reg_mpat_eth_rspan_dip4_set(payload, dip);
+}
+
+static inline void
+mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(char *payload, u8 ttl,
+ const char *smac,
+ struct in6_addr sip, struct in6_addr dip)
+{
+ mlxsw_reg_mpat_eth_rspan_ttl_set(payload, ttl);
+ mlxsw_reg_mpat_eth_rspan_smac_memcpy_to(payload, smac);
+ mlxsw_reg_mpat_eth_rspan_protocol_set(payload,
+ MLXSW_REG_MPAT_ETH_RSPAN_PROTOCOL_IPV6);
+ mlxsw_reg_mpat_eth_rspan_sip6_memcpy_to(payload, (void *)&sip);
+ mlxsw_reg_mpat_eth_rspan_dip6_memcpy_to(payload, (void *)&dip);
}
/* MPAR - Monitoring Port Analyzer Register
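Taken together, programming a plain local-SPAN analyzer (the only case the pre-existing spectrum.c callers covered) would now read roughly as follows; pa_id and local_port come from the caller's context:

  char mpat_pl[MLXSW_REG_MPAT_LEN];
  int err;

  mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
                      MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
  err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);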
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index c7e941aecc2a..7884e8a2de35 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -1,6 +1,6 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum.c
- * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
* Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
* Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
@@ -71,6 +71,7 @@
#include "spectrum_cnt.h"
#include "spectrum_dpipe.h"
#include "spectrum_acl_flex_actions.h"
+#include "spectrum_span.h"
#include "../mlxfw/mlxfw.h"
#define MLXSW_FWREV_MAJOR 13
@@ -487,327 +488,6 @@ static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
return 0;
}
-static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
-{
- int i;
-
- if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
- return -EIO;
-
- mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
- MAX_SPAN);
- mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
- sizeof(struct mlxsw_sp_span_entry),
- GFP_KERNEL);
- if (!mlxsw_sp->span.entries)
- return -ENOMEM;
-
- for (i = 0; i < mlxsw_sp->span.entries_count; i++)
- INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);
-
- return 0;
-}
-
-static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
-{
- int i;
-
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
-
- WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
- }
- kfree(mlxsw_sp->span.entries);
-}
-
-static struct mlxsw_sp_span_entry *
-mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
-{
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- struct mlxsw_sp_span_entry *span_entry;
- char mpat_pl[MLXSW_REG_MPAT_LEN];
- u8 local_port = port->local_port;
- int index;
- int i;
- int err;
-
- /* find a free entry to use */
- index = -1;
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- if (!mlxsw_sp->span.entries[i].used) {
- index = i;
- span_entry = &mlxsw_sp->span.entries[i];
- break;
- }
- }
- if (index < 0)
- return NULL;
-
- /* create a new port analayzer entry for local_port */
- mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
- if (err)
- return NULL;
-
- span_entry->used = true;
- span_entry->id = index;
- span_entry->ref_count = 1;
- span_entry->local_port = local_port;
- return span_entry;
-}
-
-static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_span_entry *span_entry)
-{
- u8 local_port = span_entry->local_port;
- char mpat_pl[MLXSW_REG_MPAT_LEN];
- int pa_id = span_entry->id;
-
- mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
- span_entry->used = false;
-}
-
-struct mlxsw_sp_span_entry *
-mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port)
-{
- int i;
-
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
-
- if (curr->used && curr->local_port == local_port)
- return curr;
- }
- return NULL;
-}
-
-static struct mlxsw_sp_span_entry
-*mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
-{
- struct mlxsw_sp_span_entry *span_entry;
-
- span_entry = mlxsw_sp_span_entry_find(port->mlxsw_sp,
- port->local_port);
- if (span_entry) {
- /* Already exists, just take a reference */
- span_entry->ref_count++;
- return span_entry;
- }
-
- return mlxsw_sp_span_entry_create(port);
-}
-
-static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_span_entry *span_entry)
-{
- WARN_ON(!span_entry->ref_count);
- if (--span_entry->ref_count == 0)
- mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
- return 0;
-}
-
-static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
-{
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- struct mlxsw_sp_span_inspected_port *p;
- int i;
-
- for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
- struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
-
- list_for_each_entry(p, &curr->bound_ports_list, list)
- if (p->local_port == port->local_port &&
- p->type == MLXSW_SP_SPAN_EGRESS)
- return true;
- }
-
- return false;
-}
-
-static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
- int mtu)
-{
- return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
-}
-
-static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
-{
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char sbib_pl[MLXSW_REG_SBIB_LEN];
- int err;
-
- /* If port is egress mirrored, the shared buffer size should be
- * updated according to the mtu value
- */
- if (mlxsw_sp_span_is_egress_mirror(port)) {
- u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);
-
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
- if (err) {
- netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
- return err;
- }
- }
-
- return 0;
-}
-
-static struct mlxsw_sp_span_inspected_port *
-mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
- struct mlxsw_sp_span_entry *span_entry)
-{
- struct mlxsw_sp_span_inspected_port *p;
-
- list_for_each_entry(p, &span_entry->bound_ports_list, list)
- if (port->local_port == p->local_port)
- return p;
- return NULL;
-}
-
-static int
-mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
- struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type,
- bool bind)
-{
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char mpar_pl[MLXSW_REG_MPAR_LEN];
- int pa_id = span_entry->id;
-
- /* bind the port to the SPAN entry */
- mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
- (enum mlxsw_reg_mpar_i_e) type, bind, pa_id);
- return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
-}
-
-static int
-mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
- struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type,
- bool bind)
-{
- struct mlxsw_sp_span_inspected_port *inspected_port;
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char sbib_pl[MLXSW_REG_SBIB_LEN];
- int err;
-
- /* if it is an egress SPAN, bind a shared buffer to it */
- if (type == MLXSW_SP_SPAN_EGRESS) {
- u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
- port->dev->mtu);
-
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
- err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
- if (err) {
- netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
- return err;
- }
- }
-
- if (bind) {
- err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
- true);
- if (err)
- goto err_port_bind;
- }
-
- inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
- if (!inspected_port) {
- err = -ENOMEM;
- goto err_inspected_port_alloc;
- }
- inspected_port->local_port = port->local_port;
- inspected_port->type = type;
- list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
-
- return 0;
-
-err_inspected_port_alloc:
- if (bind)
- mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
- false);
-err_port_bind:
- if (type == MLXSW_SP_SPAN_EGRESS) {
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
- }
- return err;
-}
-
-static void
-mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
- struct mlxsw_sp_span_entry *span_entry,
- enum mlxsw_sp_span_type type,
- bool bind)
-{
- struct mlxsw_sp_span_inspected_port *inspected_port;
- struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
- char sbib_pl[MLXSW_REG_SBIB_LEN];
-
- inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
- if (!inspected_port)
- return;
-
- if (bind)
- mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
- false);
- /* remove the SBIB buffer if it was egress SPAN */
- if (type == MLXSW_SP_SPAN_EGRESS) {
- mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
- mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
- }
-
- mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
-
- list_del(&inspected_port->list);
- kfree(inspected_port);
-}
-
-int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
- struct mlxsw_sp_port *to,
- enum mlxsw_sp_span_type type, bool bind)
-{
- struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
- struct mlxsw_sp_span_entry *span_entry;
- int err;
-
- span_entry = mlxsw_sp_span_entry_get(to);
- if (!span_entry)
- return -ENOENT;
-
- netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
- span_entry->id);
-
- err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
- if (err)
- goto err_port_bind;
-
- return 0;
-
-err_port_bind:
- mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
- return err;
-}
-
-void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, u8 destination_port,
- enum mlxsw_sp_span_type type, bool bind)
-{
- struct mlxsw_sp_span_entry *span_entry;
-
- span_entry = mlxsw_sp_span_entry_find(from->mlxsw_sp,
- destination_port);
- if (!span_entry) {
- netdev_err(from->dev, "no span entry found\n");
- return;
- }
-
- netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
- span_entry->id);
- mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
-}
-
static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool enable, u32 rate)
{
@@ -1360,6 +1040,16 @@ mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
xstats->tail_drop[i] =
mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
}
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
+ i, ppcnt_pl);
+ if (err)
+ continue;
+
+ xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
+ xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
+ }
}
static void update_stats_cache(struct work_struct *work)
@@ -1584,7 +1274,6 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
bool ingress)
{
enum mlxsw_sp_span_type span_type;
- struct mlxsw_sp_port *to_port;
struct net_device *to_dev;
to_dev = tcf_mirred_dev(a);
@@ -1593,17 +1282,10 @@ mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
return -EINVAL;
}
- if (!mlxsw_sp_port_dev_check(to_dev)) {
- netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
- return -EOPNOTSUPP;
- }
- to_port = netdev_priv(to_dev);
-
- mirror->to_local_port = to_port->local_port;
mirror->ingress = ingress;
span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type,
- true);
+ return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_dev, span_type,
+ true, &mirror->span_id);
}
static void
@@ -1614,7 +1296,7 @@ mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
span_type = mirror->ingress ?
MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->to_local_port,
+ mlxsw_sp_span_mirror_del(mlxsw_sp_port, mirror->span_id,
span_type, true);
}
@@ -4001,14 +3683,24 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_afa_init;
}
+ err = mlxsw_sp_span_init(mlxsw_sp);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
+ goto err_span_init;
+ }
+
+ /* Initialize router after SPAN is initialized, so that the FIB and
+ * neighbor event handlers can issue SPAN respin.
+ */
err = mlxsw_sp_router_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
goto err_router_init;
}
- /* Initialize netdevice notifier after router is initialized, so that
- * the event handler can use router structures.
+ /* Initialize netdevice notifier after router and SPAN are initialized,
+ * so that the event handler can use router structures and call SPAN
+ * respin.
*/
mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
err = register_netdevice_notifier(&mlxsw_sp->netdevice_nb);
@@ -4017,12 +3709,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
goto err_netdev_notifier;
}
- err = mlxsw_sp_span_init(mlxsw_sp);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
- goto err_span_init;
- }
-
err = mlxsw_sp_acl_init(mlxsw_sp);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
@@ -4048,12 +3734,12 @@ err_ports_create:
err_dpipe_init:
mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
- mlxsw_sp_span_fini(mlxsw_sp);
-err_span_init:
unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
err_netdev_notifier:
mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
+ mlxsw_sp_span_fini(mlxsw_sp);
+err_span_init:
mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
mlxsw_sp_counter_pool_fini(mlxsw_sp);
@@ -4079,9 +3765,9 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
mlxsw_sp_ports_remove(mlxsw_sp);
mlxsw_sp_dpipe_fini(mlxsw_sp);
mlxsw_sp_acl_fini(mlxsw_sp);
- mlxsw_sp_span_fini(mlxsw_sp);
unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb);
mlxsw_sp_router_fini(mlxsw_sp);
+ mlxsw_sp_span_fini(mlxsw_sp);
mlxsw_sp_afa_fini(mlxsw_sp);
mlxsw_sp_counter_pool_fini(mlxsw_sp);
mlxsw_sp_switchdev_fini(mlxsw_sp);
@@ -4124,70 +3810,6 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
.resource_query_enable = 1,
};
-static bool
-mlxsw_sp_resource_kvd_granularity_validate(struct netlink_ext_ack *extack,
- u64 size)
-{
- const struct mlxsw_config_profile *profile;
-
- profile = &mlxsw_sp_config_profile;
- if (size % profile->kvd_hash_granularity) {
- NL_SET_ERR_MSG_MOD(extack, "resource set with wrong granularity");
- return false;
- }
- return true;
-}
-
-static int
-mlxsw_sp_resource_kvd_size_validate(struct devlink *devlink, u64 size,
- struct netlink_ext_ack *extack)
-{
- NL_SET_ERR_MSG_MOD(extack, "kvd size cannot be changed");
- return -EINVAL;
-}
-
-static int
-mlxsw_sp_resource_kvd_linear_size_validate(struct devlink *devlink, u64 size,
- struct netlink_ext_ack *extack)
-{
- if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
- return -EINVAL;
-
- return 0;
-}
-
-static int
-mlxsw_sp_resource_kvd_hash_single_size_validate(struct devlink *devlink, u64 size,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
-
- if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
- return -EINVAL;
-
- if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE)) {
- NL_SET_ERR_MSG_MOD(extack, "hash single size is smaller than minimum");
- return -EINVAL;
- }
- return 0;
-}
-
-static int
-mlxsw_sp_resource_kvd_hash_double_size_validate(struct devlink *devlink, u64 size,
- struct netlink_ext_ack *extack)
-{
- struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
-
- if (!mlxsw_sp_resource_kvd_granularity_validate(extack, size))
- return -EINVAL;
-
- if (size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE)) {
- NL_SET_ERR_MSG_MOD(extack, "hash double size is smaller than minimum");
- return -EINVAL;
- }
- return 0;
-}
-
static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
@@ -4196,23 +3818,10 @@ static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink)
return mlxsw_sp_kvdl_occ_get(mlxsw_sp);
}
-static struct devlink_resource_ops mlxsw_sp_resource_kvd_ops = {
- .size_validate = mlxsw_sp_resource_kvd_size_validate,
-};
-
static struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = {
- .size_validate = mlxsw_sp_resource_kvd_linear_size_validate,
.occ_get = mlxsw_sp_resource_kvd_linear_occ_get,
};
-static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_single_ops = {
- .size_validate = mlxsw_sp_resource_kvd_hash_single_size_validate,
-};
-
-static struct devlink_resource_ops mlxsw_sp_resource_kvd_hash_double_ops = {
- .size_validate = mlxsw_sp_resource_kvd_hash_double_size_validate,
-};
-
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
struct devlink_resource_size_params *kvd_size_params,
@@ -4275,7 +3884,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
MLXSW_SP_RESOURCE_KVD,
DEVLINK_RESOURCE_ID_PARENT_TOP,
&kvd_size_params,
- &mlxsw_sp_resource_kvd_ops);
+ NULL);
if (err)
return err;
@@ -4289,6 +3898,10 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
if (err)
return err;
+ err = mlxsw_sp_kvdl_resources_register(devlink);
+ if (err)
+ return err;
+
double_size = kvd_size - linear_size;
double_size *= profile->kvd_hash_double_parts;
double_size /= profile->kvd_hash_double_parts +
@@ -4299,7 +3912,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
MLXSW_SP_RESOURCE_KVD,
&hash_double_size_params,
- &mlxsw_sp_resource_kvd_hash_double_ops);
+ NULL);
if (err)
return err;
@@ -4309,7 +3922,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
MLXSW_SP_RESOURCE_KVD,
&hash_single_size_params,
- &mlxsw_sp_resource_kvd_hash_single_ops);
+ NULL);
if (err)
return err;
@@ -4563,13 +4176,11 @@ mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
u16 lag_id;
if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
- NL_SET_ERR_MSG(extack,
- "spectrum: Exceeded number of supported LAG devices");
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
return false;
}
if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
- NL_SET_ERR_MSG(extack,
- "spectrum: LAG device using unsupported Tx type");
+ NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
return false;
}
return true;
@@ -4811,8 +4422,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
!netif_is_lag_master(upper_dev) &&
!netif_is_bridge_master(upper_dev) &&
!netif_is_ovs_master(upper_dev)) {
- NL_SET_ERR_MSG(extack,
- "spectrum: Unknown upper device type");
+ NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
return -EINVAL;
}
if (!info->linking)
@@ -4821,8 +4431,7 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
(!netif_is_bridge_master(upper_dev) ||
!mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
upper_dev))) {
- NL_SET_ERR_MSG(extack,
- "spectrum: Enslaving a port to a device that already has an upper device is not supported");
+ NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
}
if (netif_is_lag_master(upper_dev) &&
@@ -4830,24 +4439,20 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
info->upper_info, extack))
return -EINVAL;
if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
- NL_SET_ERR_MSG(extack,
- "spectrum: Master device is a LAG master and this device has a VLAN");
+ NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
return -EINVAL;
}
if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
!netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
- NL_SET_ERR_MSG(extack,
- "spectrum: Can not put a VLAN on a LAG port");
+ NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
return -EINVAL;
}
if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
- NL_SET_ERR_MSG(extack,
- "spectrum: Master device is an OVS master and this device has a VLAN");
+ NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
return -EINVAL;
}
if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
- NL_SET_ERR_MSG(extack,
- "spectrum: Can not put a VLAN on an OVS port");
+ NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
return -EINVAL;
}
break;
@@ -4960,7 +4565,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
case NETDEV_PRECHANGEUPPER:
upper_dev = info->upper_dev;
if (!netif_is_bridge_master(upper_dev)) {
- NL_SET_ERR_MSG(extack, "spectrum: VLAN devices only support bridge and VRF uppers");
+ NL_SET_ERR_MSG_MOD(extack, "VLAN devices only support bridge and VRF uppers");
return -EINVAL;
}
if (!info->linking)
@@ -4969,7 +4574,7 @@ static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
(!netif_is_bridge_master(upper_dev) ||
!mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
upper_dev))) {
- NL_SET_ERR_MSG(extack, "spectrum: Enslaving a port to a device that already has an upper device is not supported");
+ NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a device that already has an upper device is not supported");
return -EINVAL;
}
break;
@@ -5047,10 +4652,18 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct mlxsw_sp_span_entry *span_entry;
struct mlxsw_sp *mlxsw_sp;
int err = 0;
mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
+ if (event == NETDEV_UNREGISTER) {
+ span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
+ if (span_entry)
+ mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
+ }
+ mlxsw_sp_span_respin(mlxsw_sp);
+
if (mlxsw_sp_netdev_is_ipip_ol(mlxsw_sp, dev))
err = mlxsw_sp_netdevice_ipip_ol_event(mlxsw_sp, dev,
event, ptr);
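
For context, a SPAN "respin" re-resolves the destination parameters of every active SPAN entry and reprograms the hardware when they changed, since a FIB, neighbour or netdevice event can silently alter where mirrored traffic must egress. A minimal conceptual sketch, assuming entry fields named ref_count, to_dev, ops and parms (illustrative names, not guaranteed to match this patch):

static void span_respin_sketch(struct mlxsw_sp *mlxsw_sp)
{
	int i;

	for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
		struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
		struct mlxsw_sp_span_parms sparms = {0};

		if (!curr->ref_count)
			continue;
		/* Re-resolve where mirrored packets should go. */
		if (curr->ops->parms(curr->to_dev, &sparms))
			continue;
		if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
			curr->ops->deconfigure(curr);
			curr->ops->configure(curr, sparms);
			curr->parms = sparms;
		}
	}
}
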
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 4ec1ca3c96c8..92194a9b2caf 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -70,16 +70,23 @@
#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR "linear"
#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE "hash_single"
#define MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE "hash_double"
+#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES "singles"
+#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS "chunks"
+#define MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS "large_chunks"
enum mlxsw_sp_resource_id {
MLXSW_SP_RESOURCE_KVD,
MLXSW_SP_RESOURCE_KVD_LINEAR,
MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
};
struct mlxsw_sp_port;
struct mlxsw_sp_rif;
+struct mlxsw_sp_span_entry;
struct mlxsw_sp_upper {
struct net_device *dev;
@@ -111,32 +118,13 @@ struct mlxsw_sp_mid {
unsigned long *ports_in_mid; /* bits array */
};
-enum mlxsw_sp_span_type {
- MLXSW_SP_SPAN_EGRESS,
- MLXSW_SP_SPAN_INGRESS
-};
-
-struct mlxsw_sp_span_inspected_port {
- struct list_head list;
- enum mlxsw_sp_span_type type;
- u8 local_port;
-};
-
-struct mlxsw_sp_span_entry {
- u8 local_port;
- bool used;
- struct list_head bound_ports_list;
- int ref_count;
- int id;
-};
-
enum mlxsw_sp_port_mall_action_type {
MLXSW_SP_PORT_MALL_MIRROR,
MLXSW_SP_PORT_MALL_SAMPLE,
};
struct mlxsw_sp_port_mall_mirror_tc_entry {
- u8 to_local_port;
+ int span_id;
bool ingress;
};
@@ -223,6 +211,8 @@ struct mlxsw_sp_port_xstats {
u64 wred_drop[TC_MAX_QUEUE];
u64 tail_drop[TC_MAX_QUEUE];
u64 backlog[TC_MAX_QUEUE];
+ u64 tx_bytes[IEEE_8021QAZ_MAX_TCS];
+ u64 tx_packets[IEEE_8021QAZ_MAX_TCS];
};
struct mlxsw_sp_port {
@@ -260,6 +250,7 @@ struct mlxsw_sp_port {
struct mlxsw_sp_port_sample *sample;
struct list_head vlans_list;
struct mlxsw_sp_qdisc *root_qdisc;
+ struct mlxsw_sp_qdisc *tclass_qdiscs;
unsigned acl_rule_count;
struct mlxsw_sp_acl_block *ing_acl_block;
struct mlxsw_sp_acl_block *eg_acl_block;
@@ -397,16 +388,6 @@ struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev);
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port);
struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev);
-int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
- struct mlxsw_sp_port *to,
- enum mlxsw_sp_span_type type,
- bool bind);
-void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from,
- u8 destination_port,
- enum mlxsw_sp_span_type type,
- bool bind);
-struct mlxsw_sp_span_entry *
-mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port);
/* spectrum_dcb.c */
#ifdef CONFIG_MLXSW_SPECTRUM_DCB
@@ -462,6 +443,7 @@ int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
unsigned int entry_count,
unsigned int *p_alloc_size);
u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp);
+int mlxsw_sp_kvdl_resources_register(struct devlink *devlink);
struct mlxsw_sp_acl_rule_info {
unsigned int priority;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
index 0897a5435cc2..21ed27ae51e3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
@@ -572,7 +572,6 @@ int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
struct net_device *out_dev)
{
struct mlxsw_sp_acl_block_binding *binding;
- struct mlxsw_sp_port *out_port;
struct mlxsw_sp_port *in_port;
if (!list_is_singular(&block->binding_list))
@@ -581,16 +580,10 @@ int mlxsw_sp_acl_rulei_act_mirror(struct mlxsw_sp *mlxsw_sp,
binding = list_first_entry(&block->binding_list,
struct mlxsw_sp_acl_block_binding, list);
in_port = binding->mlxsw_sp_port;
- if (!mlxsw_sp_port_dev_check(out_dev))
- return -EINVAL;
-
- out_port = netdev_priv(out_dev);
- if (out_port->mlxsw_sp != mlxsw_sp)
- return -EINVAL;
return mlxsw_afa_block_append_mirror(rulei->act_block,
in_port->local_port,
- out_port->local_port,
+ out_dev,
binding->ingress);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
index 6ca6894125f0..510ce48d87f7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
@@ -1,6 +1,6 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017, 2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
* Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
*
@@ -35,6 +35,7 @@
#include "spectrum_acl_flex_actions.h"
#include "core_acl_flex_actions.h"
+#include "spectrum_span.h"
#define MLXSW_SP_KVDL_ACT_EXT_SIZE 1
@@ -125,40 +126,23 @@ mlxsw_sp_act_counter_index_put(void *priv, unsigned int counter_index)
}
static int
-mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port, u8 local_out_port,
+mlxsw_sp_act_mirror_add(void *priv, u8 local_in_port,
+ const struct net_device *out_dev,
bool ingress, int *p_span_id)
{
- struct mlxsw_sp_port *in_port, *out_port;
- struct mlxsw_sp_span_entry *span_entry;
+ struct mlxsw_sp_port *in_port;
struct mlxsw_sp *mlxsw_sp = priv;
enum mlxsw_sp_span_type type;
- int err;
type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
- out_port = mlxsw_sp->ports[local_out_port];
in_port = mlxsw_sp->ports[local_in_port];
- err = mlxsw_sp_span_mirror_add(in_port, out_port, type, false);
- if (err)
- return err;
-
- span_entry = mlxsw_sp_span_entry_find(mlxsw_sp, local_out_port);
- if (!span_entry) {
- err = -ENOENT;
- goto err_span_entry_find;
- }
-
- *p_span_id = span_entry->id;
- return 0;
-
-err_span_entry_find:
- mlxsw_sp_span_mirror_del(in_port, local_out_port, type, false);
- return err;
+ return mlxsw_sp_span_mirror_add(in_port, out_dev, type,
+ false, p_span_id);
}
static void
-mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, u8 local_out_port,
- bool ingress)
+mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, int span_id, bool ingress)
{
struct mlxsw_sp *mlxsw_sp = priv;
struct mlxsw_sp_port *in_port;
@@ -167,7 +151,7 @@ mlxsw_sp_act_mirror_del(void *priv, u8 local_in_port, u8 local_out_port,
type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
in_port = mlxsw_sp->ports[local_in_port];
- mlxsw_sp_span_mirror_del(in_port, local_out_port, type, false);
+ mlxsw_sp_span_mirror_del(in_port, span_id, type, false);
}
static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
index 7502e53447bd..98d896c14b87 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
@@ -1,7 +1,7 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -33,126 +33,125 @@
*/
#include <net/ip_tunnels.h>
+#include <net/ip6_tunnel.h>
#include "spectrum_ipip.h"
struct ip_tunnel_parm
-mlxsw_sp_ipip_netdev_parms(const struct net_device *ol_dev)
+mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev)
{
struct ip_tunnel *tun = netdev_priv(ol_dev);
return tun->parms;
}
-static bool mlxsw_sp_ipip_parms_has_ikey(struct ip_tunnel_parm parms)
+struct __ip6_tnl_parm
+mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev)
+{
+ struct ip6_tnl *tun = netdev_priv(ol_dev);
+
+ return tun->parms;
+}
+
+static bool mlxsw_sp_ipip_parms4_has_ikey(struct ip_tunnel_parm parms)
{
return !!(parms.i_flags & TUNNEL_KEY);
}
-static bool mlxsw_sp_ipip_parms_has_okey(struct ip_tunnel_parm parms)
+static bool mlxsw_sp_ipip_parms4_has_okey(struct ip_tunnel_parm parms)
{
return !!(parms.o_flags & TUNNEL_KEY);
}
-static u32 mlxsw_sp_ipip_parms_ikey(struct ip_tunnel_parm parms)
+static u32 mlxsw_sp_ipip_parms4_ikey(struct ip_tunnel_parm parms)
{
- return mlxsw_sp_ipip_parms_has_ikey(parms) ?
+ return mlxsw_sp_ipip_parms4_has_ikey(parms) ?
be32_to_cpu(parms.i_key) : 0;
}
-static u32 mlxsw_sp_ipip_parms_okey(struct ip_tunnel_parm parms)
+static u32 mlxsw_sp_ipip_parms4_okey(struct ip_tunnel_parm parms)
{
- return mlxsw_sp_ipip_parms_has_okey(parms) ?
+ return mlxsw_sp_ipip_parms4_has_okey(parms) ?
be32_to_cpu(parms.o_key) : 0;
}
-static __be32 mlxsw_sp_ipip_parms_saddr4(struct ip_tunnel_parm parms)
+static union mlxsw_sp_l3addr
+mlxsw_sp_ipip_parms4_saddr(struct ip_tunnel_parm parms)
{
- return parms.iph.saddr;
+ return (union mlxsw_sp_l3addr) { .addr4 = parms.iph.saddr };
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms_saddr(enum mlxsw_sp_l3proto proto,
- struct ip_tunnel_parm parms)
+mlxsw_sp_ipip_parms6_saddr(struct __ip6_tnl_parm parms)
{
- switch (proto) {
- case MLXSW_SP_L3_PROTO_IPV4:
- return (union mlxsw_sp_l3addr) {
- .addr4 = mlxsw_sp_ipip_parms_saddr4(parms),
- };
- case MLXSW_SP_L3_PROTO_IPV6:
- break;
- }
-
- WARN_ON(1);
- return (union mlxsw_sp_l3addr) {
- .addr4 = 0,
- };
+ return (union mlxsw_sp_l3addr) { .addr6 = parms.laddr };
}
-static __be32 mlxsw_sp_ipip_parms_daddr4(struct ip_tunnel_parm parms)
+static union mlxsw_sp_l3addr
+mlxsw_sp_ipip_parms4_daddr(struct ip_tunnel_parm parms)
{
- return parms.iph.daddr;
+ return (union mlxsw_sp_l3addr) { .addr4 = parms.iph.daddr };
}
static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_parms_daddr(enum mlxsw_sp_l3proto proto,
- struct ip_tunnel_parm parms)
+mlxsw_sp_ipip_parms6_daddr(struct __ip6_tnl_parm parms)
+{
+ return (union mlxsw_sp_l3addr) { .addr6 = parms.raddr };
+}
+
+union mlxsw_sp_l3addr
+mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
+ const struct net_device *ol_dev)
{
+ struct ip_tunnel_parm parms4;
+ struct __ip6_tnl_parm parms6;
+
switch (proto) {
case MLXSW_SP_L3_PROTO_IPV4:
- return (union mlxsw_sp_l3addr) {
- .addr4 = mlxsw_sp_ipip_parms_daddr4(parms),
- };
+ parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+ return mlxsw_sp_ipip_parms4_saddr(parms4);
case MLXSW_SP_L3_PROTO_IPV6:
- break;
+ parms6 = mlxsw_sp_ipip_netdev_parms6(ol_dev);
+ return mlxsw_sp_ipip_parms6_saddr(parms6);
}
WARN_ON(1);
- return (union mlxsw_sp_l3addr) {
- .addr4 = 0,
- };
-}
-
-static bool mlxsw_sp_ipip_netdev_has_ikey(const struct net_device *ol_dev)
-{
- return mlxsw_sp_ipip_parms_has_ikey(mlxsw_sp_ipip_netdev_parms(ol_dev));
+ return (union mlxsw_sp_l3addr) {0};
}
-static bool mlxsw_sp_ipip_netdev_has_okey(const struct net_device *ol_dev)
+static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev)
{
- return mlxsw_sp_ipip_parms_has_okey(mlxsw_sp_ipip_netdev_parms(ol_dev));
-}
-static u32 mlxsw_sp_ipip_netdev_ikey(const struct net_device *ol_dev)
-{
- return mlxsw_sp_ipip_parms_ikey(mlxsw_sp_ipip_netdev_parms(ol_dev));
-}
+ struct ip_tunnel_parm parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
-static u32 mlxsw_sp_ipip_netdev_okey(const struct net_device *ol_dev)
-{
- return mlxsw_sp_ipip_parms_okey(mlxsw_sp_ipip_netdev_parms(ol_dev));
+ return mlxsw_sp_ipip_parms4_daddr(parms4).addr4;
}
-union mlxsw_sp_l3addr
-mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
+static union mlxsw_sp_l3addr
+mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto,
const struct net_device *ol_dev)
{
- return mlxsw_sp_ipip_parms_saddr(proto,
- mlxsw_sp_ipip_netdev_parms(ol_dev));
-}
+ struct ip_tunnel_parm parms4;
+ struct __ip6_tnl_parm parms6;
-static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev)
-{
- return mlxsw_sp_ipip_parms_daddr4(mlxsw_sp_ipip_netdev_parms(ol_dev));
+ switch (proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+ return mlxsw_sp_ipip_parms4_daddr(parms4);
+ case MLXSW_SP_L3_PROTO_IPV6:
+ parms6 = mlxsw_sp_ipip_netdev_parms6(ol_dev);
+ return mlxsw_sp_ipip_parms6_daddr(parms6);
+ }
+
+ WARN_ON(1);
+ return (union mlxsw_sp_l3addr) {0};
}
-static union mlxsw_sp_l3addr
-mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto,
- const struct net_device *ol_dev)
+bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr)
{
- return mlxsw_sp_ipip_parms_daddr(proto,
- mlxsw_sp_ipip_netdev_parms(ol_dev));
+ union mlxsw_sp_l3addr naddr = {0};
+
+ return !memcmp(&addr, &naddr, sizeof(naddr));
}
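
A quick fragment showing the intent, with an arbitrary address picked for illustration: because the helper compares the whole union as raw bytes, it gives the right answer for both the IPv4 and IPv6 members, provided the unused bytes were zero-initialized (as designated initializers guarantee).

	union mlxsw_sp_l3addr a = { .addr4 = 0 };
	union mlxsw_sp_l3addr b = { .addr4 = htonl(0x0a000001) }; /* 10.0.0.1 */

	WARN_ON(!mlxsw_sp_l3addr_is_zero(a));	/* all-zero address */
	WARN_ON(mlxsw_sp_l3addr_is_zero(b));	/* non-zero address */
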
static int
@@ -176,12 +175,17 @@ mlxsw_sp_ipip_fib_entry_op_gre4_rtdp(struct mlxsw_sp *mlxsw_sp,
u32 tunnel_index,
struct mlxsw_sp_ipip_entry *ipip_entry)
{
- bool has_ikey = mlxsw_sp_ipip_netdev_has_ikey(ipip_entry->ol_dev);
u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb);
- u32 ikey = mlxsw_sp_ipip_netdev_ikey(ipip_entry->ol_dev);
char rtdp_pl[MLXSW_REG_RTDP_LEN];
+ struct ip_tunnel_parm parms;
unsigned int type_check;
+ bool has_ikey;
u32 daddr4;
+ u32 ikey;
+
+ parms = mlxsw_sp_ipip_netdev_parms4(ipip_entry->ol_dev);
+ has_ikey = mlxsw_sp_ipip_parms4_has_ikey(parms);
+ ikey = mlxsw_sp_ipip_parms4_ikey(parms);
mlxsw_reg_rtdp_pack(rtdp_pl, MLXSW_REG_RTDP_TYPE_IPIP, tunnel_index);
@@ -243,15 +247,14 @@ static bool mlxsw_sp_ipip_tunnel_complete(enum mlxsw_sp_l3proto proto,
{
union mlxsw_sp_l3addr saddr = mlxsw_sp_ipip_netdev_saddr(proto, ol_dev);
union mlxsw_sp_l3addr daddr = mlxsw_sp_ipip_netdev_daddr(proto, ol_dev);
- union mlxsw_sp_l3addr naddr = {0};
/* Tunnels with unset local or remote address are valid in Linux and
* used for lightweight tunnels (LWT) and Non-Broadcast Multi-Access
* (NBMA) tunnels. In principle these can be offloaded, but the driver
* currently doesn't support this. So punt.
*/
- return memcmp(&saddr, &naddr, sizeof(naddr)) &&
- memcmp(&daddr, &naddr, sizeof(naddr));
+ return !mlxsw_sp_l3addr_is_zero(saddr) &&
+ !mlxsw_sp_l3addr_is_zero(daddr);
}
static bool mlxsw_sp_ipip_can_offload_gre4(const struct mlxsw_sp *mlxsw_sp,
@@ -273,14 +276,15 @@ static struct mlxsw_sp_rif_ipip_lb_config
mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp,
const struct net_device *ol_dev)
{
+ struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev);
enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt;
- lb_ipipt = mlxsw_sp_ipip_netdev_has_okey(ol_dev) ?
+ lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(parms) ?
MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_KEY_IN_IP :
MLXSW_REG_RITR_LOOPBACK_IPIP_TYPE_IP_IN_GRE_IN_IP;
return (struct mlxsw_sp_rif_ipip_lb_config){
.lb_ipipt = lb_ipipt,
- .okey = mlxsw_sp_ipip_netdev_okey(ol_dev),
+ .okey = mlxsw_sp_ipip_parms4_okey(parms),
.ul_protocol = MLXSW_SP_L3_PROTO_IPV4,
.saddr = mlxsw_sp_ipip_netdev_saddr(MLXSW_SP_L3_PROTO_IPV4,
ol_dev),
@@ -300,16 +304,12 @@ mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp,
bool update_nhs = false;
int err = 0;
- new_parms = mlxsw_sp_ipip_netdev_parms(ipip_entry->ol_dev);
+ new_parms = mlxsw_sp_ipip_netdev_parms4(ipip_entry->ol_dev);
- new_saddr = mlxsw_sp_ipip_parms_saddr(MLXSW_SP_L3_PROTO_IPV4,
- new_parms);
- old_saddr = mlxsw_sp_ipip_parms_saddr(MLXSW_SP_L3_PROTO_IPV4,
- ipip_entry->parms);
- new_daddr = mlxsw_sp_ipip_parms_daddr(MLXSW_SP_L3_PROTO_IPV4,
- new_parms);
- old_daddr = mlxsw_sp_ipip_parms_daddr(MLXSW_SP_L3_PROTO_IPV4,
- ipip_entry->parms);
+ new_saddr = mlxsw_sp_ipip_parms4_saddr(new_parms);
+ old_saddr = mlxsw_sp_ipip_parms4_saddr(ipip_entry->parms4);
+ new_daddr = mlxsw_sp_ipip_parms4_daddr(new_parms);
+ old_daddr = mlxsw_sp_ipip_parms4_daddr(ipip_entry->parms4);
if (!mlxsw_sp_l3addr_eq(&new_saddr, &old_saddr)) {
u16 ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ipip_entry->ol_dev);
@@ -326,14 +326,14 @@ mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp,
}
update_tunnel = true;
- } else if ((mlxsw_sp_ipip_parms_okey(ipip_entry->parms) !=
- mlxsw_sp_ipip_parms_okey(new_parms)) ||
- ipip_entry->parms.link != new_parms.link) {
+ } else if ((mlxsw_sp_ipip_parms4_okey(ipip_entry->parms4) !=
+ mlxsw_sp_ipip_parms4_okey(new_parms)) ||
+ ipip_entry->parms4.link != new_parms.link) {
update_tunnel = true;
} else if (!mlxsw_sp_l3addr_eq(&new_daddr, &old_daddr)) {
update_nhs = true;
- } else if (mlxsw_sp_ipip_parms_ikey(ipip_entry->parms) !=
- mlxsw_sp_ipip_parms_ikey(new_parms)) {
+ } else if (mlxsw_sp_ipip_parms4_ikey(ipip_entry->parms4) !=
+ mlxsw_sp_ipip_parms4_ikey(new_parms)) {
update_decap = true;
}
@@ -350,7 +350,7 @@ mlxsw_sp_ipip_ol_netdev_change_gre4(struct mlxsw_sp *mlxsw_sp,
false, false, false,
extack);
- ipip_entry->parms = new_parms;
+ ipip_entry->parms4 = new_parms;
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
index 04b08d9d76e9..6909d867bb59 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
@@ -1,7 +1,7 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h
- * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
- * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
+ * Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -37,14 +37,19 @@
#include "spectrum_router.h"
#include <net/ip_fib.h>
+#include <linux/if_tunnel.h>
struct ip_tunnel_parm
-mlxsw_sp_ipip_netdev_parms(const struct net_device *ol_dev);
+mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev);
+struct __ip6_tnl_parm
+mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev);
union mlxsw_sp_l3addr
mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto,
const struct net_device *ol_dev);
+bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr);
+
enum mlxsw_sp_ipip_type {
MLXSW_SP_IPIP_TYPE_GRE4,
MLXSW_SP_IPIP_TYPE_MAX,
@@ -56,7 +61,9 @@ struct mlxsw_sp_ipip_entry {
struct mlxsw_sp_rif_ipip_lb *ol_lb;
struct mlxsw_sp_fib_entry *decap_fib_entry;
struct list_head ipip_list_node;
- struct ip_tunnel_parm parms;
+ union {
+ struct ip_tunnel_parm parms4;
+ };
};
struct mlxsw_sp_ipip_ops {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
index 55f9d2d70f9e..059eb3214328 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_kvdl.c
@@ -67,7 +67,7 @@ struct mlxsw_sp_kvdl_part_info {
struct mlxsw_sp_kvdl_part {
struct list_head list;
- const struct mlxsw_sp_kvdl_part_info *info;
+ struct mlxsw_sp_kvdl_part_info *info;
unsigned long usage[0]; /* Entries */
};
@@ -188,21 +188,27 @@ int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
return 0;
}
+enum mlxsw_sp_kvdl_part_id {
+ MLXSW_SP_KVDL_PART_SINGLE,
+ MLXSW_SP_KVDL_PART_CHUNKS,
+ MLXSW_SP_KVDL_PART_LARGE_CHUNKS,
+};
+
static const struct mlxsw_sp_kvdl_part_info kvdl_parts_info[] = {
{
- .part_index = 0,
+ .part_index = MLXSW_SP_KVDL_PART_SINGLE,
.start_index = MLXSW_SP_KVDL_SINGLE_BASE,
.end_index = MLXSW_SP_KVDL_SINGLE_END,
.alloc_size = 1,
},
{
- .part_index = 1,
+ .part_index = MLXSW_SP_KVDL_PART_CHUNKS,
.start_index = MLXSW_SP_KVDL_CHUNKS_BASE,
.end_index = MLXSW_SP_KVDL_CHUNKS_END,
.alloc_size = MLXSW_SP_CHUNK_MAX,
},
{
- .part_index = 2,
+ .part_index = MLXSW_SP_KVDL_PART_LARGE_CHUNKS,
.start_index = MLXSW_SP_KVDL_LARGE_CHUNKS_BASE,
.end_index = MLXSW_SP_KVDL_LARGE_CHUNKS_END,
.alloc_size = MLXSW_SP_LARGE_CHUNK_MAX,
@@ -222,27 +228,76 @@ mlxsw_sp_kvdl_part_find(struct mlxsw_sp *mlxsw_sp, unsigned int part_index)
return NULL;
}
+static void
+mlxsw_sp_kvdl_part_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_kvdl_part *part, unsigned int size)
+{
+ struct mlxsw_sp_kvdl_part_info *info = part->info;
+
+ if (list_is_last(&part->list, &mlxsw_sp->kvdl->parts_list)) {
+ info->end_index = size - 1;
+ } else {
+ struct mlxsw_sp_kvdl_part *last_part;
+
+ last_part = list_next_entry(part, list);
+ info->start_index = last_part->info->end_index + 1;
+ info->end_index = info->start_index + size - 1;
+ }
+}
+
static int mlxsw_sp_kvdl_part_init(struct mlxsw_sp *mlxsw_sp,
unsigned int part_index)
{
+ struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
const struct mlxsw_sp_kvdl_part_info *info;
+ enum mlxsw_sp_resource_id resource_id;
struct mlxsw_sp_kvdl_part *part;
+ bool need_update = true;
unsigned int nr_entries;
size_t usage_size;
+ u64 resource_size;
+ int err;
info = &kvdl_parts_info[part_index];
- nr_entries = (info->end_index - info->start_index + 1) /
- info->alloc_size;
+ switch (part_index) {
+ case MLXSW_SP_KVDL_PART_SINGLE:
+ resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE;
+ break;
+ case MLXSW_SP_KVDL_PART_CHUNKS:
+ resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS;
+ break;
+ case MLXSW_SP_KVDL_PART_LARGE_CHUNKS:
+ resource_id = MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ err = devlink_resource_size_get(devlink, resource_id, &resource_size);
+ if (err) {
+ need_update = false;
+ resource_size = info->end_index - info->start_index + 1;
+ }
+
+ nr_entries = div_u64(resource_size, info->alloc_size);
usage_size = BITS_TO_LONGS(nr_entries) * sizeof(unsigned long);
part = kzalloc(sizeof(*part) + usage_size, GFP_KERNEL);
if (!part)
return -ENOMEM;
- part->info = info;
- list_add(&part->list, &mlxsw_sp->kvdl->parts_list);
+ part->info = kmemdup(info, sizeof(*part->info), GFP_KERNEL);
+ if (!part->info)
+ goto err_part_info_alloc;
+ list_add(&part->list, &mlxsw_sp->kvdl->parts_list);
+ if (need_update)
+ mlxsw_sp_kvdl_part_update(mlxsw_sp, part, resource_size);
return 0;
+
+err_part_info_alloc:
+ kfree(part);
+ return -ENOMEM;
}
static void mlxsw_sp_kvdl_part_fini(struct mlxsw_sp *mlxsw_sp,
@@ -255,6 +310,7 @@ static void mlxsw_sp_kvdl_part_fini(struct mlxsw_sp *mlxsw_sp,
return;
list_del(&part->list);
+ kfree(part->info);
kfree(part);
}
@@ -312,6 +368,123 @@ u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp)
return occ;
}
+u64 mlxsw_sp_kvdl_single_occ_get(struct devlink *devlink)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_kvdl_part *part;
+
+ part = mlxsw_sp_kvdl_part_find(mlxsw_sp, MLXSW_SP_KVDL_PART_SINGLE);
+ if (!part)
+ return -EINVAL;
+
+ return mlxsw_sp_kvdl_part_occ(part);
+}
+
+u64 mlxsw_sp_kvdl_chunks_occ_get(struct devlink *devlink)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_kvdl_part *part;
+
+ part = mlxsw_sp_kvdl_part_find(mlxsw_sp, MLXSW_SP_KVDL_PART_CHUNKS);
+ if (!part)
+ return -EINVAL;
+
+ return mlxsw_sp_kvdl_part_occ(part);
+}
+
+u64 mlxsw_sp_kvdl_large_chunks_occ_get(struct devlink *devlink)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+ struct mlxsw_sp_kvdl_part *part;
+
+ part = mlxsw_sp_kvdl_part_find(mlxsw_sp,
+ MLXSW_SP_KVDL_PART_LARGE_CHUNKS);
+ if (!part)
+ return -EINVAL;
+
+ return mlxsw_sp_kvdl_part_occ(part);
+}
+
+static struct devlink_resource_ops mlxsw_sp_kvdl_single_ops = {
+ .occ_get = mlxsw_sp_kvdl_single_occ_get,
+};
+
+static struct devlink_resource_ops mlxsw_sp_kvdl_chunks_ops = {
+ .occ_get = mlxsw_sp_kvdl_chunks_occ_get,
+};
+
+static struct devlink_resource_ops mlxsw_sp_kvdl_chunks_large_ops = {
+ .occ_get = mlxsw_sp_kvdl_large_chunks_occ_get,
+};
+
+static struct devlink_resource_size_params mlxsw_sp_kvdl_single_size_params = {
+ .size_min = 0,
+ .size_granularity = 1,
+ .unit = DEVLINK_RESOURCE_UNIT_ENTRY,
+};
+
+static struct devlink_resource_size_params mlxsw_sp_kvdl_chunks_size_params = {
+ .size_min = 0,
+ .size_granularity = MLXSW_SP_CHUNK_MAX,
+ .unit = DEVLINK_RESOURCE_UNIT_ENTRY,
+};
+
+static struct devlink_resource_size_params mlxsw_sp_kvdl_large_chunks_size_params = {
+ .size_min = 0,
+ .size_granularity = MLXSW_SP_LARGE_CHUNK_MAX,
+ .unit = DEVLINK_RESOURCE_UNIT_ENTRY,
+};
+
+static void
+mlxsw_sp_kvdl_resource_size_params_prepare(struct devlink *devlink)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+ u32 kvdl_max_size;
+
+ kvdl_max_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
+ MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) -
+ MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE);
+
+ mlxsw_sp_kvdl_single_size_params.size_max = kvdl_max_size;
+ mlxsw_sp_kvdl_chunks_size_params.size_max = kvdl_max_size;
+ mlxsw_sp_kvdl_large_chunks_size_params.size_max = kvdl_max_size;
+}
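
The size ceiling shared by the three linear sub-resources is whatever KVD space remains after reserving the hash minima. Worked through with invented numbers (the real values are device resources read from firmware):

	/* Illustration only -- the values below are hypothetical:
	 *   KVD_SIZE            == 0x40000
	 *   KVD_SINGLE_MIN_SIZE == 0x8000
	 *   KVD_DOUBLE_MIN_SIZE == 0x8000
	 * => kvdl_max_size == 0x40000 - 0x8000 - 0x8000 == 0x30000 entries,
	 * which becomes .size_max of singles, chunks and large_chunks alike.
	 */
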
+
+int mlxsw_sp_kvdl_resources_register(struct devlink *devlink)
+{
+ int err;
+
+ mlxsw_sp_kvdl_resource_size_params_prepare(devlink);
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_SINGLES,
+ false, MLXSW_SP_KVDL_SINGLE_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &mlxsw_sp_kvdl_single_size_params,
+ &mlxsw_sp_kvdl_single_ops);
+ if (err)
+ return err;
+
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_CHUNKS,
+ false, MLXSW_SP_KVDL_CHUNKS_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &mlxsw_sp_kvdl_chunks_size_params,
+ &mlxsw_sp_kvdl_chunks_ops);
+ if (err)
+ return err;
+
+ err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR_LARGE_CHUNKS,
+ false, MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE,
+ MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+ MLXSW_SP_RESOURCE_KVD_LINEAR,
+ &mlxsw_sp_kvdl_large_chunks_size_params,
+ &mlxsw_sp_kvdl_chunks_large_ops);
+ return err;
+}
+
int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
{
struct mlxsw_sp_kvdl *kvdl;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
index d20b143de3b4..978a3c70653a 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_mr.c
@@ -126,8 +126,8 @@ mlxsw_sp_mr_route_ivif_in_evifs(const struct mlxsw_sp_mr_route *mr_route)
switch (mr_route->mr_table->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
- ivif = mr_route->mfc4->mfc_parent;
- return mr_route->mfc4->mfc_un.res.ttls[ivif] != 255;
+ ivif = mr_route->mfc4->_c.mfc_parent;
+ return mr_route->mfc4->_c.mfc_un.res.ttls[ivif] != 255;
case MLXSW_SP_L3_PROTO_IPV6:
/* fall through */
default:
@@ -364,7 +364,7 @@ mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table,
mr_route->mfc4 = mfc;
mr_route->mr_table = mr_table;
for (i = 0; i < MAXVIFS; i++) {
- if (mfc->mfc_un.res.ttls[i] != 255) {
+ if (mfc->_c.mfc_un.res.ttls[i] != 255) {
err = mlxsw_sp_mr_route_evif_link(mr_route,
&mr_table->vifs[i]);
if (err)
@@ -374,7 +374,8 @@ mlxsw_sp_mr_route4_create(struct mlxsw_sp_mr_table *mr_table,
mr_route->min_mtu = mr_table->vifs[i].dev->mtu;
}
}
- mlxsw_sp_mr_route_ivif_link(mr_route, &mr_table->vifs[mfc->mfc_parent]);
+ mlxsw_sp_mr_route_ivif_link(mr_route,
+ &mr_table->vifs[mfc->_c.mfc_parent]);
mr_route->route_action = mlxsw_sp_mr_route_action(mr_route);
return mr_route;
@@ -418,9 +419,9 @@ static void mlxsw_sp_mr_mfc_offload_set(struct mlxsw_sp_mr_route *mr_route,
switch (mr_route->mr_table->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
if (offload)
- mr_route->mfc4->mfc_flags |= MFC_OFFLOAD;
+ mr_route->mfc4->_c.mfc_flags |= MFC_OFFLOAD;
else
- mr_route->mfc4->mfc_flags &= ~MFC_OFFLOAD;
+ mr_route->mfc4->_c.mfc_flags &= ~MFC_OFFLOAD;
break;
case MLXSW_SP_L3_PROTO_IPV6:
/* fall through */
@@ -943,10 +944,10 @@ static void mlxsw_sp_mr_route_stats_update(struct mlxsw_sp *mlxsw_sp,
switch (mr_route->mr_table->proto) {
case MLXSW_SP_L3_PROTO_IPV4:
- if (mr_route->mfc4->mfc_un.res.pkt != packets)
- mr_route->mfc4->mfc_un.res.lastuse = jiffies;
- mr_route->mfc4->mfc_un.res.pkt = packets;
- mr_route->mfc4->mfc_un.res.bytes = bytes;
+ if (mr_route->mfc4->_c.mfc_un.res.pkt != packets)
+ mr_route->mfc4->_c.mfc_un.res.lastuse = jiffies;
+ mr_route->mfc4->_c.mfc_un.res.pkt = packets;
+ mr_route->mfc4->_c.mfc_un.res.bytes = bytes;
break;
case MLXSW_SP_L3_PROTO_IPV6:
/* fall through */
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
index 0b7670459051..91262b0573e3 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_qdisc.c
@@ -42,6 +42,8 @@
#include "reg.h"
#define MLXSW_SP_PRIO_BAND_TO_TCLASS(band) (IEEE_8021QAZ_MAX_TCS - band - 1)
+#define MLXSW_SP_PRIO_CHILD_TO_TCLASS(child) \
+ MLXSW_SP_PRIO_BAND_TO_TCLASS((child - 1))
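
Worked through with IEEE_8021QAZ_MAX_TCS == 8 (its usual value), the mapping simply reverses the band order, and the child macro accounts for TC child minor numbers being 1-based:

	/* Assuming IEEE_8021QAZ_MAX_TCS == 8:
	 *   MLXSW_SP_PRIO_BAND_TO_TCLASS(0)  == 7  (highest-priority band)
	 *   MLXSW_SP_PRIO_BAND_TO_TCLASS(7)  == 0  (lowest-priority band)
	 *   MLXSW_SP_PRIO_CHILD_TO_TCLASS(1) == 7  (child 1 is band 0)
	 */
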
enum mlxsw_sp_qdisc_type {
MLXSW_SP_QDISC_NO_QDISC,
@@ -76,6 +78,7 @@ struct mlxsw_sp_qdisc_ops {
struct mlxsw_sp_qdisc {
u32 handle;
u8 tclass_num;
+ u8 prio_bitmap;
union {
struct red_stats red;
} xstats_base;
@@ -99,6 +102,44 @@ mlxsw_sp_qdisc_compare(struct mlxsw_sp_qdisc *mlxsw_sp_qdisc, u32 handle,
mlxsw_sp_qdisc->handle == handle;
}
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_find(struct mlxsw_sp_port *mlxsw_sp_port, u32 parent,
+ bool root_only)
+{
+ int tclass, child_index;
+
+ if (parent == TC_H_ROOT)
+ return mlxsw_sp_port->root_qdisc;
+
+ if (root_only || !mlxsw_sp_port->root_qdisc ||
+ !mlxsw_sp_port->root_qdisc->ops ||
+ TC_H_MAJ(parent) != mlxsw_sp_port->root_qdisc->handle ||
+ TC_H_MIN(parent) > IEEE_8021QAZ_MAX_TCS)
+ return NULL;
+
+ child_index = TC_H_MIN(parent);
+ tclass = MLXSW_SP_PRIO_CHILD_TO_TCLASS(child_index);
+ return &mlxsw_sp_port->tclass_qdiscs[tclass];
+}
+
+static struct mlxsw_sp_qdisc *
+mlxsw_sp_qdisc_find_by_handle(struct mlxsw_sp_port *mlxsw_sp_port, u32 handle)
+{
+ int i;
+
+ if (mlxsw_sp_port->root_qdisc->handle == handle)
+ return mlxsw_sp_port->root_qdisc;
+
+ if (mlxsw_sp_port->root_qdisc->handle == TC_H_UNSPEC)
+ return NULL;
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ if (mlxsw_sp_port->tclass_qdiscs[i].handle == handle)
+ return &mlxsw_sp_port->tclass_qdiscs[i];
+
+ return NULL;
+}
+
static int
mlxsw_sp_qdisc_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
@@ -185,6 +226,23 @@ mlxsw_sp_qdisc_get_xstats(struct mlxsw_sp_port *mlxsw_sp_port,
return -EOPNOTSUPP;
}
+static void
+mlxsw_sp_qdisc_bstats_per_priority_get(struct mlxsw_sp_port_xstats *xstats,
+ u8 prio_bitmap, u64 *tx_packets,
+ u64 *tx_bytes)
+{
+ int i;
+
+ *tx_packets = 0;
+ *tx_bytes = 0;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ if (prio_bitmap & BIT(i)) {
+ *tx_packets += xstats->tx_packets[i];
+ *tx_bytes += xstats->tx_bytes[i];
+ }
+ }
+}
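
A band's byte/packet counters are thus the sum over the priorities currently steered to it. For instance, if a band owns priorities 0 and 3, a sketch of the call would be:

	u64 tx_packets, tx_bytes;

	/* prio_bitmap == BIT(0) | BIT(3) == 0x09 */
	mlxsw_sp_qdisc_bstats_per_priority_get(xstats, BIT(0) | BIT(3),
					       &tx_packets, &tx_bytes);
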
+
static int
mlxsw_sp_tclass_congestion_enable(struct mlxsw_sp_port *mlxsw_sp_port,
int tclass_num, u32 min, u32 max,
@@ -230,17 +288,16 @@ mlxsw_sp_setup_tc_qdisc_red_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
- struct rtnl_link_stats64 *stats;
struct red_stats *red_base;
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
- stats = &mlxsw_sp_port->periodic_hw_stats.stats;
stats_base = &mlxsw_sp_qdisc->stats_base;
red_base = &mlxsw_sp_qdisc->xstats_base.red;
- stats_base->tx_packets = stats->tx_packets;
- stats_base->tx_bytes = stats->tx_bytes;
-
+ mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
+ mlxsw_sp_qdisc->prio_bitmap,
+ &stats_base->tx_packets,
+ &stats_base->tx_bytes);
red_base->prob_mark = xstats->ecn;
red_base->prob_drop = xstats->wred_drop[tclass_num];
red_base->pdrop = xstats->tail_drop[tclass_num];
@@ -255,6 +312,12 @@ static int
mlxsw_sp_qdisc_red_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc)
{
+ struct mlxsw_sp_qdisc *root_qdisc = mlxsw_sp_port->root_qdisc;
+
+ if (root_qdisc != mlxsw_sp_qdisc)
+ root_qdisc->stats_base.backlog -=
+ mlxsw_sp_qdisc->stats_base.backlog;
+
return mlxsw_sp_tclass_congestion_disable(mlxsw_sp_port,
mlxsw_sp_qdisc->tclass_num);
}
@@ -319,6 +382,7 @@ mlxsw_sp_qdisc_red_unoffload(struct mlxsw_sp_port *mlxsw_sp_port,
backlog = mlxsw_sp_cells_bytes(mlxsw_sp_port->mlxsw_sp,
mlxsw_sp_qdisc->stats_base.backlog);
p->qstats->backlog -= backlog;
+ mlxsw_sp_qdisc->stats_base.backlog = 0;
}
static int
@@ -357,14 +421,16 @@ mlxsw_sp_qdisc_get_red_stats(struct mlxsw_sp_port *mlxsw_sp_port,
u8 tclass_num = mlxsw_sp_qdisc->tclass_num;
struct mlxsw_sp_qdisc_stats *stats_base;
struct mlxsw_sp_port_xstats *xstats;
- struct rtnl_link_stats64 *stats;
xstats = &mlxsw_sp_port->periodic_hw_stats.xstats;
- stats = &mlxsw_sp_port->periodic_hw_stats.stats;
stats_base = &mlxsw_sp_qdisc->stats_base;
- tx_bytes = stats->tx_bytes - stats_base->tx_bytes;
- tx_packets = stats->tx_packets - stats_base->tx_packets;
+ mlxsw_sp_qdisc_bstats_per_priority_get(xstats,
+ mlxsw_sp_qdisc->prio_bitmap,
+ &tx_packets, &tx_bytes);
+ tx_bytes = tx_bytes - stats_base->tx_bytes;
+ tx_packets = tx_packets - stats_base->tx_packets;
+
overlimits = xstats->wred_drop[tclass_num] + xstats->ecn -
stats_base->overlimits;
drops = xstats->wred_drop[tclass_num] + xstats->tail_drop[tclass_num] -
@@ -406,11 +472,10 @@ int mlxsw_sp_setup_tc_red(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- if (p->parent != TC_H_ROOT)
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, false);
+ if (!mlxsw_sp_qdisc)
return -EOPNOTSUPP;
- mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc;
-
if (p->command == TC_RED_REPLACE)
return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
mlxsw_sp_qdisc,
@@ -441,9 +506,13 @@ mlxsw_sp_qdisc_prio_destroy(struct mlxsw_sp_port *mlxsw_sp_port,
{
int i;
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i,
MLXSW_SP_PORT_DEFAULT_TCLASS);
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
+ &mlxsw_sp_port->tclass_qdiscs[i]);
+ mlxsw_sp_port->tclass_qdiscs[i].prio_bitmap = 0;
+ }
return 0;
}
@@ -467,16 +536,41 @@ mlxsw_sp_qdisc_prio_replace(struct mlxsw_sp_port *mlxsw_sp_port,
void *params)
{
struct tc_prio_qopt_offload_params *p = params;
- int tclass, i;
+ struct mlxsw_sp_qdisc *child_qdisc;
+ int tclass, i, band, backlog;
+ u8 old_priomap;
int err;
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->priomap[i]);
- err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, tclass);
- if (err)
- return err;
+ for (band = 0; band < p->bands; band++) {
+ tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
+ child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
+ old_priomap = child_qdisc->prio_bitmap;
+ child_qdisc->prio_bitmap = 0;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ if (p->priomap[i] == band) {
+ child_qdisc->prio_bitmap |= BIT(i);
+ if (BIT(i) & old_priomap)
+ continue;
+ err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port,
+ i, tclass);
+ if (err)
+ return err;
+ }
+ }
+ if (old_priomap != child_qdisc->prio_bitmap &&
+ child_qdisc->ops && child_qdisc->ops->clean_stats) {
+ backlog = child_qdisc->stats_base.backlog;
+ child_qdisc->ops->clean_stats(mlxsw_sp_port,
+ child_qdisc);
+ child_qdisc->stats_base.backlog = backlog;
+ }
+ }
+ for (; band < IEEE_8021QAZ_MAX_TCS; band++) {
+ tclass = MLXSW_SP_PRIO_BAND_TO_TCLASS(band);
+ child_qdisc = &mlxsw_sp_port->tclass_qdiscs[tclass];
+ child_qdisc->prio_bitmap = 0;
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, child_qdisc);
}
-
return 0;
}
@@ -513,6 +607,7 @@ mlxsw_sp_qdisc_get_prio_stats(struct mlxsw_sp_port *mlxsw_sp_port,
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
drops += xstats->tail_drop[i];
+ drops += xstats->wred_drop[i];
backlog += xstats->backlog[i];
}
drops = drops - stats_base->drops;
@@ -548,8 +643,10 @@ mlxsw_sp_setup_tc_qdisc_prio_clean_stats(struct mlxsw_sp_port *mlxsw_sp_port,
stats_base->tx_bytes = stats->tx_bytes;
stats_base->drops = 0;
- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
stats_base->drops += xstats->tail_drop[i];
+ stats_base->drops += xstats->wred_drop[i];
+ }
mlxsw_sp_qdisc->stats_base.backlog = 0;
}
@@ -564,15 +661,48 @@ static struct mlxsw_sp_qdisc_ops mlxsw_sp_qdisc_ops_prio = {
.clean_stats = mlxsw_sp_setup_tc_qdisc_prio_clean_stats,
};
+/* Grafting is not supported in mlxsw. It will result in un-offloading of
+ * both the grafted qdisc and the qdisc currently at the graft destination.
+ * (However, a graft to the location where the qdisc already is will be
+ * ignored completely and won't cause un-offloading.)
+ */
+static int
+mlxsw_sp_qdisc_prio_graft(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc,
+ struct tc_prio_qopt_offload_graft_params *p)
+{
+ int tclass_num = MLXSW_SP_PRIO_BAND_TO_TCLASS(p->band);
+ struct mlxsw_sp_qdisc *old_qdisc;
+
+ /* Check if the grafted qdisc is already in its "new" location. If so,
+ * nothing needs to be done.
+ */
+ if (p->band < IEEE_8021QAZ_MAX_TCS &&
+ mlxsw_sp_port->tclass_qdiscs[tclass_num].handle == p->child_handle)
+ return 0;
+
+ /* See if the grafted qdisc is already offloaded on any tclass. If so,
+ * unoffload it.
+ */
+ old_qdisc = mlxsw_sp_qdisc_find_by_handle(mlxsw_sp_port,
+ p->child_handle);
+ if (old_qdisc)
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port, old_qdisc);
+
+ mlxsw_sp_qdisc_destroy(mlxsw_sp_port,
+ &mlxsw_sp_port->tclass_qdiscs[tclass_num]);
+ return -EOPNOTSUPP;
+}
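
A concrete walk-through with hypothetical handles may help:

	/* Hypothetical handles: root prio is 1:, its band-0 child is 10:.
	 * - Grafting 10: back at band 0 hits the first check and is a no-op.
	 * - Grafting 10: at band 1 destroys the offload of both 10: and of
	 *   whatever currently sits at band 1; the -EOPNOTSUPP return then
	 *   tells the stack the graft proceeds unoffloaded in software.
	 */
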
+
int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
struct tc_prio_qopt_offload *p)
{
struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
- if (p->parent != TC_H_ROOT)
+ mlxsw_sp_qdisc = mlxsw_sp_qdisc_find(mlxsw_sp_port, p->parent, true);
+ if (!mlxsw_sp_qdisc)
return -EOPNOTSUPP;
- mlxsw_sp_qdisc = mlxsw_sp_port->root_qdisc;
if (p->command == TC_PRIO_REPLACE)
return mlxsw_sp_qdisc_replace(mlxsw_sp_port, p->handle,
mlxsw_sp_qdisc,
@@ -589,6 +719,9 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
case TC_PRIO_STATS:
return mlxsw_sp_qdisc_get_stats(mlxsw_sp_port, mlxsw_sp_qdisc,
&p->stats);
+ case TC_PRIO_GRAFT:
+ return mlxsw_sp_qdisc_prio_graft(mlxsw_sp_port, mlxsw_sp_qdisc,
+ &p->graft_params);
default:
return -EOPNOTSUPP;
}
@@ -596,17 +729,36 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port,
int mlxsw_sp_tc_qdisc_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
- mlxsw_sp_port->root_qdisc = kzalloc(sizeof(*mlxsw_sp_port->root_qdisc),
- GFP_KERNEL);
- if (!mlxsw_sp_port->root_qdisc)
- return -ENOMEM;
+ struct mlxsw_sp_qdisc *mlxsw_sp_qdisc;
+ int i;
+ mlxsw_sp_qdisc = kzalloc(sizeof(*mlxsw_sp_qdisc), GFP_KERNEL);
+ if (!mlxsw_sp_qdisc)
+ goto err_root_qdisc_init;
+
+ mlxsw_sp_port->root_qdisc = mlxsw_sp_qdisc;
+ mlxsw_sp_port->root_qdisc->prio_bitmap = 0xff;
mlxsw_sp_port->root_qdisc->tclass_num = MLXSW_SP_PORT_DEFAULT_TCLASS;
+ mlxsw_sp_qdisc = kzalloc(sizeof(*mlxsw_sp_qdisc) * IEEE_8021QAZ_MAX_TCS,
+ GFP_KERNEL);
+ if (!mlxsw_sp_qdisc)
+ goto err_tclass_qdiscs_init;
+
+ mlxsw_sp_port->tclass_qdiscs = mlxsw_sp_qdisc;
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
+ mlxsw_sp_port->tclass_qdiscs[i].tclass_num = i;
+
return 0;
+
+err_tclass_qdiscs_init:
+ kfree(mlxsw_sp_port->root_qdisc);
+err_root_qdisc_init:
+ return -ENOMEM;
}
void mlxsw_sp_tc_qdisc_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
+ kfree(mlxsw_sp_port->tclass_qdiscs);
kfree(mlxsw_sp_port->root_qdisc);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index f7948e983637..a8a578610a7b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1,10 +1,10 @@
/*
* drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
- * Copyright (c) 2016-2017 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved.
* Copyright (c) 2016 Jiri Pirko <jiri@mellanox.com>
* Copyright (c) 2016 Ido Schimmel <idosch@mellanox.com>
* Copyright (c) 2016 Yotam Gigi <yotamg@mellanox.com>
- * Copyright (c) 2017 Petr Machata <petrm@mellanox.com>
+ * Copyright (c) 2017-2018 Petr Machata <petrm@mellanox.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -70,6 +70,7 @@
#include "spectrum_mr.h"
#include "spectrum_mr_tcam.h"
#include "spectrum_router.h"
+#include "spectrum_span.h"
struct mlxsw_sp_fib;
struct mlxsw_sp_vr;
@@ -796,7 +797,7 @@ static struct mlxsw_sp_vr *mlxsw_sp_vr_create(struct mlxsw_sp *mlxsw_sp,
vr = mlxsw_sp_vr_find_unused(mlxsw_sp);
if (!vr) {
- NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported virtual routers");
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported virtual routers");
return ERR_PTR(-EBUSY);
}
fib4 = mlxsw_sp_fib_create(mlxsw_sp, vr, MLXSW_SP_L3_PROTO_IPV4);
@@ -1024,9 +1025,11 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
enum mlxsw_sp_ipip_type ipipt,
struct net_device *ol_dev)
{
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
struct mlxsw_sp_ipip_entry *ipip_entry;
struct mlxsw_sp_ipip_entry *ret = NULL;
+ ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipipt];
ipip_entry = kzalloc(sizeof(*ipip_entry), GFP_KERNEL);
if (!ipip_entry)
return ERR_PTR(-ENOMEM);
@@ -1040,7 +1043,15 @@ mlxsw_sp_ipip_entry_alloc(struct mlxsw_sp *mlxsw_sp,
ipip_entry->ipipt = ipipt;
ipip_entry->ol_dev = ol_dev;
- ipip_entry->parms = mlxsw_sp_ipip_netdev_parms(ol_dev);
+
+ switch (ipip_ops->ul_proto) {
+ case MLXSW_SP_L3_PROTO_IPV4:
+ ipip_entry->parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev);
+ break;
+ case MLXSW_SP_L3_PROTO_IPV6:
+ WARN_ON(1);
+ break;
+ }
return ipip_entry;
@@ -2320,6 +2331,8 @@ static void mlxsw_sp_router_neigh_event_work(struct work_struct *work)
read_unlock_bh(&n->lock);
rtnl_lock();
+ mlxsw_sp_span_respin(mlxsw_sp);
+
entry_connected = nud_state & NUD_VALID && !dead;
neigh_entry = mlxsw_sp_neigh_entry_lookup(mlxsw_sp, n);
if (!entry_connected && !neigh_entry)
@@ -2417,7 +2430,8 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *nb,
mlxsw_core_schedule_work(&net_work->work);
mlxsw_sp_port_dev_put(mlxsw_sp_port);
break;
- case NETEVENT_MULTIPATH_HASH_UPDATE:
+ case NETEVENT_IPV4_MPATH_HASH_UPDATE:
+ case NETEVENT_IPV6_MPATH_HASH_UPDATE:
net = ptr;
if (!net_eq(net, &init_net))
@@ -5579,6 +5593,8 @@ static void mlxsw_sp_router_fib4_event_work(struct work_struct *work)
/* Protect internal structures from changes */
rtnl_lock();
+ mlxsw_sp_span_respin(mlxsw_sp);
+
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
case FIB_EVENT_ENTRY_APPEND: /* fall through */
@@ -5621,6 +5637,8 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
int err;
rtnl_lock();
+ mlxsw_sp_span_respin(mlxsw_sp);
+
switch (fib_work->event) {
case FIB_EVENT_ENTRY_REPLACE: /* fall through */
case FIB_EVENT_ENTRY_ADD:
@@ -5793,7 +5811,7 @@ static int mlxsw_sp_router_fib_rule_event(unsigned long event,
}
if (err < 0)
- NL_SET_ERR_MSG(extack, "spectrum: FIB rules not supported. Aborting offload");
+ NL_SET_ERR_MSG_MOD(extack, "FIB rules not supported. Aborting offload");
return err;
}
@@ -6032,7 +6050,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp,
err = mlxsw_sp_rif_index_alloc(mlxsw_sp, &rif_index);
if (err) {
- NL_SET_ERR_MSG(extack, "spectrum: Exceeded number of supported router interfaces");
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported router interfaces");
goto err_rif_index_alloc;
}
@@ -7013,13 +7031,25 @@ static void mlxsw_sp_mp4_hash_init(char *recr2_pl)
static void mlxsw_sp_mp6_hash_init(char *recr2_pl)
{
+ bool only_l3 = !init_net.ipv6.sysctl.multipath_hash_policy;
+
mlxsw_sp_mp_hash_header_set(recr2_pl,
MLXSW_REG_RECR2_IPV6_EN_NOT_TCP_NOT_UDP);
mlxsw_sp_mp_hash_header_set(recr2_pl, MLXSW_REG_RECR2_IPV6_EN_TCP_UDP);
mlxsw_reg_recr2_ipv6_sip_enable(recr2_pl);
mlxsw_reg_recr2_ipv6_dip_enable(recr2_pl);
- mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
mlxsw_sp_mp_hash_field_set(recr2_pl, MLXSW_REG_RECR2_IPV6_NEXT_HEADER);
+ if (only_l3) {
+ mlxsw_sp_mp_hash_field_set(recr2_pl,
+ MLXSW_REG_RECR2_IPV6_FLOW_LABEL);
+ } else {
+ mlxsw_sp_mp_hash_header_set(recr2_pl,
+ MLXSW_REG_RECR2_TCP_UDP_EN_IPV6);
+ mlxsw_sp_mp_hash_field_set(recr2_pl,
+ MLXSW_REG_RECR2_TCP_UDP_SPORT);
+ mlxsw_sp_mp_hash_field_set(recr2_pl,
+ MLXSW_REG_RECR2_TCP_UDP_DPORT);
+ }
}
static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
new file mode 100644
index 000000000000..f537e1de11d9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
@@ -0,0 +1,796 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2018 Petr Machata <petrm@mellanox.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/list.h>
+#include <net/arp.h>
+#include <net/gre.h>
+#include <net/ndisc.h>
+#include <net/ip6_tunnel.h>
+
+#include "spectrum.h"
+#include "spectrum_span.h"
+#include "spectrum_ipip.h"
+
+int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
+ return -EIO;
+
+ mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
+ MAX_SPAN);
+ mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
+ sizeof(struct mlxsw_sp_span_entry),
+ GFP_KERNEL);
+ if (!mlxsw_sp->span.entries)
+ return -ENOMEM;
+
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+
+ INIT_LIST_HEAD(&curr->bound_ports_list);
+ curr->id = i;
+ }
+
+ return 0;
+}
+
+void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+
+ WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
+ }
+ kfree(mlxsw_sp->span.entries);
+}
+
+static int
+mlxsw_sp_span_entry_phys_parms(const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp)
+{
+ sparmsp->dest_port = netdev_priv(to_dev);
+ return 0;
+}
+
+static int
+mlxsw_sp_span_entry_phys_configure(struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms)
+{
+ struct mlxsw_sp_port *dest_port = sparms.dest_port;
+ struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
+ u8 local_port = dest_port->local_port;
+ char mpat_pl[MLXSW_REG_MPAT_LEN];
+ int pa_id = span_entry->id;
+
+ /* Create a new port analyzer entry for local_port. */
+ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
+ MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
+}
+
+static void
+mlxsw_sp_span_entry_deconfigure_common(struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_reg_mpat_span_type span_type)
+{
+ struct mlxsw_sp_port *dest_port = span_entry->parms.dest_port;
+ struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
+ u8 local_port = dest_port->local_port;
+ char mpat_pl[MLXSW_REG_MPAT_LEN];
+ int pa_id = span_entry->id;
+
+ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false, span_type);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
+}
+
+static void
+mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+ mlxsw_sp_span_entry_deconfigure_common(span_entry,
+ MLXSW_REG_MPAT_SPAN_TYPE_LOCAL_ETH);
+}
+
+static const
+struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_phys = {
+ .can_handle = mlxsw_sp_port_dev_check,
+ .parms = mlxsw_sp_span_entry_phys_parms,
+ .configure = mlxsw_sp_span_entry_phys_configure,
+ .deconfigure = mlxsw_sp_span_entry_phys_deconfigure,
+};
+
+static struct net_device *
+mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
+ __be32 *saddrp, __be32 *daddrp)
+{
+ struct ip_tunnel *tun = netdev_priv(to_dev);
+ struct net_device *dev = NULL;
+ struct ip_tunnel_parm parms;
+ struct rtable *rt = NULL;
+ struct flowi4 fl4;
+
+ /* We assume "dev" stays valid after rt is put. */
+ ASSERT_RTNL();
+
+ parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
+ ip_tunnel_init_flow(&fl4, parms.iph.protocol, *daddrp, *saddrp,
+ 0, 0, parms.link, tun->fwmark);
+
+ rt = ip_route_output_key(tun->net, &fl4);
+ if (IS_ERR(rt))
+ return NULL;
+
+ if (rt->rt_type != RTN_UNICAST)
+ goto out;
+
+ dev = rt->dst.dev;
+ *saddrp = fl4.saddr;
+ *daddrp = rt->rt_gateway;
+
+out:
+ ip_rt_put(rt);
+ return dev;
+}
+
+static int mlxsw_sp_span_dmac(struct neigh_table *tbl,
+ const void *pkey,
+ struct net_device *l3edev,
+ unsigned char dmac[ETH_ALEN])
+{
+ struct neighbour *neigh = neigh_lookup(tbl, pkey, l3edev);
+ int err = 0;
+
+ if (!neigh) {
+ neigh = neigh_create(tbl, pkey, l3edev);
+ if (IS_ERR(neigh))
+ return PTR_ERR(neigh);
+ }
+
+ neigh_event_send(neigh, NULL);
+
+ read_lock_bh(&neigh->lock);
+ if ((neigh->nud_state & NUD_VALID) && !neigh->dead)
+ memcpy(dmac, neigh->ha, ETH_ALEN);
+ else
+ err = -ENOENT;
+ read_unlock_bh(&neigh->lock);
+
+ neigh_release(neigh);
+ return err;
+}
+
+static int
+mlxsw_sp_span_entry_unoffloadable(struct mlxsw_sp_span_parms *sparmsp)
+{
+ sparmsp->dest_port = NULL;
+ return 0;
+}
+
+static int
+mlxsw_sp_span_entry_tunnel_parms_common(struct net_device *l3edev,
+ union mlxsw_sp_l3addr saddr,
+ union mlxsw_sp_l3addr daddr,
+ union mlxsw_sp_l3addr gw,
+ __u8 ttl,
+ struct neigh_table *tbl,
+ struct mlxsw_sp_span_parms *sparmsp)
+{
+ unsigned char dmac[ETH_ALEN];
+
+ if (mlxsw_sp_l3addr_is_zero(gw))
+ gw = daddr;
+
+ if (!l3edev || !mlxsw_sp_port_dev_check(l3edev) ||
+ mlxsw_sp_span_dmac(tbl, &gw, l3edev, dmac))
+ return mlxsw_sp_span_entry_unoffloadable(sparmsp);
+
+ sparmsp->dest_port = netdev_priv(l3edev);
+ sparmsp->ttl = ttl;
+ memcpy(sparmsp->dmac, dmac, ETH_ALEN);
+ memcpy(sparmsp->smac, l3edev->dev_addr, ETH_ALEN);
+ sparmsp->saddr = saddr;
+ sparmsp->daddr = daddr;
+ return 0;
+}
+
+static int
+mlxsw_sp_span_entry_gretap4_parms(const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp)
+{
+ struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
+ union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr };
+ union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr };
+ bool inherit_tos = tparm.iph.tos & 0x1;
+ bool inherit_ttl = !tparm.iph.ttl;
+ union mlxsw_sp_l3addr gw = daddr;
+ struct net_device *l3edev;
+
+ if (!(to_dev->flags & IFF_UP) ||
+ /* Reject tunnels with GRE keys, checksums, etc. */
+ tparm.i_flags || tparm.o_flags ||
+ /* Require a fixed TTL and a TOS copied from the mirrored packet. */
+ inherit_ttl || !inherit_tos ||
+ /* A destination address may not be "any". */
+ mlxsw_sp_l3addr_is_zero(daddr))
+ return mlxsw_sp_span_entry_unoffloadable(sparmsp);
+
+ l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
+ return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
+ tparm.iph.ttl,
+ &arp_tbl, sparmsp);
+}
+
+static int
+mlxsw_sp_span_entry_gretap4_configure(struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms)
+{
+ struct mlxsw_sp_port *dest_port = sparms.dest_port;
+ struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
+ u8 local_port = dest_port->local_port;
+ char mpat_pl[MLXSW_REG_MPAT_LEN];
+ int pa_id = span_entry->id;
+
+ /* Create a new port analyzer entry for local_port. */
+ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
+ mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
+ MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
+ sparms.dmac, false);
+ mlxsw_reg_mpat_eth_rspan_l3_ipv4_pack(mpat_pl,
+ sparms.ttl, sparms.smac,
+ be32_to_cpu(sparms.saddr.addr4),
+ be32_to_cpu(sparms.daddr.addr4));
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
+}
+
+static void
+mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+ mlxsw_sp_span_entry_deconfigure_common(span_entry,
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
+}
+
+static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = {
+ .can_handle = is_gretap_dev,
+ .parms = mlxsw_sp_span_entry_gretap4_parms,
+ .configure = mlxsw_sp_span_entry_gretap4_configure,
+ .deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure,
+};
+
+static struct net_device *
+mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
+ struct in6_addr *saddrp,
+ struct in6_addr *daddrp)
+{
+ struct ip6_tnl *t = netdev_priv(to_dev);
+ struct flowi6 fl6 = t->fl.u.ip6;
+ struct net_device *dev = NULL;
+ struct dst_entry *dst;
+ struct rt6_info *rt6;
+
+ /* We assume "dev" stays valid after dst is released. */
+ ASSERT_RTNL();
+
+ fl6.flowi6_mark = t->parms.fwmark;
+ if (!ip6_tnl_xmit_ctl(t, &fl6.saddr, &fl6.daddr))
+ return NULL;
+
+ dst = ip6_route_output(t->net, NULL, &fl6);
+ if (!dst || dst->error)
+ goto out;
+
+ rt6 = container_of(dst, struct rt6_info, dst);
+
+ dev = dst->dev;
+ *saddrp = fl6.saddr;
+ *daddrp = rt6->rt6i_gateway;
+
+out:
+ dst_release(dst);
+ return dev;
+}
+
+static int
+mlxsw_sp_span_entry_gretap6_parms(const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp)
+{
+ struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
+ bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS;
+ union mlxsw_sp_l3addr saddr = { .addr6 = tparm.laddr };
+ union mlxsw_sp_l3addr daddr = { .addr6 = tparm.raddr };
+ bool inherit_ttl = !tparm.hop_limit;
+ union mlxsw_sp_l3addr gw = daddr;
+ struct net_device *l3edev;
+
+ if (!(to_dev->flags & IFF_UP) ||
+ /* Reject tunnels with GRE keys, checksums, etc. */
+ tparm.i_flags || tparm.o_flags ||
+ /* Require a fixed TTL and a TOS copied from the mirrored packet. */
+ inherit_ttl || !inherit_tos ||
+ /* A destination address may not be "any". */
+ mlxsw_sp_l3addr_is_zero(daddr))
+ return mlxsw_sp_span_entry_unoffloadable(sparmsp);
+
+ l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
+ return mlxsw_sp_span_entry_tunnel_parms_common(l3edev, saddr, daddr, gw,
+ tparm.hop_limit,
+ &nd_tbl, sparmsp);
+}
+
+static int
+mlxsw_sp_span_entry_gretap6_configure(struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms)
+{
+ struct mlxsw_sp_port *dest_port = sparms.dest_port;
+ struct mlxsw_sp *mlxsw_sp = dest_port->mlxsw_sp;
+ u8 local_port = dest_port->local_port;
+ char mpat_pl[MLXSW_REG_MPAT_LEN];
+ int pa_id = span_entry->id;
+
+ /* Create a new port analyzer entry for local_port. */
+ mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, true,
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
+ mlxsw_reg_mpat_eth_rspan_l2_pack(mpat_pl,
+ MLXSW_REG_MPAT_ETH_RSPAN_VERSION_NO_HEADER,
+ sparms.dmac, false);
+ mlxsw_reg_mpat_eth_rspan_l3_ipv6_pack(mpat_pl, sparms.ttl, sparms.smac,
+ sparms.saddr.addr6,
+ sparms.daddr.addr6);
+
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
+}
+
+static void
+mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+ mlxsw_sp_span_entry_deconfigure_common(span_entry,
+ MLXSW_REG_MPAT_SPAN_TYPE_REMOTE_ETH_L3);
+}
+
+static const
+struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = {
+ .can_handle = is_ip6gretap_dev,
+ .parms = mlxsw_sp_span_entry_gretap6_parms,
+ .configure = mlxsw_sp_span_entry_gretap6_configure,
+ .deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure,
+};
+
+static const
+struct mlxsw_sp_span_entry_ops *const mlxsw_sp_span_entry_types[] = {
+ &mlxsw_sp_span_entry_ops_phys,
+ &mlxsw_sp_span_entry_ops_gretap4,
+ &mlxsw_sp_span_entry_ops_gretap6,
+};
+
+static int
+mlxsw_sp_span_entry_nop_parms(const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp)
+{
+ return mlxsw_sp_span_entry_unoffloadable(sparmsp);
+}
+
+static int
+mlxsw_sp_span_entry_nop_configure(struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms)
+{
+ return 0;
+}
+
+static void
+mlxsw_sp_span_entry_nop_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+}
+
+static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_nop = {
+ .parms = mlxsw_sp_span_entry_nop_parms,
+ .configure = mlxsw_sp_span_entry_nop_configure,
+ .deconfigure = mlxsw_sp_span_entry_nop_deconfigure,
+};
+
+static void
+mlxsw_sp_span_entry_configure(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms)
+{
+ if (sparms.dest_port) {
+ if (sparms.dest_port->mlxsw_sp != mlxsw_sp) {
+ netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance",
+ sparms.dest_port->dev->name);
+ sparms.dest_port = NULL;
+ } else if (span_entry->ops->configure(span_entry, sparms)) {
+ netdev_err(span_entry->to_dev, "Failed to offload mirror to %s",
+ sparms.dest_port->dev->name);
+ sparms.dest_port = NULL;
+ }
+ }
+
+ span_entry->parms = sparms;
+}
+
+static void
+mlxsw_sp_span_entry_deconfigure(struct mlxsw_sp_span_entry *span_entry)
+{
+ if (span_entry->parms.dest_port)
+ span_entry->ops->deconfigure(span_entry);
+}
+
+static struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_create(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev,
+ const struct mlxsw_sp_span_entry_ops *ops,
+ struct mlxsw_sp_span_parms sparms)
+{
+ struct mlxsw_sp_span_entry *span_entry = NULL;
+ int i;
+
+ /* find a free entry to use */
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ if (!mlxsw_sp->span.entries[i].ref_count) {
+ span_entry = &mlxsw_sp->span.entries[i];
+ break;
+ }
+ }
+ if (!span_entry)
+ return NULL;
+
+ span_entry->ops = ops;
+ span_entry->ref_count = 1;
+ span_entry->to_dev = to_dev;
+ mlxsw_sp_span_entry_configure(mlxsw_sp, span_entry, sparms);
+
+ return span_entry;
+}
+
+static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp_span_entry *span_entry)
+{
+ mlxsw_sp_span_entry_deconfigure(span_entry);
+}
+
+struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev)
+{
+ int i;
+
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+
+ if (curr->ref_count && curr->to_dev == to_dev)
+ return curr;
+ }
+ return NULL;
+}
+
+void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_span_entry *span_entry)
+{
+ mlxsw_sp_span_entry_deconfigure(span_entry);
+ span_entry->ops = &mlxsw_sp_span_entry_ops_nop;
+}
+
+static struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_find_by_id(struct mlxsw_sp *mlxsw_sp, int span_id)
+{
+ int i;
+
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+
+ if (curr->ref_count && curr->id == span_id)
+ return curr;
+ }
+ return NULL;
+}
+
+static struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_get(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev,
+ const struct mlxsw_sp_span_entry_ops *ops,
+ struct mlxsw_sp_span_parms sparms)
+{
+ struct mlxsw_sp_span_entry *span_entry;
+
+ span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev);
+ if (span_entry) {
+ /* Already exists, just take a reference */
+ span_entry->ref_count++;
+ return span_entry;
+ }
+
+ return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
+}
+
+static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_span_entry *span_entry)
+{
+ WARN_ON(!span_entry->ref_count);
+ if (--span_entry->ref_count == 0)
+ mlxsw_sp_span_entry_destroy(span_entry);
+ return 0;
+}
+
+static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
+{
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ struct mlxsw_sp_span_inspected_port *p;
+ int i;
+
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+
+ list_for_each_entry(p, &curr->bound_ports_list, list)
+ if (p->local_port == port->local_port &&
+ p->type == MLXSW_SP_SPAN_EGRESS)
+ return true;
+ }
+
+ return false;
+}
+
+static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
+ int mtu)
+{
+ return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
+}
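A quick worked example of the sizing above (not from the patch): the driver reserves two and a half MTUs plus one cell of shared buffer for an egress-mirrored port. The 96-byte cell size and ceiling division below are assumptions for illustration; the real conversion is done by the chip-specific mlxsw_sp_bytes_cells().

#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500, cell_bytes = 96;	/* cell size assumed */
	unsigned int bytes = mtu * 5 / 2;		/* 3750 bytes */
	unsigned int cells = (bytes + cell_bytes - 1) / cell_bytes + 1;

	printf("%u bytes -> %u cells\n", bytes, cells);	/* 3750 -> 41 */
	return 0;
}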
+
+int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
+{
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
+ int err;
+
+ /* If port is egress mirrored, the shared buffer size should be
+ * updated according to the MTU value.
+ */
+ if (mlxsw_sp_span_is_egress_mirror(port)) {
+ u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);
+
+ mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ if (err) {
+ netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static struct mlxsw_sp_span_inspected_port *
+mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
+ struct mlxsw_sp_span_entry *span_entry)
+{
+ struct mlxsw_sp_span_inspected_port *p;
+
+ list_for_each_entry(p, &span_entry->bound_ports_list, list)
+ if (port->local_port == p->local_port)
+ return p;
+ return NULL;
+}
+
+static int
+mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
+ struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_sp_span_type type,
+ bool bind)
+{
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ char mpar_pl[MLXSW_REG_MPAR_LEN];
+ int pa_id = span_entry->id;
+
+ /* bind the port to the SPAN entry */
+ mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
+ (enum mlxsw_reg_mpar_i_e)type, bind, pa_id);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
+}
+
+static int
+mlxsw_sp_span_inspected_port_add(struct mlxsw_sp_port *port,
+ struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_sp_span_type type,
+ bool bind)
+{
+ struct mlxsw_sp_span_inspected_port *inspected_port;
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
+ int err;
+
+ /* if it is an egress SPAN, bind a shared buffer to it */
+ if (type == MLXSW_SP_SPAN_EGRESS) {
+ u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
+ port->dev->mtu);
+
+ mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ if (err) {
+ netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
+ return err;
+ }
+ }
+
+ if (bind) {
+ err = mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
+ true);
+ if (err)
+ goto err_port_bind;
+ }
+
+ inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
+ if (!inspected_port) {
+ err = -ENOMEM;
+ goto err_inspected_port_alloc;
+ }
+ inspected_port->local_port = port->local_port;
+ inspected_port->type = type;
+ list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
+
+ return 0;
+
+err_inspected_port_alloc:
+ if (bind)
+ mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
+ false);
+err_port_bind:
+ if (type == MLXSW_SP_SPAN_EGRESS) {
+ mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ }
+ return err;
+}
+
+static void
+mlxsw_sp_span_inspected_port_del(struct mlxsw_sp_port *port,
+ struct mlxsw_sp_span_entry *span_entry,
+ enum mlxsw_sp_span_type type,
+ bool bind)
+{
+ struct mlxsw_sp_span_inspected_port *inspected_port;
+ struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
+ char sbib_pl[MLXSW_REG_SBIB_LEN];
+
+ inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
+ if (!inspected_port)
+ return;
+
+ if (bind)
+ mlxsw_sp_span_inspected_port_bind(port, span_entry, type,
+ false);
+ /* remove the SBIB buffer if it was an egress SPAN */
+ if (type == MLXSW_SP_SPAN_EGRESS) {
+ mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
+ mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
+ }
+
+ mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
+
+ list_del(&inspected_port->list);
+ kfree(inspected_port);
+}
+
+static const struct mlxsw_sp_span_entry_ops *
+mlxsw_sp_span_entry_ops(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE(mlxsw_sp_span_entry_types); ++i)
+ if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev))
+ return mlxsw_sp_span_entry_types[i];
+
+ return NULL;
+}
+
+int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
+ const struct net_device *to_dev,
+ enum mlxsw_sp_span_type type, bool bind,
+ int *p_span_id)
+{
+ struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
+ const struct mlxsw_sp_span_entry_ops *ops;
+ struct mlxsw_sp_span_parms sparms = {0};
+ struct mlxsw_sp_span_entry *span_entry;
+ int err;
+
+ ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
+ if (!ops) {
+ netdev_err(to_dev, "Cannot mirror to %s", to_dev->name);
+ return -EOPNOTSUPP;
+ }
+
+ err = ops->parms(to_dev, &sparms);
+ if (err)
+ return err;
+
+ span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
+ if (!span_entry)
+ return -ENOENT;
+
+ netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
+ span_entry->id);
+
+ err = mlxsw_sp_span_inspected_port_add(from, span_entry, type, bind);
+ if (err)
+ goto err_port_bind;
+
+ *p_span_id = span_entry->id;
+ return 0;
+
+err_port_bind:
+ mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
+ return err;
+}
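A hedged sketch of how a caller might drive this entry point; example_port_mirror_set() is hypothetical, and only the mlxsw_sp_span_mirror_add() prototype (see spectrum_span.h below) is from the patch.

static int example_port_mirror_set(struct mlxsw_sp_port *from,
				   const struct net_device *to_dev,
				   bool ingress, int *p_span_id)
{
	enum mlxsw_sp_span_type type =
		ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;

	/* bind=true attaches the analyzer directly to the port */
	return mlxsw_sp_span_mirror_add(from, to_dev, type, true, p_span_id);
}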
+
+void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
+ enum mlxsw_sp_span_type type, bool bind)
+{
+ struct mlxsw_sp_span_entry *span_entry;
+
+ span_entry = mlxsw_sp_span_entry_find_by_id(from->mlxsw_sp, span_id);
+ if (!span_entry) {
+ netdev_err(from->dev, "no span entry found\n");
+ return;
+ }
+
+ netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
+ span_entry->id);
+ mlxsw_sp_span_inspected_port_del(from, span_entry, type, bind);
+}
+
+void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp)
+{
+ int i;
+ int err;
+
+ ASSERT_RTNL();
+ for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
+ struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
+ struct mlxsw_sp_span_parms sparms = {0};
+
+ if (!curr->ref_count)
+ continue;
+
+ err = curr->ops->parms(curr->to_dev, &sparms);
+ if (err)
+ continue;
+
+ if (memcmp(&sparms, &curr->parms, sizeof(sparms))) {
+ mlxsw_sp_span_entry_deconfigure(curr);
+ mlxsw_sp_span_entry_configure(mlxsw_sp, curr, sparms);
+ }
+ }
+}
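A hypothetical usage sketch (the actual call sites live elsewhere in the driver): respin is meant to run after an event that may have moved a tunnel underlay route, and it must hold RTNL, as asserted above.

static void example_router_event(struct mlxsw_sp *mlxsw_sp)
{
	rtnl_lock();
	mlxsw_sp_span_respin(mlxsw_sp);	/* re-resolve and reconfigure */
	rtnl_unlock();
}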
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
new file mode 100644
index 000000000000..948aceb512c5
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
@@ -0,0 +1,104 @@
+/*
+ * drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h
+ * Copyright (c) 2018 Mellanox Technologies. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _MLXSW_SPECTRUM_SPAN_H
+#define _MLXSW_SPECTRUM_SPAN_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#include "spectrum_router.h"
+
+struct mlxsw_sp;
+struct mlxsw_sp_port;
+
+enum mlxsw_sp_span_type {
+ MLXSW_SP_SPAN_EGRESS,
+ MLXSW_SP_SPAN_INGRESS
+};
+
+struct mlxsw_sp_span_inspected_port {
+ struct list_head list;
+ enum mlxsw_sp_span_type type;
+ u8 local_port;
+};
+
+struct mlxsw_sp_span_parms {
+ struct mlxsw_sp_port *dest_port; /* NULL for unoffloaded SPAN. */
+ unsigned int ttl;
+ unsigned char dmac[ETH_ALEN];
+ unsigned char smac[ETH_ALEN];
+ union mlxsw_sp_l3addr daddr;
+ union mlxsw_sp_l3addr saddr;
+};
+
+struct mlxsw_sp_span_entry_ops;
+
+struct mlxsw_sp_span_entry {
+ const struct net_device *to_dev;
+ const struct mlxsw_sp_span_entry_ops *ops;
+ struct mlxsw_sp_span_parms parms;
+ struct list_head bound_ports_list;
+ int ref_count;
+ int id;
+};
+
+struct mlxsw_sp_span_entry_ops {
+ bool (*can_handle)(const struct net_device *to_dev);
+ int (*parms)(const struct net_device *to_dev,
+ struct mlxsw_sp_span_parms *sparmsp);
+ int (*configure)(struct mlxsw_sp_span_entry *span_entry,
+ struct mlxsw_sp_span_parms sparms);
+ void (*deconfigure)(struct mlxsw_sp_span_entry *span_entry);
+};
+
+int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp);
+void mlxsw_sp_span_respin(struct mlxsw_sp *mlxsw_sp);
+
+int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
+ const struct net_device *to_dev,
+ enum mlxsw_sp_span_type type,
+ bool bind, int *p_span_id);
+void mlxsw_sp_span_mirror_del(struct mlxsw_sp_port *from, int span_id,
+ enum mlxsw_sp_span_type type, bool bind);
+struct mlxsw_sp_span_entry *
+mlxsw_sp_span_entry_find_by_port(struct mlxsw_sp *mlxsw_sp,
+ const struct net_device *to_dev);
+
+void mlxsw_sp_span_entry_invalidate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_span_entry *span_entry);
+
+int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu);
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index 161bcdc012f0..c11c9a635866 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -1844,7 +1844,7 @@ mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
if (is_vlan_dev(bridge_port->dev)) {
- NL_SET_ERR_MSG(extack, "spectrum: Can not enslave a VLAN device to a VLAN-aware bridge");
+ NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge");
return -EINVAL;
}
@@ -1907,20 +1907,16 @@ mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device,
struct netlink_ext_ack *extack)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
+ struct net_device *dev = bridge_port->dev;
u16 vid;
- if (!is_vlan_dev(bridge_port->dev)) {
- NL_SET_ERR_MSG(extack, "spectrum: Only VLAN devices can be enslaved to a VLAN-unaware bridge");
- return -EINVAL;
- }
- vid = vlan_dev_vlan_id(bridge_port->dev);
-
+ vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
if (WARN_ON(!mlxsw_sp_port_vlan))
return -EINVAL;
if (mlxsw_sp_port_is_br_member(mlxsw_sp_port, bridge_device->dev)) {
- NL_SET_ERR_MSG(extack, "spectrum: Can not bridge VLAN uppers of the same port");
+ NL_SET_ERR_MSG_MOD(extack, "Can not bridge VLAN uppers of the same port");
return -EINVAL;
}
@@ -1937,8 +1933,10 @@ mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device,
struct mlxsw_sp_port *mlxsw_sp_port)
{
struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
- u16 vid = vlan_dev_vlan_id(bridge_port->dev);
+ struct net_device *dev = bridge_port->dev;
+ u16 vid;
+ vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1;
mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
if (WARN_ON(!mlxsw_sp_port_vlan))
return;
diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c
index d5b28884e21e..51fa82b429a3 100644
--- a/drivers/net/ethernet/natsemi/jazzsonic.c
+++ b/drivers/net/ethernet/natsemi/jazzsonic.c
@@ -60,14 +60,6 @@ do { \
*((volatile unsigned int *)dev->base_addr+(reg)) = (val); \
} while (0)
-
-/* use 0 for production, 1 for verification, >1 for debug */
-#ifdef SONIC_DEBUG
-static unsigned int sonic_debug = SONIC_DEBUG;
-#else
-static unsigned int sonic_debug = 1;
-#endif
-
/*
* We cannot use station (ethernet) address prefixes to detect the
* sonic controller since these are board manufacturer dependent.
@@ -117,7 +109,6 @@ static const struct net_device_ops sonic_netdev_ops = {
static int sonic_probe1(struct net_device *dev)
{
- static unsigned version_printed;
unsigned int silicon_revision;
unsigned int val;
struct sonic_local *lp = netdev_priv(dev);
@@ -133,26 +124,17 @@ static int sonic_probe1(struct net_device *dev)
* the expected location.
*/
silicon_revision = SONIC_READ(SONIC_SR);
- if (sonic_debug > 1)
- printk("SONIC Silicon Revision = 0x%04x\n",silicon_revision);
-
i = 0;
while (known_revisions[i] != 0xffff &&
known_revisions[i] != silicon_revision)
i++;
if (known_revisions[i] == 0xffff) {
- printk("SONIC ethernet controller not found (0x%4x)\n",
- silicon_revision);
+ pr_info("SONIC ethernet controller not found (0x%4x)\n",
+ silicon_revision);
goto out;
}
- if (sonic_debug && version_printed++ == 0)
- printk(version);
-
- printk(KERN_INFO "%s: Sonic ethernet found at 0x%08lx, ",
- dev_name(lp->device), dev->base_addr);
-
/*
* Put the sonic into software reset, then
* retrieve and print the ethernet address.
@@ -245,12 +227,16 @@ static int jazz_sonic_probe(struct platform_device *pdev)
err = sonic_probe1(dev);
if (err)
goto out;
+
+ pr_info("SONIC ethernet @%08lx, MAC %pM, IRQ %d\n",
+ dev->base_addr, dev->dev_addr, dev->irq);
+
+ sonic_msg_init(dev);
+
err = register_netdev(dev);
if (err)
goto out1;
- printk("%s: MAC %pM IRQ %d\n", dev->name, dev->dev_addr, dev->irq);
-
return 0;
out1:
@@ -262,8 +248,6 @@ out:
}
MODULE_DESCRIPTION("Jazz SONIC ethernet driver");
-module_param(sonic_debug, int, 0);
-MODULE_PARM_DESC(sonic_debug, "jazzsonic debug level (1-4)");
MODULE_ALIAS("platform:jazzsonic");
#include "sonic.c"
diff --git a/drivers/net/ethernet/natsemi/macsonic.c b/drivers/net/ethernet/natsemi/macsonic.c
index b922ab5cedea..0937fc2a928e 100644
--- a/drivers/net/ethernet/natsemi/macsonic.c
+++ b/drivers/net/ethernet/natsemi/macsonic.c
@@ -60,8 +60,6 @@
#include <asm/macints.h>
#include <asm/mac_via.h>
-static char mac_sonic_string[] = "macsonic";
-
#include "sonic.h"
/* These should basically be bus-size and endian independent (since
@@ -72,15 +70,6 @@ static char mac_sonic_string[] = "macsonic";
#define SONIC_WRITE(reg,val) (nubus_writew(val, dev->base_addr + (reg * 4) \
+ lp->reg_offset))
-/* use 0 for production, 1 for verification, >1 for debug */
-#ifdef SONIC_DEBUG
-static unsigned int sonic_debug = SONIC_DEBUG;
-#else
-static unsigned int sonic_debug = 1;
-#endif
-
-static int sonic_version_printed;
-
/* For onboard SONIC */
#define ONBOARD_SONIC_REGISTERS 0x50F0A000
#define ONBOARD_SONIC_PROM_BASE 0x50f08000
@@ -313,11 +302,6 @@ static int mac_onboard_sonic_probe(struct net_device *dev)
int sr;
bool commslot = macintosh_config->expansion_type == MAC_EXP_PDS_COMM;
- if (!MACH_IS_MAC)
- return -ENODEV;
-
- printk(KERN_INFO "Checking for internal Macintosh ethernet (SONIC).. ");
-
/* Bogus probing, on the models which may or may not have
Ethernet (BTW, the Ethernet *is* always at the same
address, and nothing else lives there, at least if Apple's
@@ -327,13 +311,11 @@ static int mac_onboard_sonic_probe(struct net_device *dev)
card_present = hwreg_present((void*)ONBOARD_SONIC_REGISTERS);
if (!card_present) {
- printk("none.\n");
+ pr_info("Onboard/comm-slot SONIC not found\n");
return -ENODEV;
}
}
- printk("yes\n");
-
/* Danger! My arms are flailing wildly! You *must* set lp->reg_offset
* and dev->base_addr before using SONIC_READ() or SONIC_WRITE() */
dev->base_addr = ONBOARD_SONIC_REGISTERS;
@@ -342,18 +324,10 @@ static int mac_onboard_sonic_probe(struct net_device *dev)
else
dev->irq = IRQ_NUBUS_9;
- if (!sonic_version_printed) {
- printk(KERN_INFO "%s", version);
- sonic_version_printed = 1;
- }
- printk(KERN_INFO "%s: onboard / comm-slot SONIC at 0x%08lx\n",
- dev_name(lp->device), dev->base_addr);
-
/* The PowerBook's SONIC is 16 bit always. */
if (macintosh_config->ident == MAC_MODEL_PB520) {
lp->reg_offset = 0;
lp->dma_bitmode = SONIC_BITMODE16;
- sr = SONIC_READ(SONIC_SR);
} else if (commslot) {
/* Some of the comm-slot cards are 16 bit. But some
of them are not. The 32-bit cards use offset 2 and
@@ -370,22 +344,21 @@ static int mac_onboard_sonic_probe(struct net_device *dev)
else {
lp->dma_bitmode = SONIC_BITMODE16;
lp->reg_offset = 0;
- sr = SONIC_READ(SONIC_SR);
}
} else {
/* All onboard cards are at offset 2 with 32 bit DMA. */
lp->reg_offset = 2;
lp->dma_bitmode = SONIC_BITMODE32;
- sr = SONIC_READ(SONIC_SR);
}
- printk(KERN_INFO
- "%s: revision 0x%04x, using %d bit DMA and register offset %d\n",
- dev_name(lp->device), sr, lp->dma_bitmode?32:16, lp->reg_offset);
-#if 0 /* This is sometimes useful to find out how MacOS configured the card. */
- printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", dev_name(lp->device),
- SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff);
-#endif
+ pr_info("Onboard/comm-slot SONIC, revision 0x%04x, %d bit DMA, register offset %d\n",
+ SONIC_READ(SONIC_SR), lp->dma_bitmode ? 32 : 16,
+ lp->reg_offset);
+
+ /* This is sometimes useful to find out how MacOS configured the card */
+ pr_debug("%s: DCR=0x%04x, DCR2=0x%04x\n", __func__,
+ SONIC_READ(SONIC_DCR) & 0xffff,
+ SONIC_READ(SONIC_DCR2) & 0xffff);
/* Software reset, then initialize control registers. */
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
@@ -406,11 +379,14 @@ static int mac_onboard_sonic_probe(struct net_device *dev)
/* Now look for the MAC address. */
mac_onboard_sonic_ethernet_addr(dev);
+ pr_info("SONIC ethernet @%08lx, MAC %pM, IRQ %d\n",
+ dev->base_addr, dev->dev_addr, dev->irq);
+
/* Shared init code */
return macsonic_init(dev);
}
-static int mac_nubus_sonic_ethernet_addr(struct net_device *dev,
+static int mac_sonic_nubus_ethernet_addr(struct net_device *dev,
unsigned long prom_addr, int id)
{
int i;
@@ -449,70 +425,49 @@ static int macsonic_ident(struct nubus_rsrc *fres)
return -1;
}
-static int mac_nubus_sonic_probe(struct net_device *dev)
+static int mac_sonic_nubus_probe_board(struct nubus_board *board, int id,
+ struct net_device *dev)
{
- static int slots;
- struct nubus_rsrc *ndev = NULL;
struct sonic_local* lp = netdev_priv(dev);
unsigned long base_addr, prom_addr;
u16 sonic_dcr;
- int id = -1;
int reg_offset, dma_bitmode;
- /* Find the first SONIC that hasn't been initialized already */
- for_each_func_rsrc(ndev) {
- if (ndev->category != NUBUS_CAT_NETWORK ||
- ndev->type != NUBUS_TYPE_ETHERNET)
- continue;
-
- /* Have we seen it already? */
- if (slots & (1<<ndev->board->slot))
- continue;
- slots |= 1<<ndev->board->slot;
-
- /* Is it one of ours? */
- if ((id = macsonic_ident(ndev)) != -1)
- break;
- }
-
- if (ndev == NULL)
- return -ENODEV;
-
switch (id) {
case MACSONIC_DUODOCK:
- base_addr = ndev->board->slot_addr + DUODOCK_SONIC_REGISTERS;
- prom_addr = ndev->board->slot_addr + DUODOCK_SONIC_PROM_BASE;
+ base_addr = board->slot_addr + DUODOCK_SONIC_REGISTERS;
+ prom_addr = board->slot_addr + DUODOCK_SONIC_PROM_BASE;
sonic_dcr = SONIC_DCR_EXBUS | SONIC_DCR_RFT0 | SONIC_DCR_RFT1 |
SONIC_DCR_TFT0;
reg_offset = 2;
dma_bitmode = SONIC_BITMODE32;
break;
case MACSONIC_APPLE:
- base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS;
- prom_addr = ndev->board->slot_addr + APPLE_SONIC_PROM_BASE;
+ base_addr = board->slot_addr + APPLE_SONIC_REGISTERS;
+ prom_addr = board->slot_addr + APPLE_SONIC_PROM_BASE;
sonic_dcr = SONIC_DCR_BMS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0;
reg_offset = 0;
dma_bitmode = SONIC_BITMODE32;
break;
case MACSONIC_APPLE16:
- base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS;
- prom_addr = ndev->board->slot_addr + APPLE_SONIC_PROM_BASE;
+ base_addr = board->slot_addr + APPLE_SONIC_REGISTERS;
+ prom_addr = board->slot_addr + APPLE_SONIC_PROM_BASE;
sonic_dcr = SONIC_DCR_EXBUS | SONIC_DCR_RFT1 | SONIC_DCR_TFT0 |
SONIC_DCR_PO1 | SONIC_DCR_BMS;
reg_offset = 0;
dma_bitmode = SONIC_BITMODE16;
break;
case MACSONIC_DAYNALINK:
- base_addr = ndev->board->slot_addr + APPLE_SONIC_REGISTERS;
- prom_addr = ndev->board->slot_addr + DAYNALINK_PROM_BASE;
+ base_addr = board->slot_addr + APPLE_SONIC_REGISTERS;
+ prom_addr = board->slot_addr + DAYNALINK_PROM_BASE;
sonic_dcr = SONIC_DCR_RFT1 | SONIC_DCR_TFT0 |
SONIC_DCR_PO1 | SONIC_DCR_BMS;
reg_offset = 0;
dma_bitmode = SONIC_BITMODE16;
break;
case MACSONIC_DAYNA:
- base_addr = ndev->board->slot_addr + DAYNA_SONIC_REGISTERS;
- prom_addr = ndev->board->slot_addr + DAYNA_SONIC_MAC_ADDR;
+ base_addr = board->slot_addr + DAYNA_SONIC_REGISTERS;
+ prom_addr = board->slot_addr + DAYNA_SONIC_MAC_ADDR;
sonic_dcr = SONIC_DCR_BMS |
SONIC_DCR_RFT1 | SONIC_DCR_TFT0 | SONIC_DCR_PO1;
reg_offset = 0;
@@ -528,21 +483,16 @@ static int mac_nubus_sonic_probe(struct net_device *dev)
dev->base_addr = base_addr;
lp->reg_offset = reg_offset;
lp->dma_bitmode = dma_bitmode;
- dev->irq = SLOT2IRQ(ndev->board->slot);
+ dev->irq = SLOT2IRQ(board->slot);
- if (!sonic_version_printed) {
- printk(KERN_INFO "%s", version);
- sonic_version_printed = 1;
- }
- printk(KERN_INFO "%s: %s in slot %X\n",
- dev_name(lp->device), ndev->board->name, ndev->board->slot);
- printk(KERN_INFO "%s: revision 0x%04x, using %d bit DMA and register offset %d\n",
- dev_name(lp->device), SONIC_READ(SONIC_SR), dma_bitmode?32:16, reg_offset);
+ dev_info(&board->dev, "%s, revision 0x%04x, %d bit DMA, register offset %d\n",
+ board->name, SONIC_READ(SONIC_SR),
+ lp->dma_bitmode ? 32 : 16, lp->reg_offset);
-#if 0 /* This is sometimes useful to find out how MacOS configured the card. */
- printk(KERN_INFO "%s: DCR: 0x%04x, DCR2: 0x%04x\n", dev_name(lp->device),
- SONIC_READ(SONIC_DCR) & 0xffff, SONIC_READ(SONIC_DCR2) & 0xffff);
-#endif
+ /* This is sometimes useful to find out how MacOS configured the card */
+ dev_dbg(&board->dev, "%s: DCR=0x%04x, DCR2=0x%04x\n", __func__,
+ SONIC_READ(SONIC_DCR) & 0xffff,
+ SONIC_READ(SONIC_DCR2) & 0xffff);
/* Software reset, then initialize control registers. */
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
@@ -557,14 +507,17 @@ static int mac_nubus_sonic_probe(struct net_device *dev)
SONIC_WRITE(SONIC_ISR, 0x7fff);
/* Now look for the MAC address. */
- if (mac_nubus_sonic_ethernet_addr(dev, prom_addr, id) != 0)
+ if (mac_sonic_nubus_ethernet_addr(dev, prom_addr, id) != 0)
return -ENODEV;
+ dev_info(&board->dev, "SONIC ethernet @%08lx, MAC %pM, IRQ %d\n",
+ dev->base_addr, dev->dev_addr, dev->irq);
+
/* Shared init code */
return macsonic_init(dev);
}
-static int mac_sonic_probe(struct platform_device *pdev)
+static int mac_sonic_platform_probe(struct platform_device *pdev)
{
struct net_device *dev;
struct sonic_local *lp;
@@ -579,22 +532,16 @@ static int mac_sonic_probe(struct platform_device *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
platform_set_drvdata(pdev, dev);
- /* This will catch fatal stuff like -ENOMEM as well as success */
err = mac_onboard_sonic_probe(dev);
- if (err == 0)
- goto found;
- if (err != -ENODEV)
- goto out;
- err = mac_nubus_sonic_probe(dev);
if (err)
goto out;
-found:
+
+ sonic_msg_init(dev);
+
err = register_netdev(dev);
if (err)
goto out;
- printk("%s: MAC %pM IRQ %d\n", dev->name, dev->dev_addr, dev->irq);
-
return 0;
out:
@@ -604,13 +551,11 @@ out:
}
MODULE_DESCRIPTION("Macintosh SONIC ethernet driver");
-module_param(sonic_debug, int, 0);
-MODULE_PARM_DESC(sonic_debug, "macsonic debug level (1-4)");
MODULE_ALIAS("platform:macsonic");
#include "sonic.c"
-static int mac_sonic_device_remove(struct platform_device *pdev)
+static int mac_sonic_platform_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct sonic_local* lp = netdev_priv(dev);
@@ -623,12 +568,105 @@ static int mac_sonic_device_remove(struct platform_device *pdev)
return 0;
}
-static struct platform_driver mac_sonic_driver = {
- .probe = mac_sonic_probe,
- .remove = mac_sonic_device_remove,
- .driver = {
- .name = mac_sonic_string,
+static struct platform_driver mac_sonic_platform_driver = {
+ .probe = mac_sonic_platform_probe,
+ .remove = mac_sonic_platform_remove,
+ .driver = {
+ .name = "macsonic",
+ },
+};
+
+static int mac_sonic_nubus_probe(struct nubus_board *board)
+{
+ struct net_device *ndev;
+ struct sonic_local *lp;
+ struct nubus_rsrc *fres;
+ int id = -1;
+ int err;
+
+ /* The platform driver will handle a PDS or Comm Slot card (even if
+ * it has a pseudoslot declaration ROM).
+ */
+ if (macintosh_config->expansion_type == MAC_EXP_PDS_COMM)
+ return -ENODEV;
+
+ for_each_board_func_rsrc(board, fres) {
+ if (fres->category != NUBUS_CAT_NETWORK ||
+ fres->type != NUBUS_TYPE_ETHERNET)
+ continue;
+
+ id = macsonic_ident(fres);
+ if (id != -1)
+ break;
+ }
+ if (!fres)
+ return -ENODEV;
+
+ ndev = alloc_etherdev(sizeof(struct sonic_local));
+ if (!ndev)
+ return -ENOMEM;
+
+ lp = netdev_priv(ndev);
+ lp->device = &board->dev;
+ SET_NETDEV_DEV(ndev, &board->dev);
+
+ err = mac_sonic_nubus_probe_board(board, id, ndev);
+ if (err)
+ goto out;
+
+ sonic_msg_init(ndev);
+
+ err = register_netdev(ndev);
+ if (err)
+ goto out;
+
+ nubus_set_drvdata(board, ndev);
+
+ return 0;
+
+out:
+ free_netdev(ndev);
+ return err;
+}
+
+static int mac_sonic_nubus_remove(struct nubus_board *board)
+{
+ struct net_device *ndev = nubus_get_drvdata(board);
+ struct sonic_local *lp = netdev_priv(ndev);
+
+ unregister_netdev(ndev);
+ dma_free_coherent(lp->device,
+ SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode),
+ lp->descriptors, lp->descriptors_laddr);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static struct nubus_driver mac_sonic_nubus_driver = {
+ .probe = mac_sonic_nubus_probe,
+ .remove = mac_sonic_nubus_remove,
+ .driver = {
+ .name = "macsonic-nubus",
+ .owner = THIS_MODULE,
},
};
-module_platform_driver(mac_sonic_driver);
+static int perr, nerr;
+
+static int __init mac_sonic_init(void)
+{
+ perr = platform_driver_register(&mac_sonic_platform_driver);
+ nerr = nubus_driver_register(&mac_sonic_nubus_driver);
+ return 0;
+}
+module_init(mac_sonic_init);
+
+static void __exit mac_sonic_exit(void)
+{
+ if (!perr)
+ platform_driver_unregister(&mac_sonic_platform_driver);
+ if (!nerr)
+ nubus_driver_unregister(&mac_sonic_nubus_driver);
+}
+module_exit(mac_sonic_exit);
diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c
index 612c7a44b26c..7ed08486ae23 100644
--- a/drivers/net/ethernet/natsemi/sonic.c
+++ b/drivers/net/ethernet/natsemi/sonic.c
@@ -33,7 +33,21 @@
* the NetBSD file "sys/arch/mac68k/dev/if_sn.c".
*/
+static unsigned int version_printed;
+static int sonic_debug = -1;
+module_param(sonic_debug, int, 0);
+MODULE_PARM_DESC(sonic_debug, "debug message level");
+
+static void sonic_msg_init(struct net_device *dev)
+{
+ struct sonic_local *lp = netdev_priv(dev);
+
+ lp->msg_enable = netif_msg_init(sonic_debug, 0);
+
+ if (version_printed++ == 0)
+ netif_dbg(lp, drv, dev, "%s", version);
+}
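For reference, a userspace model of how netif_msg_init() maps the sonic_debug parameter onto lp->msg_enable (default bits are 0 here, as in sonic_msg_init() above); the helper is a re-implementation for illustration, not the kernel function itself.

#include <stdio.h>

static unsigned int msg_init(int debug_value, unsigned int default_bits)
{
	if (debug_value < 0 || debug_value >= 32)
		return default_bits;		/* unset param (-1): defaults */
	if (debug_value == 0)
		return 0;			/* silence everything */
	return (1U << debug_value) - 1;		/* enable first N types */
}

int main(void)
{
	printf("0x%x 0x%x 0x%x\n",
	       msg_init(-1, 0),	/* 0x0  */
	       msg_init(5, 0),	/* 0x1f */
	       msg_init(0, 0));	/* 0x0  */
	return 0;
}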
/*
* Open/initialize the SONIC controller.
@@ -47,8 +61,7 @@ static int sonic_open(struct net_device *dev)
struct sonic_local *lp = netdev_priv(dev);
int i;
- if (sonic_debug > 2)
- printk("sonic_open: initializing sonic driver.\n");
+ netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__);
for (i = 0; i < SONIC_NUM_RRS; i++) {
struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2);
@@ -95,8 +108,7 @@ static int sonic_open(struct net_device *dev)
netif_start_queue(dev);
- if (sonic_debug > 2)
- printk("sonic_open: Initialization done.\n");
+ netif_dbg(lp, ifup, dev, "%s: Initialization done\n", __func__);
return 0;
}
@@ -110,8 +122,7 @@ static int sonic_close(struct net_device *dev)
struct sonic_local *lp = netdev_priv(dev);
int i;
- if (sonic_debug > 2)
- printk("sonic_close\n");
+ netif_dbg(lp, ifdown, dev, "%s\n", __func__);
netif_stop_queue(dev);
@@ -205,8 +216,7 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
int length;
int entry = lp->next_tx;
- if (sonic_debug > 2)
- printk("sonic_send_packet: skb=%p, dev=%p\n", skb, dev);
+ netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb);
length = skb->len;
if (length < ETH_ZLEN) {
@@ -252,14 +262,12 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
lp->next_tx = (entry + 1) & SONIC_TDS_MASK;
if (lp->tx_skb[lp->next_tx] != NULL) {
/* The ring is full, the ISR has yet to process the next TD. */
- if (sonic_debug > 3)
- printk("%s: stopping queue\n", dev->name);
+ netif_dbg(lp, tx_queued, dev, "%s: stopping queue\n", __func__);
netif_stop_queue(dev);
/* after this packet, wait for ISR to free up some TDAs */
} else netif_start_queue(dev);
- if (sonic_debug > 2)
- printk("sonic_send_packet: issuing Tx command\n");
+ netif_dbg(lp, tx_queued, dev, "%s: issuing Tx command\n", __func__);
SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);
@@ -281,8 +289,7 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
do {
if (status & SONIC_INT_PKTRX) {
- if (sonic_debug > 2)
- printk("%s: packet rx\n", dev->name);
+ netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__);
sonic_rx(dev); /* got packet(s) */
SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */
}
@@ -299,8 +306,7 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
* still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear)
*/
- if (sonic_debug > 2)
- printk("%s: tx done\n", dev->name);
+ netif_dbg(lp, intr, dev, "%s: tx done\n", __func__);
while (lp->tx_skb[entry] != NULL) {
if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
@@ -346,20 +352,20 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
* check error conditions
*/
if (status & SONIC_INT_RFO) {
- if (sonic_debug > 1)
- printk("%s: rx fifo overrun\n", dev->name);
+ netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n",
+ __func__);
lp->stats.rx_fifo_errors++;
SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */
}
if (status & SONIC_INT_RDE) {
- if (sonic_debug > 1)
- printk("%s: rx descriptors exhausted\n", dev->name);
+ netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n",
+ __func__);
lp->stats.rx_dropped++;
SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */
}
if (status & SONIC_INT_RBAE) {
- if (sonic_debug > 1)
- printk("%s: rx buffer area exceeded\n", dev->name);
+ netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n",
+ __func__);
lp->stats.rx_dropped++;
SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */
}
@@ -380,8 +386,9 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id)
/* transmit error */
if (status & SONIC_INT_TXER) {
- if ((SONIC_READ(SONIC_TCR) & SONIC_TCR_FU) && (sonic_debug > 2))
- printk(KERN_ERR "%s: tx fifo underrun\n", dev->name);
+ if (SONIC_READ(SONIC_TCR) & SONIC_TCR_FU)
+ netif_dbg(lp, tx_err, dev, "%s: tx fifo underrun\n",
+ __func__);
SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */
}
@@ -475,8 +482,8 @@ static void sonic_rx(struct net_device *dev)
if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff;
SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) {
- if (sonic_debug > 2)
- printk("%s: rx buffer exhausted\n", dev->name);
+ netif_dbg(lp, rx_err, dev, "%s: rx buffer exhausted\n",
+ __func__);
SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */
}
} else
@@ -542,9 +549,8 @@ static void sonic_multicast_list(struct net_device *dev)
(netdev_mc_count(dev) > 15)) {
rcr |= SONIC_RCR_AMC;
} else {
- if (sonic_debug > 2)
- printk("sonic_multicast_list: mc_count %d\n",
- netdev_mc_count(dev));
+ netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__,
+ netdev_mc_count(dev));
sonic_set_cam_enable(dev, 1); /* always enable our own address */
i = 1;
netdev_for_each_mc_addr(ha, dev) {
@@ -562,8 +568,7 @@ static void sonic_multicast_list(struct net_device *dev)
}
}
- if (sonic_debug > 2)
- printk("sonic_multicast_list: setting RCR=%x\n", rcr);
+ netif_dbg(lp, ifup, dev, "%s: setting RCR=%x\n", __func__, rcr);
SONIC_WRITE(SONIC_RCR, rcr);
}
@@ -596,8 +601,8 @@ static int sonic_init(struct net_device *dev)
/*
* initialize the receive resource area
*/
- if (sonic_debug > 2)
- printk("sonic_init: initialize receive resource area\n");
+ netif_dbg(lp, ifup, dev, "%s: initialize receive resource area\n",
+ __func__);
for (i = 0; i < SONIC_NUM_RRS; i++) {
u16 bufadr_l = (unsigned long)lp->rx_laddr[i] & 0xffff;
@@ -622,8 +627,7 @@ static int sonic_init(struct net_device *dev)
SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));
/* load the resource pointers */
- if (sonic_debug > 3)
- printk("sonic_init: issuing RRRA command\n");
+ netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
i = 0;
@@ -632,16 +636,17 @@ static int sonic_init(struct net_device *dev)
break;
}
- if (sonic_debug > 2)
- printk("sonic_init: status=%x i=%d\n", SONIC_READ(SONIC_CMD), i);
+ netif_dbg(lp, ifup, dev, "%s: status=%x, i=%d\n", __func__,
+ SONIC_READ(SONIC_CMD), i);
/*
* Initialize the receive descriptors so that they
* become a circular linked list, ie. let the last
* descriptor point to the first again.
*/
- if (sonic_debug > 2)
- printk("sonic_init: initialize receive descriptors\n");
+ netif_dbg(lp, ifup, dev, "%s: initialize receive descriptors\n",
+ __func__);
+
for (i=0; i<SONIC_NUM_RDS; i++) {
sonic_rda_put(dev, i, SONIC_RD_STATUS, 0);
sonic_rda_put(dev, i, SONIC_RD_PKTLEN, 0);
@@ -664,8 +669,9 @@ static int sonic_init(struct net_device *dev)
/*
* initialize transmit descriptors
*/
- if (sonic_debug > 2)
- printk("sonic_init: initialize transmit descriptors\n");
+ netif_dbg(lp, ifup, dev, "%s: initialize transmit descriptors\n",
+ __func__);
+
for (i = 0; i < SONIC_NUM_TDS; i++) {
sonic_tda_put(dev, i, SONIC_TD_STATUS, 0);
sonic_tda_put(dev, i, SONIC_TD_CONFIG, 0);
@@ -712,10 +718,8 @@ static int sonic_init(struct net_device *dev)
if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD)
break;
}
- if (sonic_debug > 2) {
- printk("sonic_init: CMD=%x, ISR=%x\n, i=%d",
- SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i);
- }
+ netif_dbg(lp, ifup, dev, "%s: CMD=%x, ISR=%x, i=%d\n", __func__,
+ SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i);
/*
* enable receiver, disable loopback
@@ -731,9 +735,8 @@ static int sonic_init(struct net_device *dev)
if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0)
printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd);
- if (sonic_debug > 2)
- printk("sonic_init: new status=%x\n",
- SONIC_READ(SONIC_CMD));
+ netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__,
+ SONIC_READ(SONIC_CMD));
return 0;
}
diff --git a/drivers/net/ethernet/natsemi/sonic.h b/drivers/net/ethernet/natsemi/sonic.h
index 421b1a283fed..2b27f7049acb 100644
--- a/drivers/net/ethernet/natsemi/sonic.h
+++ b/drivers/net/ethernet/natsemi/sonic.h
@@ -319,6 +319,7 @@ struct sonic_local {
unsigned int eol_rx;
unsigned int eol_tx; /* last unacked transmit packet */
unsigned int next_tx; /* next free TD */
+ int msg_enable;
struct device *device; /* generic device */
struct net_device_stats stats;
};
@@ -336,6 +337,7 @@ static struct net_device_stats *sonic_get_stats(struct net_device *dev);
static void sonic_multicast_list(struct net_device *dev);
static int sonic_init(struct net_device *dev);
static void sonic_tx_timeout(struct net_device *dev);
+static void sonic_msg_init(struct net_device *dev);
/* Internal inlines for reading/writing DMA buffers. Note that bus
size and endianness matter here, whereas they don't for registers,
diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c
index 1817deea98a4..e1b886e87a76 100644
--- a/drivers/net/ethernet/natsemi/xtsonic.c
+++ b/drivers/net/ethernet/natsemi/xtsonic.c
@@ -73,14 +73,6 @@ extern void xtboard_get_ether_addr(unsigned char *buf);
#define SONIC_WRITE(reg,val) \
*((volatile unsigned int *)dev->base_addr+reg) = val
-
-/* Use 0 for production, 1 for verification, and >2 for debug */
-#ifdef SONIC_DEBUG
-static unsigned int sonic_debug = SONIC_DEBUG;
-#else
-static unsigned int sonic_debug = 1;
-#endif
-
/*
* We cannot use station (ethernet) address prefixes to detect the
* sonic controller since these are board manufacturer dependent.
@@ -130,7 +122,6 @@ static const struct net_device_ops xtsonic_netdev_ops = {
static int __init sonic_probe1(struct net_device *dev)
{
- static unsigned version_printed = 0;
unsigned int silicon_revision;
struct sonic_local *lp = netdev_priv(dev);
unsigned int base_addr = dev->base_addr;
@@ -146,23 +137,17 @@ static int __init sonic_probe1(struct net_device *dev)
* the expected location.
*/
silicon_revision = SONIC_READ(SONIC_SR);
- if (sonic_debug > 1)
- printk("SONIC Silicon Revision = 0x%04x\n",silicon_revision);
-
i = 0;
while ((known_revisions[i] != 0xffff) &&
(known_revisions[i] != silicon_revision))
i++;
if (known_revisions[i] == 0xffff) {
- printk("SONIC ethernet controller not found (0x%4x)\n",
- silicon_revision);
+ pr_info("SONIC ethernet controller not found (0x%4x)\n",
+ silicon_revision);
return -ENODEV;
}
- if (sonic_debug && version_printed++ == 0)
- printk(version);
-
/*
* Put the sonic into software reset, then retrieve ethernet address.
* Note: we are assuming that the boot-loader has initialized the cam.
@@ -273,12 +258,15 @@ int xtsonic_probe(struct platform_device *pdev)
if ((err = sonic_probe1(dev)))
goto out;
+
+ pr_info("SONIC ethernet @%08lx, MAC %pM, IRQ %d\n",
+ dev->base_addr, dev->dev_addr, dev->irq);
+
+ sonic_msg_init(dev);
+
if ((err = register_netdev(dev)))
goto out1;
- printk("%s: SONIC ethernet @%08lx, MAC %pM, IRQ %d\n", dev->name,
- dev->base_addr, dev->dev_addr, dev->irq);
-
return 0;
out1:
@@ -290,8 +278,6 @@ out:
}
MODULE_DESCRIPTION("Xtensa XT2000 SONIC ethernet driver");
-module_param(sonic_debug, int, 0);
-MODULE_PARM_DESC(sonic_debug, "xtsonic debug level (1-4)");
#include "sonic.c"
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/Makefile b/drivers/net/ethernet/netronome/nfp/bpf/Makefile
new file mode 100644
index 000000000000..805fa28f391a
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/bpf/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+# kbuild requires Makefile in a directory to build individual objects
diff --git a/drivers/net/ethernet/netronome/nfp/flower/Makefile b/drivers/net/ethernet/netronome/nfp/flower/Makefile
new file mode 100644
index 000000000000..805fa28f391a
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/flower/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+# kbuild requires Makefile in a directory to build individual objects
diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
index adfe474c2cf0..28c1cd5b823b 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
@@ -61,6 +61,13 @@
#define NFP_FLOWER_MASK_MPLS_BOS BIT(8)
#define NFP_FLOWER_MASK_MPLS_Q BIT(0)
+/* Compressed HW representation of TCP Flags */
+#define NFP_FL_TCP_FLAG_URG BIT(4)
+#define NFP_FL_TCP_FLAG_PSH BIT(3)
+#define NFP_FL_TCP_FLAG_RST BIT(2)
+#define NFP_FL_TCP_FLAG_SYN BIT(1)
+#define NFP_FL_TCP_FLAG_FIN BIT(0)
+
#define NFP_FL_SC_ACT_DROP 0x80000000
#define NFP_FL_SC_ACT_USER 0x7D000000
#define NFP_FL_SC_ACT_POPV 0x6A000000
@@ -257,7 +264,7 @@ struct nfp_flower_tp_ports {
* 3 2 1
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
- * | DSCP |ECN| protocol | reserved |
+ * | DSCP |ECN| protocol | ttl | flags |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | ipv4_addr_src |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
@@ -268,7 +275,7 @@ struct nfp_flower_ipv4 {
u8 tos;
u8 proto;
u8 ttl;
- u8 reserved;
+ u8 flags;
__be32 ipv4_src;
__be32 ipv4_dst;
};
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 332ff0fdc038..c5cebf6fb1d3 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -41,6 +41,7 @@
#include <linux/time64.h>
#include <linux/types.h>
#include <net/pkt_cls.h>
+#include <net/tcp.h>
#include <linux/workqueue.h>
struct net_device;
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 37c2ecae2a7a..b3bc8279d4fb 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -181,6 +181,26 @@ nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *frame,
frame->tos = flow_ip->tos;
frame->ttl = flow_ip->ttl;
}
+
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) {
+ struct flow_dissector_key_tcp *tcp;
+ u32 tcp_flags;
+
+ tcp = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_TCP, target);
+ tcp_flags = be16_to_cpu(tcp->flags);
+
+ if (tcp_flags & TCPHDR_FIN)
+ frame->flags |= NFP_FL_TCP_FLAG_FIN;
+ if (tcp_flags & TCPHDR_SYN)
+ frame->flags |= NFP_FL_TCP_FLAG_SYN;
+ if (tcp_flags & TCPHDR_RST)
+ frame->flags |= NFP_FL_TCP_FLAG_RST;
+ if (tcp_flags & TCPHDR_PSH)
+ frame->flags |= NFP_FL_TCP_FLAG_PSH;
+ if (tcp_flags & TCPHDR_URG)
+ frame->flags |= NFP_FL_TCP_FLAG_URG;
+ }
}
static void
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index eb5c13dea8f5..f3586c519805 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -44,11 +44,16 @@
#include "../nfp_net.h"
#include "../nfp_port.h"
+#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
+ (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
+ TCPHDR_PSH | TCPHDR_URG)
+
#define NFP_FLOWER_WHITELIST_DISSECTOR \
(BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
BIT(FLOW_DISSECTOR_KEY_BASIC) | \
BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
+ BIT(FLOW_DISSECTOR_KEY_TCP) | \
BIT(FLOW_DISSECTOR_KEY_PORTS) | \
BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
BIT(FLOW_DISSECTOR_KEY_VLAN) | \
@@ -288,6 +293,35 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
}
}
+ if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) {
+ struct flow_dissector_key_tcp *tcp;
+ u32 tcp_flags;
+
+ tcp = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_TCP,
+ flow->key);
+ tcp_flags = be16_to_cpu(tcp->flags);
+
+ if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS)
+ return -EOPNOTSUPP;
+
+ /* We only support PSH and URG flags when either
+ * FIN, SYN or RST is present as well.
+ */
+ if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
+ !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
+ return -EOPNOTSUPP;
+
+ /* We need to store TCP flags in the IPv4 key space, thus
+ * we need to ensure we include an IPv4 key layer if we have
+ * not done so already.
+ */
+ if (!(key_layer & NFP_FLOWER_LAYER_IPV4)) {
+ key_layer |= NFP_FLOWER_LAYER_IPV4;
+ key_size += sizeof(struct nfp_flower_ipv4);
+ }
+ }
+
ret_key_ls->key_layer = key_layer;
ret_key_ls->key_layer_two = key_layer_two;
ret_key_ls->key_size = key_size;
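A few worked cases for the checks just added (illustrative only; TCPHDR_ECE stands in for any flag outside the supported set):

/* TCPHDR_SYN               -> offloaded: supported, no lone PSH/URG
 * TCPHDR_SYN | TCPHDR_PSH  -> offloaded: PSH is accompanied by SYN
 * TCPHDR_PSH               -> -EOPNOTSUPP: PSH without FIN, SYN or RST
 * TCPHDR_ECE               -> -EOPNOTSUPP: outside NFP_FLOWER_SUPPORTED_TCPFLAGS
 */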
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index ab301d56430b..c4b1f344b4da 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -645,6 +645,7 @@ MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_4x10_1x40.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0097-0001_8x10.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x10.nffw");
MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_2x25.nffw");
+MODULE_FIRMWARE("netronome/nic_AMDA0099-0001_1x10_1x25.nffw");
MODULE_AUTHOR("Netronome Systems <oss-drivers@netronome.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
index 4499a7333078..bb63c115537d 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015-2017 Netronome Systems, Inc.
+ * Copyright (C) 2015-2018 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General Public License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
@@ -51,12 +51,12 @@
* The configuration BAR is 8K in size, but due to
* THB-350, 32k needs to be reserved.
*/
-#define NFP_NET_CFG_BAR_SZ (32 * 1024)
+#define NFP_NET_CFG_BAR_SZ (32 * 1024)
/**
* Offset in Freelist buffer where packet starts on RX
*/
-#define NFP_NET_RX_OFFSET 32
+#define NFP_NET_RX_OFFSET 32
/**
* LSO parameters
@@ -75,65 +75,65 @@
#define NFP_NET_META_PORTID 5
#define NFP_NET_META_CSUM 6 /* checksum complete type */
-#define NFP_META_PORT_ID_CTRL ~0U
+#define NFP_META_PORT_ID_CTRL ~0U
/**
* Hash type pre-pended when an RSS hash was computed
*/
-#define NFP_NET_RSS_NONE 0
-#define NFP_NET_RSS_IPV4 1
-#define NFP_NET_RSS_IPV6 2
-#define NFP_NET_RSS_IPV6_EX 3
-#define NFP_NET_RSS_IPV4_TCP 4
-#define NFP_NET_RSS_IPV6_TCP 5
-#define NFP_NET_RSS_IPV6_EX_TCP 6
-#define NFP_NET_RSS_IPV4_UDP 7
-#define NFP_NET_RSS_IPV6_UDP 8
-#define NFP_NET_RSS_IPV6_EX_UDP 9
+#define NFP_NET_RSS_NONE 0
+#define NFP_NET_RSS_IPV4 1
+#define NFP_NET_RSS_IPV6 2
+#define NFP_NET_RSS_IPV6_EX 3
+#define NFP_NET_RSS_IPV4_TCP 4
+#define NFP_NET_RSS_IPV6_TCP 5
+#define NFP_NET_RSS_IPV6_EX_TCP 6
+#define NFP_NET_RSS_IPV4_UDP 7
+#define NFP_NET_RSS_IPV6_UDP 8
+#define NFP_NET_RSS_IPV6_EX_UDP 9
/**
* Ring counts
- * %NFP_NET_TXR_MAX: Maximum number of TX rings
- * %NFP_NET_RXR_MAX: Maximum number of RX rings
+ * %NFP_NET_TXR_MAX: Maximum number of TX rings
+ * %NFP_NET_RXR_MAX: Maximum number of RX rings
*/
-#define NFP_NET_TXR_MAX 64
-#define NFP_NET_RXR_MAX 64
+#define NFP_NET_TXR_MAX 64
+#define NFP_NET_RXR_MAX 64
/**
* Read/Write config words (0x0000 - 0x002c)
- * %NFP_NET_CFG_CTRL: Global control
+ * %NFP_NET_CFG_CTRL: Global control
* %NFP_NET_CFG_UPDATE: Indicate which fields are updated
* %NFP_NET_CFG_TXRS_ENABLE: Bitmask of enabled TX rings
* %NFP_NET_CFG_RXRS_ENABLE: Bitmask of enabled RX rings
- * %NFP_NET_CFG_MTU: Set MTU size
+ * %NFP_NET_CFG_MTU: Set MTU size
* %NFP_NET_CFG_FLBUFSZ: Set freelist buffer size (must be larger than MTU)
- * %NFP_NET_CFG_EXN: MSI-X table entry for exceptions
- * %NFP_NET_CFG_LSC: MSI-X table entry for link state changes
+ * %NFP_NET_CFG_EXN: MSI-X table entry for exceptions
+ * %NFP_NET_CFG_LSC: MSI-X table entry for link state changes
* %NFP_NET_CFG_MACADDR: MAC address
*
* TODO:
* - define Error details in UPDATE
*/
-#define NFP_NET_CFG_CTRL 0x0000
-#define NFP_NET_CFG_CTRL_ENABLE (0x1 << 0) /* Global enable */
-#define NFP_NET_CFG_CTRL_PROMISC (0x1 << 1) /* Enable Promisc mode */
-#define NFP_NET_CFG_CTRL_L2BC (0x1 << 2) /* Allow L2 Broadcast */
-#define NFP_NET_CFG_CTRL_L2MC (0x1 << 3) /* Allow L2 Multicast */
-#define NFP_NET_CFG_CTRL_RXCSUM (0x1 << 4) /* Enable RX Checksum */
-#define NFP_NET_CFG_CTRL_TXCSUM (0x1 << 5) /* Enable TX Checksum */
-#define NFP_NET_CFG_CTRL_RXVLAN (0x1 << 6) /* Enable VLAN strip */
-#define NFP_NET_CFG_CTRL_TXVLAN (0x1 << 7) /* Enable VLAN insert */
-#define NFP_NET_CFG_CTRL_SCATTER (0x1 << 8) /* Scatter DMA */
-#define NFP_NET_CFG_CTRL_GATHER (0x1 << 9) /* Gather DMA */
-#define NFP_NET_CFG_CTRL_LSO (0x1 << 10) /* LSO/TSO (version 1) */
+#define NFP_NET_CFG_CTRL 0x0000
+#define NFP_NET_CFG_CTRL_ENABLE (0x1 << 0) /* Global enable */
+#define NFP_NET_CFG_CTRL_PROMISC (0x1 << 1) /* Enable Promisc mode */
+#define NFP_NET_CFG_CTRL_L2BC (0x1 << 2) /* Allow L2 Broadcast */
+#define NFP_NET_CFG_CTRL_L2MC (0x1 << 3) /* Allow L2 Multicast */
+#define NFP_NET_CFG_CTRL_RXCSUM (0x1 << 4) /* Enable RX Checksum */
+#define NFP_NET_CFG_CTRL_TXCSUM (0x1 << 5) /* Enable TX Checksum */
+#define NFP_NET_CFG_CTRL_RXVLAN (0x1 << 6) /* Enable VLAN strip */
+#define NFP_NET_CFG_CTRL_TXVLAN (0x1 << 7) /* Enable VLAN insert */
+#define NFP_NET_CFG_CTRL_SCATTER (0x1 << 8) /* Scatter DMA */
+#define NFP_NET_CFG_CTRL_GATHER (0x1 << 9) /* Gather DMA */
+#define NFP_NET_CFG_CTRL_LSO (0x1 << 10) /* LSO/TSO (version 1) */
#define NFP_NET_CFG_CTRL_CTAG_FILTER (0x1 << 11) /* VLAN CTAG filtering */
-#define NFP_NET_CFG_CTRL_RINGCFG (0x1 << 16) /* Ring runtime changes */
+#define NFP_NET_CFG_CTRL_RINGCFG (0x1 << 16) /* Ring runtime changes */
#define NFP_NET_CFG_CTRL_RSS (0x1 << 17) /* RSS (version 1) */
-#define NFP_NET_CFG_CTRL_IRQMOD (0x1 << 18) /* Interrupt moderation */
-#define NFP_NET_CFG_CTRL_RINGPRIO (0x1 << 19) /* Ring priorities */
-#define NFP_NET_CFG_CTRL_MSIXAUTO (0x1 << 20) /* MSI-X auto-masking */
-#define NFP_NET_CFG_CTRL_TXRWB (0x1 << 21) /* Write-back of TX ring*/
-#define NFP_NET_CFG_CTRL_L2SWITCH (0x1 << 22) /* L2 Switch */
+#define NFP_NET_CFG_CTRL_IRQMOD (0x1 << 18) /* Interrupt moderation */
+#define NFP_NET_CFG_CTRL_RINGPRIO (0x1 << 19) /* Ring priorities */
+#define NFP_NET_CFG_CTRL_MSIXAUTO (0x1 << 20) /* MSI-X auto-masking */
+#define NFP_NET_CFG_CTRL_TXRWB (0x1 << 21) /* Write-back of TX ring*/
+#define NFP_NET_CFG_CTRL_L2SWITCH (0x1 << 22) /* L2 Switch */
#define NFP_NET_CFG_CTRL_L2SWITCH_LOCAL (0x1 << 23) /* Switch to local */
#define NFP_NET_CFG_CTRL_VXLAN (0x1 << 24) /* VXLAN tunnel support */
#define NFP_NET_CFG_CTRL_NVGRE (0x1 << 25) /* NVGRE tunnel support */
@@ -152,35 +152,35 @@
#define NFP_NET_CFG_CTRL_CHAIN_META (NFP_NET_CFG_CTRL_RSS2 | \
NFP_NET_CFG_CTRL_CSUM_COMPLETE)
-#define NFP_NET_CFG_UPDATE 0x0004
-#define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */
-#define NFP_NET_CFG_UPDATE_RING (0x1 << 1) /* Ring config change */
-#define NFP_NET_CFG_UPDATE_RSS (0x1 << 2) /* RSS config change */
-#define NFP_NET_CFG_UPDATE_TXRPRIO (0x1 << 3) /* TX Ring prio change */
-#define NFP_NET_CFG_UPDATE_RXRPRIO (0x1 << 4) /* RX Ring prio change */
-#define NFP_NET_CFG_UPDATE_MSIX (0x1 << 5) /* MSI-X change */
-#define NFP_NET_CFG_UPDATE_L2SWITCH (0x1 << 6) /* Switch changes */
-#define NFP_NET_CFG_UPDATE_RESET (0x1 << 7) /* Update due to FLR */
-#define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */
+#define NFP_NET_CFG_UPDATE 0x0004
+#define NFP_NET_CFG_UPDATE_GEN (0x1 << 0) /* General update */
+#define NFP_NET_CFG_UPDATE_RING (0x1 << 1) /* Ring config change */
+#define NFP_NET_CFG_UPDATE_RSS (0x1 << 2) /* RSS config change */
+#define NFP_NET_CFG_UPDATE_TXRPRIO (0x1 << 3) /* TX Ring prio change */
+#define NFP_NET_CFG_UPDATE_RXRPRIO (0x1 << 4) /* RX Ring prio change */
+#define NFP_NET_CFG_UPDATE_MSIX (0x1 << 5) /* MSI-X change */
+#define NFP_NET_CFG_UPDATE_L2SWITCH (0x1 << 6) /* Switch changes */
+#define NFP_NET_CFG_UPDATE_RESET (0x1 << 7) /* Update due to FLR */
+#define NFP_NET_CFG_UPDATE_IRQMOD (0x1 << 8) /* IRQ mod change */
#define NFP_NET_CFG_UPDATE_VXLAN (0x1 << 9) /* VXLAN port change */
#define NFP_NET_CFG_UPDATE_BPF (0x1 << 10) /* BPF program load */
#define NFP_NET_CFG_UPDATE_MACADDR (0x1 << 11) /* MAC address change */
#define NFP_NET_CFG_UPDATE_MBOX (0x1 << 12) /* Mailbox update */
#define NFP_NET_CFG_UPDATE_VF (0x1 << 13) /* VF settings change */
-#define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* A error occurred */
-#define NFP_NET_CFG_TXRS_ENABLE 0x0008
-#define NFP_NET_CFG_RXRS_ENABLE 0x0010
-#define NFP_NET_CFG_MTU 0x0018
-#define NFP_NET_CFG_FLBUFSZ 0x001c
-#define NFP_NET_CFG_EXN 0x001f
-#define NFP_NET_CFG_LSC 0x0020
-#define NFP_NET_CFG_MACADDR 0x0024
+#define NFP_NET_CFG_UPDATE_ERR (0x1 << 31) /* An error occurred */
+#define NFP_NET_CFG_TXRS_ENABLE 0x0008
+#define NFP_NET_CFG_RXRS_ENABLE 0x0010
+#define NFP_NET_CFG_MTU 0x0018
+#define NFP_NET_CFG_FLBUFSZ 0x001c
+#define NFP_NET_CFG_EXN 0x001f
+#define NFP_NET_CFG_LSC 0x0020
+#define NFP_NET_CFG_MACADDR 0x0024
/**
* Read-only words (0x0030 - 0x0050):
* %NFP_NET_CFG_VERSION: Firmware version number
- * %NFP_NET_CFG_STS: Status
- * %NFP_NET_CFG_CAP: Capabilities (same bits as %NFP_NET_CFG_CTRL)
+ * %NFP_NET_CFG_STS: Status
+ * %NFP_NET_CFG_CAP: Capabilities (same bits as %NFP_NET_CFG_CTRL)
* %NFP_NET_CFG_MAX_TXRINGS: Maximum number of TX rings
* %NFP_NET_CFG_MAX_RXRINGS: Maximum number of RX rings
* %NFP_NET_CFG_MAX_MTU: Maximum supported MTU
@@ -190,37 +190,37 @@
* TODO:
* - define more STS bits
*/
-#define NFP_NET_CFG_VERSION 0x0030
+#define NFP_NET_CFG_VERSION 0x0030
#define NFP_NET_CFG_VERSION_RESERVED_MASK (0xff << 24)
#define NFP_NET_CFG_VERSION_CLASS_MASK (0xff << 16)
-#define NFP_NET_CFG_VERSION_CLASS(x) (((x) & 0xff) << 16)
+#define NFP_NET_CFG_VERSION_CLASS(x) (((x) & 0xff) << 16)
#define NFP_NET_CFG_VERSION_CLASS_GENERIC 0
#define NFP_NET_CFG_VERSION_MAJOR_MASK (0xff << 8)
-#define NFP_NET_CFG_VERSION_MAJOR(x) (((x) & 0xff) << 8)
+#define NFP_NET_CFG_VERSION_MAJOR(x) (((x) & 0xff) << 8)
#define NFP_NET_CFG_VERSION_MINOR_MASK (0xff << 0)
-#define NFP_NET_CFG_VERSION_MINOR(x) (((x) & 0xff) << 0)
-#define NFP_NET_CFG_STS 0x0034
-#define NFP_NET_CFG_STS_LINK (0x1 << 0) /* Link up or down */
+#define NFP_NET_CFG_VERSION_MINOR(x) (((x) & 0xff) << 0)
+#define NFP_NET_CFG_STS 0x0034
+#define NFP_NET_CFG_STS_LINK (0x1 << 0) /* Link up or down */
/* Link rate */
#define NFP_NET_CFG_STS_LINK_RATE_SHIFT 1
#define NFP_NET_CFG_STS_LINK_RATE_MASK 0xF
-#define NFP_NET_CFG_STS_LINK_RATE \
+#define NFP_NET_CFG_STS_LINK_RATE \
(NFP_NET_CFG_STS_LINK_RATE_MASK << NFP_NET_CFG_STS_LINK_RATE_SHIFT)
#define NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED 0
-#define NFP_NET_CFG_STS_LINK_RATE_UNKNOWN 1
-#define NFP_NET_CFG_STS_LINK_RATE_1G 2
-#define NFP_NET_CFG_STS_LINK_RATE_10G 3
-#define NFP_NET_CFG_STS_LINK_RATE_25G 4
-#define NFP_NET_CFG_STS_LINK_RATE_40G 5
-#define NFP_NET_CFG_STS_LINK_RATE_50G 6
-#define NFP_NET_CFG_STS_LINK_RATE_100G 7
-#define NFP_NET_CFG_CAP 0x0038
-#define NFP_NET_CFG_MAX_TXRINGS 0x003c
-#define NFP_NET_CFG_MAX_RXRINGS 0x0040
-#define NFP_NET_CFG_MAX_MTU 0x0044
+#define NFP_NET_CFG_STS_LINK_RATE_UNKNOWN 1
+#define NFP_NET_CFG_STS_LINK_RATE_1G 2
+#define NFP_NET_CFG_STS_LINK_RATE_10G 3
+#define NFP_NET_CFG_STS_LINK_RATE_25G 4
+#define NFP_NET_CFG_STS_LINK_RATE_40G 5
+#define NFP_NET_CFG_STS_LINK_RATE_50G 6
+#define NFP_NET_CFG_STS_LINK_RATE_100G 7
+#define NFP_NET_CFG_CAP 0x0038
+#define NFP_NET_CFG_MAX_TXRINGS 0x003c
+#define NFP_NET_CFG_MAX_RXRINGS 0x0040
+#define NFP_NET_CFG_MAX_MTU 0x0044
/* Next two words are being used by VFs for solving THB350 issue */
-#define NFP_NET_CFG_START_TXQ 0x0048
-#define NFP_NET_CFG_START_RXQ 0x004c
+#define NFP_NET_CFG_START_TXQ 0x0048
+#define NFP_NET_CFG_START_RXQ 0x004c
/**
* Prepend configuration
@@ -280,8 +280,8 @@
/**
* 40B reserved for future use (0x0098 - 0x00c0)
*/
-#define NFP_NET_CFG_RESERVED 0x0098
-#define NFP_NET_CFG_RESERVED_SZ 0x0028
+#define NFP_NET_CFG_RESERVED 0x0098
+#define NFP_NET_CFG_RESERVED_SZ 0x0028
/**
* RSS configuration (0x0100 - 0x01ac):
@@ -290,26 +290,26 @@
* %NFP_NET_CFG_RSS_KEY: RSS "secret" key
* %NFP_NET_CFG_RSS_ITBL: RSS indirection table
*/
-#define NFP_NET_CFG_RSS_BASE 0x0100
-#define NFP_NET_CFG_RSS_CTRL NFP_NET_CFG_RSS_BASE
-#define NFP_NET_CFG_RSS_MASK (0x7f)
-#define NFP_NET_CFG_RSS_MASK_of(_x) ((_x) & 0x7f)
-#define NFP_NET_CFG_RSS_IPV4 (1 << 8) /* RSS for IPv4 */
-#define NFP_NET_CFG_RSS_IPV6 (1 << 9) /* RSS for IPv6 */
-#define NFP_NET_CFG_RSS_IPV4_TCP (1 << 10) /* RSS for IPv4/TCP */
-#define NFP_NET_CFG_RSS_IPV4_UDP (1 << 11) /* RSS for IPv4/UDP */
-#define NFP_NET_CFG_RSS_IPV6_TCP (1 << 12) /* RSS for IPv6/TCP */
-#define NFP_NET_CFG_RSS_IPV6_UDP (1 << 13) /* RSS for IPv6/UDP */
+#define NFP_NET_CFG_RSS_BASE 0x0100
+#define NFP_NET_CFG_RSS_CTRL NFP_NET_CFG_RSS_BASE
+#define NFP_NET_CFG_RSS_MASK (0x7f)
+#define NFP_NET_CFG_RSS_MASK_of(_x) ((_x) & 0x7f)
+#define NFP_NET_CFG_RSS_IPV4 (1 << 8) /* RSS for IPv4 */
+#define NFP_NET_CFG_RSS_IPV6 (1 << 9) /* RSS for IPv6 */
+#define NFP_NET_CFG_RSS_IPV4_TCP (1 << 10) /* RSS for IPv4/TCP */
+#define NFP_NET_CFG_RSS_IPV4_UDP (1 << 11) /* RSS for IPv4/UDP */
+#define NFP_NET_CFG_RSS_IPV6_TCP (1 << 12) /* RSS for IPv6/TCP */
+#define NFP_NET_CFG_RSS_IPV6_UDP (1 << 13) /* RSS for IPv6/UDP */
#define NFP_NET_CFG_RSS_HFUNC 0xff000000
-#define NFP_NET_CFG_RSS_TOEPLITZ (1 << 24) /* Use Toeplitz hash */
+#define NFP_NET_CFG_RSS_TOEPLITZ (1 << 24) /* Use Toeplitz hash */
#define NFP_NET_CFG_RSS_XOR (1 << 25) /* Use XOR as hash */
#define NFP_NET_CFG_RSS_CRC32 (1 << 26) /* Use CRC32 as hash */
#define NFP_NET_CFG_RSS_HFUNCS 3
-#define NFP_NET_CFG_RSS_KEY (NFP_NET_CFG_RSS_BASE + 0x4)
-#define NFP_NET_CFG_RSS_KEY_SZ 0x28
-#define NFP_NET_CFG_RSS_ITBL (NFP_NET_CFG_RSS_BASE + 0x4 + \
+#define NFP_NET_CFG_RSS_KEY (NFP_NET_CFG_RSS_BASE + 0x4)
+#define NFP_NET_CFG_RSS_KEY_SZ 0x28
+#define NFP_NET_CFG_RSS_ITBL (NFP_NET_CFG_RSS_BASE + 0x4 + \
NFP_NET_CFG_RSS_KEY_SZ)
-#define NFP_NET_CFG_RSS_ITBL_SZ 0x80
+#define NFP_NET_CFG_RSS_ITBL_SZ 0x80
/**
* TX ring configuration (0x200 - 0x800)
@@ -321,13 +321,13 @@
* %NFP_NET_CFG_TXR_PRIO: Per TX ring priority (1B entries)
* %NFP_NET_CFG_TXR_IRQ_MOD: Per TX ring interrupt moderation packet
*/
-#define NFP_NET_CFG_TXR_BASE 0x0200
-#define NFP_NET_CFG_TXR_ADDR(_x) (NFP_NET_CFG_TXR_BASE + ((_x) * 0x8))
-#define NFP_NET_CFG_TXR_WB_ADDR(_x) (NFP_NET_CFG_TXR_BASE + 0x200 + \
+#define NFP_NET_CFG_TXR_BASE 0x0200
+#define NFP_NET_CFG_TXR_ADDR(_x) (NFP_NET_CFG_TXR_BASE + ((_x) * 0x8))
+#define NFP_NET_CFG_TXR_WB_ADDR(_x) (NFP_NET_CFG_TXR_BASE + 0x200 + \
((_x) * 0x8))
-#define NFP_NET_CFG_TXR_SZ(_x) (NFP_NET_CFG_TXR_BASE + 0x400 + (_x))
-#define NFP_NET_CFG_TXR_VEC(_x) (NFP_NET_CFG_TXR_BASE + 0x440 + (_x))
-#define NFP_NET_CFG_TXR_PRIO(_x) (NFP_NET_CFG_TXR_BASE + 0x480 + (_x))
+#define NFP_NET_CFG_TXR_SZ(_x) (NFP_NET_CFG_TXR_BASE + 0x400 + (_x))
+#define NFP_NET_CFG_TXR_VEC(_x) (NFP_NET_CFG_TXR_BASE + 0x440 + (_x))
+#define NFP_NET_CFG_TXR_PRIO(_x) (NFP_NET_CFG_TXR_BASE + 0x480 + (_x))
#define NFP_NET_CFG_TXR_IRQ_MOD(_x) (NFP_NET_CFG_TXR_BASE + 0x500 + \
((_x) * 0x4))
@@ -340,11 +340,11 @@
* %NFP_NET_CFG_RXR_PRIO: Per RX ring priority (1B entries)
* %NFP_NET_CFG_RXR_IRQ_MOD: Per RX ring interrupt moderation (4B entries)
*/
-#define NFP_NET_CFG_RXR_BASE 0x0800
-#define NFP_NET_CFG_RXR_ADDR(_x) (NFP_NET_CFG_RXR_BASE + ((_x) * 0x8))
-#define NFP_NET_CFG_RXR_SZ(_x) (NFP_NET_CFG_RXR_BASE + 0x200 + (_x))
-#define NFP_NET_CFG_RXR_VEC(_x) (NFP_NET_CFG_RXR_BASE + 0x240 + (_x))
-#define NFP_NET_CFG_RXR_PRIO(_x) (NFP_NET_CFG_RXR_BASE + 0x280 + (_x))
+#define NFP_NET_CFG_RXR_BASE 0x0800
+#define NFP_NET_CFG_RXR_ADDR(_x) (NFP_NET_CFG_RXR_BASE + ((_x) * 0x8))
+#define NFP_NET_CFG_RXR_SZ(_x) (NFP_NET_CFG_RXR_BASE + 0x200 + (_x))
+#define NFP_NET_CFG_RXR_VEC(_x) (NFP_NET_CFG_RXR_BASE + 0x240 + (_x))
+#define NFP_NET_CFG_RXR_PRIO(_x) (NFP_NET_CFG_RXR_BASE + 0x280 + (_x))
#define NFP_NET_CFG_RXR_IRQ_MOD(_x) (NFP_NET_CFG_RXR_BASE + 0x300 + \
((_x) * 0x4))
@@ -358,36 +358,36 @@
* the MSI-X entry and the host driver must clear the register to
* re-enable the interrupt.
*/
-#define NFP_NET_CFG_ICR_BASE 0x0c00
-#define NFP_NET_CFG_ICR(_x) (NFP_NET_CFG_ICR_BASE + (_x))
-#define NFP_NET_CFG_ICR_UNMASKED 0x0
-#define NFP_NET_CFG_ICR_RXTX 0x1
-#define NFP_NET_CFG_ICR_LSC 0x2
+#define NFP_NET_CFG_ICR_BASE 0x0c00
+#define NFP_NET_CFG_ICR(_x) (NFP_NET_CFG_ICR_BASE + (_x))
+#define NFP_NET_CFG_ICR_UNMASKED 0x0
+#define NFP_NET_CFG_ICR_RXTX 0x1
+#define NFP_NET_CFG_ICR_LSC 0x2
/**
* General device stats (0x0d00 - 0x0d90)
* all counters are 64bit.
*/
-#define NFP_NET_CFG_STATS_BASE 0x0d00
-#define NFP_NET_CFG_STATS_RX_DISCARDS (NFP_NET_CFG_STATS_BASE + 0x00)
-#define NFP_NET_CFG_STATS_RX_ERRORS (NFP_NET_CFG_STATS_BASE + 0x08)
-#define NFP_NET_CFG_STATS_RX_OCTETS (NFP_NET_CFG_STATS_BASE + 0x10)
-#define NFP_NET_CFG_STATS_RX_UC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x18)
-#define NFP_NET_CFG_STATS_RX_MC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x20)
-#define NFP_NET_CFG_STATS_RX_BC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x28)
-#define NFP_NET_CFG_STATS_RX_FRAMES (NFP_NET_CFG_STATS_BASE + 0x30)
-#define NFP_NET_CFG_STATS_RX_MC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x38)
-#define NFP_NET_CFG_STATS_RX_BC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x40)
-
-#define NFP_NET_CFG_STATS_TX_DISCARDS (NFP_NET_CFG_STATS_BASE + 0x48)
-#define NFP_NET_CFG_STATS_TX_ERRORS (NFP_NET_CFG_STATS_BASE + 0x50)
-#define NFP_NET_CFG_STATS_TX_OCTETS (NFP_NET_CFG_STATS_BASE + 0x58)
-#define NFP_NET_CFG_STATS_TX_UC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x60)
-#define NFP_NET_CFG_STATS_TX_MC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x68)
-#define NFP_NET_CFG_STATS_TX_BC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x70)
-#define NFP_NET_CFG_STATS_TX_FRAMES (NFP_NET_CFG_STATS_BASE + 0x78)
-#define NFP_NET_CFG_STATS_TX_MC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x80)
-#define NFP_NET_CFG_STATS_TX_BC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x88)
+#define NFP_NET_CFG_STATS_BASE 0x0d00
+#define NFP_NET_CFG_STATS_RX_DISCARDS (NFP_NET_CFG_STATS_BASE + 0x00)
+#define NFP_NET_CFG_STATS_RX_ERRORS (NFP_NET_CFG_STATS_BASE + 0x08)
+#define NFP_NET_CFG_STATS_RX_OCTETS (NFP_NET_CFG_STATS_BASE + 0x10)
+#define NFP_NET_CFG_STATS_RX_UC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x18)
+#define NFP_NET_CFG_STATS_RX_MC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x20)
+#define NFP_NET_CFG_STATS_RX_BC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x28)
+#define NFP_NET_CFG_STATS_RX_FRAMES (NFP_NET_CFG_STATS_BASE + 0x30)
+#define NFP_NET_CFG_STATS_RX_MC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x38)
+#define NFP_NET_CFG_STATS_RX_BC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x40)
+
+#define NFP_NET_CFG_STATS_TX_DISCARDS (NFP_NET_CFG_STATS_BASE + 0x48)
+#define NFP_NET_CFG_STATS_TX_ERRORS (NFP_NET_CFG_STATS_BASE + 0x50)
+#define NFP_NET_CFG_STATS_TX_OCTETS (NFP_NET_CFG_STATS_BASE + 0x58)
+#define NFP_NET_CFG_STATS_TX_UC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x60)
+#define NFP_NET_CFG_STATS_TX_MC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x68)
+#define NFP_NET_CFG_STATS_TX_BC_OCTETS (NFP_NET_CFG_STATS_BASE + 0x70)
+#define NFP_NET_CFG_STATS_TX_FRAMES (NFP_NET_CFG_STATS_BASE + 0x78)
+#define NFP_NET_CFG_STATS_TX_MC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x80)
+#define NFP_NET_CFG_STATS_TX_BC_FRAMES (NFP_NET_CFG_STATS_BASE + 0x88)
#define NFP_NET_CFG_STATS_APP0_FRAMES (NFP_NET_CFG_STATS_BASE + 0x90)
#define NFP_NET_CFG_STATS_APP0_BYTES (NFP_NET_CFG_STATS_BASE + 0x98)
@@ -404,11 +404,11 @@
* %NFP_NET_CFG_TXR_STATS: TX ring statistics (Packet and Byte count)
* %NFP_NET_CFG_RXR_STATS: RX ring statistics (Packet and Byte count)
*/
-#define NFP_NET_CFG_TXR_STATS_BASE 0x1000
-#define NFP_NET_CFG_TXR_STATS(_x) (NFP_NET_CFG_TXR_STATS_BASE + \
+#define NFP_NET_CFG_TXR_STATS_BASE 0x1000
+#define NFP_NET_CFG_TXR_STATS(_x) (NFP_NET_CFG_TXR_STATS_BASE + \
((_x) * 0x10))
-#define NFP_NET_CFG_RXR_STATS_BASE 0x1400
-#define NFP_NET_CFG_RXR_STATS(_x) (NFP_NET_CFG_RXR_STATS_BASE + \
+#define NFP_NET_CFG_RXR_STATS_BASE 0x1400
+#define NFP_NET_CFG_RXR_STATS(_x) (NFP_NET_CFG_RXR_STATS_BASE + \
((_x) * 0x10))
/**
@@ -444,7 +444,7 @@
* %NFP_NET_CFG_TLV_TYPE: Offset of type within the TLV
* %NFP_NET_CFG_TLV_TYPE_REQUIRED: Driver must be able to parse the TLV
* %NFP_NET_CFG_TLV_LENGTH: Offset of length within the TLV
- * %NFP_NET_CFG_TLV_LENGTH_INC: TLV length increments
+ * %NFP_NET_CFG_TLV_LENGTH_INC: TLV length increments
* %NFP_NET_CFG_TLV_VALUE: Offset of value within the TLV
*
* List of simple TLV structures, first one starts at %NFP_NET_CFG_TLV_BASE.
@@ -457,12 +457,12 @@
* Note that the 4 byte TLV header is not counted in %NFP_NET_CFG_TLV_LENGTH.
*/
#define NFP_NET_CFG_TLV_TYPE 0x00
-#define NFP_NET_CFG_TLV_TYPE_REQUIRED 0x8000
+#define NFP_NET_CFG_TLV_TYPE_REQUIRED 0x8000
#define NFP_NET_CFG_TLV_LENGTH 0x02
#define NFP_NET_CFG_TLV_LENGTH_INC 4
#define NFP_NET_CFG_TLV_VALUE 0x04
-#define NFP_NET_CFG_TLV_HEADER_REQUIRED 0x80000000
+#define NFP_NET_CFG_TLV_HEADER_REQUIRED 0x80000000
#define NFP_NET_CFG_TLV_HEADER_TYPE 0x7fff0000
#define NFP_NET_CFG_TLV_HEADER_LENGTH 0x0000ffff
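A minimal sketch of consuming the TLV list described above; cfg_readl() and tlv_type_known() are hypothetical stand-ins for the driver's config-BAR accessor and its table of understood types:

static int nfp_cfg_walk_tlvs(u32 off, u32 end)
{
	while (off < end) {
		u32 hdr = cfg_readl(off);	/* 4-byte TLV header */
		u16 type = (hdr & NFP_NET_CFG_TLV_HEADER_TYPE) >> 16;
		u16 len = hdr & NFP_NET_CFG_TLV_HEADER_LENGTH;

		/* Required TLVs must be understood by the driver. */
		if ((hdr & NFP_NET_CFG_TLV_HEADER_REQUIRED) &&
		    !tlv_type_known(type))
			return -EINVAL;

		/* The length excludes the header and grows in 4-byte steps. */
		off += NFP_NET_CFG_TLV_VALUE + len;
	}
	return 0;
}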
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/Makefile b/drivers/net/ethernet/netronome/nfp/nfpcore/Makefile
new file mode 100644
index 000000000000..805fa28f391a
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+# kbuild requires Makefile in a directory to build individual objects
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/Makefile b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/Makefile
new file mode 100644
index 000000000000..805fa28f391a
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+# kbuild requires Makefile in a directory to build individual objects
diff --git a/drivers/net/ethernet/netronome/nfp/nic/Makefile b/drivers/net/ethernet/netronome/nfp/nic/Makefile
new file mode 100644
index 000000000000..805fa28f391a
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nic/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+# kbuild requires Makefile in a directory to build individual objects
diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
index ca4a81dc1ace..03ad4eeac7f8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c
@@ -1784,7 +1784,7 @@ enum qed_iwarp_mpa_pkt_type {
/* an fpdu can be fragmented across at most 3 bds: header, partial mpa, unaligned */
#define QED_IWARP_MAX_BDS_PER_FPDU 3
-char *pkt_type_str[] = {
+static const char * const pkt_type_str[] = {
"QED_IWARP_MPA_PKT_PACKED",
"QED_IWARP_MPA_PKT_PARTIAL",
"QED_IWARP_MPA_PKT_UNALIGNED"
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 0bf7d1759250..7055db161b1b 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -99,12 +99,12 @@ static const int multicast_filter_limit = 32;
#define RTL8169_PHY_TIMEOUT (10*HZ)
/* write/read MMIO register */
-#define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
-#define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
-#define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
-#define RTL_R8(reg) readb (ioaddr + (reg))
-#define RTL_R16(reg) readw (ioaddr + (reg))
-#define RTL_R32(reg) readl (ioaddr + (reg))
+#define RTL_W8(tp, reg, val8) writeb((val8), tp->mmio_addr + (reg))
+#define RTL_W16(tp, reg, val16) writew((val16), tp->mmio_addr + (reg))
+#define RTL_W32(tp, reg, val32) writel((val32), tp->mmio_addr + (reg))
+#define RTL_R8(tp, reg) readb(tp->mmio_addr + (reg))
+#define RTL_R16(tp, reg) readw(tp->mmio_addr + (reg))
+#define RTL_R32(tp, reg) readl(tp->mmio_addr + (reg))
enum mac_version {
RTL_GIGA_MAC_VER_01 = 0,
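The remainder of this file follows one mechanical conversion, sketched here on a hypothetical caller: rather than materialising a local ioaddr, callers pass tp and the reworked macros derive tp->mmio_addr themselves:

/* before */
void __iomem *ioaddr = tp->mmio_addr;
RTL_W8(Config1, RTL_R8(Config1) | PMEnable);

/* after */
RTL_W8(tp, Config1, RTL_R8(tp, Config1) | PMEnable);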
@@ -735,12 +735,6 @@ struct ring_info {
u8 __pad[sizeof(void *) - sizeof(u32)];
};
-enum features {
- RTL_FEATURE_WOL = (1 << 0),
- RTL_FEATURE_MSI = (1 << 1),
- RTL_FEATURE_GMII = (1 << 2),
-};
-
struct rtl8169_counters {
__le64 tx_packets;
__le64 rx_packets;
@@ -829,7 +823,7 @@ struct rtl8169_private {
void (*phy_reset_enable)(struct rtl8169_private *tp);
void (*hw_start)(struct net_device *);
unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
- unsigned int (*link_ok)(void __iomem *);
+ unsigned int (*link_ok)(struct rtl8169_private *tp);
int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
bool (*tso_csum)(struct rtl8169_private *, struct sk_buff *, u32 *);
@@ -984,56 +978,46 @@ static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
DECLARE_RTL_COND(rtl_ocp_gphy_cond)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- return RTL_R32(GPHY_OCP) & OCPAR_FLAG;
+ return RTL_R32(tp, GPHY_OCP) & OCPAR_FLAG;
}
static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
if (rtl_ocp_reg_failure(tp, reg))
return;
- RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
+ RTL_W32(tp, GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
}
static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
if (rtl_ocp_reg_failure(tp, reg))
return 0;
- RTL_W32(GPHY_OCP, reg << 15);
+ RTL_W32(tp, GPHY_OCP, reg << 15);
return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
- (RTL_R32(GPHY_OCP) & 0xffff) : ~0;
+ (RTL_R32(tp, GPHY_OCP) & 0xffff) : ~0;
}
static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
if (rtl_ocp_reg_failure(tp, reg))
return;
- RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data);
+ RTL_W32(tp, OCPDR, OCPAR_FLAG | (reg << 15) | data);
}
static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
if (rtl_ocp_reg_failure(tp, reg))
return 0;
- RTL_W32(OCPDR, reg << 15);
+ RTL_W32(tp, OCPDR, reg << 15);
- return RTL_R32(OCPDR);
+ return RTL_R32(tp, OCPDR);
}
#define OCP_STD_PHY_BASE 0xa400
@@ -1076,16 +1060,12 @@ static int mac_mcu_read(struct rtl8169_private *tp, int reg)
DECLARE_RTL_COND(rtl_phyar_cond)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- return RTL_R32(PHYAR) & 0x80000000;
+ return RTL_R32(tp, PHYAR) & 0x80000000;
}
static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
+ RTL_W32(tp, PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
/*
@@ -1097,13 +1077,12 @@ static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
{
- void __iomem *ioaddr = tp->mmio_addr;
int value;
- RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16);
+ RTL_W32(tp, PHYAR, 0x0 | (reg & 0x1f) << 16);
value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
- RTL_R32(PHYAR) & 0xffff : ~0;
+ RTL_R32(tp, PHYAR) & 0xffff : ~0;
/*
* According to hardware specs a 20us delay is required after read
@@ -1116,18 +1095,14 @@ static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
DECLARE_RTL_COND(rtl_ocpar_cond)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- return RTL_R32(OCPAR) & OCPAR_FLAG;
+ return RTL_R32(tp, OCPAR) & OCPAR_FLAG;
}
static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
- RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
- RTL_W32(EPHY_RXER_NUM, 0);
+ RTL_W32(tp, OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
+ RTL_W32(tp, OCPAR, OCPAR_GPHY_WRITE_CMD);
+ RTL_W32(tp, EPHY_RXER_NUM, 0);
rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
}
@@ -1140,51 +1115,46 @@ static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);
mdelay(1);
- RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
- RTL_W32(EPHY_RXER_NUM, 0);
+ RTL_W32(tp, OCPAR, OCPAR_GPHY_READ_CMD);
+ RTL_W32(tp, EPHY_RXER_NUM, 0);
return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
- RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0;
+ RTL_R32(tp, OCPDR) & OCPDR_DATA_MASK : ~0;
}
#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
-static void r8168dp_2_mdio_start(void __iomem *ioaddr)
+static void r8168dp_2_mdio_start(struct rtl8169_private *tp)
{
- RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
+ RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
}
-static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
+static void r8168dp_2_mdio_stop(struct rtl8169_private *tp)
{
- RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
+ RTL_W32(tp, 0xd0, RTL_R32(tp, 0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
}
static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- r8168dp_2_mdio_start(ioaddr);
+ r8168dp_2_mdio_start(tp);
r8169_mdio_write(tp, reg, value);
- r8168dp_2_mdio_stop(ioaddr);
+ r8168dp_2_mdio_stop(tp);
}
static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
{
- void __iomem *ioaddr = tp->mmio_addr;
int value;
- r8168dp_2_mdio_start(ioaddr);
+ r8168dp_2_mdio_start(tp);
value = r8169_mdio_read(tp, reg);
- r8168dp_2_mdio_stop(ioaddr);
+ r8168dp_2_mdio_stop(tp);
return value;
}
@@ -1229,16 +1199,12 @@ static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
DECLARE_RTL_COND(rtl_ephyar_cond)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- return RTL_R32(EPHYAR) & EPHYAR_FLAG;
+ return RTL_R32(tp, EPHYAR) & EPHYAR_FLAG;
}
static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
+ RTL_W32(tp, EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
(reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
@@ -1248,41 +1214,33 @@ static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
+ RTL_W32(tp, EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
- RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0;
+ RTL_R32(tp, EPHYAR) & EPHYAR_DATA_MASK : ~0;
}
DECLARE_RTL_COND(rtl_eriar_cond)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- return RTL_R32(ERIAR) & ERIAR_FLAG;
+ return RTL_R32(tp, ERIAR) & ERIAR_FLAG;
}
static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
u32 val, int type)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
BUG_ON((addr & 3) || (mask == 0));
- RTL_W32(ERIDR, val);
- RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
+ RTL_W32(tp, ERIDR, val);
+ RTL_W32(tp, ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
}
static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
+ RTL_W32(tp, ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
- RTL_R32(ERIDR) : ~0;
+ RTL_R32(tp, ERIDR) : ~0;
}
static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
@@ -1296,11 +1254,9 @@ static void rtl_w0w1_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
static u32 r8168dp_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
+ RTL_W32(tp, OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
- RTL_R32(OCPDR) : ~0;
+ RTL_R32(tp, OCPDR) : ~0;
}
static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
@@ -1328,10 +1284,8 @@ static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg,
u32 data)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W32(OCPDR, data);
- RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
+ RTL_W32(tp, OCPDR, data);
+ RTL_W32(tp, OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
}
@@ -1393,19 +1347,15 @@ DECLARE_RTL_COND(rtl_ep_ocp_read_cond)
DECLARE_RTL_COND(rtl_ocp_tx_cond)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- return RTL_R8(IBISR0) & 0x20;
+ return RTL_R8(tp, IBISR0) & 0x20;
}
static void rtl8168ep_stop_cmac(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W8(IBCR2, RTL_R8(IBCR2) & ~0x01);
+ RTL_W8(tp, IBCR2, RTL_R8(tp, IBCR2) & ~0x01);
rtl_msleep_loop_wait_high(tp, &rtl_ocp_tx_cond, 50, 2000);
- RTL_W8(IBISR0, RTL_R8(IBISR0) | 0x20);
- RTL_W8(IBCR0, RTL_R8(IBCR0) & ~0x01);
+ RTL_W8(tp, IBISR0, RTL_R8(tp, IBISR0) | 0x20);
+ RTL_W8(tp, IBCR0, RTL_R8(tp, IBCR0) & ~0x01);
}
static void rtl8168dp_driver_start(struct rtl8169_private *tp)
@@ -1473,19 +1423,19 @@ static void rtl8168_driver_stop(struct rtl8169_private *tp)
}
}
-static int r8168dp_check_dash(struct rtl8169_private *tp)
+static bool r8168dp_check_dash(struct rtl8169_private *tp)
{
u16 reg = rtl8168_get_ocp_reg(tp);
- return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
+ return !!(ocp_read(tp, 0x0f, reg) & 0x00008000);
}
-static int r8168ep_check_dash(struct rtl8169_private *tp)
+static bool r8168ep_check_dash(struct rtl8169_private *tp)
{
- return (ocp_read(tp, 0x0f, 0x128) & 0x00000001) ? 1 : 0;
+ return !!(ocp_read(tp, 0x0f, 0x128) & 0x00000001);
}
-static int r8168_check_dash(struct rtl8169_private *tp)
+static bool r8168_check_dash(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_27:
@@ -1497,7 +1447,7 @@ static int r8168_check_dash(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_51:
return r8168ep_check_dash(tp);
default:
- return 0;
+ return false;
}
}
@@ -1518,49 +1468,37 @@ static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
DECLARE_RTL_COND(rtl_efusear_cond)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- return RTL_R32(EFUSEAR) & EFUSEAR_FLAG;
+ return RTL_R32(tp, EFUSEAR) & EFUSEAR_FLAG;
}
static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
+ RTL_W32(tp, EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
- RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
+ RTL_R32(tp, EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
}
static u16 rtl_get_events(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- return RTL_R16(IntrStatus);
+ return RTL_R16(tp, IntrStatus);
}
static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W16(IntrStatus, bits);
+ RTL_W16(tp, IntrStatus, bits);
mmiowb();
}
static void rtl_irq_disable(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W16(IntrMask, 0);
+ RTL_W16(tp, IntrMask, 0);
mmiowb();
}
static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W16(IntrMask, bits);
+ RTL_W16(tp, IntrMask, bits);
}
#define RTL_EVENT_NAPI_RX (RxOK | RxErr)
@@ -1574,18 +1512,14 @@ static void rtl_irq_enable_all(struct rtl8169_private *tp)
static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
rtl_irq_disable(tp);
rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
- RTL_R8(ChipCmd);
+ RTL_R8(tp, ChipCmd);
}
static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- return RTL_R32(TBICSR) & TBIReset;
+ return RTL_R32(tp, TBICSR) & TBIReset;
}
static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
@@ -1593,21 +1527,19 @@ static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
}
-static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
+static unsigned int rtl8169_tbi_link_ok(struct rtl8169_private *tp)
{
- return RTL_R32(TBICSR) & TBILinkOk;
+ return RTL_R32(tp, TBICSR) & TBILinkOk;
}
-static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
+static unsigned int rtl8169_xmii_link_ok(struct rtl8169_private *tp)
{
- return RTL_R8(PHYstatus) & LinkStatus;
+ return RTL_R8(tp, PHYstatus) & LinkStatus;
}
static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
+ RTL_W32(tp, TBICSR, RTL_R32(tp, TBICSR) | TBIReset);
}
static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
@@ -1620,7 +1552,6 @@ static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
static void rtl_link_chg_patch(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
struct net_device *dev = tp->dev;
if (!netif_running(dev))
@@ -1628,12 +1559,12 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
tp->mac_version == RTL_GIGA_MAC_VER_38) {
- if (RTL_R8(PHYstatus) & _1000bpsF) {
+ if (RTL_R8(tp, PHYstatus) & _1000bpsF) {
rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
ERIAR_EXGMAC);
rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
ERIAR_EXGMAC);
- } else if (RTL_R8(PHYstatus) & _100bps) {
+ } else if (RTL_R8(tp, PHYstatus) & _100bps) {
rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
ERIAR_EXGMAC);
rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
@@ -1651,7 +1582,7 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
ERIAR_EXGMAC);
} else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
tp->mac_version == RTL_GIGA_MAC_VER_36) {
- if (RTL_R8(PHYstatus) & _1000bpsF) {
+ if (RTL_R8(tp, PHYstatus) & _1000bpsF) {
rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
ERIAR_EXGMAC);
rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
@@ -1663,7 +1594,7 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
ERIAR_EXGMAC);
}
} else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
- if (RTL_R8(PHYstatus) & _10bps) {
+ if (RTL_R8(tp, PHYstatus) & _10bps) {
rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
ERIAR_EXGMAC);
rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
@@ -1676,10 +1607,9 @@ static void rtl_link_chg_patch(struct rtl8169_private *tp)
}
static void rtl8169_check_link_status(struct net_device *dev,
- struct rtl8169_private *tp,
- void __iomem *ioaddr)
+ struct rtl8169_private *tp)
{
- if (tp->link_ok(ioaddr)) {
+ if (tp->link_ok(tp)) {
rtl_link_chg_patch(tp);
/* This is to cancel a scheduled suspend if there's one. */
pm_request_resume(&tp->pci_dev->dev);
@@ -1697,15 +1627,14 @@ static void rtl8169_check_link_status(struct net_device *dev,
static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
u8 options;
u32 wolopts = 0;
- options = RTL_R8(Config1);
+ options = RTL_R8(tp, Config1);
if (!(options & PMEnable))
return 0;
- options = RTL_R8(Config3);
+ options = RTL_R8(tp, Config3);
if (options & LinkUp)
wolopts |= WAKE_PHY;
switch (tp->mac_version) {
@@ -1735,7 +1664,7 @@ static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
break;
}
- options = RTL_R8(Config5);
+ options = RTL_R8(tp, Config5);
if (options & UWF)
wolopts |= WAKE_UCAST;
if (options & BWF)
@@ -1768,7 +1697,6 @@ static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
{
- void __iomem *ioaddr = tp->mmio_addr;
unsigned int i, tmp;
static const struct {
u32 opt;
@@ -1784,7 +1712,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
};
u8 options;
- RTL_W8(Cfg9346, Cfg9346_Unlock);
+ RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_34:
@@ -1826,28 +1754,28 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
}
for (i = 0; i < tmp; i++) {
- options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
+ options = RTL_R8(tp, cfg[i].reg) & ~cfg[i].mask;
if (wolopts & cfg[i].opt)
options |= cfg[i].mask;
- RTL_W8(cfg[i].reg, options);
+ RTL_W8(tp, cfg[i].reg, options);
}
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
- options = RTL_R8(Config1) & ~PMEnable;
+ options = RTL_R8(tp, Config1) & ~PMEnable;
if (wolopts)
options |= PMEnable;
- RTL_W8(Config1, options);
+ RTL_W8(tp, Config1, options);
break;
default:
- options = RTL_R8(Config2) & ~PME_SIGNAL;
+ options = RTL_R8(tp, Config2) & ~PME_SIGNAL;
if (wolopts)
options |= PME_SIGNAL;
- RTL_W8(Config2, options);
+ RTL_W8(tp, Config2, options);
break;
}
- RTL_W8(Cfg9346, Cfg9346_Lock);
+ RTL_W8(tp, Cfg9346, Cfg9346_Lock);
}
static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -1859,10 +1787,6 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
rtl_lock_work(tp);
- if (wol->wolopts)
- tp->features |= RTL_FEATURE_WOL;
- else
- tp->features &= ~RTL_FEATURE_WOL;
if (pm_runtime_active(d))
__rtl8169_set_wol(tp, wol->wolopts);
else
@@ -1906,16 +1830,15 @@ static int rtl8169_set_speed_tbi(struct net_device *dev,
u8 autoneg, u16 speed, u8 duplex, u32 ignored)
{
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
int ret = 0;
u32 reg;
- reg = RTL_R32(TBICSR);
+ reg = RTL_R32(tp, TBICSR);
if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
(duplex == DUPLEX_FULL)) {
- RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
+ RTL_W32(tp, TBICSR, reg & ~(TBINwEnable | TBINwRestart));
} else if (autoneg == AUTONEG_ENABLE)
- RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
+ RTL_W32(tp, TBICSR, reg | TBINwEnable | TBINwRestart);
else {
netif_warn(tp, link, dev,
"incorrect speed setting refused in TBI mode\n");
@@ -2040,16 +1963,15 @@ static void __rtl8169_set_features(struct net_device *dev,
netdev_features_t features)
{
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
u32 rx_config;
- rx_config = RTL_R32(RxConfig);
+ rx_config = RTL_R32(tp, RxConfig);
if (features & NETIF_F_RXALL)
rx_config |= (AcceptErr | AcceptRunt);
else
rx_config &= ~(AcceptErr | AcceptRunt);
- RTL_W32(RxConfig, rx_config);
+ RTL_W32(tp, RxConfig, rx_config);
if (features & NETIF_F_RXCSUM)
tp->cp_cmd |= RxChkSum;
@@ -2061,10 +1983,10 @@ static void __rtl8169_set_features(struct net_device *dev,
else
tp->cp_cmd &= ~RxVlan;
- tp->cp_cmd |= RTL_R16(CPlusCmd) & ~(RxVlan | RxChkSum);
+ tp->cp_cmd |= RTL_R16(tp, CPlusCmd) & ~(RxVlan | RxChkSum);
- RTL_W16(CPlusCmd, tp->cp_cmd);
- RTL_R16(CPlusCmd);
+ RTL_W16(tp, CPlusCmd, tp->cp_cmd);
+ RTL_R16(tp, CPlusCmd);
}
static int rtl8169_set_features(struct net_device *dev,
@@ -2101,7 +2023,6 @@ static int rtl8169_get_link_ksettings_tbi(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
u32 status;
u32 supported, advertising;
@@ -2109,7 +2030,7 @@ static int rtl8169_get_link_ksettings_tbi(struct net_device *dev,
SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
cmd->base.port = PORT_FIBRE;
- status = RTL_R32(TBICSR);
+ status = RTL_R32(tp, TBICSR);
advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
cmd->base.autoneg = !!(status & TBINwEnable);
@@ -2224,23 +2145,20 @@ static int rtl8169_get_sset_count(struct net_device *dev, int sset)
DECLARE_RTL_COND(rtl_counters_cond)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- return RTL_R32(CounterAddrLow) & (CounterReset | CounterDump);
+ return RTL_R32(tp, CounterAddrLow) & (CounterReset | CounterDump);
}
static bool rtl8169_do_counters(struct net_device *dev, u32 counter_cmd)
{
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
dma_addr_t paddr = tp->counters_phys_addr;
u32 cmd;
- RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
- RTL_R32(CounterAddrHigh);
+ RTL_W32(tp, CounterAddrHigh, (u64)paddr >> 32);
+ RTL_R32(tp, CounterAddrHigh);
cmd = (u64)paddr & DMA_BIT_MASK(32);
- RTL_W32(CounterAddrLow, cmd);
- RTL_W32(CounterAddrLow, cmd | counter_cmd);
+ RTL_W32(tp, CounterAddrLow, cmd);
+ RTL_W32(tp, CounterAddrLow, cmd | counter_cmd);
return rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000);
}
@@ -2262,13 +2180,12 @@ static bool rtl8169_reset_counters(struct net_device *dev)
static bool rtl8169_update_counters(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
/*
* Some chips are unable to dump tally counters when the receiver
* is disabled.
*/
- if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
+ if ((RTL_R8(tp, ChipCmd) & CmdRxEnb) == 0)
return true;
return rtl8169_do_counters(dev, CounterDump);
@@ -2448,7 +2365,6 @@ static const struct rtl_coalesce_info *rtl_coalesce_info(struct net_device *dev)
static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
const struct rtl_coalesce_info *ci;
const struct rtl_coalesce_scale *scale;
struct {
@@ -2468,10 +2384,10 @@ static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
if (IS_ERR(ci))
return PTR_ERR(ci);
- scale = &ci->scalev[RTL_R16(CPlusCmd) & 3];
+ scale = &ci->scalev[RTL_R16(tp, CPlusCmd) & 3];
/* read IntrMitigate and adjust according to scale */
- for (w = RTL_R16(IntrMitigate); w; w >>= RTL_COALESCE_SHIFT, p++) {
+ for (w = RTL_R16(tp, IntrMitigate); w; w >>= RTL_COALESCE_SHIFT, p++) {
*p->max_frames = (w & RTL_COALESCE_MASK) << 2;
w >>= RTL_COALESCE_SHIFT;
*p->usecs = w & RTL_COALESCE_MASK;
@@ -2518,7 +2434,6 @@ static const struct rtl_coalesce_scale *rtl_coalesce_choose_scale(
static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
const struct rtl_coalesce_scale *scale;
struct {
u32 frames;
@@ -2566,11 +2481,11 @@ static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
rtl_lock_work(tp);
- RTL_W16(IntrMitigate, swab16(w));
+ RTL_W16(tp, IntrMitigate, swab16(w));
tp->cp_cmd = (tp->cp_cmd & ~3) | cp01;
- RTL_W16(CPlusCmd, tp->cp_cmd);
- RTL_R16(CPlusCmd);
+ RTL_W16(tp, CPlusCmd, tp->cp_cmd);
+ RTL_R16(tp, CPlusCmd);
rtl_unlock_work(tp);
@@ -2600,17 +2515,16 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
struct net_device *dev, u8 default_version)
{
- void __iomem *ioaddr = tp->mmio_addr;
/*
* The driver currently handles the 8168Bf and the 8168Be identically
* but they can be identified more specifically through the test below
* if needed:
*
- * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
+ * (RTL_R32(tp, TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
*
* Same thing for the 8101Eb and the 8101Ec:
*
- * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
+ * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
*/
static const struct rtl_mac_info {
u32 mask;
@@ -2708,7 +2622,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
const struct rtl_mac_info *p = mac_info;
u32 reg;
- reg = RTL_R32(TxConfig);
+ reg = RTL_R32(tp, TxConfig);
while ((reg & p->mask) != p->val)
p++;
tp->mac_version = p->mac_version;
@@ -3805,8 +3719,6 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
rtl_writephy(tp, 0x1f, 0x0005);
rtl_w0w1_phy(tp, 0x01, 0x0100, 0x0000);
rtl_writephy(tp, 0x1f, 0x0000);
- /* soft-reset phy */
- rtl_writephy(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE | BMCR_ANRESTART);
/* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
@@ -4591,7 +4503,6 @@ static void rtl_hw_phy_config(struct net_device *dev)
static void rtl_phy_work(struct rtl8169_private *tp)
{
struct timer_list *timer = &tp->timer;
- void __iomem *ioaddr = tp->mmio_addr;
unsigned long timeout = RTL8169_PHY_TIMEOUT;
assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
@@ -4605,7 +4516,7 @@ static void rtl_phy_work(struct rtl8169_private *tp)
goto out_mod_timer;
}
- if (tp->link_ok(ioaddr))
+ if (tp->link_ok(tp))
return;
netif_dbg(tp, link, tp->dev, "PHY reset until link up\n");
@@ -4643,21 +4554,17 @@ static void rtl8169_phy_reset(struct net_device *dev,
static bool rtl_tbi_enabled(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
- (RTL_R8(PHYstatus) & TBI_Enable);
+ (RTL_R8(tp, PHYstatus) & TBI_Enable);
}
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
rtl_hw_phy_config(dev);
if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
- RTL_W8(0x82, 0x01);
+ RTL_W8(tp, 0x82, 0x01);
}
pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
@@ -4667,7 +4574,7 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
- RTL_W8(0x82, 0x01);
+ RTL_W8(tp, 0x82, 0x01);
dprintk("Set PHY Reg 0x0bh = 0x00h\n");
rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
}
@@ -4687,22 +4594,20 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
rtl_lock_work(tp);
- RTL_W8(Cfg9346, Cfg9346_Unlock);
+ RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
- RTL_W32(MAC4, addr[4] | addr[5] << 8);
- RTL_R32(MAC4);
+ RTL_W32(tp, MAC4, addr[4] | addr[5] << 8);
+ RTL_R32(tp, MAC4);
- RTL_W32(MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
- RTL_R32(MAC0);
+ RTL_W32(tp, MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
+ RTL_R32(tp, MAC0);
if (tp->mac_version == RTL_GIGA_MAC_VER_34)
rtl_rar_exgmac_set(tp, addr);
- RTL_W8(Cfg9346, Cfg9346_Lock);
+ RTL_W8(tp, Cfg9346, Cfg9346_Lock);
rtl_unlock_work(tp);
}
@@ -4822,8 +4727,6 @@ static void rtl_speed_down(struct rtl8169_private *tp)
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_25:
case RTL_GIGA_MAC_VER_26:
@@ -4847,7 +4750,7 @@ static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_49:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
- RTL_W32(RxConfig, RTL_R32(RxConfig) |
+ RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) |
AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
break;
default:
@@ -4880,8 +4783,6 @@ static void r810x_phy_power_up(struct rtl8169_private *tp)
static void r810x_pll_power_down(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
if (rtl_wol_pll_power_down(tp))
return;
@@ -4896,15 +4797,13 @@ static void r810x_pll_power_down(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_16:
break;
default:
- RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
+ RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
break;
}
}
static void r810x_pll_power_up(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
r810x_phy_power_up(tp);
switch (tp->mac_version) {
@@ -4917,10 +4816,10 @@ static void r810x_pll_power_up(struct rtl8169_private *tp)
break;
case RTL_GIGA_MAC_VER_47:
case RTL_GIGA_MAC_VER_48:
- RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0);
+ RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
break;
default:
- RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
+ RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80);
break;
}
}
@@ -4987,21 +4886,12 @@ static void r8168_phy_power_down(struct rtl8169_private *tp)
static void r8168_pll_power_down(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
- tp->mac_version == RTL_GIGA_MAC_VER_28 ||
- tp->mac_version == RTL_GIGA_MAC_VER_31 ||
- tp->mac_version == RTL_GIGA_MAC_VER_49 ||
- tp->mac_version == RTL_GIGA_MAC_VER_50 ||
- tp->mac_version == RTL_GIGA_MAC_VER_51) &&
- r8168_check_dash(tp)) {
+ if (r8168_check_dash(tp))
return;
- }
if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
tp->mac_version == RTL_GIGA_MAC_VER_24) &&
- (RTL_R16(CPlusCmd) & ASF)) {
+ (RTL_R16(tp, CPlusCmd) & ASF)) {
return;
}
@@ -5027,22 +4917,20 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_46:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
- RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
+ RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
break;
case RTL_GIGA_MAC_VER_40:
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_49:
rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0x00000000,
0xfc000000, ERIAR_EXGMAC);
- RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
+ RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) & ~0x80);
break;
}
}
static void r8168_pll_power_up(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_25:
case RTL_GIGA_MAC_VER_26:
@@ -5051,19 +4939,19 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_31:
case RTL_GIGA_MAC_VER_32:
case RTL_GIGA_MAC_VER_33:
- RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
+ RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0x80);
break;
case RTL_GIGA_MAC_VER_44:
case RTL_GIGA_MAC_VER_45:
case RTL_GIGA_MAC_VER_46:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
- RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0);
+ RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
break;
case RTL_GIGA_MAC_VER_40:
case RTL_GIGA_MAC_VER_41:
case RTL_GIGA_MAC_VER_49:
- RTL_W8(PMCH, RTL_R8(PMCH) | 0xc0);
+ RTL_W8(tp, PMCH, RTL_R8(tp, PMCH) | 0xc0);
rtl_w0w1_eri(tp, 0x1a8, ERIAR_MASK_1111, 0xfc000000,
0x00000000, ERIAR_EXGMAC);
break;
@@ -5153,8 +5041,6 @@ static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
static void rtl_init_rxcfg(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_01:
case RTL_GIGA_MAC_VER_02:
@@ -5170,7 +5056,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_15:
case RTL_GIGA_MAC_VER_16:
case RTL_GIGA_MAC_VER_17:
- RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
+ RTL_W32(tp, RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_18:
case RTL_GIGA_MAC_VER_19:
@@ -5181,7 +5067,7 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_24:
case RTL_GIGA_MAC_VER_34:
case RTL_GIGA_MAC_VER_35:
- RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
+ RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
break;
case RTL_GIGA_MAC_VER_40:
case RTL_GIGA_MAC_VER_41:
@@ -5195,10 +5081,10 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_49:
case RTL_GIGA_MAC_VER_50:
case RTL_GIGA_MAC_VER_51:
- RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
+ RTL_W32(tp, RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
break;
default:
- RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
+ RTL_W32(tp, RxConfig, RX128_INT_EN | RX_DMA_BURST);
break;
}
}
@@ -5210,71 +5096,55 @@ static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W8(Cfg9346, Cfg9346_Unlock);
+ RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
rtl_generic_op(tp, tp->jumbo_ops.enable);
- RTL_W8(Cfg9346, Cfg9346_Lock);
+ RTL_W8(tp, Cfg9346, Cfg9346_Lock);
}
static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W8(Cfg9346, Cfg9346_Unlock);
+ RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
rtl_generic_op(tp, tp->jumbo_ops.disable);
- RTL_W8(Cfg9346, Cfg9346_Lock);
+ RTL_W8(tp, Cfg9346, Cfg9346_Lock);
}
static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
- RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
+ RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
+ RTL_W8(tp, Config4, RTL_R8(tp, Config4) | Jumbo_En1);
rtl_tx_performance_tweak(tp->pci_dev, PCI_EXP_DEVCTL_READRQ_512B);
}
static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
- RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1);
+ RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
+ RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~Jumbo_En1);
rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
}
static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
+ RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
}
static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
+ RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
}
static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W8(MaxTxPacketSize, 0x3f);
- RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
- RTL_W8(Config4, RTL_R8(Config4) | 0x01);
+ RTL_W8(tp, MaxTxPacketSize, 0x3f);
+ RTL_W8(tp, Config3, RTL_R8(tp, Config3) | Jumbo_En0);
+ RTL_W8(tp, Config4, RTL_R8(tp, Config4) | 0x01);
rtl_tx_performance_tweak(tp->pci_dev, PCI_EXP_DEVCTL_READRQ_512B);
}
static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W8(MaxTxPacketSize, 0x0c);
- RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
- RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
+ RTL_W8(tp, MaxTxPacketSize, 0x0c);
+ RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Jumbo_En0);
+ RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~0x01);
rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
}
@@ -5292,20 +5162,16 @@ static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
r8168b_0_hw_jumbo_enable(tp);
- RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
+ RTL_W8(tp, Config4, RTL_R8(tp, Config4) | (1 << 0));
}
static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
r8168b_0_hw_jumbo_disable(tp);
- RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
+ RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
}
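
rtl_hw_jumbo_enable()/rtl_hw_jumbo_disable() above dispatch through tp->jumbo_ops, and rtl_init_jumbo_ops() (only partially shown below) selects the per-chip pair. The ops shape implied by those call sites, as a sketch (member names taken from the calls, everything else assumed):

struct jumbo_ops {
	void (*enable)(struct rtl8169_private *tp);
	void (*disable)(struct rtl8169_private *tp);
};

/* rtl_init_jumbo_ops() is assumed to assign, per mac_version, e.g.: */
ops->enable  = r8168c_hw_jumbo_enable;
ops->disable = r8168c_hw_jumbo_disable;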
static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
@@ -5372,16 +5238,12 @@ static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
DECLARE_RTL_COND(rtl_chipcmd_cond)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- return RTL_R8(ChipCmd) & CmdReset;
+ return RTL_R8(tp, ChipCmd) & CmdReset;
}
static void rtl_hw_reset(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W8(ChipCmd, CmdReset);
+ RTL_W8(tp, ChipCmd, CmdReset);
rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
}
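
rtl_hw_reset() shows the driver's generic polling idiom: poke the hardware, then wait on a named condition with a bounded udelay loop. DECLARE_RTL_COND presumably bundles the predicate with its name so timeouts can be reported; a plausible reconstruction from the call sites (a sketch, the real definition is outside these hunks):

struct rtl_cond {
	bool (*check)(struct rtl8169_private *);
	const char *msg;	/* printed when the wait times out */
};

#define DECLARE_RTL_COND(name)				\
static bool name ## _check(struct rtl8169_private *);	\
							\
static const struct rtl_cond name = {			\
	.check	= name ## _check,			\
	.msg	= #name					\
};							\
							\
static bool name ## _check(struct rtl8169_private *tp)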
@@ -5432,29 +5294,21 @@ static void rtl_request_firmware(struct rtl8169_private *tp)
static void rtl_rx_close(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
+ RTL_W32(tp, RxConfig, RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
}
DECLARE_RTL_COND(rtl_npq_cond)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- return RTL_R8(TxPoll) & NPQ;
+ return RTL_R8(tp, TxPoll) & NPQ;
}
DECLARE_RTL_COND(rtl_txcfg_empty_cond)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- return RTL_R32(TxConfig) & TXCFG_EMPTY;
+ return RTL_R32(tp, TxConfig) & TXCFG_EMPTY;
}
static void rtl8169_hw_reset(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
/* Disable interrupts */
rtl8169_irq_mask_and_ack(tp);
@@ -5481,10 +5335,10 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
tp->mac_version == RTL_GIGA_MAC_VER_49 ||
tp->mac_version == RTL_GIGA_MAC_VER_50 ||
tp->mac_version == RTL_GIGA_MAC_VER_51) {
- RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
+ RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
} else {
- RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
+ RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) | StopReq);
udelay(100);
}
@@ -5493,10 +5347,8 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
/* Set DMA burst size and Interframe Gap Time */
- RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
+ RTL_W32(tp, TxConfig, (TX_DMA_BURST << TxDMAShift) |
(InterFrameGap << TxInterFrameGapShift));
}
@@ -5509,36 +5361,35 @@ static void rtl_hw_start(struct net_device *dev)
rtl_irq_enable_all(tp);
}
-static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
- void __iomem *ioaddr)
+static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp)
{
/*
* Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
* register to be written before TxDescAddrLow to work.
* Switching from MMIO to I/O access fixes the issue as well.
*/
- RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
- RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
- RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
- RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
+ RTL_W32(tp, TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
+ RTL_W32(tp, TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
+ RTL_W32(tp, RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
+ RTL_W32(tp, RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
}
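
The four writes above split 64-bit DMA addresses across paired 32-bit registers, preserving the high-before-low ordering the comment warns about. For clarity, the same split expressed with the kernel's standard helpers is equivalent:

RTL_W32(tp, TxDescStartAddrHigh, upper_32_bits(tp->TxPhyAddr));
RTL_W32(tp, TxDescStartAddrLow, lower_32_bits(tp->TxPhyAddr));
RTL_W32(tp, RxDescAddrHigh, upper_32_bits(tp->RxPhyAddr));
RTL_W32(tp, RxDescAddrLow, lower_32_bits(tp->RxPhyAddr));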
-static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
+static u16 rtl_rw_cpluscmd(struct rtl8169_private *tp)
{
u16 cmd;
- cmd = RTL_R16(CPlusCmd);
- RTL_W16(CPlusCmd, cmd);
+ cmd = RTL_R16(tp, CPlusCmd);
+ RTL_W16(tp, CPlusCmd, cmd);
return cmd;
}
-static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
+static void rtl_set_rx_max_size(struct rtl8169_private *tp, unsigned int rx_buf_sz)
{
/* A low RxMaxSize value hurts performance, so set it past the buffer size to effectively disable filtering. */
- RTL_W16(RxMaxSize, rx_buf_sz + 1);
+ RTL_W16(tp, RxMaxSize, rx_buf_sz + 1);
}
-static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
+static void rtl8169_set_magic_reg(struct rtl8169_private *tp, unsigned mac_version)
{
static const struct rtl_cfg2_info {
u32 mac_version;
@@ -5554,10 +5405,10 @@ static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
unsigned int i;
u32 clk;
- clk = RTL_R8(Config2) & PCI_Clock_66MHz;
+ clk = RTL_R8(tp, Config2) & PCI_Clock_66MHz;
for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
if ((p->mac_version == mac_version) && (p->clk == clk)) {
- RTL_W32(0x7c, p->val);
+ RTL_W32(tp, 0x7c, p->val);
break;
}
}
@@ -5566,7 +5417,6 @@ static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
static void rtl_set_rx_mode(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
u32 mc_filter[2]; /* Multicast hash filter */
int rx_mode;
u32 tmp = 0;
@@ -5598,7 +5448,7 @@ static void rtl_set_rx_mode(struct net_device *dev)
if (dev->features & NETIF_F_RXALL)
rx_mode |= (AcceptErr | AcceptRunt);
- tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
+ tmp = (RTL_R32(tp, RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
u32 data = mc_filter[0];
@@ -5610,35 +5460,34 @@ static void rtl_set_rx_mode(struct net_device *dev)
if (tp->mac_version == RTL_GIGA_MAC_VER_35)
mc_filter[1] = mc_filter[0] = 0xffffffff;
- RTL_W32(MAR0 + 4, mc_filter[1]);
- RTL_W32(MAR0 + 0, mc_filter[0]);
+ RTL_W32(tp, MAR0 + 4, mc_filter[1]);
+ RTL_W32(tp, MAR0 + 0, mc_filter[0]);
- RTL_W32(RxConfig, tmp);
+ RTL_W32(tp, RxConfig, tmp);
}
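
Only the tail of rtl_set_rx_mode() appears in these hunks; the elided top half fills mc_filter from the device's multicast list. For orientation, the conventional 8169 scheme hashes each address into one of 64 filter bits, along these lines (a sketch of the elided code, not part of this patch):

struct netdev_hw_addr *ha;

netdev_for_each_mc_addr(ha, dev) {
	int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;

	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
	rx_mode |= AcceptMulticast;
}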
static void rtl_hw_start_8169(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
+ RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) | PCIMulRW);
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
}
- RTL_W8(Cfg9346, Cfg9346_Unlock);
+ RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
tp->mac_version == RTL_GIGA_MAC_VER_02 ||
tp->mac_version == RTL_GIGA_MAC_VER_03 ||
tp->mac_version == RTL_GIGA_MAC_VER_04)
- RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+ RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
rtl_init_rxcfg(tp);
- RTL_W8(EarlyTxThres, NoEarlyTx);
+ RTL_W8(tp, EarlyTxThres, NoEarlyTx);
- rtl_set_rx_max_size(ioaddr, rx_buf_sz);
+ rtl_set_rx_max_size(tp, rx_buf_sz);
if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
tp->mac_version == RTL_GIGA_MAC_VER_02 ||
@@ -5646,7 +5495,7 @@ static void rtl_hw_start_8169(struct net_device *dev)
tp->mac_version == RTL_GIGA_MAC_VER_04)
rtl_set_rx_tx_config_registers(tp);
- tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
+ tp->cp_cmd |= rtl_rw_cpluscmd(tp) | PCIMulRW;
if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
tp->mac_version == RTL_GIGA_MAC_VER_03) {
@@ -5655,37 +5504,37 @@ static void rtl_hw_start_8169(struct net_device *dev)
tp->cp_cmd |= (1 << 14);
}
- RTL_W16(CPlusCmd, tp->cp_cmd);
+ RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- rtl8169_set_magic_reg(ioaddr, tp->mac_version);
+ rtl8169_set_magic_reg(tp, tp->mac_version);
/*
* Undocumented corner. Supposedly:
* (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
*/
- RTL_W16(IntrMitigate, 0x0000);
+ RTL_W16(tp, IntrMitigate, 0x0000);
- rtl_set_rx_tx_desc_registers(tp, ioaddr);
+ rtl_set_rx_tx_desc_registers(tp);
if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
tp->mac_version != RTL_GIGA_MAC_VER_02 &&
tp->mac_version != RTL_GIGA_MAC_VER_03 &&
tp->mac_version != RTL_GIGA_MAC_VER_04) {
- RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+ RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
rtl_set_rx_tx_config_registers(tp);
}
- RTL_W8(Cfg9346, Cfg9346_Lock);
+ RTL_W8(tp, Cfg9346, Cfg9346_Lock);
/* Initially a 10 us delay. Turned it into a PCI commit. - FR */
- RTL_R8(IntrMask);
+ RTL_R8(tp, IntrMask);
- RTL_W32(RxMissed, 0);
+ RTL_W32(tp, RxMissed, 0);
rtl_set_rx_mode(dev);
/* no early-rx interrupts */
- RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
+ RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
}
static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
@@ -5719,17 +5568,13 @@ static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
DECLARE_RTL_COND(rtl_csiar_cond)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- return RTL_R32(CSIAR) & CSIAR_FLAG;
+ return RTL_R32(tp, CSIAR) & CSIAR_FLAG;
}
static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W32(CSIDR, value);
- RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
+ RTL_W32(tp, CSIDR, value);
+ RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
@@ -5737,21 +5582,17 @@ static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
+ RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) |
CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
- RTL_R32(CSIDR) : ~0;
+ RTL_R32(tp, CSIDR) : ~0;
}
static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W32(CSIDR, value);
- RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
+ RTL_W32(tp, CSIDR, value);
+ RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
CSIAR_FUNC_NIC);
@@ -5760,21 +5601,17 @@ static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
+ RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
- RTL_R32(CSIDR) : ~0;
+ RTL_R32(tp, CSIDR) : ~0;
}
static void r8411_csi_write(struct rtl8169_private *tp, int addr, int value)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W32(CSIDR, value);
- RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
+ RTL_W32(tp, CSIDR, value);
+ RTL_W32(tp, CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
CSIAR_FUNC_NIC2);
@@ -5783,13 +5620,11 @@ static void r8411_csi_write(struct rtl8169_private *tp, int addr, int value)
static u32 r8411_csi_read(struct rtl8169_private *tp, int addr)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC2 |
+ RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC2 |
CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
- RTL_R32(CSIDR) : ~0;
+ RTL_R32(tp, CSIDR) : ~0;
}
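
The three CSI read variants above (plain, CSIAR_FUNC_NIC, CSIAR_FUNC_NIC2) differ only in the function-select bits OR'd into CSIAR; the polling skeleton is identical. Factored out, the shared pattern looks like this (a sketch for illustration, not code from the patch):

static u32 rtl_csi_read_common(struct rtl8169_private *tp, int addr, u32 func)
{
	RTL_W32(tp, CSIAR, (addr & CSIAR_ADDR_MASK) | func |
		CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);

	/* returning ~0 on timeout makes a failed read look like all ones */
	return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
		RTL_R32(tp, CSIDR) : ~0;
}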
static void rtl_init_csi_ops(struct rtl8169_private *tp)
@@ -5865,17 +5700,16 @@ static void rtl_enable_clock_request(struct pci_dev *pdev)
static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
{
- void __iomem *ioaddr = tp->mmio_addr;
u8 data;
- data = RTL_R8(Config3);
+ data = RTL_R8(tp, Config3);
if (enable)
data |= Rdy_to_L23;
else
data &= ~Rdy_to_L23;
- RTL_W8(Config3, data);
+ RTL_W8(tp, Config3, data);
}
#define R8168_CPCMD_QUIRK_MASK (\
@@ -5891,12 +5725,11 @@ static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
- RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+ RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+ RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
if (tp->dev->mtu <= ETH_DATA_LEN) {
rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) |
@@ -5906,30 +5739,27 @@ static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
rtl_hw_start_8168bb(tp);
- RTL_W8(MaxTxPacketSize, TxPacketMax);
+ RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
- RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
+ RTL_W8(tp, Config4, RTL_R8(tp, Config4) & ~(1 << 0));
}
static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
- RTL_W8(Config1, RTL_R8(Config1) | Speed_down);
+ RTL_W8(tp, Config1, RTL_R8(tp, Config1) | Speed_down);
- RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+ RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
rtl_disable_clock_request(pdev);
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+ RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
@@ -5951,42 +5781,39 @@ static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
rtl_csi_access_enable_2(tp);
- RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+ RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+ RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
rtl_csi_access_enable_2(tp);
- RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+ RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
/* Magic. */
- RTL_W8(DBG_REG, 0x20);
+ RTL_W8(tp, DBG_REG, 0x20);
- RTL_W8(MaxTxPacketSize, TxPacketMax);
+ RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+ RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
static const struct ephy_info e_info_8168c_1[] = {
{ 0x02, 0x0800, 0x1000 },
{ 0x03, 0, 0x0002 },
@@ -5995,7 +5822,7 @@ static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
rtl_csi_access_enable_2(tp);
- RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
+ RTL_W8(tp, DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
@@ -6030,24 +5857,22 @@ static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
static void rtl_hw_start_8168d(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
rtl_csi_access_enable_2(tp);
rtl_disable_clock_request(pdev);
- RTL_W8(MaxTxPacketSize, TxPacketMax);
+ RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
- RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
+ RTL_W16(tp, CPlusCmd, RTL_R16(tp, CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
}
static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
rtl_csi_access_enable_1(tp);
@@ -6055,14 +5880,13 @@ static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
- RTL_W8(MaxTxPacketSize, TxPacketMax);
+ RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
rtl_disable_clock_request(pdev);
}
static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
static const struct ephy_info e_info_8168d_4[] = {
{ 0x0b, 0x0000, 0x0048 },
@@ -6074,7 +5898,7 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
- RTL_W8(MaxTxPacketSize, TxPacketMax);
+ RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
rtl_ephy_init(tp, e_info_8168d_4, ARRAY_SIZE(e_info_8168d_4));
@@ -6083,7 +5907,6 @@ static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
static const struct ephy_info e_info_8168e_1[] = {
{ 0x00, 0x0200, 0x0100 },
@@ -6108,20 +5931,19 @@ static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
if (tp->dev->mtu <= ETH_DATA_LEN)
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
- RTL_W8(MaxTxPacketSize, TxPacketMax);
+ RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
rtl_disable_clock_request(pdev);
/* Reset tx FIFO pointer */
- RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
- RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);
+ RTL_W32(tp, MISC, RTL_R32(tp, MISC) | TXPLA_RST);
+ RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~TXPLA_RST);
- RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
+ RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
}
static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
static const struct ephy_info e_info_8168e_2[] = {
{ 0x09, 0x0000, 0x0080 },
@@ -6144,24 +5966,23 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
- RTL_W8(MaxTxPacketSize, EarlySize);
+ RTL_W8(tp, MaxTxPacketSize, EarlySize);
rtl_disable_clock_request(pdev);
- RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
- RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
+ RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
+ RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
/* Adjust EEE LED frequency */
- RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
+ RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
- RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
- RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
- RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
+ RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
+ RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
+ RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
}
static void rtl_hw_start_8168f(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
rtl_csi_access_enable_2(tp);
@@ -6179,20 +6000,19 @@ static void rtl_hw_start_8168f(struct rtl8169_private *tp)
rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
- RTL_W8(MaxTxPacketSize, EarlySize);
+ RTL_W8(tp, MaxTxPacketSize, EarlySize);
rtl_disable_clock_request(pdev);
- RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
- RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
- RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
- RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
- RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
+ RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
+ RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
+ RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
+ RTL_W32(tp, MISC, RTL_R32(tp, MISC) | PWM_EN);
+ RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~Spi_en);
}
static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
static const struct ephy_info e_info_8168f_1[] = {
{ 0x06, 0x00c0, 0x0020 },
{ 0x08, 0x0001, 0x0002 },
@@ -6207,7 +6027,7 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
rtl_w0w1_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
/* Adjust EEE LED frequency */
- RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
+ RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
}
static void rtl_hw_start_8411(struct rtl8169_private *tp)
@@ -6229,10 +6049,9 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
static void rtl_hw_start_8168g(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
- RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
+ RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
@@ -6247,14 +6066,14 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
rtl_w0w1_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
rtl_eri_write(tp, 0x2f8, ERIAR_MASK_0011, 0x1d8f, ERIAR_EXGMAC);
- RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
- RTL_W8(MaxTxPacketSize, EarlySize);
+ RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
+ RTL_W8(tp, MaxTxPacketSize, EarlySize);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
/* Adjust EEE LED frequency */
- RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
+ RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
@@ -6264,7 +6083,6 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp)
static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
static const struct ephy_info e_info_8168g_1[] = {
{ 0x00, 0x0000, 0x0008 },
{ 0x0c, 0x37d0, 0x0820 },
@@ -6275,14 +6093,13 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
rtl_hw_start_8168g(tp);
/* disable ASPM and clock request before accessing the EPHY */
- RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
- RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
+ RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
+ RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
rtl_ephy_init(tp, e_info_8168g_1, ARRAY_SIZE(e_info_8168g_1));
}
static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
static const struct ephy_info e_info_8168g_2[] = {
{ 0x00, 0x0000, 0x0008 },
{ 0x0c, 0x3df0, 0x0200 },
@@ -6293,14 +6110,13 @@ static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
rtl_hw_start_8168g(tp);
/* disable ASPM and clock request before accessing the EPHY */
- RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
- RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
+ RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
+ RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
rtl_ephy_init(tp, e_info_8168g_2, ARRAY_SIZE(e_info_8168g_2));
}
static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
static const struct ephy_info e_info_8411_2[] = {
{ 0x00, 0x0000, 0x0008 },
{ 0x0c, 0x3df0, 0x0200 },
@@ -6312,14 +6128,13 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
rtl_hw_start_8168g(tp);
/* disable ASPM and clock request before accessing the EPHY */
- RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
- RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
+ RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
+ RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2));
}
static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
int rg_saw_cnt;
u32 data;
@@ -6333,11 +6148,11 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
};
/* disable ASPM and clock request before accessing the EPHY */
- RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
- RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
+ RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
+ RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
rtl_ephy_init(tp, e_info_8168h_1, ARRAY_SIZE(e_info_8168h_1));
- RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
+ RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
@@ -6357,19 +6172,19 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC);
- RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
- RTL_W8(MaxTxPacketSize, EarlySize);
+ RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
+ RTL_W8(tp, MaxTxPacketSize, EarlySize);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
/* Adjust EEE LED frequency */
- RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
+ RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
- RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
- RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN);
+ RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
+ RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
- RTL_W8(DLLPR, RTL_R8(DLLPR) & ~TX_10M_PS_EN);
+ RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
rtl_w0w1_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
@@ -6417,12 +6232,11 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp)
static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
rtl8168ep_stop_cmac(tp);
- RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
+ RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x00080002, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x2f, ERIAR_EXGMAC);
@@ -6440,25 +6254,24 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp)
rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87, ERIAR_EXGMAC);
- RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
- RTL_W8(MaxTxPacketSize, EarlySize);
+ RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN);
+ RTL_W8(tp, MaxTxPacketSize, EarlySize);
rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
/* Adjust EEE LED frequency */
- RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
+ RTL_W8(tp, EEE_LED, RTL_R8(tp, EEE_LED) & ~0x07);
rtl_w0w1_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
- RTL_W8(DLLPR, RTL_R8(DLLPR) & ~TX_10M_PS_EN);
+ RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~TX_10M_PS_EN);
rtl_pcie_state_l2l3_enable(tp, false);
}
static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
static const struct ephy_info e_info_8168ep_1[] = {
{ 0x00, 0xffff, 0x10ab },
{ 0x06, 0xffff, 0xf030 },
@@ -6468,8 +6281,8 @@ static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp)
};
/* disable ASPM and clock request before accessing the EPHY */
- RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
- RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
+ RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
+ RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
rtl_ephy_init(tp, e_info_8168ep_1, ARRAY_SIZE(e_info_8168ep_1));
rtl_hw_start_8168ep(tp);
@@ -6477,7 +6290,6 @@ static void rtl_hw_start_8168ep_1(struct rtl8169_private *tp)
static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
static const struct ephy_info e_info_8168ep_2[] = {
{ 0x00, 0xffff, 0x10a3 },
{ 0x19, 0xffff, 0xfc00 },
@@ -6485,19 +6297,18 @@ static void rtl_hw_start_8168ep_2(struct rtl8169_private *tp)
};
/* disable ASPM and clock request before accessing the EPHY */
- RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
- RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
+ RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
+ RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
rtl_ephy_init(tp, e_info_8168ep_2, ARRAY_SIZE(e_info_8168ep_2));
rtl_hw_start_8168ep(tp);
- RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
- RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN);
+ RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
+ RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
}
static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
u32 data;
static const struct ephy_info e_info_8168ep_3[] = {
{ 0x00, 0xffff, 0x10a3 },
@@ -6507,14 +6318,14 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
};
/* disable ASPM and clock request before accessing the EPHY */
- RTL_W8(Config2, RTL_R8(Config2) & ~ClkReqEn);
- RTL_W8(Config5, RTL_R8(Config5) & ~ASPM_en);
+ RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~ClkReqEn);
+ RTL_W8(tp, Config5, RTL_R8(tp, Config5) & ~ASPM_en);
rtl_ephy_init(tp, e_info_8168ep_3, ARRAY_SIZE(e_info_8168ep_3));
rtl_hw_start_8168ep(tp);
- RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
- RTL_W8(MISC_1, RTL_R8(MISC_1) & ~PFM_D3COLD_EN);
+ RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
+ RTL_W8(tp, MISC_1, RTL_R8(tp, MISC_1) & ~PFM_D3COLD_EN);
data = r8168_mac_ocp_read(tp, 0xd3e2);
data &= 0xf000;
@@ -6533,19 +6344,18 @@ static void rtl_hw_start_8168ep_3(struct rtl8169_private *tp)
static void rtl_hw_start_8168(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
- RTL_W8(Cfg9346, Cfg9346_Unlock);
+ RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
- RTL_W8(MaxTxPacketSize, TxPacketMax);
+ RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
- rtl_set_rx_max_size(ioaddr, rx_buf_sz);
+ rtl_set_rx_max_size(tp, rx_buf_sz);
- tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
+ tp->cp_cmd |= RTL_R16(tp, CPlusCmd) | PktCntrDisable | INTT_1;
- RTL_W16(CPlusCmd, tp->cp_cmd);
+ RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- RTL_W16(IntrMitigate, 0x5151);
+ RTL_W16(tp, IntrMitigate, 0x5151);
/* Workaround for RxFIFO overflow. */
if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
@@ -6553,11 +6363,11 @@ static void rtl_hw_start_8168(struct net_device *dev)
tp->event_slow &= ~RxOverflow;
}
- rtl_set_rx_tx_desc_registers(tp, ioaddr);
+ rtl_set_rx_tx_desc_registers(tp);
rtl_set_rx_tx_config_registers(tp);
- RTL_R8(IntrMask);
+ RTL_R8(tp, IntrMask);
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_11:
@@ -6663,13 +6473,13 @@ static void rtl_hw_start_8168(struct net_device *dev)
break;
}
- RTL_W8(Cfg9346, Cfg9346_Lock);
+ RTL_W8(tp, Cfg9346, Cfg9346_Lock);
- RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+ RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
rtl_set_rx_mode(dev);
- RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
+ RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
}
#define R810X_CPCMD_QUIRK_MASK (\
@@ -6685,7 +6495,6 @@ static void rtl_hw_start_8168(struct net_device *dev)
static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
static const struct ephy_info e_info_8102e_1[] = {
{ 0x01, 0, 0x6e65 },
@@ -6701,32 +6510,31 @@ static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
rtl_csi_access_enable_2(tp);
- RTL_W8(DBG_REG, FIX_NAK_1);
+ RTL_W8(tp, DBG_REG, FIX_NAK_1);
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
- RTL_W8(Config1,
+ RTL_W8(tp, Config1,
LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
- RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+ RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
- cfg1 = RTL_R8(Config1);
+ cfg1 = RTL_R8(tp, Config1);
if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
- RTL_W8(Config1, cfg1 & ~LEDS0);
+ RTL_W8(tp, Config1, cfg1 & ~LEDS0);
rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
}
static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
rtl_csi_access_enable_2(tp);
rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
- RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
- RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
+ RTL_W8(tp, Config1, MEMMAP | IOMAP | VPD | PMEnable);
+ RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Beacon_en);
}
static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
@@ -6738,7 +6546,6 @@ static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
static const struct ephy_info e_info_8105e_1[] = {
{ 0x07, 0, 0x4000 },
{ 0x19, 0, 0x0200 },
@@ -6751,13 +6558,13 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
};
/* Force LAN exit from ASPM if Rx/Tx are not idle */
- RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
+ RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
/* Disable Early Tally Counter */
- RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);
+ RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) & ~0x010000);
- RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
- RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
+ RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
+ RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) | PFM_EN);
rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
@@ -6772,7 +6579,6 @@ static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
static void rtl_hw_start_8402(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
static const struct ephy_info e_info_8402[] = {
{ 0x19, 0xffff, 0xff64 },
{ 0x1e, 0, 0x4000 }
@@ -6781,10 +6587,10 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
rtl_csi_access_enable_2(tp);
/* Force LAN exit from ASPM if Rx/Tx are not idle */
- RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
+ RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
- RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
- RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
+ RTL_W32(tp, TxConfig, RTL_R32(tp, TxConfig) | TXCFG_AUTO_FIFO);
+ RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
@@ -6803,14 +6609,12 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
static void rtl_hw_start_8106(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
/* Force LAN exit from ASPM if Rx/Tx are not idle */
- RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
+ RTL_W32(tp, FuncEvent, RTL_R32(tp, FuncEvent) | 0x002800);
- RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
- RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
- RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
+ RTL_W32(tp, MISC, (RTL_R32(tp, MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
+ RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET);
+ RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN);
rtl_pcie_state_l2l3_enable(tp, false);
}
@@ -6818,7 +6622,6 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
static void rtl_hw_start_8101(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
@@ -6829,16 +6632,16 @@ static void rtl_hw_start_8101(struct net_device *dev)
pcie_capability_set_word(pdev, PCI_EXP_DEVCTL,
PCI_EXP_DEVCTL_NOSNOOP_EN);
- RTL_W8(Cfg9346, Cfg9346_Unlock);
+ RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
- RTL_W8(MaxTxPacketSize, TxPacketMax);
+ RTL_W8(tp, MaxTxPacketSize, TxPacketMax);
- rtl_set_rx_max_size(ioaddr, rx_buf_sz);
+ rtl_set_rx_max_size(tp, rx_buf_sz);
tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
- RTL_W16(CPlusCmd, tp->cp_cmd);
+ RTL_W16(tp, CPlusCmd, tp->cp_cmd);
- rtl_set_rx_tx_desc_registers(tp, ioaddr);
+ rtl_set_rx_tx_desc_registers(tp);
rtl_set_rx_tx_config_registers(tp);
@@ -6878,17 +6681,17 @@ static void rtl_hw_start_8101(struct net_device *dev)
break;
}
- RTL_W8(Cfg9346, Cfg9346_Lock);
+ RTL_W8(tp, Cfg9346, Cfg9346_Lock);
- RTL_W16(IntrMitigate, 0x0000);
+ RTL_W16(tp, IntrMitigate, 0x0000);
- RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
+ RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb);
rtl_set_rx_mode(dev);
- RTL_R8(IntrMask);
+ RTL_R8(tp, IntrMask);
- RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
+ RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000);
}
static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
@@ -7098,7 +6901,7 @@ static void rtl_reset_work(struct rtl8169_private *tp)
napi_enable(&tp->napi);
rtl_hw_start(dev);
netif_wake_queue(dev);
- rtl8169_check_link_status(dev, tp, tp->mmio_addr);
+ rtl8169_check_link_status(dev, tp);
}
static void rtl8169_tx_timeout(struct net_device *dev)
@@ -7346,7 +7149,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
struct rtl8169_private *tp = netdev_priv(dev);
unsigned int entry = tp->cur_tx % NUM_TX_DESC;
struct TxDesc *txd = tp->TxDescArray + entry;
- void __iomem *ioaddr = tp->mmio_addr;
struct device *d = &tp->pci_dev->dev;
dma_addr_t mapping;
u32 status, len;
@@ -7406,7 +7208,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
tp->cur_tx += frags + 1;
- RTL_W8(TxPoll, NPQ);
+ RTL_W8(tp, TxPoll, NPQ);
mmiowb();
@@ -7477,11 +7279,9 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
/* The infamous DAC f*ckup only happens at boot time */
if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) {
- void __iomem *ioaddr = tp->mmio_addr;
-
netif_info(tp, intr, dev, "disabling PCI DAC\n");
tp->cp_cmd &= ~PCIDAC;
- RTL_W16(CPlusCmd, tp->cp_cmd);
+ RTL_W16(tp, CPlusCmd, tp->cp_cmd);
dev->features &= ~NETIF_F_HIGHDMA;
}
@@ -7547,11 +7347,8 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
* of start_xmit activity is detected (if it is not detected,
* it is slow enough). -- FR
*/
- if (tp->cur_tx != dirty_tx) {
- void __iomem *ioaddr = tp->mmio_addr;
-
- RTL_W8(TxPoll, NPQ);
- }
+ if (tp->cur_tx != dirty_tx)
+ RTL_W8(tp, TxPoll, NPQ);
}
}
@@ -7732,7 +7529,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp)
rtl8169_pcierr_interrupt(dev);
if (status & LinkChg)
- rtl8169_check_link_status(dev, tp, tp->mmio_addr);
+ rtl8169_check_link_status(dev, tp);
rtl_irq_enable_all(tp);
}
@@ -7804,21 +7601,20 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
return work_done;
}
-static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
+static void rtl8169_rx_missed(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
if (tp->mac_version > RTL_GIGA_MAC_VER_06)
return;
- dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
- RTL_W32(RxMissed, 0);
+ dev->stats.rx_missed_errors += RTL_R32(tp, RxMissed) & 0xffffff;
+ RTL_W32(tp, RxMissed, 0);
}
static void rtl8169_down(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
del_timer_sync(&tp->timer);
@@ -7831,7 +7627,7 @@ static void rtl8169_down(struct net_device *dev)
* as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
* and napi is disabled (rtl8169_poll).
*/
- rtl8169_rx_missed(dev, ioaddr);
+ rtl8169_rx_missed(dev);
/* Give a racing hard_start_xmit a few cycles to complete. */
synchronize_sched();
@@ -7861,7 +7657,7 @@ static int rtl8169_close(struct net_device *dev)
cancel_work_sync(&tp->wk.work);
- free_irq(pdev->irq, dev);
+ pci_free_irq(pdev, 0, dev);
dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
tp->RxPhyAddr);
@@ -7880,14 +7676,13 @@ static void rtl8169_netpoll(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- rtl8169_interrupt(tp->pci_dev->irq, dev);
+ rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), dev);
}
#endif
static int rtl_open(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
int retval = -ENOMEM;
@@ -7917,9 +7712,8 @@ static int rtl_open(struct net_device *dev)
rtl_request_firmware(tp);
- retval = request_irq(pdev->irq, rtl8169_interrupt,
- (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
- dev->name, dev);
+ retval = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, dev,
+ dev->name);
if (retval < 0)
goto err_release_fw_2;
@@ -7947,7 +7741,7 @@ static int rtl_open(struct net_device *dev)
tp->saved_wolopts = 0;
pm_runtime_put_sync(&pdev->dev);
- rtl8169_check_link_status(dev, tp, ioaddr);
+ rtl8169_check_link_status(dev, tp);
out:
return retval;
@@ -7971,7 +7765,6 @@ static void
rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
struct rtl8169_private *tp = netdev_priv(dev);
- void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
struct rtl8169_counters *counters = tp->counters;
unsigned int start;
@@ -7979,7 +7772,7 @@ rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
pm_runtime_get_noresume(&pdev->dev);
if (netif_running(dev) && pm_runtime_active(&pdev->dev))
- rtl8169_rx_missed(dev, ioaddr);
+ rtl8169_rx_missed(dev);
do {
start = u64_stats_fetch_begin_irq(&tp->rx_stats.syncp);
@@ -8102,7 +7895,7 @@ static int rtl8169_runtime_suspend(struct device *device)
rtl8169_net_suspend(dev);
/* Update counters before going runtime suspend */
- rtl8169_rx_missed(dev, tp->mmio_addr);
+ rtl8169_rx_missed(dev);
rtl8169_update_counters(dev);
return 0;
@@ -8163,8 +7956,6 @@ static const struct dev_pm_ops rtl8169_pm_ops = {
static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
/* WoL fails with 8168b when the receiver is disabled. */
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_11:
@@ -8172,9 +7963,9 @@ static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
case RTL_GIGA_MAC_VER_17:
pci_clear_master(tp->pci_dev);
- RTL_W8(ChipCmd, CmdRxEnb);
+ RTL_W8(tp, ChipCmd, CmdRxEnb);
/* PCI commit */
- RTL_R8(ChipCmd);
+ RTL_R8(tp, ChipCmd);
break;
default:
break;
@@ -8209,15 +8000,8 @@ static void rtl_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct rtl8169_private *tp = netdev_priv(dev);
- if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
- tp->mac_version == RTL_GIGA_MAC_VER_28 ||
- tp->mac_version == RTL_GIGA_MAC_VER_31 ||
- tp->mac_version == RTL_GIGA_MAC_VER_49 ||
- tp->mac_version == RTL_GIGA_MAC_VER_50 ||
- tp->mac_version == RTL_GIGA_MAC_VER_51) &&
- r8168_check_dash(tp)) {
+ if (r8168_check_dash(tp))
rtl8168_driver_stop(tp);
- }
netif_napi_del(&tp->napi);
@@ -8256,7 +8040,7 @@ static const struct rtl_cfg_info {
unsigned int region;
unsigned int align;
u16 event_slow;
- unsigned features;
+ unsigned int has_gmii:1;
const struct rtl_coalesce_info *coalesce_info;
u8 default_ver;
} rtl_cfg_infos [] = {
@@ -8265,7 +8049,7 @@ static const struct rtl_cfg_info {
.region = 1,
.align = 0,
.event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver,
- .features = RTL_FEATURE_GMII,
+ .has_gmii = 1,
.coalesce_info = rtl_coalesce_info_8169,
.default_ver = RTL_GIGA_MAC_VER_01,
},
@@ -8274,7 +8058,7 @@ static const struct rtl_cfg_info {
.region = 2,
.align = 8,
.event_slow = SYSErr | LinkChg | RxOverflow,
- .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI,
+ .has_gmii = 1,
.coalesce_info = rtl_coalesce_info_8168_8136,
.default_ver = RTL_GIGA_MAC_VER_11,
},
@@ -8284,56 +8068,44 @@ static const struct rtl_cfg_info {
.align = 8,
.event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver |
PCSTimeout,
- .features = RTL_FEATURE_MSI,
.coalesce_info = rtl_coalesce_info_8168_8136,
.default_ver = RTL_GIGA_MAC_VER_13,
}
};
-/* Cfg9346_Unlock assumed. */
-static unsigned rtl_try_msi(struct rtl8169_private *tp,
- const struct rtl_cfg_info *cfg)
+static int rtl_alloc_irq(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
- unsigned msi = 0;
- u8 cfg2;
+ unsigned int flags;
- cfg2 = RTL_R8(Config2) & ~MSIEnable;
- if (cfg->features & RTL_FEATURE_MSI) {
- if (pci_enable_msi(tp->pci_dev)) {
- netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
- } else {
- cfg2 |= MSIEnable;
- msi = RTL_FEATURE_MSI;
- }
+ if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
+ RTL_W8(tp, Cfg9346, Cfg9346_Unlock);
+ RTL_W8(tp, Config2, RTL_R8(tp, Config2) & ~MSIEnable);
+ RTL_W8(tp, Cfg9346, Cfg9346_Lock);
+ flags = PCI_IRQ_LEGACY;
+ } else {
+ flags = PCI_IRQ_ALL_TYPES;
}
- if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
- RTL_W8(Config2, cfg2);
- return msi;
+
+ return pci_alloc_irq_vectors(tp->pci_dev, 1, 1, flags);
}
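
rtl_alloc_irq() replaces the old rtl_try_msi(): chips up to RTL_GIGA_MAC_VER_06 are pinned to legacy INTx (clearing MSIEnable in Config2 behind the Cfg9346 lock), while newer chips take whatever pci_alloc_irq_vectors() can provide. The rest of the patch pairs this with the matching managed IRQ helpers; the lifecycle in one place (calls as used elsewhere in this patch):

/* probe */
rc = rtl_alloc_irq(tp);		/* exactly one vector, MSI preferred */
/* open */
rc = pci_request_irq(pdev, 0, rtl8169_interrupt, NULL, dev, dev->name);
/* close */
pci_free_irq(pdev, 0, dev);
/* wherever the raw IRQ number is needed (netpoll, probe message) */
irq = pci_irq_vector(pdev, 0);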
DECLARE_RTL_COND(rtl_link_list_ready_cond)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- return RTL_R8(MCU) & LINK_LIST_RDY;
+ return RTL_R8(tp, MCU) & LINK_LIST_RDY;
}
DECLARE_RTL_COND(rtl_rxtx_empty_cond)
{
- void __iomem *ioaddr = tp->mmio_addr;
-
- return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
+ return (RTL_R8(tp, MCU) & RXTX_EMPTY) == RXTX_EMPTY;
}
static void rtl_hw_init_8168g(struct rtl8169_private *tp)
{
- void __iomem *ioaddr = tp->mmio_addr;
u32 data;
tp->ocp_base = OCP_STD_PHY_BASE;
- RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN);
+ RTL_W32(tp, MISC, RTL_R32(tp, MISC) | RXDV_GATED_EN);
if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
return;
@@ -8341,9 +8113,9 @@ static void rtl_hw_init_8168g(struct rtl8169_private *tp)
if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
return;
- RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
+ RTL_W8(tp, ChipCmd, RTL_R8(tp, ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
msleep(1);
- RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
+ RTL_W8(tp, MCU, RTL_R8(tp, MCU) & ~NOW_IS_OOB);
data = r8168_mac_ocp_read(tp, 0xe8de);
data &= ~(1 << 14);
@@ -8397,7 +8169,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct rtl8169_private *tp;
struct mii_if_info *mii;
struct net_device *dev;
- void __iomem *ioaddr;
int chipset, i;
int rc;
@@ -8423,7 +8194,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
mii->mdio_write = rtl_mdio_write;
mii->phy_id_mask = 0x1f;
mii->reg_num_mask = 0x1f;
- mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
+ mii->supports_gmii = cfg->has_gmii;
/* Disable ASPM completely as it causes random devices to stop working
 * and full system hangs for some PCIe device users. */
@@ -8455,20 +8226,13 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENODEV;
}
- rc = pci_request_regions(pdev, MODULENAME);
+ rc = pcim_iomap_regions(pdev, BIT(region), MODULENAME);
if (rc < 0) {
- netif_err(tp, probe, dev, "could not request regions\n");
+ netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
return rc;
}
- /* ioremap MMIO region */
- ioaddr = devm_ioremap(&pdev->dev, pci_resource_start(pdev, region),
- R8169_REGS_SIZE);
- if (!ioaddr) {
- netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
- return -EIO;
- }
- tp->mmio_addr = ioaddr;
+ tp->mmio_addr = pcim_iomap_table(pdev)[region];
if (!pci_is_pcie(pdev))
netif_info(tp, probe, dev, "not PCI Express\n");
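
pcim_iomap_regions() folds the deleted request-then-remap pair into one device-managed call: BIT(region) selects the BAR and pcim_iomap_table() hands back the mapping, with devres releasing both on detach. Unmanaged, the replaced sequence would look roughly like this (a sketch of what devres now does implicitly):

rc = pci_request_regions(pdev, MODULENAME);
if (rc < 0)
	return rc;
ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
if (!ioaddr) {
	pci_release_regions(pdev);
	return -EIO;
}
/* ...plus iounmap()/pci_release_regions() on every remove path */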
@@ -8518,41 +8282,14 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
chipset = tp->mac_version;
tp->txd_version = rtl_chip_infos[chipset].txd_version;
- RTL_W8(Cfg9346, Cfg9346_Unlock);
- RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
- RTL_W8(Config5, RTL_R8(Config5) & (BWF | MWF | UWF | LanWake | PMEStatus));
- switch (tp->mac_version) {
- case RTL_GIGA_MAC_VER_34:
- case RTL_GIGA_MAC_VER_35:
- case RTL_GIGA_MAC_VER_36:
- case RTL_GIGA_MAC_VER_37:
- case RTL_GIGA_MAC_VER_38:
- case RTL_GIGA_MAC_VER_40:
- case RTL_GIGA_MAC_VER_41:
- case RTL_GIGA_MAC_VER_42:
- case RTL_GIGA_MAC_VER_43:
- case RTL_GIGA_MAC_VER_44:
- case RTL_GIGA_MAC_VER_45:
- case RTL_GIGA_MAC_VER_46:
- case RTL_GIGA_MAC_VER_47:
- case RTL_GIGA_MAC_VER_48:
- case RTL_GIGA_MAC_VER_49:
- case RTL_GIGA_MAC_VER_50:
- case RTL_GIGA_MAC_VER_51:
- if (rtl_eri_read(tp, 0xdc, ERIAR_EXGMAC) & MagicPacket_v2)
- tp->features |= RTL_FEATURE_WOL;
- if ((RTL_R8(Config3) & LinkUp) != 0)
- tp->features |= RTL_FEATURE_WOL;
- break;
- default:
- if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
- tp->features |= RTL_FEATURE_WOL;
- break;
+ rc = rtl_alloc_irq(tp);
+ if (rc < 0) {
+ netif_err(tp, probe, dev, "Can't allocate interrupt\n");
+ return rc;
}
- if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
- tp->features |= RTL_FEATURE_WOL;
- tp->features |= rtl_try_msi(tp, cfg);
- RTL_W8(Cfg9346, Cfg9346_Lock);
+
+ /* override BIOS settings, use userspace tools to enable WOL */
+ __rtl8169_set_wol(tp, 0);
if (rtl_tbi_enabled(tp)) {
tp->set_speed = rtl8169_set_speed_tbi;
@@ -8600,7 +8337,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl_rar_set(tp, (u8 *)mac_addr);
}
for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = RTL_R8(MAC0 + i);
+ dev->dev_addr[i] = RTL_R8(tp, MAC0 + i);
dev->ethtool_ops = &rtl8169_ethtool_ops;
dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
@@ -8667,8 +8404,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
- rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
- (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
+ rtl_chip_infos[chipset].name, tp->mmio_addr, dev->dev_addr,
+ (u32)(RTL_R32(tp, TxConfig) & 0x9cf0f8ff),
+ pci_irq_vector(pdev, 0));
if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
"tx checksumming: %s]\n",
@@ -8676,15 +8414,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
}
- if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
- tp->mac_version == RTL_GIGA_MAC_VER_28 ||
- tp->mac_version == RTL_GIGA_MAC_VER_31 ||
- tp->mac_version == RTL_GIGA_MAC_VER_49 ||
- tp->mac_version == RTL_GIGA_MAC_VER_50 ||
- tp->mac_version == RTL_GIGA_MAC_VER_51) &&
- r8168_check_dash(tp)) {
+ if (r8168_check_dash(tp))
rtl8168_driver_start(tp);
- }
netif_carrier_off(dev);
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 96a27b00c90e..b81f4faf7b10 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1018,6 +1018,7 @@ struct ravb_private {
u32 dirty_rx[NUM_RX_QUEUE]; /* Producer ring indices */
u32 cur_tx[NUM_TX_QUEUE];
u32 dirty_tx[NUM_TX_QUEUE];
+ u32 rx_buf_sz; /* Based on MTU+slack. */
struct napi_struct napi[NUM_RX_QUEUE];
struct work_struct work;
/* MII transceiver section. */
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index a95fbd5510d9..54a6265da7a0 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -238,7 +238,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
le32_to_cpu(desc->dptr)))
dma_unmap_single(ndev->dev.parent,
le32_to_cpu(desc->dptr),
- PKT_BUF_SZ,
+ priv->rx_buf_sz,
DMA_FROM_DEVICE);
}
ring_size = sizeof(struct ravb_ex_rx_desc) *
@@ -300,9 +300,9 @@ static void ravb_ring_format(struct net_device *ndev, int q)
for (i = 0; i < priv->num_rx_ring[q]; i++) {
/* RX descriptor */
rx_desc = &priv->rx_ring[q][i];
- rx_desc->ds_cc = cpu_to_le16(PKT_BUF_SZ);
+ rx_desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
- PKT_BUF_SZ,
+ priv->rx_buf_sz,
DMA_FROM_DEVICE);
/* We just set the data size to 0 for a failed mapping which
* should prevent DMA from happening...
@@ -346,6 +346,10 @@ static int ravb_ring_init(struct net_device *ndev, int q)
int ring_size;
int i;
+	/* Make room for the Ethernet header and an optional VLAN tag on top of the MTU. */
+ priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) +
+ ETH_HLEN + VLAN_HLEN;
+
/* Allocate RX and TX skb rings */
priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
sizeof(*priv->rx_skb[q]), GFP_KERNEL);
@@ -355,7 +359,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
goto error;
for (i = 0; i < priv->num_rx_ring[q]; i++) {
- skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
+ skb = netdev_alloc_skb(ndev, priv->rx_buf_sz + RAVB_ALIGN - 1);
if (!skb)
goto error;
ravb_set_buffer_align(skb);
@@ -586,7 +590,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
skb = priv->rx_skb[q][entry];
priv->rx_skb[q][entry] = NULL;
dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- PKT_BUF_SZ,
+ priv->rx_buf_sz,
DMA_FROM_DEVICE);
get_ts &= (q == RAVB_NC) ?
RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
@@ -619,11 +623,12 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
desc = &priv->rx_ring[q][entry];
- desc->ds_cc = cpu_to_le16(PKT_BUF_SZ);
+ desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
if (!priv->rx_skb[q][entry]) {
skb = netdev_alloc_skb(ndev,
- PKT_BUF_SZ + RAVB_ALIGN - 1);
+ priv->rx_buf_sz +
+ RAVB_ALIGN - 1);
if (!skb)
break; /* Better luck next round. */
ravb_set_buffer_align(skb);
@@ -1854,6 +1859,17 @@ static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
return phy_mii_ioctl(phydev, req, cmd);
}
+static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ if (netif_running(ndev))
+ return -EBUSY;
+
+ ndev->mtu = new_mtu;
+ netdev_update_features(ndev);
+
+ return 0;
+}
+
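
ravb_change_mtu() deliberately returns -EBUSY while the interface is up: rx_buf_sz is derived from the MTU in ravb_ring_init(), which only runs on open, so an ifdown/ifup cycle is what actually resizes the RX buffers. Worked numbers for the sizing introduced above (assuming the usual 14-byte Ethernet header and 4-byte VLAN tag):

/* MTU 1500: rx_buf_sz = 1500 + ETH_HLEN (14) + VLAN_HLEN (4) = 1518 */
/* MTU <= 1492: rx_buf_sz = PKT_BUF_SZ + 18, the driver's default floor */
/* largest accepted MTU: 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) = 2026 */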
static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
{
struct ravb_private *priv = netdev_priv(ndev);
@@ -1895,6 +1911,7 @@ static const struct net_device_ops ravb_netdev_ops = {
.ndo_set_rx_mode = ravb_set_rx_mode,
.ndo_tx_timeout = ravb_tx_timeout,
.ndo_do_ioctl = ravb_do_ioctl,
+ .ndo_change_mtu = ravb_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_features = ravb_set_features,
@@ -2117,6 +2134,9 @@ static int ravb_probe(struct platform_device *pdev)
goto out_release;
}
+ ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
+ ndev->min_mtu = ETH_MIN_MTU;
+
/* Set function */
ndev->netdev_ops = &ravb_netdev_ops;
ndev->ethtool_ops = &ravb_ethtool_ops;
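The new bounds make the arithmetic explicit (ETH_HLEN = 14, VLAN_HLEN = 4, ETH_FCS_LEN = 4): max_mtu = 2048 - 22 = 2026 bytes, the largest payload that still fits a 2 KiB receive buffer once the Ethernet header, one VLAN tag and the FCS are accounted for. ravb_change_mtu() above rejects changes on a running interface with -EBUSY because priv->rx_buf_sz is only recomputed when the rings are rebuilt in ravb_ring_init().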
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 14c839bb09e7..3557fe3f2bb5 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -123,8 +123,8 @@ static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
[TSU_FWSL0] = 0x0030,
[TSU_FWSL1] = 0x0034,
[TSU_FWSLC] = 0x0038,
- [TSU_QTAG0] = 0x0040,
- [TSU_QTAG1] = 0x0044,
+ [TSU_QTAGM0] = 0x0040,
+ [TSU_QTAGM1] = 0x0044,
[TSU_FWSR] = 0x0050,
[TSU_FWINMK] = 0x0054,
[TSU_ADQT0] = 0x0048,
@@ -763,6 +763,7 @@ static struct sh_eth_cpu_data sh7757_data = {
.rpadir = 1,
.rpadir_value = 2 << 16,
.rtrate = 1,
+ .dual_port = 1,
};
#define SH_GIGA_ETH_BASE 0xfee00000UL
@@ -841,6 +842,7 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
.no_trimd = 1,
.no_ade = 1,
.tsu = 1,
+ .dual_port = 1,
};
/* SH7734 */
@@ -911,6 +913,7 @@ static struct sh_eth_cpu_data sh7763_data = {
.tsu = 1,
.irq_flags = IRQF_SHARED,
.magic = 1,
+ .dual_port = 1,
};
static struct sh_eth_cpu_data sh7619_data = {
@@ -943,6 +946,7 @@ static struct sh_eth_cpu_data sh771x_data = {
EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
EESIPR_PREIP | EESIPR_CERFIP,
.tsu = 1,
+ .dual_port = 1,
};
static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
@@ -972,20 +976,16 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
static int sh_eth_check_reset(struct net_device *ndev)
{
- int ret = 0;
- int cnt = 100;
+ int cnt;
- while (cnt > 0) {
+ for (cnt = 100; cnt > 0; cnt--) {
if (!(sh_eth_read(ndev, EDMR) & EDMR_SRST_GETHER))
- break;
+ return 0;
mdelay(1);
- cnt--;
- }
- if (cnt <= 0) {
- netdev_err(ndev, "Device reset failed\n");
- ret = -ETIMEDOUT;
}
- return ret;
+
+ netdev_err(ndev, "Device reset failed\n");
+ return -ETIMEDOUT;
}
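The rewritten sh_eth_check_reset() is the standard poll-with-timeout shape, now with a single success exit and a single failure exit. A minimal sketch of the idiom in isolation, with a hypothetical device_ready() predicate standing in for the EDMR_SRST_GETHER test (the 100-poll budget and 1 ms delay mirror the function above):

	/* Poll-with-timeout sketch; device_ready() is illustrative,
	 * not part of the driver.
	 */
	static int poll_until_ready(void)
	{
		int cnt;

		for (cnt = 100; cnt > 0; cnt--) {
			if (device_ready())
				return 0;	/* condition met: done */
			mdelay(1);		/* wait before polling again */
		}

		return -ETIMEDOUT;		/* budget exhausted */
	}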
static int sh_eth_reset(struct net_device *ndev)
@@ -2112,8 +2112,6 @@ static size_t __sh_eth_get_regs(struct net_device *ndev, u32 *buf)
add_tsu_reg(TSU_FWSL0);
add_tsu_reg(TSU_FWSL1);
add_tsu_reg(TSU_FWSLC);
- add_tsu_reg(TSU_QTAG0);
- add_tsu_reg(TSU_QTAG1);
add_tsu_reg(TSU_QTAGM0);
add_tsu_reg(TSU_QTAGM1);
add_tsu_reg(TSU_FWSR);
@@ -2932,7 +2930,7 @@ static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
/* SuperH's TSU register init function */
static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
- if (sh_eth_is_rz_fast_ether(mdp)) {
+ if (!mdp->cd->dual_port) {
sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
TSU_FWSLC); /* Enable POST registers */
@@ -2949,13 +2947,8 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
- if (sh_eth_is_gether(mdp)) {
- sh_eth_tsu_write(mdp, 0, TSU_QTAG0); /* Disable QTAG(0->1) */
- sh_eth_tsu_write(mdp, 0, TSU_QTAG1); /* Disable QTAG(1->0) */
- } else {
- sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */
- sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */
- }
+ sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */
+ sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */
sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */
sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */
sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index e5fe70134690..21047d58a93f 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -118,8 +118,8 @@ enum {
TSU_FWSL0,
TSU_FWSL1,
TSU_FWSLC,
- TSU_QTAG0,
- TSU_QTAG1,
+ TSU_QTAG0, /* Same as TSU_QTAGM0 */
+ TSU_QTAG1, /* Same as TSU_QTAGM1 */
TSU_QTAGM0,
TSU_QTAGM1,
TSU_FWSR,
@@ -509,6 +509,7 @@ struct sh_eth_cpu_data {
unsigned rmiimode:1; /* EtherC has RMIIMODE register */
unsigned rtrate:1; /* EtherC has RTRATE register */
unsigned magic:1; /* EtherC has ECMR.MPDE and ECSR.MPD */
+ unsigned dual_port:1; /* Dual EtherC/E-DMAC */
};
struct sh_eth_private {
diff --git a/drivers/net/ethernet/sfc/falcon/enum.h b/drivers/net/ethernet/sfc/falcon/enum.h
index 30a1136fc909..4824fcf5c3d4 100644
--- a/drivers/net/ethernet/sfc/falcon/enum.h
+++ b/drivers/net/ethernet/sfc/falcon/enum.h
@@ -81,7 +81,6 @@ enum ef4_loopback_mode {
(1 << LOOPBACK_XAUI) | \
(1 << LOOPBACK_GMII) | \
(1 << LOOPBACK_SGMII) | \
- (1 << LOOPBACK_SGMII) | \
(1 << LOOPBACK_XGBR) | \
(1 << LOOPBACK_XFI) | \
(1 << LOOPBACK_XAUI_FAR) | \
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index 5270d26f0bc6..2d5d4aea3bcb 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -48,26 +48,18 @@
#define MUX_CLK_NUM_PARENTS 2
struct meson8b_dwmac {
- struct platform_device *pdev;
-
+ struct device *dev;
void __iomem *regs;
-
phy_interface_t phy_mode;
+ struct clk *rgmii_tx_clk;
+ u32 tx_delay_ns;
+};
+struct meson8b_dwmac_clk_configs {
struct clk_mux m250_mux;
- struct clk *m250_mux_clk;
- struct clk *m250_mux_parent[MUX_CLK_NUM_PARENTS];
-
struct clk_divider m250_div;
- struct clk *m250_div_clk;
-
struct clk_fixed_factor fixed_div2;
- struct clk *fixed_div2_clk;
-
struct clk_gate rgmii_tx_en;
- struct clk *rgmii_tx_en_clk;
-
- u32 tx_delay_ns;
};
static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg,
@@ -82,106 +74,99 @@ static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg,
writel(data, dwmac->regs + reg);
}
-static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
+static struct clk *meson8b_dwmac_register_clk(struct meson8b_dwmac *dwmac,
+ const char *name_suffix,
+ const char **parent_names,
+ int num_parents,
+ const struct clk_ops *ops,
+ struct clk_hw *hw)
{
struct clk_init_data init;
- int i, ret;
- struct device *dev = &dwmac->pdev->dev;
char clk_name[32];
- const char *clk_div_parents[1];
- const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
+
+ snprintf(clk_name, sizeof(clk_name), "%s#%s", dev_name(dwmac->dev),
+ name_suffix);
+
+ init.name = clk_name;
+ init.ops = ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_names = parent_names;
+ init.num_parents = num_parents;
+
+ hw->init = &init;
+
+ return devm_clk_register(dwmac->dev, hw);
+}
+
+static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
+{
+ int i, ret;
+ struct clk *clk;
+ struct device *dev = dwmac->dev;
+ const char *parent_name, *mux_parent_names[MUX_CLK_NUM_PARENTS];
+ struct meson8b_dwmac_clk_configs *clk_configs;
+
+ clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL);
+ if (!clk_configs)
+ return -ENOMEM;
/* get the mux parents from DT */
for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) {
char name[16];
snprintf(name, sizeof(name), "clkin%d", i);
- dwmac->m250_mux_parent[i] = devm_clk_get(dev, name);
- if (IS_ERR(dwmac->m250_mux_parent[i])) {
- ret = PTR_ERR(dwmac->m250_mux_parent[i]);
+ clk = devm_clk_get(dev, name);
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
if (ret != -EPROBE_DEFER)
dev_err(dev, "Missing clock %s\n", name);
return ret;
}
- mux_parent_names[i] =
- __clk_get_name(dwmac->m250_mux_parent[i]);
+ mux_parent_names[i] = __clk_get_name(clk);
}
- /* create the m250_mux */
- snprintf(clk_name, sizeof(clk_name), "%s#m250_sel", dev_name(dev));
- init.name = clk_name;
- init.ops = &clk_mux_ops;
- init.flags = CLK_SET_RATE_PARENT;
- init.parent_names = mux_parent_names;
- init.num_parents = MUX_CLK_NUM_PARENTS;
-
- dwmac->m250_mux.reg = dwmac->regs + PRG_ETH0;
- dwmac->m250_mux.shift = PRG_ETH0_CLK_M250_SEL_SHIFT;
- dwmac->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK;
- dwmac->m250_mux.flags = 0;
- dwmac->m250_mux.table = NULL;
- dwmac->m250_mux.hw.init = &init;
-
- dwmac->m250_mux_clk = devm_clk_register(dev, &dwmac->m250_mux.hw);
- if (WARN_ON(IS_ERR(dwmac->m250_mux_clk)))
- return PTR_ERR(dwmac->m250_mux_clk);
-
- /* create the m250_div */
- snprintf(clk_name, sizeof(clk_name), "%s#m250_div", dev_name(dev));
- init.name = devm_kstrdup(dev, clk_name, GFP_KERNEL);
- init.ops = &clk_divider_ops;
- init.flags = CLK_SET_RATE_PARENT;
- clk_div_parents[0] = __clk_get_name(dwmac->m250_mux_clk);
- init.parent_names = clk_div_parents;
- init.num_parents = ARRAY_SIZE(clk_div_parents);
-
- dwmac->m250_div.reg = dwmac->regs + PRG_ETH0;
- dwmac->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT;
- dwmac->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH;
- dwmac->m250_div.hw.init = &init;
- dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED |
+ clk_configs->m250_mux.reg = dwmac->regs + PRG_ETH0;
+ clk_configs->m250_mux.shift = PRG_ETH0_CLK_M250_SEL_SHIFT;
+ clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK;
+ clk = meson8b_dwmac_register_clk(dwmac, "m250_sel", mux_parent_names,
+ MUX_CLK_NUM_PARENTS, &clk_mux_ops,
+ &clk_configs->m250_mux.hw);
+ if (WARN_ON(IS_ERR(clk)))
+ return PTR_ERR(clk);
+
+ parent_name = __clk_get_name(clk);
+ clk_configs->m250_div.reg = dwmac->regs + PRG_ETH0;
+ clk_configs->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT;
+ clk_configs->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH;
+ clk_configs->m250_div.flags = CLK_DIVIDER_ONE_BASED |
CLK_DIVIDER_ALLOW_ZERO |
CLK_DIVIDER_ROUND_CLOSEST;
-
- dwmac->m250_div_clk = devm_clk_register(dev, &dwmac->m250_div.hw);
- if (WARN_ON(IS_ERR(dwmac->m250_div_clk)))
- return PTR_ERR(dwmac->m250_div_clk);
-
- /* create the fixed_div2 */
- snprintf(clk_name, sizeof(clk_name), "%s#fixed_div2", dev_name(dev));
- init.name = devm_kstrdup(dev, clk_name, GFP_KERNEL);
- init.ops = &clk_fixed_factor_ops;
- init.flags = CLK_SET_RATE_PARENT;
- clk_div_parents[0] = __clk_get_name(dwmac->m250_div_clk);
- init.parent_names = clk_div_parents;
- init.num_parents = ARRAY_SIZE(clk_div_parents);
-
- dwmac->fixed_div2.mult = 1;
- dwmac->fixed_div2.div = 2;
- dwmac->fixed_div2.hw.init = &init;
-
- dwmac->fixed_div2_clk = devm_clk_register(dev, &dwmac->fixed_div2.hw);
- if (WARN_ON(IS_ERR(dwmac->fixed_div2_clk)))
- return PTR_ERR(dwmac->fixed_div2_clk);
-
- /* create the rgmii_tx_en */
- init.name = devm_kasprintf(dev, GFP_KERNEL, "%s#rgmii_tx_en",
- dev_name(dev));
- init.ops = &clk_gate_ops;
- init.flags = CLK_SET_RATE_PARENT;
- clk_div_parents[0] = __clk_get_name(dwmac->fixed_div2_clk);
- init.parent_names = clk_div_parents;
- init.num_parents = ARRAY_SIZE(clk_div_parents);
-
- dwmac->rgmii_tx_en.reg = dwmac->regs + PRG_ETH0;
- dwmac->rgmii_tx_en.bit_idx = PRG_ETH0_RGMII_TX_CLK_EN;
- dwmac->rgmii_tx_en.hw.init = &init;
-
- dwmac->rgmii_tx_en_clk = devm_clk_register(dev,
- &dwmac->rgmii_tx_en.hw);
- if (WARN_ON(IS_ERR(dwmac->rgmii_tx_en_clk)))
- return PTR_ERR(dwmac->rgmii_tx_en_clk);
+ clk = meson8b_dwmac_register_clk(dwmac, "m250_div", &parent_name, 1,
+ &clk_divider_ops,
+ &clk_configs->m250_div.hw);
+ if (WARN_ON(IS_ERR(clk)))
+ return PTR_ERR(clk);
+
+ parent_name = __clk_get_name(clk);
+ clk_configs->fixed_div2.mult = 1;
+ clk_configs->fixed_div2.div = 2;
+ clk = meson8b_dwmac_register_clk(dwmac, "fixed_div2", &parent_name, 1,
+ &clk_fixed_factor_ops,
+ &clk_configs->fixed_div2.hw);
+ if (WARN_ON(IS_ERR(clk)))
+ return PTR_ERR(clk);
+
+ parent_name = __clk_get_name(clk);
+ clk_configs->rgmii_tx_en.reg = dwmac->regs + PRG_ETH0;
+ clk_configs->rgmii_tx_en.bit_idx = PRG_ETH0_RGMII_TX_CLK_EN;
+ clk = meson8b_dwmac_register_clk(dwmac, "rgmii_tx_en", &parent_name, 1,
+ &clk_gate_ops,
+ &clk_configs->rgmii_tx_en.hw);
+ if (WARN_ON(IS_ERR(clk)))
+ return PTR_ERR(clk);
+
+ dwmac->rgmii_tx_clk = clk;
return 0;
}
@@ -219,19 +204,23 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
* a register) based on the line-speed (125MHz for Gbit speeds,
* 25MHz for 100Mbit/s and 2.5MHz for 10Mbit/s).
*/
- ret = clk_set_rate(dwmac->rgmii_tx_en_clk, 125 * 1000 * 1000);
+ ret = clk_set_rate(dwmac->rgmii_tx_clk, 125 * 1000 * 1000);
if (ret) {
- dev_err(&dwmac->pdev->dev,
+ dev_err(dwmac->dev,
"failed to set RGMII TX clock\n");
return ret;
}
- ret = clk_prepare_enable(dwmac->rgmii_tx_en_clk);
+ ret = clk_prepare_enable(dwmac->rgmii_tx_clk);
if (ret) {
- dev_err(&dwmac->pdev->dev,
+ dev_err(dwmac->dev,
"failed to enable the RGMII TX clock\n");
return ret;
}
+
+ devm_add_action_or_reset(dwmac->dev,
+ (void(*)(void *))clk_disable_unprepare,
+ dwmac->rgmii_tx_clk);
break;
case PHY_INTERFACE_MODE_RMII:
@@ -251,7 +240,7 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
break;
default:
- dev_err(&dwmac->pdev->dev, "unsupported phy-mode %s\n",
+ dev_err(dwmac->dev, "unsupported phy-mode %s\n",
phy_modes(dwmac->phy_mode));
return -EINVAL;
}
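With the clock now released through a devm action (registered just above in the RGMII branch), the explicit error-path and .remove cleanups below can go away. The patch casts clk_disable_unprepare() directly to void (*)(void *); a small wrapper, as in this hedged sketch with illustrative my_* names, is the more conventional shape:

	/* devm cleanup-action pattern: the action runs automatically at
	 * unbind, or immediately if registration fails, so the enabled
	 * clock cannot leak on any exit path.
	 */
	static void my_clk_disable(void *data)
	{
		clk_disable_unprepare(data);
	}

	static int my_enable_clk(struct device *dev, struct clk *clk)
	{
		int ret = clk_prepare_enable(clk);

		if (ret)
			return ret;

		return devm_add_action_or_reset(dev, my_clk_disable, clk);
	}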
@@ -292,7 +281,7 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
- dwmac->pdev = pdev;
+ dwmac->dev = &pdev->dev;
dwmac->phy_mode = of_get_phy_mode(pdev->dev.of_node);
if (dwmac->phy_mode < 0) {
dev_err(&pdev->dev, "missing phy-mode property\n");
@@ -317,29 +306,16 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
if (ret)
- goto err_clk_disable;
+ goto err_remove_config_dt;
return 0;
-err_clk_disable:
- if (phy_interface_mode_is_rgmii(dwmac->phy_mode))
- clk_disable_unprepare(dwmac->rgmii_tx_en_clk);
err_remove_config_dt:
stmmac_remove_config_dt(pdev, plat_dat);
return ret;
}
-static int meson8b_dwmac_remove(struct platform_device *pdev)
-{
- struct meson8b_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
-
- if (phy_interface_mode_is_rgmii(dwmac->phy_mode))
- clk_disable_unprepare(dwmac->rgmii_tx_en_clk);
-
- return stmmac_pltfr_remove(pdev);
-}
-
static const struct of_device_id meson8b_dwmac_match[] = {
{ .compatible = "amlogic,meson8b-dwmac" },
{ .compatible = "amlogic,meson-gxbb-dwmac" },
@@ -349,7 +325,7 @@ MODULE_DEVICE_TABLE(of, meson8b_dwmac_match);
static struct platform_driver meson8b_dwmac_driver = {
.probe = meson8b_dwmac_probe,
- .remove = meson8b_dwmac_remove,
+ .remove = stmmac_pltfr_remove,
.driver = {
.name = "meson8b-dwmac",
.pm = &stmmac_pltfr_pm_ops,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 63795ecafc8d..46b9ae20ff6c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -120,7 +120,7 @@ static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
writel(value, ioaddr + base_register);
}
-static void dwmac4_tx_queue_routing(struct mac_device_info *hw,
+static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
u8 packet, u32 queue)
{
void __iomem *ioaddr = hw->pcsr;
@@ -713,7 +713,7 @@ static const struct stmmac_ops dwmac4_ops = {
.rx_queue_enable = dwmac4_rx_queue_enable,
.rx_queue_prio = dwmac4_rx_queue_priority,
.tx_queue_prio = dwmac4_tx_queue_priority,
- .rx_queue_routing = dwmac4_tx_queue_routing,
+ .rx_queue_routing = dwmac4_rx_queue_routing,
.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
@@ -744,7 +744,7 @@ static const struct stmmac_ops dwmac410_ops = {
.rx_queue_enable = dwmac4_rx_queue_enable,
.rx_queue_prio = dwmac4_rx_queue_priority,
.tx_queue_prio = dwmac4_tx_queue_priority,
- .rx_queue_routing = dwmac4_tx_queue_routing,
+ .rx_queue_routing = dwmac4_rx_queue_routing,
.prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
.prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
.set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
index c728ffa095de..2a6521d33e43 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -389,6 +389,8 @@ static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
static void dwmac4_release_tx_desc(struct dma_desc *p, int mode)
{
+ p->des0 = 0;
+ p->des1 = 0;
p->des2 = 0;
p->des3 = 0;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index a916e13624eb..75161e1b7e55 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -58,6 +58,7 @@ struct stmmac_tx_queue {
unsigned int dirty_tx;
dma_addr_t dma_tx_phy;
u32 tx_tail_addr;
+ u32 mss;
};
struct stmmac_rx_queue {
@@ -138,7 +139,6 @@ struct stmmac_priv {
spinlock_t ptp_lock;
void __iomem *mmcaddr;
void __iomem *ptpaddr;
- u32 mss;
#ifdef CONFIG_DEBUG_FS
struct dentry *dbgfs_dir;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 7ad841434ec8..a9856a8bf8ad 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1355,6 +1355,7 @@ static int init_dma_tx_desc_rings(struct net_device *dev)
tx_q->dirty_tx = 0;
tx_q->cur_tx = 0;
+ tx_q->mss = 0;
netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
}
@@ -1843,6 +1844,11 @@ static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
if (unlikely(status & tx_dma_own))
break;
+ /* Make sure descriptor fields are read after reading
+ * the own bit.
+ */
+ dma_rmb();
+
/* Just consider the last segment and ...*/
if (likely(!(status & tx_not_ls))) {
/* ... verify the status error condition */
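The dma_rmb() inserted above pairs with a dma_wmb() on the side that publishes descriptors to the DMA engine (see the mss_desc hunk below): the writer must make all fields visible before setting the own bit, and the reader must not look at the fields until after it has seen the own bit clear. A condensed kernel-style sketch of the protocol, with simplified types and an illustrative OWN_BIT layout rather than the driver's real descriptors:

	#define OWN_BIT		(1U << 31)

	struct tx_desc {
		u32 des0, des1, des2, des3;	/* des3 carries OWN_BIT */
	};

	/* producer: fill the descriptor, then hand it to the hardware */
	static void tx_desc_publish(struct tx_desc *desc, u32 addr, u32 len)
	{
		desc->des0 = addr;
		desc->des2 = len;
		dma_wmb();		/* fields visible before the own bit */
		desc->des3 = OWN_BIT;
	}

	/* consumer: read the fields only after seeing the own bit clear */
	static int tx_desc_reclaim(struct tx_desc *desc, u32 *len)
	{
		if (desc->des3 & OWN_BIT)
			return -EBUSY;	/* still owned by hardware */
		dma_rmb();		/* pairs with the dma_wmb() above */
		*len = desc->des2;
		return 0;
	}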
@@ -1946,6 +1952,7 @@ static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
(i == DMA_TX_SIZE - 1));
tx_q->dirty_tx = 0;
tx_q->cur_tx = 0;
+ tx_q->mss = 0;
netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
stmmac_start_tx_dma(priv, chan);
@@ -2430,7 +2437,7 @@ static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
continue;
packet = priv->plat->rx_queues_cfg[queue].pkt_route;
- priv->hw->mac->rx_queue_prio(priv->hw, packet, queue);
+ priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
}
}
@@ -2632,7 +2639,6 @@ static int stmmac_open(struct net_device *dev)
priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
priv->rx_copybreak = STMMAC_RX_COPYBREAK;
- priv->mss = 0;
ret = alloc_dma_desc_resources(priv);
if (ret < 0) {
@@ -2793,6 +2799,7 @@ static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
while (tmp_len > 0) {
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+ WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
desc = tx_q->dma_tx + tx_q->cur_tx;
desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
@@ -2872,11 +2879,12 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
mss = skb_shinfo(skb)->gso_size;
/* set new MSS value if needed */
- if (mss != priv->mss) {
+ if (mss != tx_q->mss) {
mss_desc = tx_q->dma_tx + tx_q->cur_tx;
priv->hw->desc->set_mss(mss_desc, mss);
- priv->mss = mss;
+ tx_q->mss = mss;
tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
+ WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
}
if (netif_msg_tx_queued(priv)) {
@@ -2887,6 +2895,7 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
}
first_entry = tx_q->cur_tx;
+ WARN_ON(tx_q->tx_skbuff[first_entry]);
desc = tx_q->dma_tx + first_entry;
first = desc;
@@ -2926,7 +2935,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
- tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
}
@@ -2980,14 +2988,21 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
/* If context desc is used to change MSS */
- if (mss_desc)
+ if (mss_desc) {
+ /* Make sure that first descriptor has been completely
+ * written, including its own bit. This is because MSS is
+ * actually before first descriptor, so we need to make
+ * sure that MSS's own bit is the last thing written.
+ */
+ dma_wmb();
priv->hw->desc->set_tx_owner(mss_desc);
+ }
/* The own bit must be the latest setting done when prepare the
* descriptor and then barrier is needed to make sure that
* all is coherent before granting the DMA engine.
*/
- dma_wmb();
+ wmb();
if (netif_msg_pktdata(priv)) {
pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
@@ -3062,6 +3077,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
entry = tx_q->cur_tx;
first_entry = entry;
+ WARN_ON(tx_q->tx_skbuff[first_entry]);
csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
@@ -3090,6 +3106,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
bool last_segment = (i == (nfrags - 1));
entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
+ WARN_ON(tx_q->tx_skbuff[entry]);
if (likely(priv->extend_desc))
desc = (struct dma_desc *)(tx_q->dma_etx + entry);
@@ -3101,8 +3118,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
if (dma_mapping_error(priv->device, des))
goto dma_map_err; /* should reuse desc w/o issues */
- tx_q->tx_skbuff[entry] = NULL;
-
tx_q->tx_skbuff_dma[entry].buf = des;
if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
desc->des0 = cpu_to_le32(des);
@@ -3211,7 +3226,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
* descriptor and then barrier is needed to make sure that
* all is coherent before granting the DMA engine.
*/
- dma_wmb();
+ wmb();
}
netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
@@ -4436,6 +4451,7 @@ static void stmmac_reset_queues_param(struct stmmac_priv *priv)
tx_q->cur_tx = 0;
tx_q->dirty_tx = 0;
+ tx_q->mss = 0;
}
}
@@ -4481,11 +4497,6 @@ int stmmac_resume(struct device *dev)
stmmac_reset_queues_param(priv);
- /* reset private mss value to force mss context settings at
- * next tso xmit (only used for gmac4).
- */
- priv->mss = 0;
-
stmmac_clear_descriptors(priv);
stmmac_hw_setup(ndev, false);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 05f122b8424a..ebd3e5ffa73c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -135,13 +135,14 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
* stmmac_mtl_setup - parse DT parameters for multiple queues configuration
* @pdev: platform device
*/
-static void stmmac_mtl_setup(struct platform_device *pdev,
- struct plat_stmmacenet_data *plat)
+static int stmmac_mtl_setup(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat)
{
struct device_node *q_node;
struct device_node *rx_node;
struct device_node *tx_node;
u8 queue = 0;
+ int ret = 0;
/* For backwards-compatibility with device trees that don't have any
* snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back
@@ -159,12 +160,12 @@ static void stmmac_mtl_setup(struct platform_device *pdev,
rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
if (!rx_node)
- return;
+ return ret;
tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0);
if (!tx_node) {
of_node_put(rx_node);
- return;
+ return ret;
}
/* Processing RX queues common config */
@@ -220,6 +221,11 @@ static void stmmac_mtl_setup(struct platform_device *pdev,
queue++;
}
+ if (queue != plat->rx_queues_to_use) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "Not all RX queues were configured\n");
+ goto out;
+ }
/* Processing TX queues common config */
if (of_property_read_u32(tx_node, "snps,tx-queues-to-use",
@@ -281,10 +287,18 @@ static void stmmac_mtl_setup(struct platform_device *pdev,
queue++;
}
+ if (queue != plat->tx_queues_to_use) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "Not all TX queues were configured\n");
+ goto out;
+ }
+out:
of_node_put(rx_node);
of_node_put(tx_node);
of_node_put(q_node);
+
+ return ret;
}
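As a concrete failure case: a device tree that sets snps,rx-queues-to-use = <4> (parsed earlier in this function) but lists only three queue subnodes under snps,mtl-rx-config now leaves queue == 3, trips the new check, and makes probing fail with -EINVAL instead of bringing the MAC up with a partially configured MTL; the TX side gets the same treatment.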
/**
@@ -376,6 +390,7 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
struct device_node *np = pdev->dev.of_node;
struct plat_stmmacenet_data *plat;
struct stmmac_dma_cfg *dma_cfg;
+ int rc;
plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
if (!plat)
@@ -402,8 +417,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
/* To Configure PHY by using all device-tree supported properties */
- if (stmmac_dt_phy(plat, np, &pdev->dev))
- return ERR_PTR(-ENODEV);
+ rc = stmmac_dt_phy(plat, np, &pdev->dev);
+ if (rc)
+ return ERR_PTR(rc);
of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);
@@ -499,7 +515,11 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
plat->axi = stmmac_axi_setup(pdev);
- stmmac_mtl_setup(pdev, plat);
+ rc = stmmac_mtl_setup(pdev, plat);
+ if (rc) {
+ stmmac_remove_config_dt(pdev, plat);
+ return ERR_PTR(rc);
+ }
/* clock setup */
plat->stmmac_clk = devm_clk_get(&pdev->dev,
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index b919e89a9b93..516dd59249d7 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1694,6 +1694,7 @@ static struct pernet_operations geneve_net_ops = {
.exit_batch = geneve_exit_batch_net,
.id = &geneve_net_id,
.size = sizeof(struct geneve_net),
+ .async = true,
};
static int __init geneve_init_module(void)
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index f38e32a7ec9c..127edd23018f 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -1325,6 +1325,7 @@ static struct pernet_operations gtp_net_ops = {
.exit = gtp_net_exit,
.id = &gtp_net_id,
.size = sizeof(struct gtp_net),
+ .async = true,
};
static int __init gtp_init(void)
diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
index 303ba4133920..8782f5655e3f 100644
--- a/drivers/net/ieee802154/Kconfig
+++ b/drivers/net/ieee802154/Kconfig
@@ -104,3 +104,14 @@ config IEEE802154_CA8210_DEBUGFS
exposes a debugfs node for each CA8210 instance which allows
direct use of the Cascoda API, exposing the 802.15.4 MAC
management entities.
+
+config IEEE802154_MCR20A
+ tristate "MCR20A transceiver driver"
+ depends on IEEE802154_DRIVERS && MAC802154
+ depends on SPI
+ ---help---
+ Say Y here to enable the MCR20A SPI 802.15.4 wireless
+ controller.
+
+	  This driver can also be built as a module. To do so, say M here:
+	  the module will be called 'mcr20a'.
diff --git a/drivers/net/ieee802154/Makefile b/drivers/net/ieee802154/Makefile
index bea1de5e726c..104744d5a668 100644
--- a/drivers/net/ieee802154/Makefile
+++ b/drivers/net/ieee802154/Makefile
@@ -6,3 +6,4 @@ obj-$(CONFIG_IEEE802154_CC2520) += cc2520.o
obj-$(CONFIG_IEEE802154_ATUSB) += atusb.o
obj-$(CONFIG_IEEE802154_ADF7242) += adf7242.o
obj-$(CONFIG_IEEE802154_CA8210) += ca8210.o
+obj-$(CONFIG_IEEE802154_MCR20A) += mcr20a.o
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
new file mode 100644
index 000000000000..d9eb22a52551
--- /dev/null
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -0,0 +1,1413 @@
+/*
+ * Driver for NXP MCR20A 802.15.4 Wireless-PAN Networking controller
+ *
+ * Copyright (C) 2018 Xue Liu <liuxuenetmail@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/of_gpio.h>
+#include <linux/regmap.h>
+#include <linux/ieee802154.h>
+#include <linux/debugfs.h>
+
+#include <net/mac802154.h>
+#include <net/cfg802154.h>
+
+#include <linux/device.h>
+
+#include "mcr20a.h"
+
+#define SPI_COMMAND_BUFFER 3
+
+#define REGISTER_READ BIT(7)
+#define REGISTER_WRITE (0 << 7)
+#define REGISTER_ACCESS (0 << 6)
+#define PACKET_BUFF_BURST_ACCESS BIT(6)
+#define PACKET_BUFF_BYTE_ACCESS BIT(5)
+
+#define MCR20A_WRITE_REG(x) (x)
+#define MCR20A_READ_REG(x) (REGISTER_READ | (x))
+#define MCR20A_BURST_READ_PACKET_BUF (0xC0)
+#define MCR20A_BURST_WRITE_PACKET_BUF (0x40)
+
+#define MCR20A_CMD_REG 0x80
+#define MCR20A_CMD_REG_MASK 0x3f
+#define MCR20A_CMD_WRITE 0x40
+#define MCR20A_CMD_FB 0x20
+
+/* Number of Interrupt Request Status Registers */
+#define MCR20A_IRQSTS_NUM 2 /* only IRQ_STS1 and IRQ_STS2 */
+
+/* MCR20A CCA Type */
+enum {
+ MCR20A_CCA_ED, // energy detect - CCA bit not active,
+ // not to be used for T and CCCA sequences
+ MCR20A_CCA_MODE1, // energy detect - CCA bit ACTIVE
+ MCR20A_CCA_MODE2, // 802.15.4 compliant signal detect - CCA bit ACTIVE
+ MCR20A_CCA_MODE3
+};
+
+enum {
+ MCR20A_XCVSEQ_IDLE = 0x00,
+ MCR20A_XCVSEQ_RX = 0x01,
+ MCR20A_XCVSEQ_TX = 0x02,
+ MCR20A_XCVSEQ_CCA = 0x03,
+ MCR20A_XCVSEQ_TR = 0x04,
+ MCR20A_XCVSEQ_CCCA = 0x05,
+};
+
+/* IEEE-802.15.4 defined constants (2.4 GHz logical channels) */
+#define MCR20A_MIN_CHANNEL (11)
+#define MCR20A_MAX_CHANNEL (26)
+#define MCR20A_CHANNEL_SPACING (5)
+
+/* MCR20A CCA Threshold constants */
+#define MCR20A_MIN_CCA_THRESHOLD (0x6EU)
+#define MCR20A_MAX_CCA_THRESHOLD (0x00U)
+
+/* version 0C */
+#define MCR20A_OVERWRITE_VERSION (0x0C)
+
+/* MCR20A PLL configurations */
+static const u8 PLL_INT[16] = {
+ /* 2405 */ 0x0B, /* 2410 */ 0x0B, /* 2415 */ 0x0B,
+ /* 2420 */ 0x0B, /* 2425 */ 0x0B, /* 2430 */ 0x0B,
+ /* 2435 */ 0x0C, /* 2440 */ 0x0C, /* 2445 */ 0x0C,
+ /* 2450 */ 0x0C, /* 2455 */ 0x0C, /* 2460 */ 0x0C,
+ /* 2465 */ 0x0D, /* 2470 */ 0x0D, /* 2475 */ 0x0D,
+ /* 2480 */ 0x0D
+};
+
+static const u8 PLL_FRAC[16] = {
+ /* 2405 */ 0x28, /* 2410 */ 0x50, /* 2415 */ 0x78,
+ /* 2420 */ 0xA0, /* 2425 */ 0xC8, /* 2430 */ 0xF0,
+ /* 2435 */ 0x18, /* 2440 */ 0x40, /* 2445 */ 0x68,
+ /* 2450 */ 0x90, /* 2455 */ 0xB8, /* 2460 */ 0xE0,
+ /* 2465 */ 0x08, /* 2470 */ 0x30, /* 2475 */ 0x58,
+ /* 2480 */ 0x80
+};
+
+static const struct reg_sequence mar20a_iar_overwrites[] = {
+ { IAR_MISC_PAD_CTRL, 0x02 },
+ { IAR_VCO_CTRL1, 0xB3 },
+ { IAR_VCO_CTRL2, 0x07 },
+ { IAR_PA_TUNING, 0x71 },
+ { IAR_CHF_IBUF, 0x2F },
+ { IAR_CHF_QBUF, 0x2F },
+ { IAR_CHF_IRIN, 0x24 },
+ { IAR_CHF_QRIN, 0x24 },
+ { IAR_CHF_IL, 0x24 },
+ { IAR_CHF_QL, 0x24 },
+ { IAR_CHF_CC1, 0x32 },
+ { IAR_CHF_CCL, 0x1D },
+ { IAR_CHF_CC2, 0x2D },
+ { IAR_CHF_IROUT, 0x24 },
+ { IAR_CHF_QROUT, 0x24 },
+ { IAR_PA_CAL, 0x28 },
+ { IAR_AGC_THR1, 0x55 },
+ { IAR_AGC_THR2, 0x2D },
+ { IAR_ATT_RSSI1, 0x5F },
+ { IAR_ATT_RSSI2, 0x8F },
+ { IAR_RSSI_OFFSET, 0x61 },
+ { IAR_CHF_PMA_GAIN, 0x03 },
+ { IAR_CCA1_THRESH, 0x50 },
+ { IAR_CORR_NVAL, 0x13 },
+ { IAR_ACKDELAY, 0x3D },
+};
+
+#define MCR20A_VALID_CHANNELS (0x07FFF800)
+
+struct mcr20a_platform_data {
+ int rst_gpio;
+};
+
+#define MCR20A_MAX_BUF (127)
+
+#define printdev(X) (&X->spi->dev)
+
+/* regmap information for Direct Access Register (DAR) access */
+#define MCR20A_DAR_WRITE 0x01
+#define MCR20A_DAR_READ 0x00
+#define MCR20A_DAR_NUMREGS 0x3F
+
+/* regmap information for Indirect Access Register (IAR) access */
+#define MCR20A_IAR_ACCESS 0x80
+#define MCR20A_IAR_NUMREGS 0xBEFF
+
+/* Read/Write SPI Commands for DAR and IAR registers. */
+#define MCR20A_READSHORT(reg) ((reg) << 1)
+#define MCR20A_WRITESHORT(reg) ((reg) << 1 | 1)
+#define MCR20A_READLONG(reg) (1 << 15 | (reg) << 5)
+#define MCR20A_WRITELONG(reg) (1 << 15 | (reg) << 5 | 1 << 4)
+
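Worked encodings for the command words above (register addresses illustrative): MCR20A_READSHORT(0x23) = 0x23 << 1 = 0x46 and MCR20A_WRITESHORT(0x23) = 0x47, the low bit selecting a write; in the 16-bit long form, MCR20A_READLONG(0x52) = (1 << 15) | (0x52 << 5) = 0x8A40 and MCR20A_WRITELONG(0x52) = 0x8A50, with bit 15 flagging indirect (IAR) access and bit 4 the write direction.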
+/* Size of the hardware indirect (source address match) queue */
+#define MCR20A_PHY_INDIRECT_QUEUE_SIZE (12)
+
+static bool
+mcr20a_dar_writeable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case DAR_IRQ_STS1:
+ case DAR_IRQ_STS2:
+ case DAR_IRQ_STS3:
+ case DAR_PHY_CTRL1:
+ case DAR_PHY_CTRL2:
+ case DAR_PHY_CTRL3:
+ case DAR_PHY_CTRL4:
+ case DAR_SRC_CTRL:
+ case DAR_SRC_ADDRS_SUM_LSB:
+ case DAR_SRC_ADDRS_SUM_MSB:
+ case DAR_T3CMP_LSB:
+ case DAR_T3CMP_MSB:
+ case DAR_T3CMP_USB:
+ case DAR_T2PRIMECMP_LSB:
+ case DAR_T2PRIMECMP_MSB:
+ case DAR_T1CMP_LSB:
+ case DAR_T1CMP_MSB:
+ case DAR_T1CMP_USB:
+ case DAR_T2CMP_LSB:
+ case DAR_T2CMP_MSB:
+ case DAR_T2CMP_USB:
+ case DAR_T4CMP_LSB:
+ case DAR_T4CMP_MSB:
+ case DAR_T4CMP_USB:
+ case DAR_PLL_INT0:
+ case DAR_PLL_FRAC0_LSB:
+ case DAR_PLL_FRAC0_MSB:
+ case DAR_PA_PWR:
+ /* no DAR_ACM */
+ case DAR_OVERWRITE_VER:
+ case DAR_CLK_OUT_CTRL:
+ case DAR_PWR_MODES:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool
+mcr20a_dar_readable(struct device *dev, unsigned int reg)
+{
+ bool rc;
+
+ /* all writeable are also readable */
+ rc = mcr20a_dar_writeable(dev, reg);
+ if (rc)
+ return rc;
+
+ /* readonly regs */
+ switch (reg) {
+ case DAR_RX_FRM_LEN:
+ case DAR_CCA1_ED_FNL:
+ case DAR_EVENT_TMR_LSB:
+ case DAR_EVENT_TMR_MSB:
+ case DAR_EVENT_TMR_USB:
+ case DAR_TIMESTAMP_LSB:
+ case DAR_TIMESTAMP_MSB:
+ case DAR_TIMESTAMP_USB:
+ case DAR_SEQ_STATE:
+ case DAR_LQI_VALUE:
+ case DAR_RSSI_CCA_CONT:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool
+mcr20a_dar_volatile(struct device *dev, unsigned int reg)
+{
+ /* can be changed during runtime */
+ switch (reg) {
+ case DAR_IRQ_STS1:
+ case DAR_IRQ_STS2:
+ case DAR_IRQ_STS3:
+		/* read via spi_async() as well as regmap, so mark volatile */
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool
+mcr20a_dar_precious(struct device *dev, unsigned int reg)
+{
+ /* don't clear irq line on read */
+ switch (reg) {
+ case DAR_IRQ_STS1:
+ case DAR_IRQ_STS2:
+ case DAR_IRQ_STS3:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config mcr20a_dar_regmap = {
+ .name = "mcr20a_dar",
+ .reg_bits = 8,
+ .val_bits = 8,
+ .write_flag_mask = REGISTER_ACCESS | REGISTER_WRITE,
+ .read_flag_mask = REGISTER_ACCESS | REGISTER_READ,
+ .cache_type = REGCACHE_RBTREE,
+ .writeable_reg = mcr20a_dar_writeable,
+ .readable_reg = mcr20a_dar_readable,
+ .volatile_reg = mcr20a_dar_volatile,
+ .precious_reg = mcr20a_dar_precious,
+ .fast_io = true,
+ .can_multi_write = true,
+};
+
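Assuming the standard regmap-spi behaviour of OR-ing the flag masks into the register address byte, the DAR configuration above puts 0x9F (REGISTER_READ | 0x1F) on the wire to read register 0x1F and then clocks one byte back, while writing 0x42 to the same register simply sends 0x1F 0x42, since both REGISTER_WRITE and REGISTER_ACCESS are zero masks.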
+static bool
+mcr20a_iar_writeable(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case IAR_XTAL_TRIM:
+ case IAR_PMC_LP_TRIM:
+ case IAR_MACPANID0_LSB:
+ case IAR_MACPANID0_MSB:
+ case IAR_MACSHORTADDRS0_LSB:
+ case IAR_MACSHORTADDRS0_MSB:
+ case IAR_MACLONGADDRS0_0:
+ case IAR_MACLONGADDRS0_8:
+ case IAR_MACLONGADDRS0_16:
+ case IAR_MACLONGADDRS0_24:
+ case IAR_MACLONGADDRS0_32:
+ case IAR_MACLONGADDRS0_40:
+ case IAR_MACLONGADDRS0_48:
+ case IAR_MACLONGADDRS0_56:
+ case IAR_RX_FRAME_FILTER:
+ case IAR_PLL_INT1:
+ case IAR_PLL_FRAC1_LSB:
+ case IAR_PLL_FRAC1_MSB:
+ case IAR_MACPANID1_LSB:
+ case IAR_MACPANID1_MSB:
+ case IAR_MACSHORTADDRS1_LSB:
+ case IAR_MACSHORTADDRS1_MSB:
+ case IAR_MACLONGADDRS1_0:
+ case IAR_MACLONGADDRS1_8:
+ case IAR_MACLONGADDRS1_16:
+ case IAR_MACLONGADDRS1_24:
+ case IAR_MACLONGADDRS1_32:
+ case IAR_MACLONGADDRS1_40:
+ case IAR_MACLONGADDRS1_48:
+ case IAR_MACLONGADDRS1_56:
+ case IAR_DUAL_PAN_CTRL:
+ case IAR_DUAL_PAN_DWELL:
+ case IAR_CCA1_THRESH:
+ case IAR_CCA1_ED_OFFSET_COMP:
+ case IAR_LQI_OFFSET_COMP:
+ case IAR_CCA_CTRL:
+ case IAR_CCA2_CORR_PEAKS:
+ case IAR_CCA2_CORR_THRESH:
+ case IAR_TMR_PRESCALE:
+ case IAR_ANT_PAD_CTRL:
+ case IAR_MISC_PAD_CTRL:
+ case IAR_BSM_CTRL:
+ case IAR_RNG:
+ case IAR_RX_WTR_MARK:
+ case IAR_SOFT_RESET:
+ case IAR_TXDELAY:
+ case IAR_ACKDELAY:
+ case IAR_CORR_NVAL:
+ case IAR_ANT_AGC_CTRL:
+ case IAR_AGC_THR1:
+ case IAR_AGC_THR2:
+ case IAR_PA_CAL:
+ case IAR_ATT_RSSI1:
+ case IAR_ATT_RSSI2:
+ case IAR_RSSI_OFFSET:
+ case IAR_XTAL_CTRL:
+ case IAR_CHF_PMA_GAIN:
+ case IAR_CHF_IBUF:
+ case IAR_CHF_QBUF:
+ case IAR_CHF_IRIN:
+ case IAR_CHF_QRIN:
+ case IAR_CHF_IL:
+ case IAR_CHF_QL:
+ case IAR_CHF_CC1:
+ case IAR_CHF_CCL:
+ case IAR_CHF_CC2:
+ case IAR_CHF_IROUT:
+ case IAR_CHF_QROUT:
+ case IAR_PA_TUNING:
+ case IAR_VCO_CTRL1:
+ case IAR_VCO_CTRL2:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool
+mcr20a_iar_readable(struct device *dev, unsigned int reg)
+{
+ bool rc;
+
+ /* all writeable are also readable */
+ rc = mcr20a_iar_writeable(dev, reg);
+ if (rc)
+ return rc;
+
+ /* readonly regs */
+ switch (reg) {
+ case IAR_PART_ID:
+ case IAR_DUAL_PAN_STS:
+ case IAR_RX_BYTE_COUNT:
+ case IAR_FILTERFAIL_CODE1:
+ case IAR_FILTERFAIL_CODE2:
+ case IAR_RSSI:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool
+mcr20a_iar_volatile(struct device *dev, unsigned int reg)
+{
+	/* can be changed during runtime */
+ switch (reg) {
+ case IAR_DUAL_PAN_STS:
+ case IAR_RX_BYTE_COUNT:
+ case IAR_FILTERFAIL_CODE1:
+ case IAR_FILTERFAIL_CODE2:
+ case IAR_RSSI:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config mcr20a_iar_regmap = {
+ .name = "mcr20a_iar",
+ .reg_bits = 16,
+ .val_bits = 8,
+ .write_flag_mask = REGISTER_ACCESS | REGISTER_WRITE | IAR_INDEX,
+ .read_flag_mask = REGISTER_ACCESS | REGISTER_READ | IAR_INDEX,
+ .cache_type = REGCACHE_RBTREE,
+ .writeable_reg = mcr20a_iar_writeable,
+ .readable_reg = mcr20a_iar_readable,
+ .volatile_reg = mcr20a_iar_volatile,
+ .fast_io = true,
+};
+
+struct mcr20a_local {
+ struct spi_device *spi;
+
+ struct ieee802154_hw *hw;
+ struct mcr20a_platform_data *pdata;
+ struct regmap *regmap_dar;
+ struct regmap *regmap_iar;
+
+ u8 *buf;
+
+ bool is_tx;
+
+ /* for writing tx buffer */
+ struct spi_message tx_buf_msg;
+ u8 tx_header[1];
+ /* burst buffer write command */
+ struct spi_transfer tx_xfer_header;
+ u8 tx_len[1];
+ /* len of tx packet */
+ struct spi_transfer tx_xfer_len;
+ /* data of tx packet */
+ struct spi_transfer tx_xfer_buf;
+ struct sk_buff *tx_skb;
+
+	/* for reading the rx fifo frame length */
+ struct spi_message reg_msg;
+ u8 reg_cmd[1];
+ u8 reg_data[MCR20A_IRQSTS_NUM];
+ struct spi_transfer reg_xfer_cmd;
+ struct spi_transfer reg_xfer_data;
+
+ /* receive handling */
+ struct spi_message rx_buf_msg;
+ u8 rx_header[1];
+ struct spi_transfer rx_xfer_header;
+ u8 rx_lqi[1];
+ struct spi_transfer rx_xfer_lqi;
+ u8 rx_buf[MCR20A_MAX_BUF];
+ struct spi_transfer rx_xfer_buf;
+
+ /* isr handling for reading intstat */
+ struct spi_message irq_msg;
+ u8 irq_header[1];
+ u8 irq_data[MCR20A_IRQSTS_NUM];
+ struct spi_transfer irq_xfer_data;
+ struct spi_transfer irq_xfer_header;
+};
+
+static void
+mcr20a_write_tx_buf_complete(void *context)
+{
+ struct mcr20a_local *lp = context;
+ int ret;
+
+ dev_dbg(printdev(lp), "%s\n", __func__);
+
+ lp->reg_msg.complete = NULL;
+ lp->reg_cmd[0] = MCR20A_WRITE_REG(DAR_PHY_CTRL1);
+ lp->reg_data[0] = MCR20A_XCVSEQ_TX;
+ lp->reg_xfer_data.len = 1;
+
+ ret = spi_async(lp->spi, &lp->reg_msg);
+ if (ret)
+ dev_err(printdev(lp), "failed to set SEQ TX\n");
+}
+
+static int
+mcr20a_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
+{
+ struct mcr20a_local *lp = hw->priv;
+
+ dev_dbg(printdev(lp), "%s\n", __func__);
+
+ lp->tx_skb = skb;
+
+ print_hex_dump_debug("mcr20a tx: ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb->len, 0);
+
+ lp->is_tx = 1;
+
+ lp->reg_msg.complete = NULL;
+ lp->reg_cmd[0] = MCR20A_WRITE_REG(DAR_PHY_CTRL1);
+ lp->reg_data[0] = MCR20A_XCVSEQ_IDLE;
+ lp->reg_xfer_data.len = 1;
+
+ return spi_async(lp->spi, &lp->reg_msg);
+}
+
+static int
+mcr20a_ed(struct ieee802154_hw *hw, u8 *level)
+{
+ WARN_ON(!level);
+ *level = 0xbe;
+ return 0;
+}
+
+static int
+mcr20a_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
+{
+ struct mcr20a_local *lp = hw->priv;
+ int ret;
+
+ dev_dbg(printdev(lp), "%s\n", __func__);
+
+	/* frequency = ((PLL_INT+64) + (PLL_FRAC/65536)) * 32 MHz */
+ ret = regmap_write(lp->regmap_dar, DAR_PLL_INT0, PLL_INT[channel - 11]);
+ if (ret)
+ return ret;
+ ret = regmap_write(lp->regmap_dar, DAR_PLL_FRAC0_LSB, 0x00);
+ if (ret)
+ return ret;
+ ret = regmap_write(lp->regmap_dar, DAR_PLL_FRAC0_MSB,
+ PLL_FRAC[channel - 11]);
+ if (ret)
+ return ret;
+
+ return 0;
+}
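Checking the comment's formula against channel 11: PLL_INT[0] = 0x0B = 11, and PLL_FRAC[0] = 0x28 lands in the FRAC0 MSB with a zero LSB, so PLL_FRAC = 0x2800 = 10240; ((11 + 64) + 10240/65536) * 32 MHz = (75 + 0.15625) * 32 = 2405.0 MHz, which matches the /* 2405 */ annotation in the PLL tables above.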
+
+static int
+mcr20a_start(struct ieee802154_hw *hw)
+{
+ struct mcr20a_local *lp = hw->priv;
+ int ret;
+
+ dev_dbg(printdev(lp), "%s\n", __func__);
+
+ /* No slotted operation */
+ dev_dbg(printdev(lp), "no slotted operation\n");
+ ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
+ DAR_PHY_CTRL1_SLOTTED, 0x0);
+
+ /* enable irq */
+ enable_irq(lp->spi->irq);
+
+ /* Unmask SEQ interrupt */
+ ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL2,
+ DAR_PHY_CTRL2_SEQMSK, 0x0);
+
+ /* Start the RX sequence */
+ dev_dbg(printdev(lp), "start the RX sequence\n");
+ ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
+ DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX);
+
+ return 0;
+}
+
+static void
+mcr20a_stop(struct ieee802154_hw *hw)
+{
+ struct mcr20a_local *lp = hw->priv;
+
+ dev_dbg(printdev(lp), "%s\n", __func__);
+
+ /* stop all running sequence */
+ regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
+ DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_IDLE);
+
+ /* disable irq */
+ disable_irq(lp->spi->irq);
+}
+
+static int
+mcr20a_set_hw_addr_filt(struct ieee802154_hw *hw,
+ struct ieee802154_hw_addr_filt *filt,
+ unsigned long changed)
+{
+ struct mcr20a_local *lp = hw->priv;
+
+ dev_dbg(printdev(lp), "%s\n", __func__);
+
+ if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
+ u16 addr = le16_to_cpu(filt->short_addr);
+
+ regmap_write(lp->regmap_iar, IAR_MACSHORTADDRS0_LSB, addr);
+ regmap_write(lp->regmap_iar, IAR_MACSHORTADDRS0_MSB, addr >> 8);
+ }
+
+ if (changed & IEEE802154_AFILT_PANID_CHANGED) {
+ u16 pan = le16_to_cpu(filt->pan_id);
+
+ regmap_write(lp->regmap_iar, IAR_MACPANID0_LSB, pan);
+ regmap_write(lp->regmap_iar, IAR_MACPANID0_MSB, pan >> 8);
+ }
+
+ if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
+ u8 addr[8], i;
+
+ memcpy(addr, &filt->ieee_addr, 8);
+ for (i = 0; i < 8; i++)
+ regmap_write(lp->regmap_iar,
+ IAR_MACLONGADDRS0_0 + i, addr[i]);
+ }
+
+ if (changed & IEEE802154_AFILT_PANC_CHANGED) {
+ if (filt->pan_coord) {
+ regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
+ DAR_PHY_CTRL4_PANCORDNTR0, 0x10);
+ } else {
+ regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
+ DAR_PHY_CTRL4_PANCORDNTR0, 0x00);
+ }
+ }
+
+ return 0;
+}
+
+/* -30 dBm to 10 dBm */
+#define MCR20A_MAX_TX_POWERS 0x14
+static const s32 mcr20a_powers[MCR20A_MAX_TX_POWERS + 1] = {
+ -3000, -2800, -2600, -2400, -2200, -2000, -1800, -1600, -1400,
+ -1200, -1000, -800, -600, -400, -200, 0, 200, 400, 600, 800, 1000
+};
+
+static int
+mcr20a_set_txpower(struct ieee802154_hw *hw, s32 mbm)
+{
+ struct mcr20a_local *lp = hw->priv;
+ u32 i;
+
+ dev_dbg(printdev(lp), "%s(%d)\n", __func__, mbm);
+
+ for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) {
+ if (lp->hw->phy->supported.tx_powers[i] == mbm)
+ return regmap_write(lp->regmap_dar, DAR_PA_PWR,
+ ((i + 8) & 0x1F));
+ }
+
+ return -EINVAL;
+}
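The table runs in 200 mbm (2 dB) steps, so a request of 0 mbm (0 dBm) matches index 15 and DAR_PA_PWR is written with (15 + 8) & 0x1F = 23 (0x17), while the -3000 mbm floor maps to index 0 and a register value of 8.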
+
+#define MCR20A_MAX_ED_LEVELS MCR20A_MIN_CCA_THRESHOLD
+static s32 mcr20a_ed_levels[MCR20A_MAX_ED_LEVELS + 1];
+
+static int
+mcr20a_set_cca_mode(struct ieee802154_hw *hw,
+ const struct wpan_phy_cca *cca)
+{
+ struct mcr20a_local *lp = hw->priv;
+ unsigned int cca_mode = 0xff;
+ bool cca_mode_and = false;
+ int ret;
+
+ dev_dbg(printdev(lp), "%s\n", __func__);
+
+ /* mapping 802.15.4 to driver spec */
+ switch (cca->mode) {
+ case NL802154_CCA_ENERGY:
+ cca_mode = MCR20A_CCA_MODE1;
+ break;
+ case NL802154_CCA_CARRIER:
+ cca_mode = MCR20A_CCA_MODE2;
+ break;
+ case NL802154_CCA_ENERGY_CARRIER:
+ switch (cca->opt) {
+ case NL802154_CCA_OPT_ENERGY_CARRIER_AND:
+ cca_mode = MCR20A_CCA_MODE3;
+ cca_mode_and = true;
+ break;
+ case NL802154_CCA_OPT_ENERGY_CARRIER_OR:
+ cca_mode = MCR20A_CCA_MODE3;
+ cca_mode_and = false;
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
+ DAR_PHY_CTRL4_CCATYPE_MASK,
+ cca_mode << DAR_PHY_CTRL4_CCATYPE_SHIFT);
+ if (ret < 0)
+ return ret;
+
+ if (cca_mode == MCR20A_CCA_MODE3) {
+ if (cca_mode_and) {
+ ret = regmap_update_bits(lp->regmap_iar, IAR_CCA_CTRL,
+ IAR_CCA_CTRL_CCA3_AND_NOT_OR,
+ 0x08);
+ } else {
+ ret = regmap_update_bits(lp->regmap_iar,
+ IAR_CCA_CTRL,
+ IAR_CCA_CTRL_CCA3_AND_NOT_OR,
+ 0x00);
+ }
+ if (ret < 0)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int
+mcr20a_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
+{
+ struct mcr20a_local *lp = hw->priv;
+ u32 i;
+
+ dev_dbg(printdev(lp), "%s\n", __func__);
+
+ for (i = 0; i < hw->phy->supported.cca_ed_levels_size; i++) {
+ if (hw->phy->supported.cca_ed_levels[i] == mbm)
+ return regmap_write(lp->regmap_iar, IAR_CCA1_THRESH, i);
+ }
+
+ return 0;
+}
+
+static int
+mcr20a_set_promiscuous_mode(struct ieee802154_hw *hw, const bool on)
+{
+ struct mcr20a_local *lp = hw->priv;
+ int ret;
+ u8 rx_frame_filter_reg = 0x0;
+
+ dev_dbg(printdev(lp), "%s(%d)\n", __func__, on);
+
+ if (on) {
+		/* All frame types accepted */
+ rx_frame_filter_reg &= ~(IAR_RX_FRAME_FLT_FRM_VER);
+ rx_frame_filter_reg |= (IAR_RX_FRAME_FLT_ACK_FT |
+ IAR_RX_FRAME_FLT_NS_FT);
+
+ ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
+ DAR_PHY_CTRL4_PROMISCUOUS,
+ DAR_PHY_CTRL4_PROMISCUOUS);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER,
+ rx_frame_filter_reg);
+ if (ret < 0)
+ return ret;
+ } else {
+ ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL4,
+ DAR_PHY_CTRL4_PROMISCUOUS, 0x0);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER,
+ IAR_RX_FRAME_FLT_FRM_VER |
+ IAR_RX_FRAME_FLT_BEACON_FT |
+ IAR_RX_FRAME_FLT_DATA_FT |
+ IAR_RX_FRAME_FLT_CMD_FT);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct ieee802154_ops mcr20a_hw_ops = {
+ .owner = THIS_MODULE,
+ .xmit_async = mcr20a_xmit,
+ .ed = mcr20a_ed,
+ .set_channel = mcr20a_set_channel,
+ .start = mcr20a_start,
+ .stop = mcr20a_stop,
+ .set_hw_addr_filt = mcr20a_set_hw_addr_filt,
+ .set_txpower = mcr20a_set_txpower,
+ .set_cca_mode = mcr20a_set_cca_mode,
+ .set_cca_ed_level = mcr20a_set_cca_ed_level,
+ .set_promiscuous_mode = mcr20a_set_promiscuous_mode,
+};
+
+static int
+mcr20a_request_rx(struct mcr20a_local *lp)
+{
+ dev_dbg(printdev(lp), "%s\n", __func__);
+
+ /* Start the RX sequence */
+ regmap_update_bits_async(lp->regmap_dar, DAR_PHY_CTRL1,
+ DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_RX);
+
+ return 0;
+}
+
+static void
+mcr20a_handle_rx_read_buf_complete(void *context)
+{
+ struct mcr20a_local *lp = context;
+ u8 len = lp->reg_data[0] & DAR_RX_FRAME_LENGTH_MASK;
+ struct sk_buff *skb;
+
+ dev_dbg(printdev(lp), "%s\n", __func__);
+
+ dev_dbg(printdev(lp), "RX is done\n");
+
+ if (!ieee802154_is_valid_psdu_len(len)) {
+ dev_vdbg(&lp->spi->dev, "corrupted frame received\n");
+ len = IEEE802154_MTU;
+ }
+
+	len = len - 2; /* get rid of the frame check sequence (FCS) */
+
+ skb = dev_alloc_skb(len);
+ if (!skb)
+ return;
+
+ memcpy(skb_put(skb, len), lp->rx_buf, len);
+ ieee802154_rx_irqsafe(lp->hw, skb, lp->rx_lqi[0]);
+
+ print_hex_dump_debug("mcr20a rx: ", DUMP_PREFIX_OFFSET, 16, 1,
+ lp->rx_buf, len, 0);
+ pr_debug("mcr20a rx: lqi: %02hhx\n", lp->rx_lqi[0]);
+
+ /* start RX sequence */
+ mcr20a_request_rx(lp);
+}
+
+static void
+mcr20a_handle_rx_read_len_complete(void *context)
+{
+ struct mcr20a_local *lp = context;
+ u8 len;
+ int ret;
+
+ dev_dbg(printdev(lp), "%s\n", __func__);
+
+ /* get the length of received frame */
+ len = lp->reg_data[0] & DAR_RX_FRAME_LENGTH_MASK;
+ dev_dbg(printdev(lp), "frame len : %d\n", len);
+
+ /* prepare to read the rx buf */
+ lp->rx_buf_msg.complete = mcr20a_handle_rx_read_buf_complete;
+ lp->rx_header[0] = MCR20A_BURST_READ_PACKET_BUF;
+ lp->rx_xfer_buf.len = len;
+
+ ret = spi_async(lp->spi, &lp->rx_buf_msg);
+ if (ret)
+		dev_err(printdev(lp), "failed to read rx buffer\n");
+}
+
+static int
+mcr20a_handle_rx(struct mcr20a_local *lp)
+{
+ dev_dbg(printdev(lp), "%s\n", __func__);
+ lp->reg_msg.complete = mcr20a_handle_rx_read_len_complete;
+ lp->reg_cmd[0] = MCR20A_READ_REG(DAR_RX_FRM_LEN);
+ lp->reg_xfer_data.len = 1;
+
+ return spi_async(lp->spi, &lp->reg_msg);
+}
+
+static int
+mcr20a_handle_tx_complete(struct mcr20a_local *lp)
+{
+ dev_dbg(printdev(lp), "%s\n", __func__);
+
+ ieee802154_xmit_complete(lp->hw, lp->tx_skb, false);
+
+ return mcr20a_request_rx(lp);
+}
+
+static int
+mcr20a_handle_tx(struct mcr20a_local *lp)
+{
+ int ret;
+
+ dev_dbg(printdev(lp), "%s\n", __func__);
+
+ /* write tx buffer */
+ lp->tx_header[0] = MCR20A_BURST_WRITE_PACKET_BUF;
+ /* add 2 bytes of FCS */
+ lp->tx_len[0] = lp->tx_skb->len + 2;
+ lp->tx_xfer_buf.tx_buf = lp->tx_skb->data;
+ /* add 1 byte psduLength */
+ lp->tx_xfer_buf.len = lp->tx_skb->len + 1;
+
+ ret = spi_async(lp->spi, &lp->tx_buf_msg);
+ if (ret) {
+ dev_err(printdev(lp), "SPI write Failed for TX buf\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+mcr20a_irq_clean_complete(void *context)
+{
+ struct mcr20a_local *lp = context;
+ u8 seq_state = lp->irq_data[DAR_IRQ_STS1] & DAR_PHY_CTRL1_XCVSEQ_MASK;
+
+ dev_dbg(printdev(lp), "%s\n", __func__);
+
+ enable_irq(lp->spi->irq);
+
+ dev_dbg(printdev(lp), "IRQ STA1 (%02x) STA2 (%02x)\n",
+ lp->irq_data[DAR_IRQ_STS1], lp->irq_data[DAR_IRQ_STS2]);
+
+ switch (seq_state) {
+ /* TX IRQ, RX IRQ and SEQ IRQ */
+ case (0x03):
+ if (lp->is_tx) {
+ lp->is_tx = 0;
+ dev_dbg(printdev(lp), "TX is done. No ACK\n");
+ mcr20a_handle_tx_complete(lp);
+ }
+ break;
+ case (0x05):
+ /* rx is starting */
+ dev_dbg(printdev(lp), "RX is starting\n");
+ mcr20a_handle_rx(lp);
+ break;
+ case (0x07):
+ if (lp->is_tx) {
+ /* tx is done */
+ lp->is_tx = 0;
+ dev_dbg(printdev(lp), "TX is done. Get ACK\n");
+ mcr20a_handle_tx_complete(lp);
+ } else {
+ /* rx is starting */
+ dev_dbg(printdev(lp), "RX is starting\n");
+ mcr20a_handle_rx(lp);
+ }
+ break;
+ case (0x01):
+ if (lp->is_tx) {
+ dev_dbg(printdev(lp), "TX is starting\n");
+ mcr20a_handle_tx(lp);
+ } else {
+			dev_dbg(printdev(lp), "MCR20A is stopped\n");
+ }
+ break;
+ }
+}
+
+static void mcr20a_irq_status_complete(void *context)
+{
+ int ret;
+ struct mcr20a_local *lp = context;
+
+ dev_dbg(printdev(lp), "%s\n", __func__);
+ regmap_update_bits_async(lp->regmap_dar, DAR_PHY_CTRL1,
+ DAR_PHY_CTRL1_XCVSEQ_MASK, MCR20A_XCVSEQ_IDLE);
+
+ lp->reg_msg.complete = mcr20a_irq_clean_complete;
+ lp->reg_cmd[0] = MCR20A_WRITE_REG(DAR_IRQ_STS1);
+ memcpy(lp->reg_data, lp->irq_data, MCR20A_IRQSTS_NUM);
+ lp->reg_xfer_data.len = MCR20A_IRQSTS_NUM;
+
+ ret = spi_async(lp->spi, &lp->reg_msg);
+
+ if (ret)
+ dev_err(printdev(lp), "failed to clean irq status\n");
+}
+
+static irqreturn_t mcr20a_irq_isr(int irq, void *data)
+{
+ struct mcr20a_local *lp = data;
+ int ret;
+
+ disable_irq_nosync(irq);
+
+ lp->irq_header[0] = MCR20A_READ_REG(DAR_IRQ_STS1);
+ /* read IRQSTSx */
+ ret = spi_async(lp->spi, &lp->irq_msg);
+ if (ret) {
+ enable_irq(irq);
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int mcr20a_get_platform_data(struct spi_device *spi,
+ struct mcr20a_platform_data *pdata)
+{
+ int ret = 0;
+
+ if (!spi->dev.of_node)
+ return -EINVAL;
+
+ pdata->rst_gpio = of_get_named_gpio(spi->dev.of_node, "rst_b-gpio", 0);
+ dev_dbg(&spi->dev, "rst_b-gpio: %d\n", pdata->rst_gpio);
+
+ return ret;
+}
+
+static void mcr20a_hw_setup(struct mcr20a_local *lp)
+{
+ u8 i;
+ struct ieee802154_hw *hw = lp->hw;
+ struct wpan_phy *phy = lp->hw->phy;
+
+ dev_dbg(printdev(lp), "%s\n", __func__);
+
+ phy->symbol_duration = 16;
+ phy->lifs_period = 40;
+ phy->sifs_period = 12;
+
+ hw->flags = IEEE802154_HW_TX_OMIT_CKSUM |
+ IEEE802154_HW_AFILT |
+ IEEE802154_HW_PROMISCUOUS;
+
+ phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL |
+ WPAN_PHY_FLAG_CCA_MODE;
+
+ phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) |
+ BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER);
+ phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) |
+ BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR);
+
+	/* initialize cca_ed_levels */
+ for (i = MCR20A_MAX_CCA_THRESHOLD; i < MCR20A_MIN_CCA_THRESHOLD + 1;
+ ++i) {
+ mcr20a_ed_levels[i] = -i * 100;
+ }
+
+ phy->supported.cca_ed_levels = mcr20a_ed_levels;
+ phy->supported.cca_ed_levels_size = ARRAY_SIZE(mcr20a_ed_levels);
+
+ phy->cca.mode = NL802154_CCA_ENERGY;
+
+ phy->supported.channels[0] = MCR20A_VALID_CHANNELS;
+ phy->current_page = 0;
+ /* MCR20A default reset value */
+ phy->current_channel = 20;
+ phy->symbol_duration = 16;
+ phy->supported.tx_powers = mcr20a_powers;
+ phy->supported.tx_powers_size = ARRAY_SIZE(mcr20a_powers);
+ phy->cca_ed_level = phy->supported.cca_ed_levels[75];
+ phy->transmit_power = phy->supported.tx_powers[0x0F];
+}
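The initialization loop above fills mcr20a_ed_levels[i] = -i * 100 for i = 0..0x6E, i.e. 0 down to -11000 mbm (0 to -110 dBm) in 1 dB steps, and mcr20a_set_cca_ed_level() writes the matching index straight into IAR_CCA1_THRESH. The default picked here, levels[75] = -7500 mbm (-75 dBm), is consistent with the 0x4B (= 75) threshold that mcr20a_phy_init() programs later in this file.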
+
+static void
+mcr20a_setup_tx_spi_messages(struct mcr20a_local *lp)
+{
+ spi_message_init(&lp->tx_buf_msg);
+ lp->tx_buf_msg.context = lp;
+ lp->tx_buf_msg.complete = mcr20a_write_tx_buf_complete;
+
+ lp->tx_xfer_header.len = 1;
+ lp->tx_xfer_header.tx_buf = lp->tx_header;
+
+ lp->tx_xfer_len.len = 1;
+ lp->tx_xfer_len.tx_buf = lp->tx_len;
+
+ spi_message_add_tail(&lp->tx_xfer_header, &lp->tx_buf_msg);
+ spi_message_add_tail(&lp->tx_xfer_len, &lp->tx_buf_msg);
+ spi_message_add_tail(&lp->tx_xfer_buf, &lp->tx_buf_msg);
+}
+
+static void
+mcr20a_setup_rx_spi_messages(struct mcr20a_local *lp)
+{
+ spi_message_init(&lp->reg_msg);
+ lp->reg_msg.context = lp;
+
+ lp->reg_xfer_cmd.len = 1;
+ lp->reg_xfer_cmd.tx_buf = lp->reg_cmd;
+ lp->reg_xfer_cmd.rx_buf = lp->reg_cmd;
+
+ lp->reg_xfer_data.rx_buf = lp->reg_data;
+ lp->reg_xfer_data.tx_buf = lp->reg_data;
+
+ spi_message_add_tail(&lp->reg_xfer_cmd, &lp->reg_msg);
+ spi_message_add_tail(&lp->reg_xfer_data, &lp->reg_msg);
+
+ spi_message_init(&lp->rx_buf_msg);
+ lp->rx_buf_msg.context = lp;
+ lp->rx_buf_msg.complete = mcr20a_handle_rx_read_buf_complete;
+ lp->rx_xfer_header.len = 1;
+ lp->rx_xfer_header.tx_buf = lp->rx_header;
+ lp->rx_xfer_header.rx_buf = lp->rx_header;
+
+ lp->rx_xfer_buf.rx_buf = lp->rx_buf;
+
+ lp->rx_xfer_lqi.len = 1;
+ lp->rx_xfer_lqi.rx_buf = lp->rx_lqi;
+
+ spi_message_add_tail(&lp->rx_xfer_header, &lp->rx_buf_msg);
+ spi_message_add_tail(&lp->rx_xfer_buf, &lp->rx_buf_msg);
+ spi_message_add_tail(&lp->rx_xfer_lqi, &lp->rx_buf_msg);
+}
+
+static void
+mcr20a_setup_irq_spi_messages(struct mcr20a_local *lp)
+{
+ spi_message_init(&lp->irq_msg);
+ lp->irq_msg.context = lp;
+ lp->irq_msg.complete = mcr20a_irq_status_complete;
+ lp->irq_xfer_header.len = 1;
+ lp->irq_xfer_header.tx_buf = lp->irq_header;
+ lp->irq_xfer_header.rx_buf = lp->irq_header;
+
+ lp->irq_xfer_data.len = MCR20A_IRQSTS_NUM;
+ lp->irq_xfer_data.rx_buf = lp->irq_data;
+
+ spi_message_add_tail(&lp->irq_xfer_header, &lp->irq_msg);
+ spi_message_add_tail(&lp->irq_xfer_data, &lp->irq_msg);
+}
+
+static int
+mcr20a_phy_init(struct mcr20a_local *lp)
+{
+ u8 index;
+ unsigned int phy_reg = 0;
+ int ret;
+
+ dev_dbg(printdev(lp), "%s\n", __func__);
+
+ /* Disable Tristate on COCO MISO for SPI reads */
+ ret = regmap_write(lp->regmap_iar, IAR_MISC_PAD_CTRL, 0x02);
+ if (ret)
+ goto err_ret;
+
+ /* Clear all PP IRQ bits in IRQSTS1 to avoid unexpected interrupts
+ * immediately after init
+ */
+ ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS1, 0xEF);
+ if (ret)
+ goto err_ret;
+
+ /* Clear all PP IRQ bits in IRQSTS2 */
+ ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS2,
+ DAR_IRQSTS2_ASM_IRQ | DAR_IRQSTS2_PB_ERR_IRQ |
+ DAR_IRQSTS2_WAKE_IRQ);
+ if (ret)
+ goto err_ret;
+
+ /* Disable all timer interrupts */
+ ret = regmap_write(lp->regmap_dar, DAR_IRQ_STS3, 0xFF);
+ if (ret)
+ goto err_ret;
+
+ /* PHY_CTRL1 : default HW settings + AUTOACK enabled */
+ ret = regmap_update_bits(lp->regmap_dar, DAR_PHY_CTRL1,
+ DAR_PHY_CTRL1_AUTOACK, DAR_PHY_CTRL1_AUTOACK);
+
+ /* PHY_CTRL2 : disable all interrupts */
+ ret = regmap_write(lp->regmap_dar, DAR_PHY_CTRL2, 0xFF);
+ if (ret)
+ goto err_ret;
+
+ /* PHY_CTRL3 : disable all timers and remaining interrupts */
+ ret = regmap_write(lp->regmap_dar, DAR_PHY_CTRL3,
+ DAR_PHY_CTRL3_ASM_MSK | DAR_PHY_CTRL3_PB_ERR_MSK |
+ DAR_PHY_CTRL3_WAKE_MSK);
+ if (ret)
+ goto err_ret;
+
+ /* SRC_CTRL : enable Acknowledge Frame Pending and
+ * Source Address Matching Enable
+ */
+ ret = regmap_write(lp->regmap_dar, DAR_SRC_CTRL,
+ DAR_SRC_CTRL_ACK_FRM_PND |
+ (DAR_SRC_CTRL_INDEX << DAR_SRC_CTRL_INDEX_SHIFT));
+ if (ret)
+ goto err_ret;
+
+ /* RX_FRAME_FILTER */
+ /* FRM_VER[1:0] = b11. Accept FrameVersion 0 and 1 packets */
+ ret = regmap_write(lp->regmap_iar, IAR_RX_FRAME_FILTER,
+ IAR_RX_FRAME_FLT_FRM_VER |
+ IAR_RX_FRAME_FLT_BEACON_FT |
+ IAR_RX_FRAME_FLT_DATA_FT |
+ IAR_RX_FRAME_FLT_CMD_FT);
+ if (ret)
+ goto err_ret;
+
+ dev_info(printdev(lp), "MCR20A DAR overwrites version: 0x%02x\n",
+ MCR20A_OVERWRITE_VERSION);
+
+ /* Overwrites direct registers */
+ ret = regmap_write(lp->regmap_dar, DAR_OVERWRITE_VER,
+ MCR20A_OVERWRITE_VERSION);
+ if (ret)
+ goto err_ret;
+
+ /* Overwrites indirect registers */
+ ret = regmap_multi_reg_write(lp->regmap_iar, mar20a_iar_overwrites,
+ ARRAY_SIZE(mar20a_iar_overwrites));
+ if (ret)
+ goto err_ret;
+
+ /* Clear HW indirect queue */
+ dev_dbg(printdev(lp), "clear HW indirect queue\n");
+ for (index = 0; index < MCR20A_PHY_INDIRECT_QUEUE_SIZE; index++) {
+ phy_reg = (u8)(((index & DAR_SRC_CTRL_INDEX) <<
+ DAR_SRC_CTRL_INDEX_SHIFT)
+ | (DAR_SRC_CTRL_SRCADDR_EN)
+ | (DAR_SRC_CTRL_INDEX_DISABLE));
+ ret = regmap_write(lp->regmap_dar, DAR_SRC_CTRL, phy_reg);
+ if (ret)
+ goto err_ret;
+ phy_reg = 0;
+ }
+
+ /* Assign HW Indirect hash table to PAN0 */
+ ret = regmap_read(lp->regmap_iar, IAR_DUAL_PAN_CTRL, &phy_reg);
+ if (ret)
+ goto err_ret;
+
+ /* Clear current lvl */
+ phy_reg &= ~IAR_DUAL_PAN_CTRL_DUAL_PAN_SAM_LVL_MSK;
+
+ /* Set new lvl */
+ phy_reg |= MCR20A_PHY_INDIRECT_QUEUE_SIZE <<
+ IAR_DUAL_PAN_CTRL_DUAL_PAN_SAM_LVL_SHIFT;
+ ret = regmap_write(lp->regmap_iar, IAR_DUAL_PAN_CTRL, phy_reg);
+ if (ret)
+ goto err_ret;
+
+ /* Set CCA threshold to -75 dBm */
+ ret = regmap_write(lp->regmap_iar, IAR_CCA1_THRESH, 0x4B);
+ if (ret)
+ goto err_ret;
+
+ /* Set prescaler to obtain 1 symbol (16us) timebase */
+ ret = regmap_write(lp->regmap_iar, IAR_TMR_PRESCALE, 0x05);
+ if (ret)
+ goto err_ret;
+
+ /* Enable autodoze mode. */
+ ret = regmap_update_bits(lp->regmap_dar, DAR_PWR_MODES,
+ DAR_PWR_MODES_AUTODOZE,
+ DAR_PWR_MODES_AUTODOZE);
+ if (ret)
+ goto err_ret;
+
+ /* Disable clk_out */
+ ret = regmap_update_bits(lp->regmap_dar, DAR_CLK_OUT_CTRL,
+ DAR_CLK_OUT_CTRL_EN, 0x0);
+ if (ret)
+ goto err_ret;
+
+ return 0;
+
+err_ret:
+ return ret;
+}
+
+static int
+mcr20a_probe(struct spi_device *spi)
+{
+ struct ieee802154_hw *hw;
+ struct mcr20a_local *lp;
+ struct mcr20a_platform_data *pdata;
+ int irq_type;
+ int ret = -ENOMEM;
+
+ dev_dbg(&spi->dev, "%s\n", __func__);
+
+ if (!spi->irq) {
+ dev_err(&spi->dev, "no IRQ specified\n");
+ return -EINVAL;
+ }
+
+ pdata = devm_kzalloc(&spi->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ /* set mcr20a platform data */
+ ret = mcr20a_get_platform_data(spi, pdata);
+ if (ret < 0) {
+ dev_crit(&spi->dev, "mcr20a_get_platform_data failed.\n");
+ return ret;
+ }
+
+ /* init reset gpio */
+ if (gpio_is_valid(pdata->rst_gpio)) {
+ ret = devm_gpio_request_one(&spi->dev, pdata->rst_gpio,
+ GPIOF_OUT_INIT_HIGH, "reset");
+ if (ret)
+ return ret;
+ }
+
+ /* reset mcr20a */
+ if (gpio_is_valid(pdata->rst_gpio)) {
+ usleep_range(10, 20);
+ gpio_set_value_cansleep(pdata->rst_gpio, 0);
+ usleep_range(10, 20);
+ gpio_set_value_cansleep(pdata->rst_gpio, 1);
+ usleep_range(120, 240);
+ }
+
+ /* allocate ieee802154_hw and private data */
+ hw = ieee802154_alloc_hw(sizeof(*lp), &mcr20a_hw_ops);
+ if (!hw) {
+ dev_crit(&spi->dev, "ieee802154_alloc_hw failed\n");
+ return -ENOMEM;
+ }
+
+ /* init mcr20a local data */
+ lp = hw->priv;
+ lp->hw = hw;
+ lp->spi = spi;
+ lp->spi->dev.platform_data = pdata;
+ lp->pdata = pdata;
+
+ /* init ieee802154_hw */
+ hw->parent = &spi->dev;
+ ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
+
+ /* init buf */
+ lp->buf = devm_kzalloc(&spi->dev, SPI_COMMAND_BUFFER, GFP_KERNEL);
+
+ if (!lp->buf)
+ return -ENOMEM;
+
+ mcr20a_setup_tx_spi_messages(lp);
+ mcr20a_setup_rx_spi_messages(lp);
+ mcr20a_setup_irq_spi_messages(lp);
+
+ /* setup regmap */
+ lp->regmap_dar = devm_regmap_init_spi(spi, &mcr20a_dar_regmap);
+ if (IS_ERR(lp->regmap_dar)) {
+ ret = PTR_ERR(lp->regmap_dar);
+ dev_err(&spi->dev, "Failed to allocate dar map: %d\n",
+ ret);
+ goto free_dev;
+ }
+
+ lp->regmap_iar = devm_regmap_init_spi(spi, &mcr20a_iar_regmap);
+ if (IS_ERR(lp->regmap_iar)) {
+ ret = PTR_ERR(lp->regmap_iar);
+ dev_err(&spi->dev, "Failed to allocate iar map: %d\n", ret);
+ goto free_dev;
+ }
+
+ mcr20a_hw_setup(lp);
+
+ spi_set_drvdata(spi, lp);
+
+ ret = mcr20a_phy_init(lp);
+ if (ret < 0) {
+ dev_crit(&spi->dev, "mcr20a_phy_init failed\n");
+ goto free_dev;
+ }
+
+ irq_type = irq_get_trigger_type(spi->irq);
+ if (!irq_type)
+ irq_type = IRQF_TRIGGER_FALLING;
+
+ ret = devm_request_irq(&spi->dev, spi->irq, mcr20a_irq_isr,
+ irq_type, dev_name(&spi->dev), lp);
+ if (ret) {
+ dev_err(&spi->dev, "could not request_irq for mcr20a\n");
+ ret = -ENODEV;
+ goto free_dev;
+ }
+
+ /* Keep the IRQ disabled until the hardware has been started */
+ disable_irq(spi->irq);
+
+ ret = ieee802154_register_hw(hw);
+ if (ret) {
+ dev_crit(&spi->dev, "ieee802154_register_hw failed\n");
+ goto free_dev;
+ }
+
+ return ret;
+
+free_dev:
+ ieee802154_free_hw(lp->hw);
+
+ return ret;
+}
+
+static int mcr20a_remove(struct spi_device *spi)
+{
+ struct mcr20a_local *lp = spi_get_drvdata(spi);
+
+ dev_dbg(&spi->dev, "%s\n", __func__);
+
+ ieee802154_unregister_hw(lp->hw);
+ ieee802154_free_hw(lp->hw);
+
+ return 0;
+}
+
+static const struct of_device_id mcr20a_of_match[] = {
+ { .compatible = "nxp,mcr20a", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, mcr20a_of_match);
+
+static const struct spi_device_id mcr20a_device_id[] = {
+ { .name = "mcr20a", },
+ { },
+};
+MODULE_DEVICE_TABLE(spi, mcr20a_device_id);
+
+static struct spi_driver mcr20a_driver = {
+ .id_table = mcr20a_device_id,
+ .driver = {
+ .of_match_table = of_match_ptr(mcr20a_of_match),
+ .name = "mcr20a",
+ },
+ .probe = mcr20a_probe,
+ .remove = mcr20a_remove,
+};
+
+module_spi_driver(mcr20a_driver);
+
+MODULE_DESCRIPTION("MCR20A Transceiver Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Xue Liu <liuxuenetmail@gmail>");
diff --git a/drivers/net/ieee802154/mcr20a.h b/drivers/net/ieee802154/mcr20a.h
new file mode 100644
index 000000000000..6da4fd00b3c5
--- /dev/null
+++ b/drivers/net/ieee802154/mcr20a.h
@@ -0,0 +1,498 @@
+/*
+ * Driver for NXP MCR20A 802.15.4 Wireless-PAN Networking controller
+ *
+ * Copyright (C) 2018 Xue Liu <liuxuenetmail@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef _MCR20A_H
+#define _MCR20A_H
+
+/* Direct Access Register */
+#define DAR_IRQ_STS1 0x00
+#define DAR_IRQ_STS2 0x01
+#define DAR_IRQ_STS3 0x02
+#define DAR_PHY_CTRL1 0x03
+#define DAR_PHY_CTRL2 0x04
+#define DAR_PHY_CTRL3 0x05
+#define DAR_RX_FRM_LEN 0x06
+#define DAR_PHY_CTRL4 0x07
+#define DAR_SRC_CTRL 0x08
+#define DAR_SRC_ADDRS_SUM_LSB 0x09
+#define DAR_SRC_ADDRS_SUM_MSB 0x0A
+#define DAR_CCA1_ED_FNL 0x0B
+#define DAR_EVENT_TMR_LSB 0x0C
+#define DAR_EVENT_TMR_MSB 0x0D
+#define DAR_EVENT_TMR_USB 0x0E
+#define DAR_TIMESTAMP_LSB 0x0F
+#define DAR_TIMESTAMP_MSB 0x10
+#define DAR_TIMESTAMP_USB 0x11
+#define DAR_T3CMP_LSB 0x12
+#define DAR_T3CMP_MSB 0x13
+#define DAR_T3CMP_USB 0x14
+#define DAR_T2PRIMECMP_LSB 0x15
+#define DAR_T2PRIMECMP_MSB 0x16
+#define DAR_T1CMP_LSB 0x17
+#define DAR_T1CMP_MSB 0x18
+#define DAR_T1CMP_USB 0x19
+#define DAR_T2CMP_LSB 0x1A
+#define DAR_T2CMP_MSB 0x1B
+#define DAR_T2CMP_USB 0x1C
+#define DAR_T4CMP_LSB 0x1D
+#define DAR_T4CMP_MSB 0x1E
+#define DAR_T4CMP_USB 0x1F
+#define DAR_PLL_INT0 0x20
+#define DAR_PLL_FRAC0_LSB 0x21
+#define DAR_PLL_FRAC0_MSB 0x22
+#define DAR_PA_PWR 0x23
+#define DAR_SEQ_STATE 0x24
+#define DAR_LQI_VALUE 0x25
+#define DAR_RSSI_CCA_CONT 0x26
+/*------------------ 0x27 */
+#define DAR_ASM_CTRL1 0x28
+#define DAR_ASM_CTRL2 0x29
+#define DAR_ASM_DATA_0 0x2A
+#define DAR_ASM_DATA_1 0x2B
+#define DAR_ASM_DATA_2 0x2C
+#define DAR_ASM_DATA_3 0x2D
+#define DAR_ASM_DATA_4 0x2E
+#define DAR_ASM_DATA_5 0x2F
+#define DAR_ASM_DATA_6 0x30
+#define DAR_ASM_DATA_7 0x31
+#define DAR_ASM_DATA_8 0x32
+#define DAR_ASM_DATA_9 0x33
+#define DAR_ASM_DATA_A 0x34
+#define DAR_ASM_DATA_B 0x35
+#define DAR_ASM_DATA_C 0x36
+#define DAR_ASM_DATA_D 0x37
+#define DAR_ASM_DATA_E 0x38
+#define DAR_ASM_DATA_F 0x39
+/*----------------------- 0x3A */
+#define DAR_OVERWRITE_VER 0x3B
+#define DAR_CLK_OUT_CTRL 0x3C
+#define DAR_PWR_MODES 0x3D
+#define IAR_INDEX 0x3E
+#define IAR_DATA 0x3F
+
+/* Indirect Register Memory */
+#define IAR_PART_ID 0x00
+#define IAR_XTAL_TRIM 0x01
+#define IAR_PMC_LP_TRIM 0x02
+#define IAR_MACPANID0_LSB 0x03
+#define IAR_MACPANID0_MSB 0x04
+#define IAR_MACSHORTADDRS0_LSB 0x05
+#define IAR_MACSHORTADDRS0_MSB 0x06
+#define IAR_MACLONGADDRS0_0 0x07
+#define IAR_MACLONGADDRS0_8 0x08
+#define IAR_MACLONGADDRS0_16 0x09
+#define IAR_MACLONGADDRS0_24 0x0A
+#define IAR_MACLONGADDRS0_32 0x0B
+#define IAR_MACLONGADDRS0_40 0x0C
+#define IAR_MACLONGADDRS0_48 0x0D
+#define IAR_MACLONGADDRS0_56 0x0E
+#define IAR_RX_FRAME_FILTER 0x0F
+#define IAR_PLL_INT1 0x10
+#define IAR_PLL_FRAC1_LSB 0x11
+#define IAR_PLL_FRAC1_MSB 0x12
+#define IAR_MACPANID1_LSB 0x13
+#define IAR_MACPANID1_MSB 0x14
+#define IAR_MACSHORTADDRS1_LSB 0x15
+#define IAR_MACSHORTADDRS1_MSB 0x16
+#define IAR_MACLONGADDRS1_0 0x17
+#define IAR_MACLONGADDRS1_8 0x18
+#define IAR_MACLONGADDRS1_16 0x19
+#define IAR_MACLONGADDRS1_24 0x1A
+#define IAR_MACLONGADDRS1_32 0x1B
+#define IAR_MACLONGADDRS1_40 0x1C
+#define IAR_MACLONGADDRS1_48 0x1D
+#define IAR_MACLONGADDRS1_56 0x1E
+#define IAR_DUAL_PAN_CTRL 0x1F
+#define IAR_DUAL_PAN_DWELL 0x20
+#define IAR_DUAL_PAN_STS 0x21
+#define IAR_CCA1_THRESH 0x22
+#define IAR_CCA1_ED_OFFSET_COMP 0x23
+#define IAR_LQI_OFFSET_COMP 0x24
+#define IAR_CCA_CTRL 0x25
+#define IAR_CCA2_CORR_PEAKS 0x26
+#define IAR_CCA2_CORR_THRESH 0x27
+#define IAR_TMR_PRESCALE 0x28
+/*-------------------- 0x29 */
+#define IAR_GPIO_DATA 0x2A
+#define IAR_GPIO_DIR 0x2B
+#define IAR_GPIO_PUL_EN 0x2C
+#define IAR_GPIO_PUL_SEL 0x2D
+#define IAR_GPIO_DS 0x2E
+/*------------------ 0x2F */
+#define IAR_ANT_PAD_CTRL 0x30
+#define IAR_MISC_PAD_CTRL 0x31
+#define IAR_BSM_CTRL 0x32
+/*------------------- 0x33 */
+#define IAR_RNG 0x34
+#define IAR_RX_BYTE_COUNT 0x35
+#define IAR_RX_WTR_MARK 0x36
+#define IAR_SOFT_RESET 0x37
+#define IAR_TXDELAY 0x38
+#define IAR_ACKDELAY 0x39
+#define IAR_SEQ_MGR_CTRL 0x3A
+#define IAR_SEQ_MGR_STS 0x3B
+#define IAR_SEQ_T_STS 0x3C
+#define IAR_ABORT_STS 0x3D
+#define IAR_CCCA_BUSY_CNT 0x3E
+#define IAR_SRC_ADDR_CHECKSUM1 0x3F
+#define IAR_SRC_ADDR_CHECKSUM2 0x40
+#define IAR_SRC_TBL_VALID1 0x41
+#define IAR_SRC_TBL_VALID2 0x42
+#define IAR_FILTERFAIL_CODE1 0x43
+#define IAR_FILTERFAIL_CODE2 0x44
+#define IAR_SLOT_PRELOAD 0x45
+/*-------------------- 0x46 */
+#define IAR_CORR_VT 0x47
+#define IAR_SYNC_CTRL 0x48
+#define IAR_PN_LSB_0 0x49
+#define IAR_PN_LSB_1 0x4A
+#define IAR_PN_MSB_0 0x4B
+#define IAR_PN_MSB_1 0x4C
+#define IAR_CORR_NVAL 0x4D
+#define IAR_TX_MODE_CTRL 0x4E
+#define IAR_SNF_THR 0x4F
+#define IAR_FAD_THR 0x50
+#define IAR_ANT_AGC_CTRL 0x51
+#define IAR_AGC_THR1 0x52
+#define IAR_AGC_THR2 0x53
+#define IAR_AGC_HYS 0x54
+#define IAR_AFC 0x55
+/*------------------- 0x56 */
+/*------------------- 0x57 */
+#define IAR_PHY_STS 0x58
+#define IAR_RX_MAX_CORR 0x59
+#define IAR_RX_MAX_PREAMBLE 0x5A
+#define IAR_RSSI 0x5B
+/*------------------- 0x5C */
+/*------------------- 0x5D */
+#define IAR_PLL_DIG_CTRL 0x5E
+#define IAR_VCO_CAL 0x5F
+#define IAR_VCO_BEST_DIFF 0x60
+#define IAR_VCO_BIAS 0x61
+#define IAR_KMOD_CTRL 0x62
+#define IAR_KMOD_CAL 0x63
+#define IAR_PA_CAL 0x64
+#define IAR_PA_PWRCAL 0x65
+#define IAR_ATT_RSSI1 0x66
+#define IAR_ATT_RSSI2 0x67
+#define IAR_RSSI_OFFSET 0x68
+#define IAR_RSSI_SLOPE 0x69
+#define IAR_RSSI_CAL1 0x6A
+#define IAR_RSSI_CAL2 0x6B
+/*------------------- 0x6C */
+/*------------------- 0x6D */
+#define IAR_XTAL_CTRL 0x6E
+#define IAR_XTAL_COMP_MIN 0x6F
+#define IAR_XTAL_COMP_MAX 0x70
+#define IAR_XTAL_GM 0x71
+/*------------------- 0x72 */
+/*------------------- 0x73 */
+#define IAR_LNA_TUNE 0x74
+#define IAR_LNA_AGCGAIN 0x75
+/*------------------- 0x76 */
+/*------------------- 0x77 */
+#define IAR_CHF_PMA_GAIN 0x78
+#define IAR_CHF_IBUF 0x79
+#define IAR_CHF_QBUF 0x7A
+#define IAR_CHF_IRIN 0x7B
+#define IAR_CHF_QRIN 0x7C
+#define IAR_CHF_IL 0x7D
+#define IAR_CHF_QL 0x7E
+#define IAR_CHF_CC1 0x7F
+#define IAR_CHF_CCL 0x80
+#define IAR_CHF_CC2 0x81
+#define IAR_CHF_IROUT 0x82
+#define IAR_CHF_QROUT 0x83
+/*------------------- 0x84 */
+/*------------------- 0x85 */
+#define IAR_RSSI_CTRL 0x86
+/*------------------- 0x87 */
+/*------------------- 0x88 */
+#define IAR_PA_BIAS 0x89
+#define IAR_PA_TUNING 0x8A
+/*------------------- 0x8B */
+/*------------------- 0x8C */
+#define IAR_PMC_HP_TRIM 0x8D
+#define IAR_VREGA_TRIM 0x8E
+/*------------------- 0x8F */
+/*------------------- 0x90 */
+#define IAR_VCO_CTRL1 0x91
+#define IAR_VCO_CTRL2 0x92
+/*------------------- 0x93 */
+/*------------------- 0x94 */
+#define IAR_ANA_SPARE_OUT1 0x95
+#define IAR_ANA_SPARE_OUT2 0x96
+#define IAR_ANA_SPARE_IN 0x97
+#define IAR_MISCELLANEOUS 0x98
+/*------------------- 0x99 */
+#define IAR_SEQ_MGR_OVRD0 0x9A
+#define IAR_SEQ_MGR_OVRD1 0x9B
+#define IAR_SEQ_MGR_OVRD2 0x9C
+#define IAR_SEQ_MGR_OVRD3 0x9D
+#define IAR_SEQ_MGR_OVRD4 0x9E
+#define IAR_SEQ_MGR_OVRD5 0x9F
+#define IAR_SEQ_MGR_OVRD6 0xA0
+#define IAR_SEQ_MGR_OVRD7 0xA1
+/*------------------- 0xA2 */
+#define IAR_TESTMODE_CTRL 0xA3
+#define IAR_DTM_CTRL1 0xA4
+#define IAR_DTM_CTRL2 0xA5
+#define IAR_ATM_CTRL1 0xA6
+#define IAR_ATM_CTRL2 0xA7
+#define IAR_ATM_CTRL3 0xA8
+/*------------------- 0xA9 */
+#define IAR_LIM_FE_TEST_CTRL 0xAA
+#define IAR_CHF_TEST_CTRL 0xAB
+#define IAR_VCO_TEST_CTRL 0xAC
+#define IAR_PLL_TEST_CTRL 0xAD
+#define IAR_PA_TEST_CTRL 0xAE
+#define IAR_PMC_TEST_CTRL 0xAF
+#define IAR_SCAN_DTM_PROTECT_1 0xFE
+#define IAR_SCAN_DTM_PROTECT_0 0xFF
+
+/* IRQSTS1 bits */
+#define DAR_IRQSTS1_RX_FRM_PEND BIT(7)
+#define DAR_IRQSTS1_PLL_UNLOCK_IRQ BIT(6)
+#define DAR_IRQSTS1_FILTERFAIL_IRQ BIT(5)
+#define DAR_IRQSTS1_RXWTRMRKIRQ BIT(4)
+#define DAR_IRQSTS1_CCAIRQ BIT(3)
+#define DAR_IRQSTS1_RXIRQ BIT(2)
+#define DAR_IRQSTS1_TXIRQ BIT(1)
+#define DAR_IRQSTS1_SEQIRQ BIT(0)
+
+/* IRQSTS2 bits */
+#define DAR_IRQSTS2_CRCVALID BIT(7)
+#define DAR_IRQSTS2_CCA BIT(6)
+#define DAR_IRQSTS2_SRCADDR BIT(5)
+#define DAR_IRQSTS2_PI BIT(4)
+#define DAR_IRQSTS2_TMRSTATUS BIT(3)
+#define DAR_IRQSTS2_ASM_IRQ BIT(2)
+#define DAR_IRQSTS2_PB_ERR_IRQ BIT(1)
+#define DAR_IRQSTS2_WAKE_IRQ BIT(0)
+
+/* IRQSTS3 bits */
+#define DAR_IRQSTS3_TMR4MSK BIT(7)
+#define DAR_IRQSTS3_TMR3MSK BIT(6)
+#define DAR_IRQSTS3_TMR2MSK BIT(5)
+#define DAR_IRQSTS3_TMR1MSK BIT(4)
+#define DAR_IRQSTS3_TMR4IRQ BIT(3)
+#define DAR_IRQSTS3_TMR3IRQ BIT(2)
+#define DAR_IRQSTS3_TMR2IRQ BIT(1)
+#define DAR_IRQSTS3_TMR1IRQ BIT(0)
+
+/* PHY_CTRL1 bits */
+#define DAR_PHY_CTRL1_TMRTRIGEN BIT(7)
+#define DAR_PHY_CTRL1_SLOTTED BIT(6)
+#define DAR_PHY_CTRL1_CCABFRTX BIT(5)
+#define DAR_PHY_CTRL1_CCABFRTX_SHIFT 5
+#define DAR_PHY_CTRL1_RXACKRQD BIT(4)
+#define DAR_PHY_CTRL1_AUTOACK BIT(3)
+#define DAR_PHY_CTRL1_XCVSEQ_MASK 0x07
+
+/* PHY_CTRL2 bits */
+#define DAR_PHY_CTRL2_CRC_MSK BIT(7)
+#define DAR_PHY_CTRL2_PLL_UNLOCK_MSK BIT(6)
+#define DAR_PHY_CTRL2_FILTERFAIL_MSK BIT(5)
+#define DAR_PHY_CTRL2_RX_WMRK_MSK BIT(4)
+#define DAR_PHY_CTRL2_CCAMSK BIT(3)
+#define DAR_PHY_CTRL2_RXMSK BIT(2)
+#define DAR_PHY_CTRL2_TXMSK BIT(1)
+#define DAR_PHY_CTRL2_SEQMSK BIT(0)
+
+/* PHY_CTRL3 bits */
+#define DAR_PHY_CTRL3_TMR4CMP_EN BIT(7)
+#define DAR_PHY_CTRL3_TMR3CMP_EN BIT(6)
+#define DAR_PHY_CTRL3_TMR2CMP_EN BIT(5)
+#define DAR_PHY_CTRL3_TMR1CMP_EN BIT(4)
+#define DAR_PHY_CTRL3_ASM_MSK BIT(2)
+#define DAR_PHY_CTRL3_PB_ERR_MSK BIT(1)
+#define DAR_PHY_CTRL3_WAKE_MSK BIT(0)
+
+/* RX_FRM_LEN bits */
+#define DAR_RX_FRAME_LENGTH_MASK (0x7F)
+
+/* PHY_CTRL4 bits */
+#define DAR_PHY_CTRL4_TRCV_MSK BIT(7)
+#define DAR_PHY_CTRL4_TC3TMOUT BIT(6)
+#define DAR_PHY_CTRL4_PANCORDNTR0 BIT(5)
+#define DAR_PHY_CTRL4_CCATYPE (3)
+#define DAR_PHY_CTRL4_CCATYPE_SHIFT (3)
+#define DAR_PHY_CTRL4_CCATYPE_MASK (0x18)
+#define DAR_PHY_CTRL4_TMRLOAD BIT(2)
+#define DAR_PHY_CTRL4_PROMISCUOUS BIT(1)
+#define DAR_PHY_CTRL4_TC2PRIME_EN BIT(0)
+
+/* SRC_CTRL bits */
+#define DAR_SRC_CTRL_INDEX (0x0F)
+#define DAR_SRC_CTRL_INDEX_SHIFT (4)
+#define DAR_SRC_CTRL_ACK_FRM_PND BIT(3)
+#define DAR_SRC_CTRL_SRCADDR_EN BIT(2)
+#define DAR_SRC_CTRL_INDEX_EN BIT(1)
+#define DAR_SRC_CTRL_INDEX_DISABLE BIT(0)
+
+/* DAR_ASM_CTRL1 bits */
+#define DAR_ASM_CTRL1_CLEAR BIT(7)
+#define DAR_ASM_CTRL1_START BIT(6)
+#define DAR_ASM_CTRL1_SELFTST BIT(5)
+#define DAR_ASM_CTRL1_CTR BIT(4)
+#define DAR_ASM_CTRL1_CBC BIT(3)
+#define DAR_ASM_CTRL1_AES BIT(2)
+#define DAR_ASM_CTRL1_LOAD_MAC BIT(1)
+
+/* DAR_ASM_CTRL2 bits */
+#define DAR_ASM_CTRL2_DATA_REG_TYPE_SEL (7)
+#define DAR_ASM_CTRL2_DATA_REG_TYPE_SEL_SHIFT (5)
+#define DAR_ASM_CTRL2_TSTPAS BIT(1)
+
+/* DAR_CLK_OUT_CTRL bits */
+#define DAR_CLK_OUT_CTRL_EXTEND BIT(7)
+#define DAR_CLK_OUT_CTRL_HIZ BIT(6)
+#define DAR_CLK_OUT_CTRL_SR BIT(5)
+#define DAR_CLK_OUT_CTRL_DS BIT(4)
+#define DAR_CLK_OUT_CTRL_EN BIT(3)
+#define DAR_CLK_OUT_CTRL_DIV (7)
+
+/* DAR_PWR_MODES bits */
+#define DAR_PWR_MODES_XTAL_READY BIT(5)
+#define DAR_PWR_MODES_XTALEN BIT(4)
+#define DAR_PWR_MODES_ASM_CLK_EN BIT(3)
+#define DAR_PWR_MODES_AUTODOZE BIT(1)
+#define DAR_PWR_MODES_PMC_MODE BIT(0)
+
+/* RX_FRAME_FILTER bits */
+#define IAR_RX_FRAME_FLT_FRM_VER (0xC0)
+#define IAR_RX_FRAME_FLT_FRM_VER_SHIFT (6)
+#define IAR_RX_FRAME_FLT_ACTIVE_PROMISCUOUS BIT(5)
+#define IAR_RX_FRAME_FLT_NS_FT BIT(4)
+#define IAR_RX_FRAME_FLT_CMD_FT BIT(3)
+#define IAR_RX_FRAME_FLT_ACK_FT BIT(2)
+#define IAR_RX_FRAME_FLT_DATA_FT BIT(1)
+#define IAR_RX_FRAME_FLT_BEACON_FT BIT(0)
+
+/* DUAL_PAN_CTRL bits */
+#define IAR_DUAL_PAN_CTRL_DUAL_PAN_SAM_LVL_MSK (0xF0)
+#define IAR_DUAL_PAN_CTRL_DUAL_PAN_SAM_LVL_SHIFT (4)
+#define IAR_DUAL_PAN_CTRL_CURRENT_NETWORK BIT(3)
+#define IAR_DUAL_PAN_CTRL_PANCORDNTR1 BIT(2)
+#define IAR_DUAL_PAN_CTRL_DUAL_PAN_AUTO BIT(1)
+#define IAR_DUAL_PAN_CTRL_ACTIVE_NETWORK BIT(0)
+
+/* DUAL_PAN_STS bits */
+#define IAR_DUAL_PAN_STS_RECD_ON_PAN1 BIT(7)
+#define IAR_DUAL_PAN_STS_RECD_ON_PAN0 BIT(6)
+#define IAR_DUAL_PAN_STS_DUAL_PAN_REMAIN (0x3F)
+
+/* CCA_CTRL bits */
+#define IAR_CCA_CTRL_AGC_FRZ_EN BIT(6)
+#define IAR_CCA_CTRL_CONT_RSSI_EN BIT(5)
+#define IAR_CCA_CTRL_LQI_RSSI_NOT_CORR BIT(4)
+#define IAR_CCA_CTRL_CCA3_AND_NOT_OR BIT(3)
+#define IAR_CCA_CTRL_POWER_COMP_EN_LQI BIT(2)
+#define IAR_CCA_CTRL_POWER_COMP_EN_ED BIT(1)
+#define IAR_CCA_CTRL_POWER_COMP_EN_CCA1 BIT(0)
+
+/* ANT_PAD_CTRL bits */
+#define IAR_ANT_PAD_CTRL_ANTX_POL (0x0F)
+#define IAR_ANT_PAD_CTRL_ANTX_POL_SHIFT (4)
+#define IAR_ANT_PAD_CTRL_ANTX_CTRLMODE BIT(3)
+#define IAR_ANT_PAD_CTRL_ANTX_HZ BIT(2)
+#define IAR_ANT_PAD_CTRL_ANTX_EN (3)
+
+/* MISC_PAD_CTRL bits */
+#define IAR_MISC_PAD_CTRL_MISO_HIZ_EN BIT(3)
+#define IAR_MISC_PAD_CTRL_IRQ_B_OD BIT(2)
+#define IAR_MISC_PAD_CTRL_NON_GPIO_DS BIT(1)
+#define IAR_MISC_PAD_CTRL_ANTX_CURR (1)
+
+/* ANT_AGC_CTRL bits */
+#define IAR_ANT_AGC_CTRL_FAD_EN_SHIFT (0)
+#define IAR_ANT_AGC_CTRL_FAD_EN_MASK (1)
+#define IAR_ANT_AGC_CTRL_ANTX_SHIFT (1)
+#define IAR_ANT_AGC_CTRL_ANTX_MASK BIT(IAR_ANT_AGC_CTRL_ANTX_SHIFT)
+
+/* BSM_CTRL bits */
+#define BSM_CTRL_BSM_EN (1)
+
+/* SOFT_RESET bits */
+#define IAR_SOFT_RESET_SOG_RST BIT(7)
+#define IAR_SOFT_RESET_REGS_RST BIT(4)
+#define IAR_SOFT_RESET_PLL_RST BIT(3)
+#define IAR_SOFT_RESET_TX_RST BIT(2)
+#define IAR_SOFT_RESET_RX_RST BIT(1)
+#define IAR_SOFT_RESET_SEQ_MGR_RST BIT(0)
+
+/* SEQ_MGR_CTRL bits */
+#define IAR_SEQ_MGR_CTRL_SEQ_STATE_CTRL (3)
+#define IAR_SEQ_MGR_CTRL_SEQ_STATE_CTRL_SHIFT (6)
+#define IAR_SEQ_MGR_CTRL_NO_RX_RECYCLE BIT(5)
+#define IAR_SEQ_MGR_CTRL_LATCH_PREAMBLE BIT(4)
+#define IAR_SEQ_MGR_CTRL_EVENT_TMR_DO_NOT_LATCH BIT(3)
+#define IAR_SEQ_MGR_CTRL_CLR_NEW_SEQ_INHIBIT BIT(2)
+#define IAR_SEQ_MGR_CTRL_PSM_LOCK_DIS BIT(1)
+#define IAR_SEQ_MGR_CTRL_PLL_ABORT_OVRD BIT(0)
+
+/* SEQ_MGR_STS bits */
+#define IAR_SEQ_MGR_STS_TMR2_SEQ_TRIG_ARMED BIT(7)
+#define IAR_SEQ_MGR_STS_RX_MODE BIT(6)
+#define IAR_SEQ_MGR_STS_RX_TIMEOUT_PENDING BIT(5)
+#define IAR_SEQ_MGR_STS_NEW_SEQ_INHIBIT BIT(4)
+#define IAR_SEQ_MGR_STS_SEQ_IDLE BIT(3)
+#define IAR_SEQ_MGR_STS_XCVSEQ_ACTUAL (7)
+
+/* ABORT_STS bits */
+#define IAR_ABORT_STS_PLL_ABORTED BIT(2)
+#define IAR_ABORT_STS_TC3_ABORTED BIT(1)
+#define IAR_ABORT_STS_SW_ABORTED BIT(0)
+
+/* IAR_FILTERFAIL_CODE2 bits */
+#define IAR_FILTERFAIL_CODE2_PAN_SEL BIT(7)
+#define IAR_FILTERFAIL_CODE2_9_8 (3)
+
+/* PHY_STS bits */
+#define IAR_PHY_STS_PLL_UNLOCK BIT(7)
+#define IAR_PHY_STS_PLL_LOCK_ERR BIT(6)
+#define IAR_PHY_STS_PLL_LOCK BIT(5)
+#define IAR_PHY_STS_CRCVALID BIT(3)
+#define IAR_PHY_STS_FILTERFAIL_FLAG_SEL BIT(2)
+#define IAR_PHY_STS_SFD_DET BIT(1)
+#define IAR_PHY_STS_PREAMBLE_DET BIT(0)
+
+/* TESTMODE_CTRL bits */
+#define IAR_TEST_MODE_CTRL_HOT_ANT BIT(4)
+#define IAR_TEST_MODE_CTRL_IDEAL_RSSI_EN BIT(3)
+#define IAR_TEST_MODE_CTRL_IDEAL_PFC_EN BIT(2)
+#define IAR_TEST_MODE_CTRL_CONTINUOUS_EN BIT(1)
+#define IAR_TEST_MODE_CTRL_FPGA_EN BIT(0)
+
+/* DTM_CTRL1 bits */
+#define IAR_DTM_CTRL1_ATM_LOCKED BIT(7)
+#define IAR_DTM_CTRL1_DTM_EN BIT(6)
+#define IAR_DTM_CTRL1_PAGE5 BIT(5)
+#define IAR_DTM_CTRL1_PAGE4 BIT(4)
+#define IAR_DTM_CTRL1_PAGE3 BIT(3)
+#define IAR_DTM_CTRL1_PAGE2 BIT(2)
+#define IAR_DTM_CTRL1_PAGE1 BIT(1)
+#define IAR_DTM_CTRL1_PAGE0 BIT(0)
+
+/* TX_MODE_CTRL */
+#define IAR_TX_MODE_CTRL_TX_INV BIT(4)
+#define IAR_TX_MODE_CTRL_BT_EN BIT(3)
+#define IAR_TX_MODE_CTRL_DTS2 BIT(2)
+#define IAR_TX_MODE_CTRL_DTS1 BIT(1)
+#define IAR_TX_MODE_CTRL_DTS0 BIT(0)
+
+#define TX_MODE_CTRL_DTS_MASK (7)
+
+#endif /* _MCR20A_H */
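
Worked example: the queue-clear loop in mcr20a_phy_init() composes DAR_SRC_CTRL values from the fields defined above — the 4-bit table index sits in bits 7:4, the control bits in the low nibble. A pure-arithmetic sketch using only macros from this header (the helper name is hypothetical):

/* Build the SRC_CTRL value that invalidates indirect-queue entry @index;
 * mirrors the loop in mcr20a_phy_init(). No hardware access involved.
 */
static u8 mcr20a_src_ctrl_invalidate(u8 index)
{
	return ((index & DAR_SRC_CTRL_INDEX) << DAR_SRC_CTRL_INDEX_SHIFT) |
	       DAR_SRC_CTRL_SRCADDR_EN |
	       DAR_SRC_CTRL_INDEX_DISABLE;
}

/* e.g. index 3 -> 0x35: index 0011 in bits 7:4, SRCADDR_EN (bit 2) and
 * INDEX_DISABLE (bit 0) set.
 */
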
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 5166575a164d..a115f12bf130 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -74,6 +74,7 @@ struct ipvl_dev {
DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE);
netdev_features_t sfeatures;
u32 msg_enable;
+ spinlock_t addrs_lock;
};
struct ipvl_addr {
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index c1f008fe4e1d..1a8132eb2a3e 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -35,6 +35,7 @@ void ipvlan_count_rx(const struct ipvl_dev *ipvlan,
}
EXPORT_SYMBOL_GPL(ipvlan_count_rx);
+#if IS_ENABLED(CONFIG_IPV6)
static u8 ipvlan_get_v6_hash(const void *iaddr)
{
const struct in6_addr *ip6_addr = iaddr;
@@ -42,6 +43,12 @@ static u8 ipvlan_get_v6_hash(const void *iaddr)
return __ipv6_addr_jhash(ip6_addr, ipvlan_jhash_secret) &
IPVLAN_HASH_MASK;
}
+#else
+static u8 ipvlan_get_v6_hash(const void *iaddr)
+{
+ return 0;
+}
+#endif
static u8 ipvlan_get_v4_hash(const void *iaddr)
{
@@ -51,6 +58,23 @@ static u8 ipvlan_get_v4_hash(const void *iaddr)
IPVLAN_HASH_MASK;
}
+static bool addr_equal(bool is_v6, struct ipvl_addr *addr, const void *iaddr)
+{
+ if (!is_v6 && addr->atype == IPVL_IPV4) {
+ struct in_addr *i4addr = (struct in_addr *)iaddr;
+
+ return addr->ip4addr.s_addr == i4addr->s_addr;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (is_v6 && addr->atype == IPVL_IPV6) {
+ struct in6_addr *i6addr = (struct in6_addr *)iaddr;
+
+ return ipv6_addr_equal(&addr->ip6addr, i6addr);
+#endif
+ }
+
+ return false;
+}
+
static struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
const void *iaddr, bool is_v6)
{
@@ -59,15 +83,9 @@ static struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
hash = is_v6 ? ipvlan_get_v6_hash(iaddr) :
ipvlan_get_v4_hash(iaddr);
- hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode) {
- if (is_v6 && addr->atype == IPVL_IPV6 &&
- ipv6_addr_equal(&addr->ip6addr, iaddr))
- return addr;
- else if (!is_v6 && addr->atype == IPVL_IPV4 &&
- addr->ip4addr.s_addr ==
- ((struct in_addr *)iaddr)->s_addr)
+ hlist_for_each_entry_rcu(addr, &port->hlhead[hash], hlnode)
+ if (addr_equal(is_v6, addr, iaddr))
return addr;
- }
return NULL;
}
@@ -91,29 +109,33 @@ void ipvlan_ht_addr_del(struct ipvl_addr *addr)
struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
const void *iaddr, bool is_v6)
{
- struct ipvl_addr *addr;
+ struct ipvl_addr *addr, *ret = NULL;
- list_for_each_entry(addr, &ipvlan->addrs, anode) {
- if ((is_v6 && addr->atype == IPVL_IPV6 &&
- ipv6_addr_equal(&addr->ip6addr, iaddr)) ||
- (!is_v6 && addr->atype == IPVL_IPV4 &&
- addr->ip4addr.s_addr == ((struct in_addr *)iaddr)->s_addr))
- return addr;
+ rcu_read_lock();
+ list_for_each_entry_rcu(addr, &ipvlan->addrs, anode) {
+ if (addr_equal(is_v6, addr, iaddr)) {
+ ret = addr;
+ break;
+ }
}
- return NULL;
+ rcu_read_unlock();
+ return ret;
}
bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6)
{
struct ipvl_dev *ipvlan;
+ bool ret = false;
- ASSERT_RTNL();
-
- list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
- if (ipvlan_find_addr(ipvlan, iaddr, is_v6))
- return true;
+ rcu_read_lock();
+ list_for_each_entry_rcu(ipvlan, &port->ipvlans, pnode) {
+ if (ipvlan_find_addr(ipvlan, iaddr, is_v6)) {
+ ret = true;
+ break;
+ }
}
- return false;
+ rcu_read_unlock();
+ return ret;
}
static void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
@@ -150,6 +172,7 @@ static void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int
lyr3h = ip4h;
break;
}
+#if IS_ENABLED(CONFIG_IPV6)
case htons(ETH_P_IPV6): {
struct ipv6hdr *ip6h;
@@ -188,6 +211,7 @@ static void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int
}
break;
}
+#endif
default:
return NULL;
}
@@ -337,14 +361,18 @@ static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port,
{
struct ipvl_addr *addr = NULL;
- if (addr_type == IPVL_IPV6) {
+ switch (addr_type) {
+#if IS_ENABLED(CONFIG_IPV6)
+ case IPVL_IPV6: {
struct ipv6hdr *ip6h;
struct in6_addr *i6addr;
ip6h = (struct ipv6hdr *)lyr3h;
i6addr = use_dest ? &ip6h->daddr : &ip6h->saddr;
addr = ipvlan_ht_addr_lookup(port, i6addr, true);
- } else if (addr_type == IPVL_ICMPV6) {
+ break;
+ }
+ case IPVL_ICMPV6: {
struct nd_msg *ndmh;
struct in6_addr *i6addr;
@@ -356,14 +384,19 @@ static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port,
i6addr = &ndmh->target;
addr = ipvlan_ht_addr_lookup(port, i6addr, true);
}
- } else if (addr_type == IPVL_IPV4) {
+ break;
+ }
+#endif
+ case IPVL_IPV4: {
struct iphdr *ip4h;
__be32 *i4addr;
ip4h = (struct iphdr *)lyr3h;
i4addr = use_dest ? &ip4h->daddr : &ip4h->saddr;
addr = ipvlan_ht_addr_lookup(port, i4addr, false);
- } else if (addr_type == IPVL_ARP) {
+ break;
+ }
+ case IPVL_ARP: {
struct arphdr *arph;
unsigned char *arp_ptr;
__be32 dip;
@@ -377,6 +410,8 @@ static struct ipvl_addr *ipvlan_addr_lookup(struct ipvl_port *port,
memcpy(&dip, arp_ptr, 4);
addr = ipvlan_ht_addr_lookup(port, &dip, false);
+ break;
+ }
}
return addr;
@@ -420,6 +455,7 @@ out:
return ret;
}
+#if IS_ENABLED(CONFIG_IPV6)
static int ipvlan_process_v6_outbound(struct sk_buff *skb)
{
const struct ipv6hdr *ip6h = ipv6_hdr(skb);
@@ -456,6 +492,12 @@ err:
out:
return ret;
}
+#else
+static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+{
+ return NET_XMIT_DROP;
+}
+#endif
static int ipvlan_process_outbound(struct sk_buff *skb)
{
@@ -464,8 +506,8 @@ static int ipvlan_process_outbound(struct sk_buff *skb)
/* In this mode we dont care about multicast and broadcast traffic */
if (is_multicast_ether_addr(ethh->h_dest)) {
- pr_warn_ratelimited("Dropped {multi|broad}cast of type= [%x]\n",
- ntohs(skb->protocol));
+ pr_debug_ratelimited("Dropped {multi|broad}cast of type=[%x]\n",
+ ntohs(skb->protocol));
kfree_skb(skb);
goto out;
}
@@ -759,6 +801,7 @@ struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, struct sk_buff *skb,
goto out;
break;
}
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
{
struct dst_entry *dst;
@@ -774,10 +817,12 @@ struct sk_buff *ipvlan_l3_rcv(struct net_device *dev, struct sk_buff *skb,
};
skb_dst_drop(skb);
- dst = ip6_route_input_lookup(dev_net(sdev), sdev, &fl6, flags);
+ dst = ip6_route_input_lookup(dev_net(sdev), sdev, &fl6,
+ skb, flags);
skb_dst_set(skb, dst);
break;
}
+#endif
default:
break;
}
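
Editorial note: the ipvlan_core.c changes above are the canonical RCU-protected list conversion — lookups run lock-free under rcu_read_lock(), mutation is serialized by the new per-device addrs_lock, and freeing is deferred past a grace period. A condensed sketch of the two sides (generic pos/head/lock names, not a drop-in for this file):

/* Reader: safe from softirq context, never blocks a writer. The object may
 * only be dereferenced inside the read-side section unless the caller also
 * holds the writer lock (as ipvlan_del_addr() does around find+delete).
 */
rcu_read_lock();
list_for_each_entry_rcu(pos, &head, node) {
	if (match(pos)) {
		use(pos);
		break;
	}
}
rcu_read_unlock();

/* Writer: the spinlock orders concurrent updates; kfree_rcu() defers the
 * free until all pre-existing readers have finished.
 */
spin_lock_bh(&lock);
list_del_rcu(&pos->node);
spin_unlock_bh(&lock);
kfree_rcu(pos, rcu);
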
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 2469df118fbf..4cbe9e27287d 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -22,12 +22,14 @@ static const struct nf_hook_ops ipvl_nfops[] = {
.hooknum = NF_INET_LOCAL_IN,
.priority = INT_MAX,
},
+#if IS_ENABLED(CONFIG_IPV6)
{
.hook = ipvlan_nf_input,
.pf = NFPROTO_IPV6,
.hooknum = NF_INET_LOCAL_IN,
.priority = INT_MAX,
},
+#endif
};
static const struct l3mdev_ops ipvl_l3mdev_ops = {
@@ -176,7 +178,7 @@ static int ipvlan_init(struct net_device *dev)
dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
(phy_dev->state & IPVLAN_STATE_MASK);
dev->features = phy_dev->features & IPVLAN_FEATURES;
- dev->features |= NETIF_F_LLTX;
+ dev->features |= NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED;
dev->gso_max_size = phy_dev->gso_max_size;
dev->gso_max_segs = phy_dev->gso_max_segs;
dev->hard_header_len = phy_dev->hard_header_len;
@@ -225,8 +227,10 @@ static int ipvlan_open(struct net_device *dev)
else
dev->flags &= ~IFF_NOARP;
- list_for_each_entry(addr, &ipvlan->addrs, anode)
+ rcu_read_lock();
+ list_for_each_entry_rcu(addr, &ipvlan->addrs, anode)
ipvlan_ht_addr_add(ipvlan, addr);
+ rcu_read_unlock();
return dev_uc_add(phy_dev, phy_dev->dev_addr);
}
@@ -242,8 +246,10 @@ static int ipvlan_stop(struct net_device *dev)
dev_uc_del(phy_dev, phy_dev->dev_addr);
- list_for_each_entry(addr, &ipvlan->addrs, anode)
+ rcu_read_lock();
+ list_for_each_entry_rcu(addr, &ipvlan->addrs, anode)
ipvlan_ht_addr_del(addr);
+ rcu_read_unlock();
return 0;
}
@@ -586,6 +592,7 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
ipvlan->sfeatures = IPVLAN_FEATURES;
ipvlan_adjust_mtu(ipvlan, phy_dev);
INIT_LIST_HEAD(&ipvlan->addrs);
+ spin_lock_init(&ipvlan->addrs_lock);
/* TODO Probably put random address here to be presented to the
* world but keep using the physical-dev address for the outgoing
@@ -663,11 +670,13 @@ void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
struct ipvl_dev *ipvlan = netdev_priv(dev);
struct ipvl_addr *addr, *next;
+ spin_lock_bh(&ipvlan->addrs_lock);
list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
ipvlan_ht_addr_del(addr);
- list_del(&addr->anode);
+ list_del_rcu(&addr->anode);
kfree_rcu(addr, rcu);
}
+ spin_unlock_bh(&ipvlan->addrs_lock);
ida_simple_remove(&ipvlan->port->ida, dev->dev_id);
list_del_rcu(&ipvlan->pnode);
@@ -758,8 +767,7 @@ static int ipvlan_device_event(struct notifier_block *unused,
if (dev->reg_state != NETREG_UNREGISTERING)
break;
- list_for_each_entry_safe(ipvlan, next, &port->ipvlans,
- pnode)
+ list_for_each_entry_safe(ipvlan, next, &port->ipvlans, pnode)
ipvlan->dev->rtnl_link_ops->dellink(ipvlan->dev,
&lst_kill);
unregister_netdevice_many(&lst_kill);
@@ -791,6 +799,7 @@ static int ipvlan_device_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
+/* the caller must hold the addrs lock */
static int ipvlan_add_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
{
struct ipvl_addr *addr;
@@ -800,14 +809,17 @@ static int ipvlan_add_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
return -ENOMEM;
addr->master = ipvlan;
- if (is_v6) {
- memcpy(&addr->ip6addr, iaddr, sizeof(struct in6_addr));
- addr->atype = IPVL_IPV6;
- } else {
+ if (!is_v6) {
memcpy(&addr->ip4addr, iaddr, sizeof(struct in_addr));
addr->atype = IPVL_IPV4;
+#if IS_ENABLED(CONFIG_IPV6)
+ } else {
+ memcpy(&addr->ip6addr, iaddr, sizeof(struct in6_addr));
+ addr->atype = IPVL_IPV6;
+#endif
}
- list_add_tail(&addr->anode, &ipvlan->addrs);
+
+ list_add_tail_rcu(&addr->anode, &ipvlan->addrs);
/* If the interface is not up, the address will be added to the hash
* list by ipvlan_open.
@@ -822,32 +834,17 @@ static void ipvlan_del_addr(struct ipvl_dev *ipvlan, void *iaddr, bool is_v6)
{
struct ipvl_addr *addr;
+ spin_lock_bh(&ipvlan->addrs_lock);
addr = ipvlan_find_addr(ipvlan, iaddr, is_v6);
- if (!addr)
+ if (!addr) {
+ spin_unlock_bh(&ipvlan->addrs_lock);
return;
+ }
ipvlan_ht_addr_del(addr);
- list_del(&addr->anode);
+ list_del_rcu(&addr->anode);
+ spin_unlock_bh(&ipvlan->addrs_lock);
kfree_rcu(addr, rcu);
-
- return;
-}
-
-static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
-{
- if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true)) {
- netif_err(ipvlan, ifup, ipvlan->dev,
- "Failed to add IPv6=%pI6c addr for %s intf\n",
- ip6_addr, ipvlan->dev->name);
- return -EINVAL;
- }
-
- return ipvlan_add_addr(ipvlan, ip6_addr, true);
-}
-
-static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
-{
- return ipvlan_del_addr(ipvlan, ip6_addr, true);
}
static bool ipvlan_is_valid_dev(const struct net_device *dev)
@@ -863,6 +860,27 @@ static bool ipvlan_is_valid_dev(const struct net_device *dev)
return true;
}
+#if IS_ENABLED(CONFIG_IPV6)
+static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
+{
+ int ret = -EINVAL;
+
+ spin_lock_bh(&ipvlan->addrs_lock);
+ if (ipvlan_addr_busy(ipvlan->port, ip6_addr, true))
+ netif_err(ipvlan, ifup, ipvlan->dev,
+ "Failed to add IPv6=%pI6c addr for %s intf\n",
+ ip6_addr, ipvlan->dev->name);
+ else
+ ret = ipvlan_add_addr(ipvlan, ip6_addr, true);
+ spin_unlock_bh(&ipvlan->addrs_lock);
+ return ret;
+}
+
+static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
+{
+ return ipvlan_del_addr(ipvlan, ip6_addr, true);
+}
+
static int ipvlan_addr6_event(struct notifier_block *unused,
unsigned long event, void *ptr)
{
@@ -894,10 +912,6 @@ static int ipvlan_addr6_validator_event(struct notifier_block *unused,
struct net_device *dev = (struct net_device *)i6vi->i6vi_dev->dev;
struct ipvl_dev *ipvlan = netdev_priv(dev);
- /* FIXME IPv6 autoconf calls us from bh without RTNL */
- if (in_softirq())
- return NOTIFY_DONE;
-
if (!ipvlan_is_valid_dev(dev))
return NOTIFY_DONE;
@@ -913,17 +927,21 @@ static int ipvlan_addr6_validator_event(struct notifier_block *unused,
return NOTIFY_OK;
}
+#endif
static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
{
- if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false)) {
+ int ret = -EINVAL;
+
+ spin_lock_bh(&ipvlan->addrs_lock);
+ if (ipvlan_addr_busy(ipvlan->port, ip4_addr, false))
netif_err(ipvlan, ifup, ipvlan->dev,
"Failed to add IPv4=%pI4 on %s intf.\n",
ip4_addr, ipvlan->dev->name);
- return -EINVAL;
- }
-
- return ipvlan_add_addr(ipvlan, ip4_addr, false);
+ else
+ ret = ipvlan_add_addr(ipvlan, ip4_addr, false);
+ spin_unlock_bh(&ipvlan->addrs_lock);
+ return ret;
}
static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
@@ -993,6 +1011,7 @@ static struct notifier_block ipvlan_notifier_block __read_mostly = {
.notifier_call = ipvlan_device_event,
};
+#if IS_ENABLED(CONFIG_IPV6)
static struct notifier_block ipvlan_addr6_notifier_block __read_mostly = {
.notifier_call = ipvlan_addr6_event,
};
@@ -1000,6 +1019,7 @@ static struct notifier_block ipvlan_addr6_notifier_block __read_mostly = {
static struct notifier_block ipvlan_addr6_vtor_notifier_block __read_mostly = {
.notifier_call = ipvlan_addr6_validator_event,
};
+#endif
static void ipvlan_ns_exit(struct net *net)
{
@@ -1016,6 +1036,7 @@ static struct pernet_operations ipvlan_net_ops = {
.id = &ipvlan_netid,
.size = sizeof(struct ipvlan_netns),
.exit = ipvlan_ns_exit,
+ .async = true,
};
static int __init ipvlan_init_module(void)
@@ -1024,9 +1045,11 @@ static int __init ipvlan_init_module(void)
ipvlan_init_secret();
register_netdevice_notifier(&ipvlan_notifier_block);
+#if IS_ENABLED(CONFIG_IPV6)
register_inet6addr_notifier(&ipvlan_addr6_notifier_block);
register_inet6addr_validator_notifier(
&ipvlan_addr6_vtor_notifier_block);
+#endif
register_inetaddr_notifier(&ipvlan_addr4_notifier_block);
register_inetaddr_validator_notifier(&ipvlan_addr4_vtor_notifier_block);
@@ -1045,9 +1068,11 @@ error:
unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block);
unregister_inetaddr_validator_notifier(
&ipvlan_addr4_vtor_notifier_block);
+#if IS_ENABLED(CONFIG_IPV6)
unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block);
unregister_inet6addr_validator_notifier(
&ipvlan_addr6_vtor_notifier_block);
+#endif
unregister_netdevice_notifier(&ipvlan_notifier_block);
return err;
}
@@ -1060,9 +1085,11 @@ static void __exit ipvlan_cleanup_module(void)
unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block);
unregister_inetaddr_validator_notifier(
&ipvlan_addr4_vtor_notifier_block);
+#if IS_ENABLED(CONFIG_IPV6)
unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block);
unregister_inet6addr_validator_notifier(
&ipvlan_addr6_vtor_notifier_block);
+#endif
}
module_init(ipvlan_init_module);
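
A pattern worth noting from the ipvlan changes: rather than wrapping every call site in #ifdef CONFIG_IPV6, the patch compiles stub versions of the IPv6-only helpers when IPv6 is disabled, keeping the callers guard-free. A minimal sketch of the idiom (handle_v6 and do_ipv6_work are hypothetical; the drop return mirrors ipvlan_process_v6_outbound above):

#if IS_ENABLED(CONFIG_IPV6)
static int handle_v6(struct sk_buff *skb)
{
	/* real IPv6 path, only built when IPv6 support is enabled */
	return do_ipv6_work(skb);
}
#else
static int handle_v6(struct sk_buff *skb)
{
	return NET_XMIT_DROP;	/* IPv6 compiled out: drop the packet */
}
#endif
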
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 30612497643c..b97a907ea5aa 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -230,4 +230,5 @@ out:
/* Registered in net/core/dev.c */
struct pernet_operations __net_initdata loopback_net_ops = {
.init = loopback_net_init,
+ .async = true,
};
diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c
index e8ae50e1255e..319edc9c8ec7 100644
--- a/drivers/net/phy/aquantia.c
+++ b/drivers/net/phy/aquantia.c
@@ -38,14 +38,6 @@ static int aquantia_config_aneg(struct phy_device *phydev)
return 0;
}
-static int aquantia_aneg_done(struct phy_device *phydev)
-{
- int reg;
-
- reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
- return (reg < 0) ? reg : (reg & BMSR_ANEGCOMPLETE);
-}
-
static int aquantia_config_intr(struct phy_device *phydev)
{
int err;
@@ -125,7 +117,7 @@ static struct phy_driver aquantia_driver[] = {
.name = "Aquantia AQ1202",
.features = PHY_AQUANTIA_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .aneg_done = aquantia_aneg_done,
+ .aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
.config_intr = aquantia_config_intr,
.ack_interrupt = aquantia_ack_interrupt,
@@ -137,7 +129,7 @@ static struct phy_driver aquantia_driver[] = {
.name = "Aquantia AQ2104",
.features = PHY_AQUANTIA_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .aneg_done = aquantia_aneg_done,
+ .aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
.config_intr = aquantia_config_intr,
.ack_interrupt = aquantia_ack_interrupt,
@@ -149,7 +141,7 @@ static struct phy_driver aquantia_driver[] = {
.name = "Aquantia AQR105",
.features = PHY_AQUANTIA_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .aneg_done = aquantia_aneg_done,
+ .aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
.config_intr = aquantia_config_intr,
.ack_interrupt = aquantia_ack_interrupt,
@@ -161,7 +153,7 @@ static struct phy_driver aquantia_driver[] = {
.name = "Aquantia AQR106",
.features = PHY_AQUANTIA_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .aneg_done = aquantia_aneg_done,
+ .aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
.config_intr = aquantia_config_intr,
.ack_interrupt = aquantia_ack_interrupt,
@@ -173,7 +165,7 @@ static struct phy_driver aquantia_driver[] = {
.name = "Aquantia AQR107",
.features = PHY_AQUANTIA_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .aneg_done = aquantia_aneg_done,
+ .aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
.config_intr = aquantia_config_intr,
.ack_interrupt = aquantia_ack_interrupt,
@@ -185,7 +177,7 @@ static struct phy_driver aquantia_driver[] = {
.name = "Aquantia AQR405",
.features = PHY_AQUANTIA_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .aneg_done = aquantia_aneg_done,
+ .aneg_done = genphy_c45_aneg_done,
.config_aneg = aquantia_config_aneg,
.config_intr = aquantia_config_intr,
.ack_interrupt = aquantia_ack_interrupt,
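
The six aneg_done hooks above now point at the generic Clause 45 helper. For reference, the deleted driver-local version read the autonegotiation MMD's status register and tested the complete bit; genphy_c45_aneg_done() in phy-c45.c performs, to the best of my knowledge, the equivalent check:

/* Equivalent of the removed aquantia_aneg_done(): report autoneg
 * completion from the Clause 45 AN MMD status register.
 */
static int aneg_done_c45(struct phy_device *phydev)
{
	int reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);

	return (reg < 0) ? reg : (reg & BMSR_ANEGCOMPLETE);
}
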
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 421feb8f92fe..29b1c88b55cc 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -565,7 +565,7 @@ static int bcm7xxx_28nm_set_tunable(struct phy_device *phydev,
if (ret)
return ret;
- /* Disable EEE advertisment since this prevents the PHY
+ /* Disable EEE advertisement since this prevents the PHY
* from successfully linking up, trigger auto-negotiation restart
* to let the MAC decide what to do.
*/
diff --git a/drivers/net/phy/cortina.c b/drivers/net/phy/cortina.c
index 9442db221834..8022cd317f62 100644
--- a/drivers/net/phy/cortina.c
+++ b/drivers/net/phy/cortina.c
@@ -30,14 +30,6 @@ static int cortina_read_reg(struct phy_device *phydev, u16 regnum)
MII_ADDR_C45 | regnum);
}
-static int cortina_config_aneg(struct phy_device *phydev)
-{
- phydev->supported = SUPPORTED_10000baseT_Full;
- phydev->advertising = SUPPORTED_10000baseT_Full;
-
- return 0;
-}
-
static int cortina_read_status(struct phy_device *phydev)
{
int gpio_int_status, ret = 0;
@@ -61,11 +53,6 @@ err:
return ret;
}
-static int cortina_soft_reset(struct phy_device *phydev)
-{
- return 0;
-}
-
static int cortina_probe(struct phy_device *phydev)
{
u32 phy_id = 0;
@@ -101,9 +88,10 @@ static struct phy_driver cortina_driver[] = {
.phy_id = PHY_ID_CS4340,
.phy_id_mask = 0xffffffff,
.name = "Cortina CS4340",
- .config_aneg = cortina_config_aneg,
+ .config_init = gen10g_config_init,
+ .config_aneg = gen10g_config_aneg,
.read_status = cortina_read_status,
- .soft_reset = cortina_soft_reset,
+ .soft_reset = gen10g_no_soft_reset,
.probe = cortina_probe,
},
};
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index ab58224f897f..b3935778b19f 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -75,6 +75,8 @@
#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MAX 0x0
#define DP83867_IO_MUX_CFG_IO_IMPEDANCE_MIN 0x1f
+#define DP83867_IO_MUX_CFG_CLK_O_SEL_MASK (0x1f << 8)
+#define DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT 8
/* CFG4 bits */
#define DP83867_CFG4_PORT_MIRROR_EN BIT(0)
@@ -92,6 +94,7 @@ struct dp83867_private {
int io_impedance;
int port_mirroring;
bool rxctrl_strap_quirk;
+ int clk_output_sel;
};
static int dp83867_ack_interrupt(struct phy_device *phydev)
@@ -160,6 +163,14 @@ static int dp83867_of_init(struct phy_device *phydev)
dp83867->io_impedance = -EINVAL;
/* Optional configuration */
+ ret = of_property_read_u32(of_node, "ti,clk-output-sel",
+ &dp83867->clk_output_sel);
+ if (ret || dp83867->clk_output_sel > DP83867_CLK_O_SEL_REF_CLK)
+ /* Keep the default value if ti,clk-output-sel is not set
+ * or too high
+ */
+ dp83867->clk_output_sel = DP83867_CLK_O_SEL_REF_CLK;
+
if (of_property_read_bool(of_node, "ti,max-output-impedance"))
dp83867->io_impedance = DP83867_IO_MUX_CFG_IO_IMPEDANCE_MAX;
else if (of_property_read_bool(of_node, "ti,min-output-impedance"))
@@ -295,6 +306,14 @@ static int dp83867_config_init(struct phy_device *phydev)
if (dp83867->port_mirroring != DP83867_PORT_MIRROING_KEEP)
dp83867_config_port_mirroring(phydev);
+ /* Clock output selection if muxing property is set */
+ if (dp83867->clk_output_sel != DP83867_CLK_O_SEL_REF_CLK) {
+ val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG);
+ val &= ~DP83867_IO_MUX_CFG_CLK_O_SEL_MASK;
+ val |= (dp83867->clk_output_sel << DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT);
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG, val);
+ }
+
return 0;
}
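
The clock-output selection added above is a plain read-modify-write of a 5-bit field (mask 0x1f << 8): clear the field with its mask, then OR in the new selector shifted into position. Stripped to its essentials (note the read's error code is not checked in the hunk above, which matches the patch as posted):

val = phy_read_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG);
val &= ~DP83867_IO_MUX_CFG_CLK_O_SEL_MASK;		/* zero bits 12:8 */
val |= sel << DP83867_IO_MUX_CFG_CLK_O_SEL_SHIFT;	/* insert new selector */
phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_IO_MUX_CFG, val);
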
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 22d9bc9c33a4..98fd6b7ceeec 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -860,7 +860,7 @@ static int m88e1510_config_init(struct phy_device *phydev)
return err;
/* There appears to be a bug in the 88e1512 when used in
- * SGMII to copper mode, where the AN advertisment register
+ * SGMII to copper mode, where the AN advertisement register
* clears the pause bits each time a negotiation occurs.
* This means we can never be truely sure what was advertised,
* so disable Pause support.
diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
index 8a0bd98fdec7..9564916d2d7b 100644
--- a/drivers/net/phy/marvell10g.c
+++ b/drivers/net/phy/marvell10g.c
@@ -71,15 +71,6 @@ static int mv3310_probe(struct phy_device *phydev)
return 0;
}
-/*
- * Resetting the MV88X3310 causes it to become non-responsive. Avoid
- * setting the reset bit(s).
- */
-static int mv3310_soft_reset(struct phy_device *phydev)
-{
- return 0;
-}
-
static int mv3310_config_init(struct phy_device *phydev)
{
__ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
@@ -317,7 +308,7 @@ static int mv3310_read_status(struct phy_device *phydev)
if (val < 0)
return val;
- /* Read the link partner's 1G advertisment */
+ /* Read the link partner's 1G advertisement */
val = phy_read_mmd(phydev, MDIO_MMD_AN, MV_AN_STAT1000);
if (val < 0)
return val;
@@ -377,7 +368,7 @@ static struct phy_driver mv3310_drivers[] = {
SUPPORTED_10000baseT_Full |
SUPPORTED_Backplane,
.probe = mv3310_probe,
- .soft_reset = mv3310_soft_reset,
+ .soft_reset = gen10g_no_soft_reset,
.config_init = mv3310_config_init,
.config_aneg = mv3310_config_aneg,
.aneg_done = mv3310_aneg_done,
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index a4576859afae..e1225545362d 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -163,11 +163,11 @@ int genphy_c45_read_link(struct phy_device *phydev, u32 mmd_mask)
EXPORT_SYMBOL_GPL(genphy_c45_read_link);
/**
- * genphy_c45_read_lpa - read the link partner advertisment and pause
+ * genphy_c45_read_lpa - read the link partner advertisement and pause
* @phydev: target phy_device struct
*
* Read the Clause 45 defined base (7.19) and 10G (7.33) status registers,
- * filling in the link partner advertisment, pause and asym_pause members
+ * filling in the link partner advertisement, pause and asym_pause members
* in @phydev. This assumes that the auto-negotiation MMD is present, and
* the backplane bit (7.48.0) is clear. Clause 45 PHY drivers are expected
* to fill in the remainder of the link partner advert from vendor registers.
@@ -176,7 +176,7 @@ int genphy_c45_read_lpa(struct phy_device *phydev)
{
int val;
- /* Read the link partner's base page advertisment */
+ /* Read the link partner's base page advertisement */
val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_LPA);
if (val < 0)
return val;
@@ -185,7 +185,7 @@ int genphy_c45_read_lpa(struct phy_device *phydev)
phydev->pause = val & LPA_PAUSE_CAP ? 1 : 0;
phydev->asym_pause = val & LPA_PAUSE_ASYM ? 1 : 0;
- /* Read the link partner's 10G advertisment */
+ /* Read the link partner's 10G advertisement */
val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_STAT);
if (val < 0)
return val;
@@ -268,12 +268,13 @@ EXPORT_SYMBOL_GPL(genphy_c45_read_mdix);
/* The gen10g_* functions are the old Clause 45 stub */
-static int gen10g_config_aneg(struct phy_device *phydev)
+int gen10g_config_aneg(struct phy_device *phydev)
{
return 0;
}
+EXPORT_SYMBOL_GPL(gen10g_config_aneg);
-static int gen10g_read_status(struct phy_device *phydev)
+int gen10g_read_status(struct phy_device *phydev)
{
u32 mmd_mask = phydev->c45_ids.devices_in_package;
int ret;
@@ -291,14 +292,16 @@ static int gen10g_read_status(struct phy_device *phydev)
return 0;
}
+EXPORT_SYMBOL_GPL(gen10g_read_status);
-static int gen10g_soft_reset(struct phy_device *phydev)
+int gen10g_no_soft_reset(struct phy_device *phydev)
{
/* Do nothing for now */
return 0;
}
+EXPORT_SYMBOL_GPL(gen10g_no_soft_reset);
-static int gen10g_config_init(struct phy_device *phydev)
+int gen10g_config_init(struct phy_device *phydev)
{
/* Temporarily just say we support everything */
phydev->supported = SUPPORTED_10000baseT_Full;
@@ -306,22 +309,25 @@ static int gen10g_config_init(struct phy_device *phydev)
return 0;
}
+EXPORT_SYMBOL_GPL(gen10g_config_init);
-static int gen10g_suspend(struct phy_device *phydev)
+int gen10g_suspend(struct phy_device *phydev)
{
return 0;
}
+EXPORT_SYMBOL_GPL(gen10g_suspend);
-static int gen10g_resume(struct phy_device *phydev)
+int gen10g_resume(struct phy_device *phydev)
{
return 0;
}
+EXPORT_SYMBOL_GPL(gen10g_resume);
struct phy_driver genphy_10g_driver = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
.name = "Generic 10G PHY",
- .soft_reset = gen10g_soft_reset,
+ .soft_reset = gen10g_no_soft_reset,
.config_init = gen10g_config_init,
.features = 0,
.config_aneg = gen10g_config_aneg,
diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c
index 4083f00c97a5..c7da4cbb1103 100644
--- a/drivers/net/phy/phy-core.c
+++ b/drivers/net/phy/phy-core.c
@@ -190,10 +190,10 @@ size_t phy_speeds(unsigned int *speeds, size_t size,
}
/**
- * phy_resolve_aneg_linkmode - resolve the advertisments into phy settings
+ * phy_resolve_aneg_linkmode - resolve the advertisements into phy settings
* @phydev: The phy_device struct
*
- * Resolve our and the link partner advertisments into their corresponding
+ * Resolve our and the link partner advertisements into their corresponding
* speed and duplex. If full duplex was negotiated, extract the pause mode
* from the link partner mask.
*/
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 6ac8b29b2dc3..51a011a349fe 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -364,7 +364,7 @@ static void phylink_get_fixed_state(struct phylink *pl, struct phylink_link_stat
}
/* Flow control is resolved according to our and the link partners
- * advertisments using the following drawn from the 802.3 specs:
+ * advertisements using the following drawn from the 802.3 specs:
* Local device Link partner
* Pause AsymDir Pause AsymDir Result
* 1 X 1 X TX+RX
@@ -679,12 +679,11 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy)
mutex_lock(&phy->lock);
mutex_lock(&pl->state_mutex);
- pl->netdev->phydev = phy;
pl->phydev = phy;
linkmode_copy(pl->supported, supported);
linkmode_copy(pl->link_config.advertising, config.advertising);
- /* Restrict the phy advertisment according to the MAC support. */
+ /* Restrict the phy advertisement according to the MAC support. */
ethtool_convert_link_mode_to_legacy_u32(&advertising, config.advertising);
phy->advertising = advertising;
mutex_unlock(&pl->state_mutex);
@@ -817,7 +816,6 @@ void phylink_disconnect_phy(struct phylink *pl)
if (phy) {
mutex_lock(&phy->lock);
mutex_lock(&pl->state_mutex);
- pl->netdev->phydev = NULL;
pl->phydev = NULL;
mutex_unlock(&pl->state_mutex);
mutex_unlock(&phy->lock);
@@ -889,7 +887,7 @@ void phylink_start(struct phylink *pl)
/* Apply the link configuration to the MAC when starting. This allows
* a fixed-link to start with the correct parameters, and also
- * ensures that we set the appropriate advertisment for Serdes links.
+ * ensures that we set the appropriate advertisement for Serdes links.
*/
phylink_resolve_flow(pl, &pl->link_config);
phylink_mac_config(pl, &pl->link_config);
@@ -1076,7 +1074,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
config = pl->link_config;
- /* Mask out unsupported advertisments */
+ /* Mask out unsupported advertisements */
linkmode_and(config.advertising, kset->link_modes.advertising,
pl->supported);
@@ -1121,7 +1119,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl,
if (phylink_validate(pl, pl->supported, &config))
return -EINVAL;
- /* If autonegotiation is enabled, we must have an advertisment */
+ /* If autonegotiation is enabled, we must have an advertisement */
if (config.an_enabled && phylink_is_empty_linkmode(config.advertising))
return -EINVAL;
@@ -1584,25 +1582,14 @@ static int phylink_sfp_module_insert(void *upstream,
bool changed;
u8 port;
- sfp_parse_support(pl->sfp_bus, id, support);
- port = sfp_parse_port(pl->sfp_bus, id, support);
- iface = sfp_parse_interface(pl->sfp_bus, id);
-
ASSERT_RTNL();
- switch (iface) {
- case PHY_INTERFACE_MODE_SGMII:
- case PHY_INTERFACE_MODE_1000BASEX:
- case PHY_INTERFACE_MODE_2500BASEX:
- case PHY_INTERFACE_MODE_10GKR:
- break;
- default:
- return -EINVAL;
- }
+ sfp_parse_support(pl->sfp_bus, id, support);
+ port = sfp_parse_port(pl->sfp_bus, id, support);
memset(&config, 0, sizeof(config));
linkmode_copy(config.advertising, support);
- config.interface = iface;
+ config.interface = PHY_INTERFACE_MODE_NA;
config.speed = SPEED_UNKNOWN;
config.duplex = DUPLEX_UNKNOWN;
config.pause = MLO_PAUSE_AN;
@@ -1611,6 +1598,22 @@ static int phylink_sfp_module_insert(void *upstream,
/* Ignore errors if we're expecting a PHY to attach later */
ret = phylink_validate(pl, support, &config);
if (ret) {
+ netdev_err(pl->netdev, "validation with support %*pb failed: %d\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, support, ret);
+ return ret;
+ }
+
+ iface = sfp_select_interface(pl->sfp_bus, id, config.advertising);
+ if (iface == PHY_INTERFACE_MODE_NA) {
+ netdev_err(pl->netdev,
+ "selection of interface failed, advertisement %*pb\n",
+ __ETHTOOL_LINK_MODE_MASK_NBITS, config.advertising);
+ return -EINVAL;
+ }
+
+ config.interface = iface;
+ ret = phylink_validate(pl, support, &config);
+ if (ret) {
netdev_err(pl->netdev, "validation of %s/%s with support %*pb failed: %d\n",
phylink_an_mode_str(MLO_AN_INBAND),
phy_modes(config.interface),
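
Taken together, the phylink hunks above replace the hard-coded interface whitelist with a two-pass negotiation against the MAC: first validate the parsed support mask with the interface still unknown, then let the SFP core pick a mode from what survived validation, then validate again with the concrete mode. A condensed sketch of the new sequence (error logging elided; pl, bus and id as in the surrounding code):

sfp_parse_support(bus, id, support);
port = sfp_parse_port(bus, id, support);

linkmode_copy(config.advertising, support);
config.interface = PHY_INTERFACE_MODE_NA;	/* pass 1: mode-agnostic */
if (phylink_validate(pl, support, &config))
	return -EINVAL;

iface = sfp_select_interface(bus, id, config.advertising);
if (iface == PHY_INTERFACE_MODE_NA)		/* no usable mode found */
	return -EINVAL;

config.interface = iface;			/* pass 2: concrete mode */
return phylink_validate(pl, support, &config);
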
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index 8961209ee949..3d4ff5d0d2a6 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -106,68 +106,6 @@ int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
EXPORT_SYMBOL_GPL(sfp_parse_port);
/**
- * sfp_parse_interface() - Parse the phy_interface_t
- * @bus: a pointer to the &struct sfp_bus structure for the sfp module
- * @id: a pointer to the module's &struct sfp_eeprom_id
- *
- * Derive the phy_interface_t mode for the information found in the
- * module's identifying EEPROM. There is no standard or defined way
- * to derive this information, so we use some heuristics.
- *
- * If the encoding is 64b66b, then the module must be >= 10G, so
- * return %PHY_INTERFACE_MODE_10GKR.
- *
- * If it's 8b10b, then it's 1G or slower. If it's definitely a fibre
- * module, return %PHY_INTERFACE_MODE_1000BASEX mode, otherwise return
- * %PHY_INTERFACE_MODE_SGMII mode.
- *
- * If the encoding is not known, return %PHY_INTERFACE_MODE_NA.
- */
-phy_interface_t sfp_parse_interface(struct sfp_bus *bus,
- const struct sfp_eeprom_id *id)
-{
- phy_interface_t iface;
-
- /* Setting the serdes link mode is guesswork: there's no field in
- * the EEPROM which indicates what mode should be used.
- *
- * If the module wants 64b66b, then it must be >= 10G.
- *
- * If it's a gigabit-only fiber module, it probably does not have
- * a PHY, so switch to 802.3z negotiation mode. Otherwise, switch
- * to SGMII mode (which is required to support non-gigabit speeds).
- */
- switch (id->base.encoding) {
- case SFP_ENCODING_8472_64B66B:
- iface = PHY_INTERFACE_MODE_10GKR;
- break;
-
- case SFP_ENCODING_8B10B:
- if (!id->base.e1000_base_t &&
- !id->base.e100_base_lx &&
- !id->base.e100_base_fx)
- iface = PHY_INTERFACE_MODE_1000BASEX;
- else
- iface = PHY_INTERFACE_MODE_SGMII;
- break;
-
- default:
- if (id->base.e1000_base_cx) {
- iface = PHY_INTERFACE_MODE_1000BASEX;
- break;
- }
-
- iface = PHY_INTERFACE_MODE_NA;
- dev_err(bus->sfp_dev,
- "SFP module encoding does not support 8b10b nor 64b66b\n");
- break;
- }
-
- return iface;
-}
-EXPORT_SYMBOL_GPL(sfp_parse_interface);
-
-/**
* sfp_parse_support() - Parse the eeprom id for supported link modes
* @bus: a pointer to the &struct sfp_bus structure for the sfp module
* @id: a pointer to the module's &struct sfp_eeprom_id
@@ -180,10 +118,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
unsigned long *support)
{
unsigned int br_min, br_nom, br_max;
-
- phylink_set(support, Autoneg);
- phylink_set(support, Pause);
- phylink_set(support, Asym_Pause);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(modes) = { 0, };
/* Decode the bitrate information to MBd */
br_min = br_nom = br_max = 0;
@@ -201,20 +136,20 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
/* Set ethtool support from the compliance fields. */
if (id->base.e10g_base_sr)
- phylink_set(support, 10000baseSR_Full);
+ phylink_set(modes, 10000baseSR_Full);
if (id->base.e10g_base_lr)
- phylink_set(support, 10000baseLR_Full);
+ phylink_set(modes, 10000baseLR_Full);
if (id->base.e10g_base_lrm)
- phylink_set(support, 10000baseLRM_Full);
+ phylink_set(modes, 10000baseLRM_Full);
if (id->base.e10g_base_er)
- phylink_set(support, 10000baseER_Full);
+ phylink_set(modes, 10000baseER_Full);
if (id->base.e1000_base_sx ||
id->base.e1000_base_lx ||
id->base.e1000_base_cx)
- phylink_set(support, 1000baseX_Full);
+ phylink_set(modes, 1000baseX_Full);
if (id->base.e1000_base_t) {
- phylink_set(support, 1000baseT_Half);
- phylink_set(support, 1000baseT_Full);
+ phylink_set(modes, 1000baseT_Half);
+ phylink_set(modes, 1000baseT_Full);
}
/* 1000Base-PX or 1000Base-BX10 */
@@ -228,20 +163,20 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
if ((id->base.sfp_ct_passive || id->base.sfp_ct_active) && br_nom) {
/* This may look odd, but some manufacturers use 12000MBd */
if (br_min <= 12000 && br_max >= 10300)
- phylink_set(support, 10000baseCR_Full);
+ phylink_set(modes, 10000baseCR_Full);
if (br_min <= 3200 && br_max >= 3100)
- phylink_set(support, 2500baseX_Full);
+ phylink_set(modes, 2500baseX_Full);
if (br_min <= 1300 && br_max >= 1200)
- phylink_set(support, 1000baseX_Full);
+ phylink_set(modes, 1000baseX_Full);
}
if (id->base.sfp_ct_passive) {
if (id->base.passive.sff8431_app_e)
- phylink_set(support, 10000baseCR_Full);
+ phylink_set(modes, 10000baseCR_Full);
}
if (id->base.sfp_ct_active) {
if (id->base.active.sff8431_app_e ||
id->base.active.sff8431_lim) {
- phylink_set(support, 10000baseCR_Full);
+ phylink_set(modes, 10000baseCR_Full);
}
}
@@ -249,18 +184,18 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
case 0x00: /* Unspecified */
break;
case 0x02: /* 100Gbase-SR4 or 25Gbase-SR */
- phylink_set(support, 100000baseSR4_Full);
- phylink_set(support, 25000baseSR_Full);
+ phylink_set(modes, 100000baseSR4_Full);
+ phylink_set(modes, 25000baseSR_Full);
break;
case 0x03: /* 100Gbase-LR4 or 25Gbase-LR */
case 0x04: /* 100Gbase-ER4 or 25Gbase-ER */
- phylink_set(support, 100000baseLR4_ER4_Full);
+ phylink_set(modes, 100000baseLR4_ER4_Full);
break;
case 0x0b: /* 100Gbase-CR4 or 25Gbase-CR CA-L */
case 0x0c: /* 25Gbase-CR CA-S */
case 0x0d: /* 25Gbase-CR CA-N */
- phylink_set(support, 100000baseCR4_Full);
- phylink_set(support, 25000baseCR_Full);
+ phylink_set(modes, 100000baseCR4_Full);
+ phylink_set(modes, 25000baseCR_Full);
break;
default:
dev_warn(bus->sfp_dev,
@@ -274,13 +209,70 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
id->base.fc_speed_200 ||
id->base.fc_speed_400) {
if (id->base.br_nominal >= 31)
- phylink_set(support, 2500baseX_Full);
+ phylink_set(modes, 2500baseX_Full);
if (id->base.br_nominal >= 12)
- phylink_set(support, 1000baseX_Full);
+ phylink_set(modes, 1000baseX_Full);
}
+
+ /* If we haven't discovered any modes that this module supports, try
+ * the encoding and bitrate to determine supported modes. Some BiDi
+ * modules (eg, 1310nm/1550nm) are not 1000BASE-BX compliant due to
+ * the differing wavelengths, so do not set any transceiver bits.
+ */
+ if (bitmap_empty(modes, __ETHTOOL_LINK_MODE_MASK_NBITS)) {
+ /* If the encoding and bit rate allows 1000baseX */
+ if (id->base.encoding == SFP_ENCODING_8B10B && br_nom &&
+ br_min <= 1300 && br_max >= 1200)
+ phylink_set(modes, 1000baseX_Full);
+ }
+
+ bitmap_or(support, support, modes, __ETHTOOL_LINK_MODE_MASK_NBITS);
+
+ phylink_set(support, Autoneg);
+ phylink_set(support, Pause);
+ phylink_set(support, Asym_Pause);
}
EXPORT_SYMBOL_GPL(sfp_parse_support);
+/**
+ * sfp_select_interface() - Select appropriate phy_interface_t mode
+ * @bus: a pointer to the &struct sfp_bus structure for the sfp module
+ * @id: a pointer to the module's &struct sfp_eeprom_id
+ * @link_modes: ethtool link modes mask
+ *
+ * Derive the phy_interface_t mode from the information found in the
+ * module's identifying EEPROM and the link modes mask. There is no
+ * standard or defined way to derive this information, so we decide
+ * based upon the link mode mask.
+ */
+phy_interface_t sfp_select_interface(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id,
+ unsigned long *link_modes)
+{
+ if (phylink_test(link_modes, 10000baseCR_Full) ||
+ phylink_test(link_modes, 10000baseSR_Full) ||
+ phylink_test(link_modes, 10000baseLR_Full) ||
+ phylink_test(link_modes, 10000baseLRM_Full) ||
+ phylink_test(link_modes, 10000baseER_Full))
+ return PHY_INTERFACE_MODE_10GKR;
+
+ if (phylink_test(link_modes, 2500baseX_Full))
+ return PHY_INTERFACE_MODE_2500BASEX;
+
+ if (id->base.e1000_base_t ||
+ id->base.e100_base_lx ||
+ id->base.e100_base_fx)
+ return PHY_INTERFACE_MODE_SGMII;
+
+ if (phylink_test(link_modes, 1000baseX_Full))
+ return PHY_INTERFACE_MODE_1000BASEX;
+
+ dev_warn(bus->sfp_dev, "Unable to ascertain link mode\n");
+
+ return PHY_INTERFACE_MODE_NA;
+}
+EXPORT_SYMBOL_GPL(sfp_select_interface);
+
static LIST_HEAD(sfp_buses);
static DEFINE_MUTEX(sfp_mutex);
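
Taken together, sfp_parse_support() and the new sfp_select_interface() split
module parsing from interface selection. A minimal sketch of how a consumer
might chain the two exported helpers; the sfp_setup_link() wrapper and its
error policy are hypothetical, only the helpers themselves come from this
patch:

/* Hypothetical consumer of the two helpers exported above. */
static int sfp_setup_link(struct sfp_bus *bus, const struct sfp_eeprom_id *id)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(support) = { 0, };
	phy_interface_t iface;

	/* Fill the ethtool link mode mask from the module EEPROM */
	sfp_parse_support(bus, id, support);

	/* Pick a MAC-side interface mode from those link modes */
	iface = sfp_select_interface(bus, id, support);
	if (iface == PHY_INTERFACE_MODE_NA)
		return -EINVAL;	/* hypothetical policy: reject the module */

	return 0;
}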
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 6c7d9289078d..83bf4959b043 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -42,6 +42,7 @@ enum {
SFP_MOD_EMPTY = 0,
SFP_MOD_PROBE,
+ SFP_MOD_HPOWER,
SFP_MOD_PRESENT,
SFP_MOD_ERROR,
@@ -86,6 +87,7 @@ static const enum gpiod_flags gpio_flags[] = {
* access the I2C EEPROM. However, Avago modules require 300ms.
*/
#define T_PROBE_INIT msecs_to_jiffies(300)
+#define T_HPOWER_LEVEL msecs_to_jiffies(300)
#define T_PROBE_RETRY msecs_to_jiffies(100)
/* SFP modules appear to always have their PHY configured for bus address
@@ -110,10 +112,12 @@ struct sfp {
struct sfp_bus *sfp_bus;
struct phy_device *mod_phy;
const struct sff_data *type;
+ u32 max_power_mW;
unsigned int (*get_state)(struct sfp *);
void (*set_state)(struct sfp *, unsigned int);
int (*read)(struct sfp *, bool, u8, void *, size_t);
+ int (*write)(struct sfp *, bool, u8, void *, size_t);
struct gpio_desc *gpio[GPIO_MAX];
@@ -201,10 +205,11 @@ static void sfp_gpio_set_state(struct sfp *sfp, unsigned int state)
}
}
-static int sfp__i2c_read(struct i2c_adapter *i2c, u8 bus_addr, u8 dev_addr,
- void *buf, size_t len)
+static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
+ size_t len)
{
struct i2c_msg msgs[2];
+ u8 bus_addr = a2 ? 0x51 : 0x50;
int ret;
msgs[0].addr = bus_addr;
@@ -216,17 +221,38 @@ static int sfp__i2c_read(struct i2c_adapter *i2c, u8 bus_addr, u8 dev_addr,
msgs[1].len = len;
msgs[1].buf = buf;
- ret = i2c_transfer(i2c, msgs, ARRAY_SIZE(msgs));
+ ret = i2c_transfer(sfp->i2c, msgs, ARRAY_SIZE(msgs));
if (ret < 0)
return ret;
return ret == ARRAY_SIZE(msgs) ? len : 0;
}
-static int sfp_i2c_read(struct sfp *sfp, bool a2, u8 addr, void *buf,
- size_t len)
+static int sfp_i2c_write(struct sfp *sfp, bool a2, u8 dev_addr, void *buf,
+ size_t len)
{
- return sfp__i2c_read(sfp->i2c, a2 ? 0x51 : 0x50, addr, buf, len);
+ struct i2c_msg msgs[1];
+ u8 bus_addr = a2 ? 0x51 : 0x50;
+ int ret;
+
+ msgs[0].addr = bus_addr;
+ msgs[0].flags = 0;
+ msgs[0].len = 1 + len;
+ msgs[0].buf = kmalloc(1 + len, GFP_KERNEL);
+ if (!msgs[0].buf)
+ return -ENOMEM;
+
+ msgs[0].buf[0] = dev_addr;
+ memcpy(&msgs[0].buf[1], buf, len);
+
+ ret = i2c_transfer(sfp->i2c, msgs, ARRAY_SIZE(msgs));
+
+ kfree(msgs[0].buf);
+
+ if (ret < 0)
+ return ret;
+
+ return ret == ARRAY_SIZE(msgs) ? len : 0;
}
static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c)
@@ -239,6 +265,7 @@ static int sfp_i2c_configure(struct sfp *sfp, struct i2c_adapter *i2c)
sfp->i2c = i2c;
sfp->read = sfp_i2c_read;
+ sfp->write = sfp_i2c_write;
i2c_mii = mdio_i2c_alloc(sfp->dev, i2c);
if (IS_ERR(i2c_mii))
@@ -274,6 +301,11 @@ static int sfp_read(struct sfp *sfp, bool a2, u8 addr, void *buf, size_t len)
return sfp->read(sfp, a2, addr, buf, len);
}
+static int sfp_write(struct sfp *sfp, bool a2, u8 addr, void *buf, size_t len)
+{
+ return sfp->write(sfp, a2, addr, buf, len);
+}
+
static unsigned int sfp_check(void *buf, size_t len)
{
u8 *p, check;
@@ -462,21 +494,83 @@ static void sfp_sm_mod_init(struct sfp *sfp)
sfp_sm_probe_phy(sfp);
}
+static int sfp_sm_mod_hpower(struct sfp *sfp)
+{
+ u32 power;
+ u8 val;
+ int err;
+
+ power = 1000;
+ if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_POWER_DECL))
+ power = 1500;
+ if (sfp->id.ext.options & cpu_to_be16(SFP_OPTIONS_HIGH_POWER_LEVEL))
+ power = 2000;
+
+ if (sfp->id.ext.sff8472_compliance == SFP_SFF8472_COMPLIANCE_NONE &&
+ (sfp->id.ext.diagmon & (SFP_DIAGMON_DDM | SFP_DIAGMON_ADDRMODE)) !=
+ SFP_DIAGMON_DDM) {
+ /* The module appears not to implement bus address 0xa2,
+ * or requires an address change sequence, so assume that
+ * the module powers up in the indicated power mode.
+ */
+ if (power > sfp->max_power_mW) {
+ dev_err(sfp->dev,
+ "Host does not support %u.%uW modules\n",
+ power / 1000, (power / 100) % 10);
+ return -EINVAL;
+ }
+ return 0;
+ }
+
+ if (power > sfp->max_power_mW) {
+ dev_warn(sfp->dev,
+ "Host does not support %u.%uW modules, module left in power mode 1\n",
+ power / 1000, (power / 100) % 10);
+ return 0;
+ }
+
+ if (power <= 1000)
+ return 0;
+
+ err = sfp_read(sfp, true, SFP_EXT_STATUS, &val, sizeof(val));
+ if (err != sizeof(val)) {
+ dev_err(sfp->dev, "Failed to read EEPROM: %d\n", err);
+ err = -EAGAIN;
+ goto err;
+ }
+
+ val |= BIT(0);
+
+ err = sfp_write(sfp, true, SFP_EXT_STATUS, &val, sizeof(val));
+ if (err != sizeof(val)) {
+ dev_err(sfp->dev, "Failed to write EEPROM: %d\n", err);
+ err = -EAGAIN;
+ goto err;
+ }
+
+ dev_info(sfp->dev, "Module switched to %u.%uW power level\n",
+ power / 1000, (power / 100) % 10);
+ return T_HPOWER_LEVEL;
+
+err:
+ return err;
+}
+
static int sfp_sm_mod_probe(struct sfp *sfp)
{
/* SFP module inserted - read I2C data */
struct sfp_eeprom_id id;
u8 check;
- int err;
+ int ret;
- err = sfp_read(sfp, false, 0, &id, sizeof(id));
- if (err < 0) {
- dev_err(sfp->dev, "failed to read EEPROM: %d\n", err);
+ ret = sfp_read(sfp, false, 0, &id, sizeof(id));
+ if (ret < 0) {
+ dev_err(sfp->dev, "failed to read EEPROM: %d\n", ret);
return -EAGAIN;
}
- if (err != sizeof(id)) {
- dev_err(sfp->dev, "EEPROM short read: %d\n", err);
+ if (ret != sizeof(id)) {
+ dev_err(sfp->dev, "EEPROM short read: %d\n", ret);
return -EAGAIN;
}
@@ -521,7 +615,11 @@ static int sfp_sm_mod_probe(struct sfp *sfp)
dev_warn(sfp->dev,
"module address swap to access page 0xA2 is not supported.\n");
- return sfp_module_insert(sfp->sfp_bus, &sfp->id);
+ ret = sfp_module_insert(sfp->sfp_bus, &sfp->id);
+ if (ret < 0)
+ return ret;
+
+ return sfp_sm_mod_hpower(sfp);
}
static void sfp_sm_mod_remove(struct sfp *sfp)
@@ -560,17 +658,25 @@ static void sfp_sm_event(struct sfp *sfp, unsigned int event)
if (event == SFP_E_REMOVE) {
sfp_sm_ins_next(sfp, SFP_MOD_EMPTY, 0);
} else if (event == SFP_E_TIMEOUT) {
- int err = sfp_sm_mod_probe(sfp);
+ int val = sfp_sm_mod_probe(sfp);
- if (err == 0)
+ if (val == 0)
sfp_sm_ins_next(sfp, SFP_MOD_PRESENT, 0);
- else if (err == -EAGAIN)
- sfp_sm_set_timer(sfp, T_PROBE_RETRY);
- else
+ else if (val > 0)
+ sfp_sm_ins_next(sfp, SFP_MOD_HPOWER, val);
+ else if (val != -EAGAIN)
sfp_sm_ins_next(sfp, SFP_MOD_ERROR, 0);
+ else
+ sfp_sm_set_timer(sfp, T_PROBE_RETRY);
}
break;
+ case SFP_MOD_HPOWER:
+ if (event == SFP_E_TIMEOUT) {
+ sfp_sm_ins_next(sfp, SFP_MOD_PRESENT, 0);
+ break;
+ }
+ /* fallthrough */
case SFP_MOD_PRESENT:
case SFP_MOD_ERROR:
if (event == SFP_E_REMOVE) {
@@ -889,6 +995,14 @@ static int sfp_probe(struct platform_device *pdev)
if (!(sfp->gpio[GPIO_MODDEF0]))
sfp->get_state = sff_gpio_get_state;
+ device_property_read_u32(&pdev->dev, "maximum-power-milliwatt",
+ &sfp->max_power_mW);
+ if (!sfp->max_power_mW)
+ sfp->max_power_mW = 1000;
+
+ dev_info(sfp->dev, "Host maximum power %u.%uW\n",
+ sfp->max_power_mW / 1000, (sfp->max_power_mW / 100) % 10);
+
sfp->sfp_bus = sfp_register_socket(sfp->dev, sfp, &sfp_module_ops);
if (!sfp->sfp_bus)
return -ENOMEM;
diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c
index fb2cef764e9a..22f3bdd8206c 100644
--- a/drivers/net/phy/teranetics.c
+++ b/drivers/net/phy/teranetics.c
@@ -34,39 +34,17 @@ MODULE_LICENSE("GPL v2");
MDIO_PHYXS_LNSTAT_SYNC3 | \
MDIO_PHYXS_LNSTAT_ALIGN)
-static int teranetics_config_init(struct phy_device *phydev)
-{
- phydev->supported = SUPPORTED_10000baseT_Full;
- phydev->advertising = SUPPORTED_10000baseT_Full;
-
- return 0;
-}
-
-static int teranetics_soft_reset(struct phy_device *phydev)
-{
- return 0;
-}
-
static int teranetics_aneg_done(struct phy_device *phydev)
{
- int reg;
-
/* auto negotiation state can only be checked when using copper
* port, if using fiber port, just lie it's done.
*/
- if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) {
- reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1);
- return (reg < 0) ? reg : (reg & BMSR_ANEGCOMPLETE);
- }
+ if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93))
+ return genphy_c45_aneg_done(phydev);
return 1;
}
-static int teranetics_config_aneg(struct phy_device *phydev)
-{
- return 0;
-}
-
static int teranetics_read_status(struct phy_device *phydev)
{
int reg;
@@ -102,10 +80,10 @@ static struct phy_driver teranetics_driver[] = {
.phy_id = PHY_ID_TN2020,
.phy_id_mask = 0xffffffff,
.name = "Teranetics TN2020",
- .soft_reset = teranetics_soft_reset,
+ .soft_reset = gen10g_no_soft_reset,
.aneg_done = teranetics_aneg_done,
- .config_init = teranetics_config_init,
- .config_aneg = teranetics_config_aneg,
+ .config_init = gen10g_config_init,
+ .config_aneg = gen10g_config_aneg,
.read_status = teranetics_read_status,
.match_phy_device = teranetics_match_phy_device,
},
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index fa2a9bdd1866..7dc2f34e7229 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -971,6 +971,7 @@ static struct pernet_operations ppp_net_ops = {
.exit = ppp_exit_net,
.id = &ppp_net_id,
.size = sizeof(struct ppp_net),
+ .async = true,
};
static int ppp_unit_register(struct ppp *ppp, int unit, bool ifname_is_set)
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 5aa59f41bf8c..c10e6181a2f0 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -714,7 +714,7 @@ err_put:
}
static int pppoe_getname(struct socket *sock, struct sockaddr *uaddr,
- int *usockaddr_len, int peer)
+ int peer)
{
int len = sizeof(struct sockaddr_pppox);
struct sockaddr_pppox sp;
@@ -726,9 +726,7 @@ static int pppoe_getname(struct socket *sock, struct sockaddr *uaddr,
memcpy(uaddr, &sp, len);
- *usockaddr_len = len;
-
- return 0;
+ return len;
}
static int pppoe_ioctl(struct socket *sock, unsigned int cmd,
@@ -1163,6 +1161,7 @@ static struct pernet_operations pppoe_net_ops = {
.exit = pppoe_exit_net,
.id = &pppoe_net_id,
.size = sizeof(struct pppoe_net),
+ .async = true,
};
static int __init pppoe_init(void)
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 6dde9a0cfe76..8249d46a7844 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -483,7 +483,7 @@ static int pptp_connect(struct socket *sock, struct sockaddr *uservaddr,
}
static int pptp_getname(struct socket *sock, struct sockaddr *uaddr,
- int *usockaddr_len, int peer)
+ int peer)
{
int len = sizeof(struct sockaddr_pppox);
struct sockaddr_pppox sp;
@@ -496,9 +496,7 @@ static int pptp_getname(struct socket *sock, struct sockaddr *uaddr,
memcpy(uaddr, &sp, len);
- *usockaddr_len = len;
-
- return 0;
+ return len;
}
static int pptp_release(struct socket *sock)
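
Both conversions above follow the tree-wide getname() change: the
int *usockaddr_len out-parameter is gone, and the function now returns the
sockaddr length on success or a negative errno on failure. A minimal sketch
of the new contract; example_getname() and its AF_INET payload are
hypothetical:

/* Hypothetical getname() under the new contract: return the length. */
static int example_getname(struct socket *sock, struct sockaddr *uaddr,
			   int peer)
{
	struct sockaddr_in sin = {
		.sin_family = AF_INET,
	};

	if (peer)
		return -ENOTCONN;	/* this sketch has no peer state */

	memcpy(uaddr, &sin, sizeof(sin));
	return sizeof(sin);		/* was: *usockaddr_len = len; return 0 */
}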
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index a468439969df..5dd781e65958 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1105,14 +1105,15 @@ static void team_port_disable_netpoll(struct team_port *port)
}
#endif
-static int team_upper_dev_link(struct team *team, struct team_port *port)
+static int team_upper_dev_link(struct team *team, struct team_port *port,
+ struct netlink_ext_ack *extack)
{
struct netdev_lag_upper_info lag_upper_info;
int err;
lag_upper_info.tx_type = team->mode->lag_tx_type;
err = netdev_master_upper_dev_link(port->dev, team->dev, NULL,
- &lag_upper_info, NULL);
+ &lag_upper_info, extack);
if (err)
return err;
port->dev->priv_flags |= IFF_TEAM_PORT;
@@ -1129,7 +1130,8 @@ static void __team_port_change_port_added(struct team_port *port, bool linkup);
static int team_dev_type_check_change(struct net_device *dev,
struct net_device *port_dev);
-static int team_port_add(struct team *team, struct net_device *port_dev)
+static int team_port_add(struct team *team, struct net_device *port_dev,
+ struct netlink_ext_ack *extack)
{
struct net_device *dev = team->dev;
struct team_port *port;
@@ -1137,12 +1139,14 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
int err;
if (port_dev->flags & IFF_LOOPBACK) {
+ NL_SET_ERR_MSG(extack, "Loopback device can't be added as a team port");
netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
portname);
return -EINVAL;
}
if (team_port_exists(port_dev)) {
+ NL_SET_ERR_MSG(extack, "Device is already a port of a team device");
netdev_err(dev, "Device %s is already a port "
"of a team device\n", portname);
return -EBUSY;
@@ -1150,6 +1154,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
vlan_uses_dev(dev)) {
+ NL_SET_ERR_MSG(extack, "Device is VLAN challenged and team device has VLAN set up");
netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
portname);
return -EPERM;
@@ -1160,6 +1165,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
return err;
if (port_dev->flags & IFF_UP) {
+ NL_SET_ERR_MSG(extack, "Device is up. Set it down before adding it as a team port");
netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
portname);
return -EBUSY;
@@ -1227,7 +1233,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
goto err_handler_register;
}
- err = team_upper_dev_link(team, port);
+ err = team_upper_dev_link(team, port, extack);
if (err) {
netdev_err(dev, "Device %s failed to set upper link\n",
portname);
@@ -1921,7 +1927,7 @@ static int team_add_slave(struct net_device *dev, struct net_device *port_dev,
int err;
mutex_lock(&team->lock);
- err = team_port_add(team, port_dev);
+ err = team_port_add(team, port_dev, extack);
mutex_unlock(&team->lock);
if (!err)
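
The extack plumbing above follows the standard pattern: pass the
netlink_ext_ack down from the ndo, attach a human-readable reason with
NL_SET_ERR_MSG at each failure site, and return the errno as before. A
minimal sketch; example_check() is hypothetical:

/* Hypothetical validation hook showing the extack pattern. */
static int example_check(struct net_device *port_dev,
			 struct netlink_ext_ack *extack)
{
	if (port_dev->flags & IFF_LOOPBACK) {
		NL_SET_ERR_MSG(extack, "Loopback devices are not supported");
		return -EINVAL;
	}
	return 0;
}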
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 7433bb2e4451..475088f947bb 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -78,6 +78,7 @@
#include <linux/mutex.h>
#include <linux/uaccess.h>
+#include <linux/proc_fs.h>
/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */
@@ -2286,11 +2287,67 @@ static int tun_validate(struct nlattr *tb[], struct nlattr *data[],
return -EINVAL;
}
+static size_t tun_get_size(const struct net_device *dev)
+{
+ BUILD_BUG_ON(sizeof(u32) != sizeof(uid_t));
+ BUILD_BUG_ON(sizeof(u32) != sizeof(gid_t));
+
+ return nla_total_size(sizeof(uid_t)) + /* OWNER */
+ nla_total_size(sizeof(gid_t)) + /* GROUP */
+ nla_total_size(sizeof(u8)) + /* TYPE */
+ nla_total_size(sizeof(u8)) + /* PI */
+ nla_total_size(sizeof(u8)) + /* VNET_HDR */
+ nla_total_size(sizeof(u8)) + /* PERSIST */
+ nla_total_size(sizeof(u8)) + /* MULTI_QUEUE */
+ nla_total_size(sizeof(u32)) + /* NUM_QUEUES */
+ nla_total_size(sizeof(u32)) + /* NUM_DISABLED_QUEUES */
+ 0;
+}
+
+static int tun_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+
+ if (nla_put_u8(skb, IFLA_TUN_TYPE, tun->flags & TUN_TYPE_MASK))
+ goto nla_put_failure;
+ if (uid_valid(tun->owner) &&
+ nla_put_u32(skb, IFLA_TUN_OWNER,
+ from_kuid_munged(current_user_ns(), tun->owner)))
+ goto nla_put_failure;
+ if (gid_valid(tun->group) &&
+ nla_put_u32(skb, IFLA_TUN_GROUP,
+ from_kgid_munged(current_user_ns(), tun->group)))
+ goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_TUN_PI, !(tun->flags & IFF_NO_PI)))
+ goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_TUN_VNET_HDR, !!(tun->flags & IFF_VNET_HDR)))
+ goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_TUN_PERSIST, !!(tun->flags & IFF_PERSIST)))
+ goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_TUN_MULTI_QUEUE,
+ !!(tun->flags & IFF_MULTI_QUEUE)))
+ goto nla_put_failure;
+ if (tun->flags & IFF_MULTI_QUEUE) {
+ if (nla_put_u32(skb, IFLA_TUN_NUM_QUEUES, tun->numqueues))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, IFLA_TUN_NUM_DISABLED_QUEUES,
+ tun->numdisabled))
+ goto nla_put_failure;
+ }
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
static struct rtnl_link_ops tun_link_ops __read_mostly = {
.kind = DRV_NAME,
.priv_size = sizeof(struct tun_struct),
.setup = tun_setup,
.validate = tun_validate,
+ .get_size = tun_get_size,
+ .fill_info = tun_fill_info,
};
static void tun_sock_write_space(struct sock *sk)
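
The new .get_size/.fill_info pair must stay in lock-step: every attribute
fill_info can emit has to be budgeted in get_size, otherwise nla_put can run
out of room in a correctly sized message. A reduced sketch of that invariant
with a single hypothetical u8 attribute, IFLA_EXAMPLE_FLAG:

#define IFLA_EXAMPLE_FLAG 1	/* hypothetical attribute id */

static size_t example_get_size(const struct net_device *dev)
{
	return nla_total_size(sizeof(u8));	/* IFLA_EXAMPLE_FLAG */
}

static int example_fill_info(struct sk_buff *skb,
			     const struct net_device *dev)
{
	/* Each nla_put here needs a matching nla_total_size() above */
	if (nla_put_u8(skb, IFLA_EXAMPLE_FLAG, 1))
		return -EMSGSIZE;
	return 0;
}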
@@ -2782,6 +2839,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
struct tun_struct *tun;
void __user* argp = (void __user*)arg;
struct ifreq ifr;
+ struct net *net;
kuid_t owner;
kgid_t group;
int sndbuf;
@@ -2790,7 +2848,8 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
int le;
int ret;
- if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == SOCK_IOC_TYPE) {
+ if (cmd == TUNSETIFF || cmd == TUNSETQUEUE ||
+ (_IOC_TYPE(cmd) == SOCK_IOC_TYPE && cmd != SIOCGSKNS)) {
if (copy_from_user(&ifr, argp, ifreq_len))
return -EFAULT;
} else {
@@ -2810,6 +2869,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
rtnl_lock();
tun = tun_get(tfile);
+ net = sock_net(&tfile->sk);
if (cmd == TUNSETIFF) {
ret = -EEXIST;
if (tun)
@@ -2817,7 +2877,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
ifr.ifr_name[IFNAMSIZ-1] = '\0';
- ret = tun_set_iff(sock_net(&tfile->sk), file, &ifr);
+ ret = tun_set_iff(net, file, &ifr);
if (ret)
goto unlock;
@@ -2839,6 +2899,14 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
tfile->ifindex = ifindex;
goto unlock;
}
+ if (cmd == SIOCGSKNS) {
+ ret = -EPERM;
+ if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+ goto unlock;
+
+ ret = open_related_ns(&net->ns, get_net_ns);
+ goto unlock;
+ }
ret = -EBADFD;
if (!tun)
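
With SIOCGSKNS now accepted on a tun fd, userspace can obtain a file
descriptor for the network namespace the device lives in, mirroring what
ordinary sockets already support. A hedged userspace sketch; the helper name
is made up, the ioctl and header are real:

/* Userspace: get a netns fd from an already-open /dev/net/tun fd.
 * Needs CAP_NET_ADMIN in the namespace's user ns; returns -1 on error.
 */
#include <sys/ioctl.h>
#include <linux/sockios.h>

int tun_get_netns_fd(int tunfd)
{
	return ioctl(tunfd, SIOCGSKNS);
}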
diff --git a/drivers/net/usb/kalmia.c b/drivers/net/usb/kalmia.c
index ce0b0b4e3a57..1ec523b0e932 100644
--- a/drivers/net/usb/kalmia.c
+++ b/drivers/net/usb/kalmia.c
@@ -114,14 +114,14 @@ kalmia_init_and_get_ethernet_addr(struct usbnet *dev, u8 *ethernet_addr)
return -ENOMEM;
memcpy(usb_buf, init_msg_1, 12);
- status = kalmia_send_init_packet(dev, usb_buf, sizeof(init_msg_1)
- / sizeof(init_msg_1[0]), usb_buf, 24);
+ status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_1),
+ usb_buf, 24);
if (status != 0)
return status;
memcpy(usb_buf, init_msg_2, 12);
- status = kalmia_send_init_packet(dev, usb_buf, sizeof(init_msg_2)
- / sizeof(init_msg_2[0]), usb_buf, 28);
+ status = kalmia_send_init_packet(dev, usb_buf, ARRAY_SIZE(init_msg_2),
+ usb_buf, 28);
if (status != 0)
return status;
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 139c61c8244a..c6be49d3a9eb 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -736,7 +736,6 @@ static int vrf_rtable_create(struct net_device *dev)
return -ENOMEM;
rth->dst.output = vrf_output;
- rth->rt_table_id = vrf->tb_id;
rcu_assign_pointer(vrf->rth, rth);
@@ -942,6 +941,7 @@ static struct rt6_info *vrf_ip6_route_lookup(struct net *net,
const struct net_device *dev,
struct flowi6 *fl6,
int ifindex,
+ const struct sk_buff *skb,
int flags)
{
struct net_vrf *vrf = netdev_priv(dev);
@@ -960,7 +960,7 @@ static struct rt6_info *vrf_ip6_route_lookup(struct net *net,
if (!table)
return NULL;
- return ip6_pol_route(net, table, ifindex, fl6, flags);
+ return ip6_pol_route(net, table, ifindex, fl6, skb, flags);
}
static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
@@ -978,7 +978,7 @@ static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
struct net *net = dev_net(vrf_dev);
struct rt6_info *rt6;
- rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex,
+ rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex, skb,
RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE);
if (unlikely(!rt6))
return;
@@ -1111,7 +1111,7 @@ static struct dst_entry *vrf_link_scope_lookup(const struct net_device *dev,
if (!ipv6_addr_any(&fl6->saddr))
flags |= RT6_LOOKUP_F_HAS_SADDR;
- rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, flags);
+ rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, NULL, flags);
if (rt)
dst = &rt->dst;
@@ -1146,6 +1146,7 @@ static inline size_t vrf_fib_rule_nl_size(void)
sz = NLMSG_ALIGN(sizeof(struct fib_rule_hdr));
sz += nla_total_size(sizeof(u8)); /* FRA_L3MDEV */
sz += nla_total_size(sizeof(u32)); /* FRA_PRIORITY */
+ sz += nla_total_size(sizeof(u8)); /* FRA_PROTOCOL */
return sz;
}
@@ -1176,6 +1177,9 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
frh->family = family;
frh->action = FR_ACT_TO_TBL;
+ if (nla_put_u8(skb, FRA_PROTOCOL, RTPROT_KERNEL))
+ goto nla_put_failure;
+
if (nla_put_u8(skb, FRA_L3MDEV, 1))
goto nla_put_failure;
@@ -1431,6 +1435,7 @@ static struct pernet_operations vrf_net_ops __net_initdata = {
.init = vrf_netns_init,
.id = &vrf_net_id,
.size = sizeof(bool),
+ .async = true,
};
static int __init vrf_init_module(void)
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index fab7a4db249e..aa5f034d6ad1 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -3752,6 +3752,7 @@ static struct pernet_operations vxlan_net_ops = {
.exit_batch = vxlan_exit_batch_net,
.id = &vxlan_net_id,
.size = sizeof(struct vxlan_net),
+ .async = true,
};
static int __init vxlan_init_module(void)
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index 768f63f38341..b799a5384abb 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -1599,7 +1599,8 @@ static void wil_probe_client_handle(struct wil6210_priv *wil,
*/
bool alive = (sta->status == wil_sta_connected);
- cfg80211_probe_status(ndev, sta->addr, req->cookie, alive, GFP_KERNEL);
+ cfg80211_probe_status(ndev, sta->addr, req->cookie, alive,
+ 0, false, GFP_KERNEL);
}
static struct list_head *next_probe_client(struct wil6210_priv *wil)
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 6e0af815f25e..7b6c3640a94f 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -253,7 +253,7 @@ static inline void hwsim_clear_chanctx_magic(struct ieee80211_chanctx_conf *c)
static unsigned int hwsim_net_id;
-static int hwsim_netgroup;
+static struct ida hwsim_netgroup_ida = IDA_INIT;
struct hwsim_net {
int netgroup;
@@ -267,11 +267,13 @@ static inline int hwsim_net_get_netgroup(struct net *net)
return hwsim_net->netgroup;
}
-static inline void hwsim_net_set_netgroup(struct net *net)
+static inline int hwsim_net_set_netgroup(struct net *net)
{
struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id);
- hwsim_net->netgroup = hwsim_netgroup++;
+ hwsim_net->netgroup = ida_simple_get(&hwsim_netgroup_ida,
+ 0, 0, GFP_KERNEL);
+ return hwsim_net->netgroup >= 0 ? 0 : -ENOMEM;
}
static inline u32 hwsim_net_get_wmediumd(struct net *net)
@@ -493,6 +495,7 @@ static LIST_HEAD(hwsim_radios);
static struct workqueue_struct *hwsim_wq;
static struct rhashtable hwsim_radios_rht;
static int hwsim_radio_idx;
+static int hwsim_radios_generation = 1;
static struct platform_driver mac80211_hwsim_driver = {
.driver = {
@@ -637,6 +640,7 @@ static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
[HWSIM_ATTR_RADIO_NAME] = { .type = NLA_STRING },
[HWSIM_ATTR_NO_VIF] = { .type = NLA_FLAG },
[HWSIM_ATTR_FREQ] = { .type = NLA_U32 },
+ [HWSIM_ATTR_PERM_ADDR] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
};
static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
@@ -2408,6 +2412,7 @@ struct hwsim_new_radio_params {
bool destroy_on_close;
const char *hwname;
bool no_vif;
+ const u8 *perm_addr;
};
static void hwsim_mcast_config_msg(struct sk_buff *mcast_skb,
@@ -2572,15 +2577,25 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
skb_queue_head_init(&data->pending);
SET_IEEE80211_DEV(hw, data->dev);
- eth_zero_addr(addr);
- addr[0] = 0x02;
- addr[3] = idx >> 8;
- addr[4] = idx;
- memcpy(data->addresses[0].addr, addr, ETH_ALEN);
- memcpy(data->addresses[1].addr, addr, ETH_ALEN);
- data->addresses[1].addr[0] |= 0x40;
- hw->wiphy->n_addresses = 2;
- hw->wiphy->addresses = data->addresses;
+ if (!param->perm_addr) {
+ eth_zero_addr(addr);
+ addr[0] = 0x02;
+ addr[3] = idx >> 8;
+ addr[4] = idx;
+ memcpy(data->addresses[0].addr, addr, ETH_ALEN);
+		/* second address: the first address with the local bit set */
+		memcpy(data->addresses[1].addr, addr, ETH_ALEN);
+		data->addresses[1].addr[0] |= 0x40;
+ hw->wiphy->n_addresses = 2;
+ hw->wiphy->addresses = data->addresses;
+ /* possible address clash is checked at hash table insertion */
+ } else {
+ memcpy(data->addresses[0].addr, param->perm_addr, ETH_ALEN);
+ /* compatibility with automatically generated mac addr */
+ memcpy(data->addresses[1].addr, param->perm_addr, ETH_ALEN);
+ hw->wiphy->n_addresses = 2;
+ hw->wiphy->addresses = data->addresses;
+ }
data->channels = param->channels;
data->use_chanctx = param->use_chanctx;
@@ -2785,13 +2800,17 @@ static int mac80211_hwsim_new_radio(struct genl_info *info,
err = rhashtable_insert_fast(&hwsim_radios_rht, &data->rht,
hwsim_rht_params);
if (err < 0) {
- pr_debug("mac80211_hwsim: radio index %d already present\n",
- idx);
+ if (info) {
+ GENL_SET_ERR_MSG(info, "perm addr already present");
+ NL_SET_BAD_ATTR(info->extack,
+ info->attrs[HWSIM_ATTR_PERM_ADDR]);
+ }
spin_unlock_bh(&hwsim_radio_lock);
goto failed_final_insert;
}
list_add_tail(&data->list, &hwsim_radios);
+ hwsim_radios_generation++;
spin_unlock_bh(&hwsim_radio_lock);
if (idx > 0)
@@ -3210,6 +3229,19 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
param.regd = hwsim_world_regdom_custom[idx];
}
+ if (info->attrs[HWSIM_ATTR_PERM_ADDR]) {
+ if (!is_valid_ether_addr(
+ nla_data(info->attrs[HWSIM_ATTR_PERM_ADDR]))) {
+			GENL_SET_ERR_MSG(info, "MAC is not a valid source address");
+ NL_SET_BAD_ATTR(info->extack,
+ info->attrs[HWSIM_ATTR_PERM_ADDR]);
+ return -EINVAL;
+ }
+
+ param.perm_addr = nla_data(info->attrs[HWSIM_ATTR_PERM_ADDR]);
+ }
+
ret = mac80211_hwsim_new_radio(info, &param);
kfree(hwname);
return ret;
@@ -3249,6 +3281,7 @@ static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
list_del(&data->list);
rhashtable_remove_fast(&hwsim_radios_rht, &data->rht,
hwsim_rht_params);
+ hwsim_radios_generation++;
spin_unlock_bh(&hwsim_radio_lock);
mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy),
info);
@@ -3305,17 +3338,19 @@ out_err:
static int hwsim_dump_radio_nl(struct sk_buff *skb,
struct netlink_callback *cb)
{
- int idx = cb->args[0];
+ int last_idx = cb->args[0];
struct mac80211_hwsim_data *data = NULL;
- int res;
+ int res = 0;
+ void *hdr;
spin_lock_bh(&hwsim_radio_lock);
+ cb->seq = hwsim_radios_generation;
- if (idx == hwsim_radio_idx)
+	if (last_idx >= hwsim_radio_idx - 1)
goto done;
list_for_each_entry(data, &hwsim_radios, list) {
- if (data->idx < idx)
+ if (data->idx <= last_idx)
continue;
if (!net_eq(wiphy_net(data->hw->wiphy), sock_net(skb->sk)))
@@ -3328,14 +3363,25 @@ static int hwsim_dump_radio_nl(struct sk_buff *skb,
if (res < 0)
break;
- idx = data->idx + 1;
+ last_idx = data->idx;
}
- cb->args[0] = idx;
+ cb->args[0] = last_idx;
+
+ /* list changed, but no new element sent, set interrupted flag */
+ if (skb->len == 0 && cb->prev_seq && cb->seq != cb->prev_seq) {
+ hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, &hwsim_genl_family,
+ NLM_F_MULTI, HWSIM_CMD_GET_RADIO);
+		if (hdr) {
+			genl_dump_check_consistent(cb, hdr);
+			genlmsg_end(skb, hdr);
+		} else {
+			res = -EMSGSIZE;
+		}
+ }
done:
spin_unlock_bh(&hwsim_radio_lock);
- return skb->len;
+ return res ?: skb->len;
}
/* Generic Netlink operations array */
@@ -3393,6 +3439,7 @@ static void destroy_radio(struct work_struct *work)
struct mac80211_hwsim_data *data =
container_of(work, struct mac80211_hwsim_data, destroy_work);
+ hwsim_radios_generation++;
mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), NULL);
}
@@ -3462,9 +3509,7 @@ failure:
static __net_init int hwsim_init_net(struct net *net)
{
- hwsim_net_set_netgroup(net);
-
- return 0;
+ return hwsim_net_set_netgroup(net);
}
static void __net_exit hwsim_exit_net(struct net *net)
@@ -3487,6 +3532,8 @@ static void __net_exit hwsim_exit_net(struct net *net)
queue_work(hwsim_wq, &data->destroy_work);
}
spin_unlock_bh(&hwsim_radio_lock);
+
+ ida_simple_remove(&hwsim_netgroup_ida, hwsim_net_get_netgroup(net));
}
static struct pernet_operations hwsim_net_ops = {
@@ -3494,6 +3541,7 @@ static struct pernet_operations hwsim_net_ops = {
.exit = hwsim_exit_net,
.id = &hwsim_net_id,
.size = sizeof(struct hwsim_net),
+ .async = true,
};
static void hwsim_exit_netlink(void)
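
The dump above uses netlink's standard change-detection: a generation counter
is snapshotted into cb->seq under the lock, every message is stamped with
genl_dump_check_consistent(), and an empty-but-interrupted dump still emits
one NLM_F_MULTI message so userspace sees NLM_F_DUMP_INTR. A condensed sketch
of the pattern; my_lock, my_generation, my_genl_family and MY_CMD_GET are
assumed stand-ins:

static DEFINE_SPINLOCK(my_lock);		/* assumed */
static int my_generation;			/* bumped on list changes */
static struct genl_family my_genl_family;	/* assumed */
#define MY_CMD_GET 0				/* hypothetical command */

static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	void *hdr;
	int res = 0;

	spin_lock_bh(&my_lock);
	cb->seq = my_generation;	/* snapshot for change detection */

	/* ... emit one message per list element, as in the hunk above ... */

	/* Nothing sent but the list changed: emit an empty message so
	 * userspace still sees NLM_F_DUMP_INTR.
	 */
	if (skb->len == 0 && cb->prev_seq && cb->seq != cb->prev_seq) {
		hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, &my_genl_family,
				  NLM_F_MULTI, MY_CMD_GET);
		if (hdr) {
			genl_dump_check_consistent(cb, hdr);
			genlmsg_end(skb, hdr);
		} else {
			res = -EMSGSIZE;
		}
	}

	spin_unlock_bh(&my_lock);
	return res ?: skb->len;
}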
diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h
index a96a79c1eff5..0fe3199f8c72 100644
--- a/drivers/net/wireless/mac80211_hwsim.h
+++ b/drivers/net/wireless/mac80211_hwsim.h
@@ -68,7 +68,12 @@ enum hwsim_tx_control_flags {
* %HWSIM_ATTR_SIGNAL, %HWSIM_ATTR_COOKIE
* @HWSIM_CMD_NEW_RADIO: create a new radio with the given parameters,
* returns the radio ID (>= 0) or negative on errors, if successful
- * then multicast the result
+ * then multicast the result, uses optional parameters:
+ * %HWSIM_ATTR_REG_STRICT_REG, %HWSIM_ATTR_SUPPORT_P2P_DEVICE,
+ * %HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE, %HWSIM_ATTR_CHANNELS,
+ * %HWSIM_ATTR_NO_VIF, %HWSIM_ATTR_RADIO_NAME, %HWSIM_ATTR_USE_CHANCTX,
+ * %HWSIM_ATTR_REG_HINT_ALPHA2, %HWSIM_ATTR_REG_CUSTOM_REG,
+ * %HWSIM_ATTR_PERM_ADDR
* @HWSIM_CMD_DEL_RADIO: destroy a radio, reply is multicasted
* @HWSIM_CMD_GET_RADIO: fetch information about existing radios, uses:
* %HWSIM_ATTR_RADIO_ID
@@ -126,6 +131,7 @@ enum {
* @HWSIM_ATTR_FREQ: Frequency at which packet is transmitted or received.
* @HWSIM_ATTR_TX_INFO_FLAGS: additional flags for corresponding
* rates of %HWSIM_ATTR_TX_INFO
+ * @HWSIM_ATTR_PERM_ADDR: permanent MAC address of the new radio
* @__HWSIM_ATTR_MAX: enum limit
*/
@@ -153,6 +159,7 @@ enum {
HWSIM_ATTR_FREQ,
HWSIM_ATTR_PAD,
HWSIM_ATTR_TX_INFO_FLAGS,
+ HWSIM_ATTR_PERM_ADDR,
__HWSIM_ATTR_MAX,
};
#define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
index b1cf7c6f407a..ef5887037b22 100644
--- a/drivers/net/xen-netback/rx.c
+++ b/drivers/net/xen-netback/rx.c
@@ -419,7 +419,7 @@ static void xenvif_rx_extra_slot(struct xenvif_queue *queue,
BUG();
}
-void xenvif_rx_skb(struct xenvif_queue *queue)
+static void xenvif_rx_skb(struct xenvif_queue *queue)
{
struct xenvif_pkt_state pkt;
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 6198559abbd8..0ad00dbf912d 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -732,7 +732,7 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
struct sockaddr_in6 addr;
- int rc, len;
+ int rc;
switch(param) {
case ISCSI_PARAM_CONN_PORT:
@@ -745,12 +745,12 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
}
if (param == ISCSI_PARAM_LOCAL_PORT)
rc = kernel_getsockname(tcp_sw_conn->sock,
- (struct sockaddr *)&addr, &len);
+ (struct sockaddr *)&addr);
else
rc = kernel_getpeername(tcp_sw_conn->sock,
- (struct sockaddr *)&addr, &len);
+ (struct sockaddr *)&addr);
spin_unlock_bh(&conn->session->frwd_lock);
- if (rc)
+ if (rc < 0)
return rc;
return iscsi_conn_get_addr_param((struct sockaddr_storage *)
@@ -771,7 +771,7 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
struct iscsi_tcp_conn *tcp_conn;
struct iscsi_sw_tcp_conn *tcp_sw_conn;
struct sockaddr_in6 addr;
- int rc, len;
+ int rc;
switch (param) {
case ISCSI_HOST_PARAM_IPADDRESS:
@@ -793,9 +793,9 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
}
rc = kernel_getsockname(tcp_sw_conn->sock,
- (struct sockaddr *)&addr, &len);
+ (struct sockaddr *)&addr);
spin_unlock_bh(&session->frwd_lock);
- if (rc)
+ if (rc < 0)
return rc;
return iscsi_conn_get_addr_param((struct sockaddr_storage *)
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
index 877611d5c42b..321982277697 100644
--- a/drivers/soc/qcom/qmi_interface.c
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -586,7 +586,6 @@ static struct socket *qmi_sock_create(struct qmi_handle *qmi,
struct sockaddr_qrtr *sq)
{
struct socket *sock;
- int sl = sizeof(*sq);
int ret;
ret = sock_create_kern(&init_net, AF_QIPCRTR, SOCK_DGRAM,
@@ -594,7 +593,7 @@ static struct socket *qmi_sock_create(struct qmi_handle *qmi,
if (ret < 0)
return ERR_PTR(ret);
- ret = kernel_getsockname(sock, (struct sockaddr *)sq, &sl);
+ ret = kernel_getsockname(sock, (struct sockaddr *)sq);
if (ret < 0) {
sock_release(sock);
return ERR_PTR(ret);
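
kernel_getsockname()/kernel_getpeername() now follow the same convention as
getname(): no length pointer, and the return value is the sockaddr length or
a negative errno, which is why the callers in these hunks test rc < 0 instead
of rc != 0. A minimal caller sketch; example_local_port() is hypothetical:

/* Hypothetical caller under the new kernel_getsockname() contract. */
static int example_local_port(struct socket *sock, u16 *port)
{
	struct sockaddr_in sin;
	int rc;

	rc = kernel_getsockname(sock, (struct sockaddr *)&sin);
	if (rc < 0)
		return rc;		/* negative errno on failure */

	*port = ntohs(sin.sin_port);	/* rc is sizeof(sin) on success */
	return 0;
}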
diff --git a/drivers/staging/ipx/af_ipx.c b/drivers/staging/ipx/af_ipx.c
index d21a9d128d3e..5703dd176787 100644
--- a/drivers/staging/ipx/af_ipx.c
+++ b/drivers/staging/ipx/af_ipx.c
@@ -1577,7 +1577,7 @@ out:
static int ipx_getname(struct socket *sock, struct sockaddr *uaddr,
- int *uaddr_len, int peer)
+ int peer)
{
struct ipx_address *addr;
struct sockaddr_ipx sipx;
@@ -1585,8 +1585,6 @@ static int ipx_getname(struct socket *sock, struct sockaddr *uaddr,
struct ipx_sock *ipxs = ipx_sk(sk);
int rc;
- *uaddr_len = sizeof(struct sockaddr_ipx);
-
lock_sock(sk);
if (peer) {
rc = -ENOTCONN;
@@ -1620,7 +1618,7 @@ static int ipx_getname(struct socket *sock, struct sockaddr *uaddr,
sipx.sipx_zero = 0;
memcpy(uaddr, &sipx, sizeof(sipx));
- rc = 0;
+ rc = sizeof(struct sockaddr_ipx);
out:
release_sock(sk);
return rc;
diff --git a/drivers/staging/irda/net/af_irda.c b/drivers/staging/irda/net/af_irda.c
index 2f1e9ab3d6d0..c13553a9ee11 100644
--- a/drivers/staging/irda/net/af_irda.c
+++ b/drivers/staging/irda/net/af_irda.c
@@ -697,7 +697,7 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
*
*/
static int irda_getname(struct socket *sock, struct sockaddr *uaddr,
- int *uaddr_len, int peer)
+ int peer)
{
struct sockaddr_irda saddr;
struct sock *sk = sock->sk;
@@ -720,11 +720,9 @@ static int irda_getname(struct socket *sock, struct sockaddr *uaddr,
pr_debug("%s(), tsap_sel = %#x\n", __func__, saddr.sir_lsap_sel);
pr_debug("%s(), addr = %08x\n", __func__, saddr.sir_addr);
- /* uaddr_len come to us uninitialised */
- *uaddr_len = sizeof (struct sockaddr_irda);
- memcpy(uaddr, &saddr, *uaddr_len);
+ memcpy(uaddr, &saddr, sizeof (struct sockaddr_irda));
- return 0;
+ return sizeof (struct sockaddr_irda);
}
/*
diff --git a/drivers/staging/lustre/lnet/lnet/lib-socket.c b/drivers/staging/lustre/lnet/lnet/lib-socket.c
index ce93806eefca..1bee667802b0 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-socket.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-socket.c
@@ -448,14 +448,13 @@ int
lnet_sock_getaddr(struct socket *sock, bool remote, __u32 *ip, int *port)
{
struct sockaddr_in sin;
- int len = sizeof(sin);
int rc;
if (remote)
- rc = kernel_getpeername(sock, (struct sockaddr *)&sin, &len);
+ rc = kernel_getpeername(sock, (struct sockaddr *)&sin);
else
- rc = kernel_getsockname(sock, (struct sockaddr *)&sin, &len);
- if (rc) {
+ rc = kernel_getsockname(sock, (struct sockaddr *)&sin);
+ if (rc < 0) {
CERROR("Error %d getting sock %s IP/port\n",
rc, remote ? "peer" : "local");
return rc;
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 64c5a57b92e4..99501785cdc1 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1020,7 +1020,7 @@ int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
struct socket *new_sock, *sock = np->np_socket;
struct sockaddr_in sock_in;
struct sockaddr_in6 sock_in6;
- int rc, err;
+ int rc;
rc = kernel_accept(sock, &new_sock, 0);
if (rc < 0)
@@ -1033,8 +1033,8 @@ int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
memset(&sock_in6, 0, sizeof(struct sockaddr_in6));
rc = conn->sock->ops->getname(conn->sock,
- (struct sockaddr *)&sock_in6, &err, 1);
- if (!rc) {
+ (struct sockaddr *)&sock_in6, 1);
+ if (rc >= 0) {
if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr)) {
memcpy(&conn->login_sockaddr, &sock_in6, sizeof(sock_in6));
} else {
@@ -1047,8 +1047,8 @@ int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
}
rc = conn->sock->ops->getname(conn->sock,
- (struct sockaddr *)&sock_in6, &err, 0);
- if (!rc) {
+ (struct sockaddr *)&sock_in6, 0);
+ if (rc >= 0) {
if (!ipv6_addr_v4mapped(&sock_in6.sin6_addr)) {
memcpy(&conn->local_sockaddr, &sock_in6, sizeof(sock_in6));
} else {
@@ -1063,13 +1063,13 @@ int iscsit_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
memset(&sock_in, 0, sizeof(struct sockaddr_in));
rc = conn->sock->ops->getname(conn->sock,
- (struct sockaddr *)&sock_in, &err, 1);
- if (!rc)
+ (struct sockaddr *)&sock_in, 1);
+ if (rc >= 0)
memcpy(&conn->login_sockaddr, &sock_in, sizeof(sock_in));
rc = conn->sock->ops->getname(conn->sock,
- (struct sockaddr *)&sock_in, &err, 0);
- if (!rc)
+ (struct sockaddr *)&sock_in, 0);
+ if (rc >= 0)
memcpy(&conn->local_sockaddr, &sock_in, sizeof(sock_in));
}
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 610cba276d47..b5fb56b822fd 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1038,7 +1038,7 @@ static struct socket *get_raw_socket(int fd)
struct sockaddr_ll sa;
char buf[MAX_ADDR_LEN];
} uaddr;
- int uaddr_len = sizeof uaddr, r;
+ int r;
struct socket *sock = sockfd_lookup(fd, &r);
if (!sock)
@@ -1050,9 +1050,8 @@ static struct socket *get_raw_socket(int fd)
goto err;
}
- r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
- &uaddr_len, 0);
- if (r)
+ r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa, 0);
+ if (r < 0)
goto err;
if (uaddr.sa.sll_family != AF_PACKET) {